| column | type | range / classes |
|---|---|---|
| content | large_string | lengths 0-6.46M |
| path | large_string | lengths 3-331 |
| license_type | large_string | 2 classes |
| repo_name | large_string | lengths 5-125 |
| language | large_string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4-6.46M |
| extension | large_string | 75 classes |
| text | string | lengths 0-6.46M |
###### Plotting Original Curves ######
# Packages
library(data.table)
library(ggplot2)
# Auxiliary functions
source("R/aux_functions.R")
# Function to evaluate function points; adds points evaluated at the beginning and end of the interval
points_of_original_function = function(address_hash, dt){
dt = dt[address == address_hash]
x_vec = dt$ts_normal
y_vec = dt$value
eval_points = unique(sort(c(c(0,1), x_vec)))
eval_step_function = unlist(lapply(eval_points, step_function, x_vec, y_vec))
dt = data.table(address = address_hash, label = dt$label[1], category = dt$category[1],
ts_normal = eval_points, value = eval_step_function, count = dt$count[1])
return(dt)
}
## Function to plot step curves ##
plot_original_curves = function(wallets, addr_sample, nobs, free_scale=FALSE){
for (category in names(addr_sample)){
addr_cat = addr_sample[[category]]
dt = wallets[address %in% addr_cat]
dt = rbindlist(lapply(unique(dt$address), points_of_original_function, dt))
plt = ggplot(data = dt) +
ggtitle(category) +
xlab("Time") +
ylab("Balance") +
geom_step(aes(x = ts_normal, y = value))
if(free_scale == TRUE){
plt = plt + facet_wrap(~address, nrow = 2, scales = "free_y")
file_plt = paste0("graphics/curves/original/free scale/original_curves_", gsub("/", "_", gsub(" ", "_", tolower(category))))
wd = 11
}else{
plt = plt + facet_wrap(~address, nrow = 2)
file_plt = paste0("graphics/curves/original/fixed scale/original_curves_", gsub("/", "_", gsub(" ", "_", tolower(category))))
wd = 10
}
ggsave(paste0(file_plt, "_", nobs, "obs.png"), plt, width = wd, height = 6)
}
}
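# Load the credit/debit data and reconstruct each wallet's balance curve as the cumulative sum of its increments, ordered by normalized timestamp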
wallets = data.table(readRDS("data/treated/cred_debt_complete.rds"))
setorder(wallets, address, ts_normal)
wallets[, value := cumsum(increments), by = "address"]
## 10 obs ##
# sample to plot
addr_sample = readRDS("data/treated/balanced samples/plt_sample_addr_10obs.rds")
addr_sample = lapply(addr_sample, function(x) x[1:6])
plot_original_curves(wallets, addr_sample, 10)
plot_original_curves(wallets, addr_sample, 10, free_scale = TRUE)
## 20 obs ##
# sample to plot
addr_sample = readRDS("data/treated/balanced samples/plt_sample_addr_20obs.rds")
addr_sample = lapply(addr_sample, function(x) x[1:6])
plot_original_curves(wallets, addr_sample, 20)
plot_original_curves(wallets, addr_sample, 20, free_scale = TRUE)
| path: /plot_original_curves.R | license_type: no_license | repo_name: brendaprallon/ripple-FDA | language: R | is_vendor: false | is_generated: false | length_bytes: 2,542 | extension: r |
#' Generate Polygons from Isobaths
#'
#' From an input bathymetry and chosen depths, turns areas between isobaths into polygons.
#' An input polygon may optionally be given to constrain boundaries.
#' The accuracy is dependent on the resolution of the bathymetry raster
#' (see \code{\link{load_Bathy}} to get high resolution data).
#'
#' @param Poly optional, single polygon inside which isobaths will be computed.
#' May be created using \code{\link{create_Polys}} or by subsetting an object obtained
#' using one of the \code{load_} functions (see examples).
#'
#' @param Bathy bathymetry raster with the appropriate projection, such as
#' \code{\link[CCAMLRGIS:SmallBathy]{SmallBathy}}.
#' It is highly recommended to use a raster of higher resolution (see \code{\link{load_Bathy}}).
#'
#' @param Depths numeric, vector of desired isobaths. For example,
#' \code{Depths=c(-2000,-1000,-500)}.
#'
#' @return Spatial object in your environment. Data within the resulting object contains
#' a polygon in each row. Columns are as follows: \code{ID} is a unique polygon identifier;
#' \code{Iso} is an isobath identifier; \code{Min} and \code{Max} are the depth range of the isobath;
#' \code{Grp} is a group identifier (e.g., a seamount constituted of several isobaths);
#' \code{AreaKm2} is the polygon area in square kilometers; \code{Labx} and \code{Laby} can be used
#' to label groups (see examples).
#'
#' @seealso
#' \code{\link{load_Bathy}}, \code{\link{create_Polys}}, \code{\link{get_depths}}.
#'
#' @examples
#'
#' # For more examples, see:
#' # https://github.com/ccamlr/CCAMLRGIS#46-get_iso_polys
#'
#'
#' Poly=create_Polys(Input=data.frame(ID=1,Lat=c(-55,-55,-61,-61),Lon=c(-30,-25,-25,-30)))
#' IsoPols=get_iso_polys(Bathy=SmallBathy,Poly=Poly,Depths=seq(-8000,0,length.out=10))
#'
#' plot(st_geometry(Poly))
#' for(i in unique(IsoPols$Iso)){
#' plot(st_geometry(IsoPols[IsoPols$Iso==i,]),col=rainbow(9)[i],add=TRUE)
#' }
#'
#'
#' @export
get_iso_polys=function(Bathy,Poly=NULL,Depths){
if(is.null(Poly)==FALSE){
Bathy=terra::crop(Bathy,Poly)
Bathy=terra::mask(Bathy,Poly)
}
Depths=sort(Depths)
B=stars::st_as_stars(Bathy)
Cs=stars::st_contour(B,breaks=Depths)
Cs=sf::st_cast(Cs,"POLYGON",warn=FALSE)
Cs=Cs%>%dplyr::filter(is.finite(Min)==TRUE & is.finite(Max)==TRUE)
Cs=Cs[,-1]
row.names(Cs)=NULL
Grp=sf::st_touches(Cs,sparse = TRUE)
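#Grp[[i]] holds the indices of polygons that touch polygon i; used below to group nested/adjacent isobaths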
#Add Isobath ID
tmp=data.frame(Min=sort(unique(Cs$Min)))
tmp$Iso=seq(1,nrow(tmp))
Cs=dplyr::left_join(Cs,tmp,by="Min")
#Add Group
Cs$Grp=NA
Cs$Grp[1]=1
for(i in seq(2,nrow(Cs))){
Gr=Grp[[i]]
if(length(Gr)==0){
Cs$Grp[i]=Cs$Grp[i-1]+1
}else{
if(is.na(Cs$Grp[i])){Cs$Grp[c(i,Gr)]=Cs$Grp[i-1]+1}
}
}
#Add area
Ar=round(st_area(Cs)/1000000,2)
Cs$AreaKm2=as.numeric(Ar)
#Add group label location
labs=st_coordinates(st_centroid(st_geometry(Cs)))
Cs$Labx=labs[,1]
Cs$Laby=labs[,2]
#Keep label location for shallowest polygon within group
tmp=st_drop_geometry(Cs)%>%dplyr::select(Iso,Grp,AreaKm2)
tmp2=tmp%>%dplyr::group_by(Grp)%>%dplyr::summarise(Iso2=max(Iso),AreaKm2=max(AreaKm2[Iso==max(Iso)]))
colnames(tmp2)[2]="Iso"
tmp2$L="Y"
tmp=dplyr::left_join(tmp,tmp2,by = c("Iso", "Grp","AreaKm2"))
Cs$Labx[which(is.na(tmp$L))]=NA
Cs$Laby[which(is.na(tmp$L))]=NA
Cs$ID=seq(1,nrow(Cs))
Cs=Cs[,c(9,3,1,2,5,6,7,8,4)]
return(Cs)
}
| path: /R/get_iso_polys.R | license_type: no_license | repo_name: ccamlr/CCAMLRGIS | language: R | is_vendor: false | is_generated: false | length_bytes: 3,393 | extension: r |
# read.dna() comes from the 'ape' package and ggsave() from 'ggplot2';
# plot_dist_test() is assumed to be defined or sourced elsewhere
library(ape)
library(ggplot2)
fig_path_sav = "C:\\Users\\orlov\\Desktop\\figures_sapovirus\\"
fig_path_nov = "C:\\Users\\orlov\\Desktop\\figures_norovirus\\"
sav = read.dna(as.character("C:\\Users\\orlov\\Documents\\term_project\\alignments\\sapovirus_genomes_full_aln_100gp_0.5_genotyped.fasta"), format="fasta", as.character=TRUE)
nov = read.dna(as.character("C:\\Users\\orlov\\Desktop\\norovirus\\norovirus_full_aln_100gp_0.5_genotyped.fasta"), format="fasta", as.character=TRUE)
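# Each call below compares pairwise distances between two genome regions; based on usage, the arguments
# appear to be plot_dist_test(alignment, start1, end1, start2, end2) in nucleotide coordinates, and l[[1]] is the plot that gets saved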
l = plot_dist_test(sav,1,2500,2501,5091)
ggsave(file=paste(fig_path_sav, "SaV_NS-NS.svg", sep=""), plot=l[[1]], width=10, height=8)
l = plot_dist_test(sav,1,5091,5092,7191)
ggsave(file=paste(fig_path_sav, "SaV_NS-VP12.svg", sep=""), plot=l[[1]], width=10, height=8)
l = plot_dist_test(sav,5092,6711,6712,7191)
ggsave(file=paste(fig_path_sav, "SaV_VP1-VP2.svg", sep=""), plot=l[[1]], width=10, height=8)
l = plot_dist_test(sav,1,6711,6712,7191)
ggsave(file=paste(fig_path_sav, "SaV_ORF1-ORF2.svg", sep=""), plot=l[[1]], width=10, height=8)
l = plot_dist_test(sav,5092,6711,6712,7191)
ggsave(file=paste(fig_path_sav, "SaV_VP1-VP12.svg", sep=""), plot=l[[1]], width=10, height=8)
l = plot_dist_test(sav,1,153,5092,7191)
ggsave(file=paste(fig_path_sav, "SaV_p11-VP12.svg", sep=""), plot=l[[1]], width=10, height=8)
l = plot_dist_test(sav,154,909,5092,7191)
ggsave(file=paste(fig_path_sav, "SaV_p28-VP12.svg", sep=""), plot=l[[1]], width=10, height=8)
l = plot_dist_test(sav,910,1935,5092,7191)
ggsave(file=paste(fig_path_sav, "SaV_NTPase-VP12.svg", sep=""), plot=l[[1]], width=10, height=8)
l = plot_dist_test(sav,1936,2754,5092,7191)
ggsave(file=paste(fig_path_sav, "SaV_p32-VP12.svg", sep=""), plot=l[[1]], width=10, height=8)
l = plot_dist_test(sav,2755,3093,5092,7191)
ggsave(file=paste(fig_path_sav, "SaV_VPg-VP12.svg", sep=""), plot=l[[1]], width=10, height=8)
l = plot_dist_test(sav,3094,5091,5092,7191)
ggsave(file=paste(fig_path_sav, "SaV_propol-VP12.svg", sep=""), plot=l[[1]], width=10, height=8)
l = plot_dist_test(nov,1,2541,2542,5082)
ggsave(file=paste(fig_path_nov, "NoV_NS-NS.svg", sep=""), plot=l[[1]], width=10, height=8)
l = plot_dist_test(nov,1,5082,5083,7431)
ggsave(file=paste(fig_path_nov, "NoV_NS-VP12.svg", sep=""), plot=l[[1]], width=10, height=8)
l = plot_dist_test(nov,5083,6687,6688,7431)
ggsave(file=paste(fig_path_nov, "NoV_VP1-VP2.svg", sep=""), plot=l[[1]], width=10, height=8)
l = plot_dist_test(nov,1,5082,5083,6687)
ggsave(file=paste(fig_path_nov, "NoV_NS-VP1.svg", sep=""), plot=l[[1]], width=10, height=8)
l = plot_dist_test(nov,1,5082,6688,7431)
ggsave(file=paste(fig_path_nov, "NoV_NS-VP2.svg", sep=""), plot=l[[1]], width=10, height=8)
l = plot_dist_test(nov,5083,5885,5886,6687)
ggsave(file=paste(fig_path_nov, "NoV_VP1-VP1.svg", sep=""), plot=l[[1]], width=10, height=8)
l = plot_dist_test(nov,6688,7060,7061,7431)
ggsave(file=paste(fig_path_nov, "NoV_VP2-VP2.svg", sep=""), plot=l[[1]], width=10, height=8)
l = plot_dist_test(nov,1,981,5083,7431)
ggsave(file=paste(fig_path_nov, "NoV_p48-VP12.svg", sep=""), plot=l[[1]], width=10, height=8)
l = plot_dist_test(nov,982,2079,5083,7431)
ggsave(file=paste(fig_path_nov, "NoV_NTPase-VP12.svg", sep=""), plot=l[[1]], width=10, height=8)
l = plot_dist_test(nov,2080,2607,5083,7431)
ggsave(file=paste(fig_path_nov, "NoV_p22-VP12.svg", sep=""), plot=l[[1]], width=10, height=8)
l = plot_dist_test(nov,2608,3006,5083,7431)
ggsave(file=paste(fig_path_nov, "NoV_VpG-VP12.svg", sep=""), plot=l[[1]], width=10, height=8)
l = plot_dist_test(nov,3007,3549,5083,7431)
ggsave(file=paste(fig_path_nov, "NoV_pro-VP12.svg", sep=""), plot=l[[1]], width=10, height=8)
l = plot_dist_test(nov,3550,5082,5083,7431)
ggsave(file=paste(fig_path_nov, "NoV_pol-VP12.svg", sep=""), plot=l[[1]], width=10, height=8)
| path: /make_pairwise_dist.r | license_type: no_license | repo_name: orlovartem/pairwise_distances | language: R | is_vendor: false | is_generated: false | length_bytes: 3,830 | extension: r |
test_rep_reportQualityCheckRawData <- function() {
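# Simulate 8 samples of 3,000 normally distributed log2-scale intensities (mean 11.5, sd 2.2), then back-transform to the raw scale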
normalData <- replicate(8, rnorm(n=3000, mean=11.5, sd=2.2))
normalData <- 2^normalData
## Test
reportQualityCheckRawData(
normalData,
file.path(getwd(), 'temp'),
stdout(), #file.path(getwd(), 'temp', 'out_test_rep_reportQualityCheckRawData.Rmd'),
"euclidean"
)
}
| path: /inst/unitTests/test_qck_raw_data.R | license_type: no_license | repo_name: rmylonas/Prots4Prots | language: R | is_vendor: false | is_generated: false | length_bytes: 386 | extension: r |
# ****************************************************************
# GOAL : Normalize with various methods:
# - Quantile normalization
# USAGE :
# ****************************************************************
################ load libraries ###############
suppressPackageStartupMessages(library("argparse"))
## Main logic
main <- function(){
##### Parse command line arguments ############
args <- check_options()
inputDir <- args$inputDir
outputfile <- args$outputfile
rnaClass <- args$rnaClass
################ Load libraries ###############
load_libraries()
################ Main logic ###################
## Get the input data
# NOTE: 'inputfile' is not defined by check_options() (only 'inputDir' is) and 'allDataOrig'
# is never used downstream; the counts used below come from get_counts_matrix(inputDir, rnaClass)
# cat("- Reading input file ...\n")
# allDataOrig <- data.frame(read.table(inputfile, header=TRUE, sep="\t", na.strings=c(NA, NaN, Inf), row.names=1))
# Create the output directories and files
cat("\n2) Create the output directories and files ...\n")
outputDir <- paste(dirname(outputfile),"/snormalized_counts", sep=''); system(paste("mkdir -p", outputDir, sep=' '))
basefilename <- tools::file_path_sans_ext(basename(outputfile))
extfilename <- tools::file_ext(outputfile)
# Get counts matrix
sampleCountsDF <- get_counts_matrix(inputDir, rnaClass)
# Print session info to the session's log file
logdir <- paste(dirname(outputfile),"/logs", sep='')
system(paste("mkdir -p ", logdir, sep=''))
session_logfile <- paste(logdir,"/session_info_",tools::file_path_sans_ext(basename(outputfile)),".log" ,sep='');
print_session_info(session_logfile)
}
############ USER DEFINED FUNCTIONS ##########
# Generate RLE plots
get_rle_plots <- function(sampleCountsDF, outputDir, dataType, color){
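# NOTE: relies on 'basefilename' being defined in the calling environment (it is not passed as an argument)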
# # Relative log expression (RLE) plot from ExpressionSet:
# - RLE plots were initially proposed to measure the overall quality of a dataset but can also be used to visualize the presence of unwanted batch effects in the data
# - Unwanted variation can be highly problematic and so its detection is often crucial
# - Relative log expression (RLE) plots are a powerful tool for visualising such variation in high dimensional data
# - RLE plots are particularly useful for assessing whether a procedure aimed at removing unwanted variation, i.e. a normalisation procedure, has been successful
# - These plots, while originally devised for gene expression data from microarrays, can also be used to reveal unwanted variation in single-cell expression data, where such variation can be problematic
# # If style is "full", as usual with boxplots:
# - The box shows the inter-quartile range and whiskers extend no more than 1.5 * IQR from the hinge (the 25th or 75th percentile)
# - Data beyond the whiskers are called outliers and are plotted individually
# - The median (50th percentile) is shown with a white bar
# Get the output dirs
rlePlotsDir <- paste(outputDir, "/rle_plots", sep=''); system(paste("mkdir -p", rlePlotsDir, sep=' '));
ylimvals <- c(-5,5)
# 1) # Create RLE plots for RAW counts
cat("\t9.1) Create RLE plots for RAW counts ...\n")
filter <- apply(sampleCountsDF, 1, function(x) length(x[x>1])>=2)
filtered <- sampleCountsDF[filter,]
set <- newSeqExpressionSet(as.matrix(filtered))
RLEplotFile <- paste(rlePlotsDir, "/", basefilename, "_",dataType,".png", sep='')
png(filename=RLEplotFile, height=900, width=1000, bg="white", res=100)
par(mar=c(20,4,1,1));
plotRLE(set, outline=FALSE,col=color, las=2, cex.axis=0.5, ylim=ylimvals)
dev.off()
}
# Get normalized reads
get_qn_reads <- function(fdds, sampleCountsDF, outputFile){
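# NOTE: this function is not called from main() and references objects that must already exist in the calling
# environment (ControlFiles, TreatmentFiles, conditionVals, rlePlotsDir, basefilename, ylimvals, outputDir, rnaClass)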
# 1) RLE plots for raw data
# 2) Get size factor normalized reads (SFN)
# 3) Get variance stabilizing transformation (VST) normalized reads
# Get annotation table
sampleFiles <- c(ControlFiles, TreatmentFiles)
sampleCondition <- c(rep(conditionVals[1], length(ControlFiles)), rep(conditionVals[2], length(TreatmentFiles)))
sampleDiagnosis <- c(rep(0, length(ControlFiles)), rep(1, length(TreatmentFiles)))
newSampleTable <- data.frame(sampleName = names(sampleCountsDF), condition = sampleCondition, diagnosis = sampleDiagnosis, row.names=names(sampleCountsDF))
# 1) # Create RLE plots for RAW counts
cat("\t9.1) Create RLE plots for RAW counts ...\n")
filter <- apply(sampleCountsDF, 1, function(x) length(x[x>1])>=2)
filtered <- sampleCountsDF[filter,]
set <- newSeqExpressionSet(as.matrix(filtered))
rawRLEplotFile <- paste(rlePlotsDir, "/", basefilename, "_raw.png", sep='')
png(filename=rawRLEplotFile, height=900, width=1000, bg="white", res=100)
par(mar=c(20,4,1,1));
plotRLE(set, outline=FALSE,col='navy',las=2, cex.axis=0.5, ylim=ylimvals)
dev.off()
# 2) Get quantile normalized reads (QNR)
cat("\t9.2) Get quantile normalized reads (QNR) ...\n")
suppressPackageStartupMessages(library("preprocessCore", warn.conflicts=FALSE, quietly=TRUE))
qnrCountsDF <- normalize.quantiles(as.matrix(sampleCountsDF))
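# normalize.quantiles() returns a plain matrix without dimnames; row and column names are restored further below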
qnrfilter <- apply(qnrCountsDF, 1, function(x) length(x[x>1])>=2)
qnrfiltered <- qnrCountsDF[qnrfilter,]
qnrset <- newSeqExpressionSet(as.matrix(qnrfiltered))
qnrrawRLEplotFile <- paste(rlePlotsDir, "/", basefilename, "_qnr.png", sep='')
png(filename=qnrrawRLEplotFile, height=900, width=1000, bg="white", res=100)
par(mar=c(20,4,1,1));
plotRLE(qnrset, outline=FALSE,col='deepskyblue',las=2, cex.axis=0.5, ylim=ylimvals)
dev.off()
# Get QNR related directories and filenames
qnrnormalizedDir <- paste(outputDir,'/qnr/',conditionVals[2],'_',conditionVals[1], sep='')
qnrtnormalizedDir <- paste(qnrnormalizedDir, "/transposed", sep='')
indqnrnormalizedDir <- paste(qnrnormalizedDir, "/qnr_normalized_counts", sep='')
qnrnormalizedOutFile <- paste(qnrnormalizedDir , "/", basefilename, "_qnr_normalized.txt", sep='')
qnrtnormalizedOutFile <- paste(qnrtnormalizedDir, "/", basefilename, "_featureInCols_qnr_normalized.txt", sep='')
system(paste("mkdir -p", qnrnormalizedDir, qnrtnormalizedDir, indqnrnormalizedDir, sep=' '))
# Convert rownames as feature column to save it in the csv file
qnrrownames <- rownames(sampleCountsDF[qnrfilter,])
qnrcolnames <- names(sampleCountsDF[qnrfilter,])
colnames(qnrfiltered) <- qnrcolnames
rownames(qnrfiltered) <- qnrrownames
qnrfiltered <- cbind(feature = rownames(qnrfiltered), qnrfiltered)
rownames(qnrfiltered) <- 1:nrow(qnrfiltered)
# Save the counts file to output csv file
write.table(qnrfiltered , file = qnrnormalizedOutFile , row.names = F, sep = '\t', quote = F)
write.table(t(qnrfiltered), file = qnrtnormalizedOutFile, col.names = F, sep = '\t', quote = F)
# Get the normalized individual count files
indvcmd <- paste("bash scripts/get_individual_countFiles.sh", qnrnormalizedOutFile, indqnrnormalizedDir, rnaClass, 1, 2, sep=' ')
system(indvcmd)
}
get_counts_matrix <- function(inputDir, rnaClass){
# Combine individual HTSeq count files into a matrix
# Source: https://wiki.bits.vib.be/index.php/NGS_RNASeq_DE_Exercise.4
# Get the list of all count files
allCountFiles <- list.files(inputDir)
# Subset the files here for specific files
subCountFiles <- allCountFiles # replace it with the subset file code
# Read each file as array element of DT and rename the last 2 cols
# we created a list of single sample tables
DT <- list()
for (i in 1:length(subCountFiles)){
# Get the absolute file name
infile = paste(inputDir, subCountFiles[i], sep = "/")
# Add it to the data table
DT[[subCountFiles[i]]] <- read.table(infile, header = F, stringsAsFactors = FALSE)
# Get the column names
colnames(DT[[subCountFiles[i]]]) <- c("feature", subCountFiles[i])
}
# Merge all elements based on first 'feature' columns
mergedDataTab <- DT[[subCountFiles[1]]]
# We now add each other table with the 'feature' column as key
for (i in 2:length(subCountFiles)) {
y <- DT[[subCountFiles[i]]]
z <- merge(mergedDataTab, y, by = c("feature"))
mergedDataTab <- z
}
# 'feature' column becomes rownames
rownames(mergedDataTab) <- mergedDataTab$feature
mergedDataTab <- mergedDataTab[,-1]
## add total counts per sample
mergedDataTab <- rbind(mergedDataTab, tot.counts=colSums(mergedDataTab))
# Remove rnaClass (mirna or allncrna) from column names
oldColNames <- names(mergedDataTab)
newColNames <- gsub(paste("_",rnaClass,"Counts.txt", sep=''), "" , oldColNames)
names(mergedDataTab) <- newColNames
# Remove HTseq specific rows
# __no_feature,__not_aligned,__too_low_Qual,__alignment_not_unique,__ambiguous
toDrop <- c('tot.counts','__no_feature','__not_aligned','__too_low_Qual','__alignment_not_unique','__ambiguous')
mergedDataTab <- mergedDataTab[ !(rownames(mergedDataTab) %in% toDrop), ]
return(mergedDataTab)
}
# Load Libraries
load_libraries <- function(){
# Load libraries at start
suppressPackageStartupMessages(library("preprocessCore", warn.conflicts=FALSE, quietly=TRUE))
suppressPackageStartupMessages(library("edgeR" , warn.conflicts=FALSE, quietly=TRUE))
suppressPackageStartupMessages(library("RUVSeq", warn.conflicts=FALSE, quietly=TRUE))
# suppressPackageStartupMessages(library("DESeq2", warn.conflicts=FALSE, quietly=TRUE))
}
# Print session info as log file formatted in tabular format
# Source: https://stackoverflow.com/questions/21967254/how-to-write-a-reader-friendly-sessioninfo-to-text-file
print_session_info <- function(session_logfile){
suppressPackageStartupMessages(library("devtools"))
suppressPackageStartupMessages(library("knitr"))
# Get all the session info to the variable
my_session_info <- devtools::session_info()
# Print it in the tabular format using knitr
writeLines(text = {
paste(sep = "\n", collapse = "",
paste0(rep("-", 80), collapse = ""),
paste(paste0(rep("-", 32), collapse = ""),
"R environment",
paste0(rep("-", 33), collapse = "")),
paste0(rep("-", 80), collapse = ""),
paste(knitr::kable(data.frame(setting = names(my_session_info$platform),
value = as.character(my_session_info$platform))), collapse = "\n"),
paste0(rep("-", 80), collapse = ""),
paste(paste0(rep("-", 35), collapse = ""),
"packages",
paste0(rep("-", 35), collapse = "")),
paste0(rep("-", 80), collapse = ""),
paste(knitr::kable(my_session_info$packages), collapse = "\n")
)
}, con = session_logfile)
}
check_options <- function(){
# Description of the script
desc <- sprintf("
----------------- SAMPLE USAGE ------------------
- Rscript scripts/R_heatmap.R -if=output/filtered_data/deseq2/mirna/results_DEseq2/ACC18mwt_over_ACC4mwt_DE_RESULTS.txt -of=output/filtered_data/deseq2/mirna/heatmaps_DEseq2/ACC18mwt_over_ACC4mwt_DE_heatmaps.txt -xc -yc
- Rscript scripts/R_heatmap.R -if=output/filtered_data/deseq2/mirna/results_DEseq2/Blood18mwt_over_Blood4mwt_DE_RESULTS.txt -of=output/filtered_data/deseq2/mirna/heatmaps_DEseq2/Blood18mwt_over_Blood4mwt_DE_heatmaps.txt
-------------------------------------------------
CONTACT:
Gaurav Jain
gaurav.jain@dzne.de
-------------------------------------------------\n
")
# create parser object
parser <- ArgumentParser(description=cat(desc))
# Add arguments
parser$add_argument("-id", "--inputDir" , dest="inputDir" , help="*Input counts directory", type="character", required=TRUE)
parser$add_argument("-of", "--outputfile" , dest="outputfile" , help="*Output file name" , type="character", required=TRUE)
parser$add_argument("-rc", "--rnaClass" , dest="rnaClass" , help=" String that will be used to remove suffix in count file names" , type="character")
# Print the help message only if no arguments are supplied
if(length(commandArgs(TRUE))==0){
cat(desc)
parser$print_help()
quit()
}
# get command line options, if help option encountered print help and exit,
# otherwise if options not found on command line then set defaults,
args <- parser$parse_args()
return(args)
}
## Call the main function in the end
main()
| path: /scripts/R_normalize_data.R | license_type: no_license | repo_name: gauravj49/BulkRnaseqDE | language: R | is_vendor: false | is_generated: false | length_bytes: 12,138 | extension: r |
#####################################################################
######## GETTING AND CLEANING DATA ###############################
### ASSIGNMENT NO. 1
## The objective of this analysis is to make the raw data tidy enough for further analysis.
##################################################################
### The data for this analysis was downloaded and saved in a local directory.
### The data was located on: https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
### The data was in a zipped folder and had to be unzipped
### Description of the data is available in this link http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
###########################################################
# Set the directory for ease of reading the raw data, writing tidy data and other outputs
setwd("D:/Coursera courses/Getting and Cleaning Data/Assignment/UCI HAR Dataset")
getwd() # confirms the directory set
############################
### Reading the data
#############################
# Importing the test data
X_Test <- read.table("test/X_test.txt")
Y_Test <- read.table("test/y_test.txt")
Subject_Test <- read.table("test/subject_test.txt")
# Importing the training data
X_Train <- read.table("train/X_train.txt")
Y_Train <- read.table("train/y_train.txt")
Subject_Train <- read.table("train/subject_train.txt")
###############################################
## TASK 1) Merge the training and test sets to create one data set.
#######################################
# binding together the data sets of X_Test with X_Train, Y_Test with Y_Train, and Subject_Test with Subject_Train to create one combined data set
Combined_Data <- rbind(X_Test, X_Train)
Combined_labels <- rbind(Y_Test, Y_Train)
All_Subjects <- rbind(Subject_Test, Subject_Train)
# The variables of the data sets appear not to be named.
#The names of the variables are provided in a separate file and thus the column names of the combined data set need to be renamed
Features <- read.table("features.txt")
colnames(Combined_Data) <- Features[,2]
names(Combined_Data)
###############################################
## TASK 2) Extracting only the measurements on the mean and standard deviation for each measurement
#######################################
# the variable names in the data sets containing mean and standard deviations are represented using mean() and std()
# the grepl function can identify the column names containing mean() and std()s
ColumnsOfInterest <- grepl("mean()",colnames(Combined_Data)) | grepl("std()",colnames(Combined_Data))
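# Note: in a regular expression "()" is an empty group, so the pattern "mean()" matches any name containing "mean" (including meanFreq() columns)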
# subsetting the columns containing mean() and std()
Data_Mean_Std <- Combined_Data[,ColumnsOfInterest]
###############################################
## TASK 3) Using descriptive activity names to name the activities in the data set
#######################################
# Reading the activities
Activities <- read.table("activity_labels.txt")
# the activity names are descriptive enough and thus no need of renaming the activity names provided with the raw data set
###############################################
## TASK 4) Appropriately labeling the data set with descriptive activity names.
#######################################
# The labels in the data set Combined_labels should be replaced with the activity names
str(Combined_labels) # this shows the labels are read as integer, better to convert them into factors
Labels_as_factors <- as.factor(Combined_labels[,1])
# Replacing the factored labels by the activity names
library(plyr)
Labels_as_factors <- mapvalues(Labels_as_factors,from = as.character(Activities[,1]), to = as.character(Activities[,2]))
# Verifying the length of the activity names labels with the number of rows of the data sets
length(Labels_as_factors)
dim(Data_Mean_Std)
# Combining the labels with the data sets using cbind function
Data_Mean_Std <- cbind(Labels_as_factors, Data_Mean_Std)
# Renaming the column name of the column containing the labels
colnames(Data_Mean_Std)[1] <- "activity"
# The ID of subjects should also be included in the data sets as they might be needed for further analysis downstream
Data_Mean_Std <- cbind(All_Subjects, Data_Mean_Std)
colnames(Data_Mean_Std)[1] <- "subject" # rename the column name
# using descriptive variable names
names(Data_Mean_Std)
# All variable names contain opening and closing brackets, i.e., '()'. I think this is redundant and needs to be removed from the variable names
# Also, column names contain a hyphen, i.e., '-'. In my opinion, it is desirable to use underscore instead, i.e., '_'
VarNames <- names(Data_Mean_Std)
VarNames <- sub("[)]", "", VarNames)
VarNames <- sub("[(]", "", VarNames)
VarNames <- sub("-", "_", VarNames)
VarNames <- sub("-", "_", VarNames)
# changing the column names with the descriptive variable names
colnames(Data_Mean_Std) <- VarNames
# Check for missing values
sum(as.numeric(sapply(Data_Mean_Std, function (x) {sum(is.na(x))}))) # if the output is zero, then the data contains no missing data
# Now the data set X_mean is tidy enough for further analysis
###############################################
## TASK 5) Creating a second, independent tidy data set with the average of each variable for each activity and each subject
#######################################
# The average of each variable for each subject can be obtained by reshaping the data
library(reshape2)
Melted_Data <- melt(Data_Mean_Std,id.vars=c("subject","activity")) # melt the data by the selected variables
Average_For_Subjects <- dcast(Melted_Data, subject + activity ~ ..., mean) # aggregate the data to find the average of each variable for each subject
# check for missing values
sum(as.numeric(sapply(Average_For_Subjects, function (x) {sum(is.na(x))})))
## Now the data set Average_For_Subjects is tidy enough for further analysis
###############################################
## Writing the tidy data set for submission as an answer to the assignment for the "course Getting and Cleaning Data"
#######################################
write.table(Average_For_Subjects, "Tidy_Data_Set.txt", row.names=FALSE)
#####################################################################
# Some further analysis on the tidy data.
# Let's try to apply some classification algorithm to determine whether the subjects are
# WALKING WALKING_UPSTAIRS WALKING_DOWNSTAIRS SITTING STANDING LAYING
# For this purpose, I will apply the naive Bayes classification algorithm
############################################
library(class)
library(e1071)
# performing the classification
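# Note: the classifier is trained and evaluated on the same full data set (no train/test split), so the rates below are resubstitution accuracies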
classifier<-naiveBayes((Data_Mean_Std[,3:ncol(Data_Mean_Std)]), factor(Data_Mean_Std[,2]))
# Misclassification rates
BayesGroup <- predict(classifier, (Data_Mean_Std[,3:ncol(Data_Mean_Std)]))
ctNAIVE <- table(BayesGroup, factor(Data_Mean_Std[,2]))
ctNAIVE
diag(prop.table(ctNAIVE , 1)) # returns the accuracy of the classification output
## WALKING WALKING_UPSTAIRS WALKING_DOWNSTAIRS SITTING STANDING LAYING
## 0.9201365 0.7467249 0.7759336 0.8093246 0.8025144 0.9579534
# The accuracy of the activity classification is very good, particularly for activities LAYING and WALKING
# total percent correct
sum(diag(prop.table(ctNAIVE)))
# 0.836489
# OVERALL, the accuracy of the classification output was found to be 84% which is pretty good
##########################################
##################################
#######################
###############
| path: /run_analysis.R | license_type: no_license | repo_name: filmonghere/Getting_And_Cleaning_Data_Ass | language: R | is_vendor: false | is_generated: false | length_bytes: 7,596 | extension: r |
#####################################################################
######## GETTING AND CLEANING DATA ###############################
### ASSIGNMENT NO. 1
## The objective of this analysis is to make the raw data tidy enough for further analysis.
##################################################################
### The data for this analysis was downloaded and saved in a local directory.
### The data was located on: https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
### The data was in a zipped folder and had to be unzipped
### Description of the data is available in this link http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
###########################################################
# Set the directory for easy of reading the raw data, writing tidy data and other outputs
setwd("D:/Coursera courses/Getting and Cleaning Data/Assignment/UCI HAR Dataset")
getwd() # confirms the directory set
############################
### Reading the data
#############################
# Importing the test data
X_Test <- read.table("test/X_test.txt")
Y_Test <- read.table("test/y_test.txt")
Subject_Test <- read.table("test/subject_test.txt")
# Importing the training data
X_Train <- read.table("train/X_train.txt")
Y_Train <- read.table("train/y_train.txt")
Subject_Train <- read.table("train/subject_train.txt")
###############################################
## TAST 1) Merge the training and test sets to create one data set.
#######################################
# binding together the data sets of X_Test with X_Train, Y_Test with Y_Test, and subject_test with subject_train to creat one test data set
Combined_Data <- rbind(X_Test, X_Train)
Combined_labels <- rbind(Y_Test, Y_Train)
All_Subjects <- rbind(Subject_Test, Subject_Train)
# The variables of the data sets appear to be not named.
#The names of the variables is provided in a separate file and thus the column names of the binded data sets need to be renamed
Features <- read.table("features.txt")
colnames(Combined_Data) <- Features[,2]
names(Combined_Data)
###############################################
## TASK 2) Extracting only the measurements on the mean and standard deviation for each measurement
#######################################
# the variable names in the data sets containing mean and standard deviations are represented using mean() and std()
# the grepl function can identify the column names containing mean() and std()s
ColumnsOfInterest <- grepl("mean()",colnames(Combined_Data)) | grepl("std()",colnames(Combined_Data))
# subseting the column names containg mean() and std()
Data_Mean_Std <- Combined_Data[,ColumnsOfInterest]
###############################################
## TASK 3) Using descriptive activity names to name the activities in the data set
#######################################
# Reading the activities
Activities <- read.table("activity_labels.txt")
# the activity names are descriptive enough and thus no need of renaming the activity names provided with the raw data set
###############################################
## TASK 4) Appropriately labeling the data set with descriptive activity names.
#######################################
# The labels in the data set Combined_labels should be replaced with the the activity names
str(Combined_labels) # this shows the labels are read as intiger, better to convert them into factors
Labels_as_factors <- as.factor(Combined_labels[,1])
# Replacing the factored lebels by the activity names
library(plyr)
Labels_as_factors <- mapvalues(Labels_as_factors,from = as.character(Activities[,1]), to = as.character(Activities[,2]))
# Verifying the length of the activity names labels with the number of rows of the data sets
length(Labels_as_factors)
dim(Data_Mean_Std)
# Combining the labels with the data sets using cbind function
Data_Mean_Std <- cbind(Labels_as_factors, Data_Mean_Std)
# Renaming the column name of the column containing the labels
colnames(Data_Mean_Std)[1] <- "activity"
# The ID of subjects should also be included in the data sets as they might be needed for further analysis down the stream
Data_Mean_Std <- cbind(All_Subjects, Data_Mean_Std)
colnames(Data_Mean_Std)[1] <- "subject" # rename the column name
# using descriptive variable names
names(Data_Mean_Std)
# All variable names contain an upen and closed brackets, i.e., '()'. I think this is redundant and needs to be removed from the variable names
# Also, column names contain a hyphen, i.e., '-'. In my opinion, it is desirable to use underscore instead, i.e., '_'
VarNames <- names(Data_Mean_Std)
VarNames <- sub("[)]", "", VarNames)
VarNames <- sub("[(]", "", VarNames)
VarNames <- sub("-", "_", VarNames)
VarNames <- sub("-", "_", VarNames)
# changing the column names with the descriptive variable names
colnames(Data_Mean_Std) <- VarNames
# Check for missing values
sum(as.numeric(sapply(Data_Mean_Std, function (x) {sum(is.na(x))}))) # if the output is zero, then the data contains no missing data
# Now the data set X_mean is tidy enough for further analysis
###############################################
## TASK 5) Creating a second, independent tidy data set with the average of each variable for each activity and each subject
#######################################
# The average of each variable for each subject can be obtained by reshaping the data
library(reshape2)
Melted_Data <- melt(Data_Mean_Std,id.vars=c("subject","activity")) # melt the data by the selected variables
Average_For_Subjects <- dcast(Melted_Data, subject + activity ~ ..., mean) # aggregate the data to find the average of each variable for each subject
# check for missing values
sum(as.numeric(sapply(Average_For_Subjects, function (x) {sum(is.na(x))})))
## Now the data set Average_For_Subjects is tidy enough for further analysis
###############################################
## Writing the tidy data set for submission as an answer to the assignment for the "course Getting and Cleaning Data"
#######################################
write.table(Average_For_Subjects, "Tidy_Data_Set.txt", row.names=FALSE)
#####################################################################
# Some further analysis on the tidy data.
# Let's try to apply some classification algorithm to determine whether the subjects are
# WALKING WALKING_UPSTAIRS WALKING_DOWNSTAIRS SITTING STANDING LAYING
# For this purpose, I will apply Bayes Classification algorithm
############################################
library(class)
library(e1071)
# performing the classification
classifier<-naiveBayes((Data_Mean_Std[,3:ncol(Data_Mean_Std)]), factor(Data_Mean_Std[,2]))
# Misclassification Rates
BayesGroup <- predict(classifier, (Data_Mean_Std[,3:ncol(Data_Mean_Std)]))
ctNAIVE <- table(BayesGroup, factor(Data_Mean_Std[,2]))
ctNAIVE
diag(prop.table(ctNAIVE , 1)) # returns the accuracy of the classification output
## WALKING WALKING_UPSTAIRS WALKING_DOWNSTAIRS SITTING STANDING LAYING
## 0.9201365 0.7467249 0.7759336 0.8093246 0.8025144 0.9579534
# The accuracy of the activity classification is very good, particularly for activities LAYING and WALKING
# total percent correct
sum(diag(prop.table(ctNAIVE)))
# 0.836489
# OVERALL, the accuracy of the classification output was found to be 84% which is pretty good
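# Side note (illustration only, made-up numbers): on a small 2x2 confusion table,
# diag(prop.table(ct, 1)) gives the share of correct predictions within each row of the
# table, and sum(diag(prop.table(ct))) gives the overall accuracy reported above.
ct_demo <- matrix(c(40, 10, 5, 45), nrow = 2, byrow = TRUE,
                  dimnames = list(predicted = c("A", "B"), actual = c("A", "B")))
diag(prop.table(ct_demo, 1))   # 40/50 = 0.8 and 45/50 = 0.9
sum(diag(prop.table(ct_demo))) # (40 + 45)/100 = 0.85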
##########################################
##################################
#######################
###############
|
## ----load-libraries------------------------------------------------------
# Load packages required for entire script
library(lubridate) #work with dates
#set working directory to ensure R can find the file we wish to import
#setwd("working-dir-path-here")
#Load csv file of 15 min meteorological data from Harvard Forest
#Factors=FALSE so strings, series of letters/ words/ numerals, remain characters
harMet_15Min <- read.csv(file="AtmosData/HARV/hf001-10-15min-m.csv",
stringsAsFactors = FALSE)
#what type of R object is our imported data?
class(harMet_15Min)
#what data classes are within our R object?
#What is our date-time field called?
str(harMet_15Min)
## ----view-date-structure-------------------------------------------------
#view field data class
class(harMet_15Min$datetime)
#view sample data
head(harMet_15Min$datetime)
## ----as-date-only, results="hide"----------------------------------------
#convert field to date format
har_dateOnly <- as.Date(harMet_15Min$datetime)
#view data
head(har_dateOnly)
## ----explore-as_date-----------------------------------------------------
#Convert character data to date (no time)
myDate <- as.Date("2015-10-19 10:15")
str(myDate)
head(myDate)
#what happens if the date has text at the end?
myDate2 <- as.Date("2015-10-19Hello")
str(myDate2)
head(myDate2)
## ----explore-POSIXct-----------------------------------------------------
#Convert character data to date and time.
timeDate <- as.POSIXct("2015-10-19 10:15")
str(timeDate)
head(timeDate)
## ----time-zone-----------------------------------------------------------
#view date - notice the time zone - MDT (mountain daylight time)
timeDate
## ----explore-POSIXct2----------------------------------------------------
unclass(timeDate)
## ----explore-POSIXlt-----------------------------------------------------
#Convert character data to POSIXlt date and time
timeDatelt<- as.POSIXlt("2015-10-19 10:15")
str(timeDatelt)
head(timeDatelt)
unclass(timeDatelt)
## ----view-date-----------------------------------------------------------
#view one date time entry
harMet_15Min$datetime[1]
## ----format-date---------------------------------------------------------
#convert single instance of date/time in format year-month-day hour:min:sec
as.POSIXct(harMet_15Min$datetime[1],format="%Y-%m-%dT%H:%M")
##The format of date-time MUST match the specified format or the field will not
# convert
as.POSIXct(harMet_15Min$datetime[1],format="%m-%d-%YT%H:%M")
## ----time-zone-HarMet----------------------------------------------------
#checking time zone of data
tz(harMet_15Min)
## ----assign-time-zone----------------------------------------------------
#assign time zone to just the first entry
as.POSIXct(harMet_15Min$datetime[1],
format = "%Y-%m-%dT%H:%M",
tz = "America/New_York")
## ----POSIX-convert-best-practice-code------------------------------------
#convert to date-time class
harMet_15Min$datetime <- as.POSIXct(harMet_15Min$datetime,
format = "%Y-%m-%dT%H:%M",
tz = "America/New_York")
#view format of the newly defined datetime data.frame column
str(harMet_15Min$datetime)
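# Illustrative aside (not part of the original lesson): a quick check, on made-up strings,
# of what happens when the format argument does and does not match the data. A mismatched
# format does not raise an error - as.POSIXct() silently returns NA.
demoStrings <- c("2015-10-19T10:15", "2015-10-19T10:30")
as.POSIXct(demoStrings, format = "%Y-%m-%dT%H:%M", tz = "America/New_York") # both parse
as.POSIXct(demoStrings, format = "%m/%d/%Y %H:%M", tz = "America/New_York") # both NA: format mismatch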
|
/TS02-Convert-to-Date-Time-Class-POSIX.R
|
no_license
|
mguarinello/NEON-R-Tabular-Time-Series
|
R
| false | false | 3,251 |
r
|
|
geigen <- function(Amat, Bmat, Cmat)
{
# solve the generalized eigenanalysis problem
#
#  max {tr L'AM / sqrt[tr L'BL tr M'CM]} w.r.t. L and M
#
# Arguments:
# AMAT ... p by q matrix
# BMAT ... order p symmetric positive definite matrix
# CMAT ... order q symmetric positive definite matrix
# Returns:
# VALUES ... vector of length s = min(p,q) of eigenvalues
# LMAT ... p by s matrix L
# MMAT ... q by s matrix M
# last modified 2007 to use svd2; previous mod 18 May 2001
Bdim <- dim(Bmat)
Cdim <- dim(Cmat)
if (Bdim[1] != Bdim[2]) stop('BMAT is not square')
if (Cdim[1] != Cdim[2]) stop('CMAT is not square')
p <- Bdim[1]
q <- Cdim[1]
s <- min(c(p,q))
if (max(abs(Bmat - t(Bmat)))/max(abs(Bmat)) > 1e-10) stop(
'BMAT not symmetric.')
if (max(abs(Cmat - t(Cmat)))/max(abs(Cmat)) > 1e-10) stop(
'CMAT not symmetric.')
Bmat <- (Bmat + t(Bmat))/2
Cmat <- (Cmat + t(Cmat))/2
Bfac <- chol(Bmat)
Cfac <- chol(Cmat)
Bfacinv <- solve(Bfac)
Cfacinv <- solve(Cfac)
Dmat <- t(Bfacinv) %*% Amat %*% Cfacinv
if (p >= q) {
result <- svd2(Dmat)
values <- result$d
Lmat <- Bfacinv %*% result$u
Mmat <- Cfacinv %*% result$v
} else {
result <- svd2(t(Dmat))
values <- result$d
Lmat <- Bfacinv %*% result$v
Mmat <- Cfacinv %*% result$u
}
geigenlist <- list (values, Lmat, Mmat)
names(geigenlist) <- c('values', 'Lmat', 'Mmat')
return(geigenlist)
}
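#  Illustrative usage sketch (not part of the original fda source).  geigen() calls
#  svd2(), a more robust SVD wrapper defined elsewhere in the fda package; purely for
#  this self-contained example we substitute base svd() in its place.
svd2 <- svd
Amat <- matrix(c(1, 2, 3, 4, 5, 6), nrow = 3)  #  3 by 2
Bmat <- diag(3) + 0.1                          #  3 by 3 symmetric positive definite
Cmat <- diag(2) + 0.1                          #  2 by 2 symmetric positive definite
res  <- geigen(Amat, Bmat, Cmat)
res$values                                     #  min(p, q) = 2 generalized values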
|
/R/geigen.R
|
no_license
|
bonniewan/fda
|
R
| false | false | 1,512 |
r
|
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 7494
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 7494
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Marin/trafficlight-controller/tlc04-uniform-depth-7.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 2817
c no.of clauses 7494
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 7494
c
c QBFLIB/Miller-Marin/trafficlight-controller/tlc04-uniform-depth-7.qdimacs 2817 7494 E1 [] 0 16 2779 7494 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Miller-Marin/trafficlight-controller/tlc04-uniform-depth-7/tlc04-uniform-depth-7.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false | false | 677 |
r
|
|
expected_values[[runno]] <- list(lik = c(-12044.25, 24106.5, 24158.09), param = c(6.9114,
5.522, 4.235, 0.0099988, 0.19252), stdev_param = c(0.29486, 0.26095,
0.36142, 0.30731, NA), sigma = c(prop.err = 0.19252), parFixedDf = structure(list(
Estimate = c(lVM = 6.91142325517718, lKM = 5.52204347329164,
lVc = 4.23501048572482, lKA = 0.0099987825821457, prop.err = 0.192521618882743
), SE = c(lVM = 0.0464412227788152, lKM = 0.0809189581741544,
lVc = 0.0279708927552252, lKA = 0.0325457651850827, prop.err = NA
), "%RSE" = c(0.671948758803439, 1.46538067955339, 0.660468087375655,
325.497278470661, NA), "Back-transformed" = c(1003.67471145213,
250.145681286941, 69.062402899074, 1.01004893743181, 0.192521618882743
), "CI Lower" = c(916.351597537404, 213.459096664375, 65.378178670185,
0.947631329754115, NA), "CI Upper" = c(1099.31922322795,
293.137480877155, 72.954242397229, 1.07657780401988, NA),
"BSV(CV%)" = c(26.5454934784181, 37.3548378553605, 30.1384992149858,
31.4712001641019, NA), "Shrink(SD)%" = c(17.5510438311191,
34.6545912986663, 2.08214460719865, 10.8814496446127, NA)), class = "data.frame", row.names = c("lVM",
"lKM", "lVc", "lKA", "prop.err")), omega = structure(c(0.0869415452535236,
0, 0, 0, 0, 0.0680943688621909, 0, 0, 0, 0, 0.130623260333984,
0, 0, 0, 0, 0.0944403870778222), .Dim = c(4L, 4L), .Dimnames = list(
c("eta.Vc", "eta.VM", "eta.KM", "eta.KA"), c("eta.Vc", "eta.VM",
"eta.KM", "eta.KA"))), time = structure(list(saem = 117.367,
setup = 6.90591099999993, table = 0.0979999999999563, cwres = 7.02300000000014,
covariance = 0.0720000000001164, other = 0.212088999999992), class = "data.frame", row.names = "elapsed"),
objDf = structure(list(OBJF = 19899.9801798792, AIC = 24106.5020142261,
BIC = 24158.0854424893, "Log-likelihood" = -12044.2510071131,
"Condition Number" = 11.0249590198241), row.names = "FOCEi", class = "data.frame"))
|
/inst/models/values-1.1.1.3-U029_saem-unix.R
|
no_license
|
nlmixrdevelopment/nlmixr.examples
|
R
| false | false | 1,979 |
r
|
|
#' @rdname bcbioSingleCell
#' @aliases NULL
#' @exportClass bcbioSingleCell
#' @usage NULL
bcbioSingleCell <- setClass(
Class = "bcbioSingleCell",
contains = "SingleCellExperiment"
)
setOldClass(Classes = c("grouped_df", "tbl_df", "tibble"))
|
/R/AllClasses.R
|
permissive
|
chitrita/bcbioSingleCell
|
R
| false | false | 253 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/writing-options.R
\name{writing-options}
\alias{writing-options}
\title{Writing data options}
\description{
Writing data options
}
\examples{
\dontrun{
# write to disk
(x <- HttpClient$new(url = "https://httpbin.org"))
f <- tempfile()
res <- x$get("get", disk = f)
res$content # when using write to disk, content is a path
readLines(res$content)
close(file(f))
# streaming response
(x <- HttpClient$new(url = "https://httpbin.org"))
res <- x$get('stream/50', stream = function(x) cat(rawToChar(x)))
res$content # when streaming, content is NULL
## Async
(cc <- Async$new(
urls = c(
'https://httpbin.org/get?a=5',
'https://httpbin.org/get?foo=bar',
'https://httpbin.org/get?b=4',
'https://httpbin.org/get?stuff=things',
'https://httpbin.org/get?b=4&g=7&u=9&z=1'
)
))
files <- replicate(5, tempfile())
(res <- cc$get(disk = files, verbose = TRUE))
lapply(files, readLines)
## Async varied
### disk
f <- tempfile()
g <- tempfile()
req1 <- HttpRequest$new(url = "https://httpbin.org/get")$get(disk = f)
req2 <- HttpRequest$new(url = "https://httpbin.org/post")$post(disk = g)
req3 <- HttpRequest$new(url = "https://httpbin.org/get")$get()
(out <- AsyncVaried$new(req1, req2, req3))
out$request()
out$content()
readLines(f)
readLines(g)
close(file(f))
close(file(g))
### stream - to console
fun <- function(x) cat(rawToChar(x))
req1 <- HttpRequest$new(url = "https://httpbin.org/get"
)$get(query = list(foo = "bar"), stream = fun)
req2 <- HttpRequest$new(url = "https://httpbin.org/get"
)$get(query = list(hello = "world"), stream = fun)
(out <- AsyncVaried$new(req1, req2))
out$request()
out$content()
### stream - to an R object
lst <- c()
fun <- function(x) lst <<- c(lst, x)
req1 <- HttpRequest$new(url = "https://httpbin.org/get"
)$get(query = list(foo = "bar"), stream = fun)
req2 <- HttpRequest$new(url = "https://httpbin.org/get"
)$get(query = list(hello = "world"), stream = fun)
(out <- AsyncVaried$new(req1, req2))
out$request()
lst
cat(rawToChar(lst))
}
}
|
/man/writing-options.Rd
|
permissive
|
hlapp/crul
|
R
| false | true | 2,071 |
rd
|
|
setwd("D:\\Education\\stepupanalytics_Blog\\linear regression")
help(package = "MASS")
install.packages("MASS")
install.packages("ISLR")
library(MASS) #loads dataset from the book MASS
library(ISLR) #dataset by Statistical Learning professors
##Simple Linear Regression
# reading data in R
data(Boston)
# name of the variables of Boston dataset
names(Boston)
# description of the Boston dataset
?Boston
# viewing visually the Boston dataset
View(Boston)
# dimension of the Boston Dataset
dim(Boston)
# plotting the variables to see if there is any relationship:
plot(medv~rm,main = "Boston", data = Boston)
plot(medv~lstat, main = "Boston", data = Boston) #as the share of lower-status people decreases, the median value of houses increases
# fitting of model
basemodel<-lm(medv~lstat, Boston)
basemodel
summary(basemodel)
anova(basemodel)
#add a line to the fit
abline(basemodel,col="red")
#see the components of fit
names(basemodel)
# [1] "coefficients" "residuals" "effects" "rank" "fitted.values" "assign"
# [7] "qr" "df.residual" "xlevels" "call" "terms" "model"
basemodel$coefficients
# (Intercept) lstat
# 34.5538409 -0.9500494
#95% confidence interval
confint(basemodel)
# 2.5 % 97.5 %
# (Intercept) 33.448457 35.6592247
# lstat -1.026148 -0.8739505
#predict medv (response) for these 3 values of lstat (predictor).
#also show confidence intervals
predict(basemodel,data.frame(lstat=c(5,10,15)),interval="confidence")
# fit lwr upr
# 1 29.80359 29.00741 30.59978
# 2 25.05335 24.47413 25.63256
# 3 20.30310 19.73159 20.87461
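# Illustrative extra (not in the original post): the same predict() call with
# interval = "prediction" gives prediction intervals for individual observations,
# which are wider than the confidence intervals for the mean response shown above.
predict(basemodel,data.frame(lstat=c(5,10,15)),interval="prediction")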
# Skill-Up Scale-Up !!!
|
/linear.R
|
no_license
|
mdzishanhussain/Linear-Regression-in-R
|
R
| false | false | 1,732 |
r
|
setwd("D:\\Education\\stepupanalytics_Blog\\linear regression")
help(package = "MASS")
install.packages("MASS")
install.packages("ISLR")
library(MASS) #loads dataset from the book MASS
library(ISLR) #dataset by Statistical Learning professors
##Simple Linear Regression
# reading data in R
data(Boston)
# name of the variables of Boston dataset
names(Boston)
# description of the Boston dataset
?Boston
# viewing visually the Boston dataset
View(Boston)
# dimension of the Boston Dataset
dim(Boston)
# plotting the variables to see if there is any relationship:
plot(medv~rm,main = "Boston", data = Boston)
plot(medv~lstat, main = "Boston", data = Boston) #as lower status people decrease, median value of houses increase
# fitting of model
basemodel<-lm(medv~lstat, Boston)
basemodel
summary(basemodel)
anova(basemodel)
#add a line to the fit
abline(basemodel,col="red")
#see the components of fit
names(basemodel)
# [1] "coefficients" "residuals" "effects" "rank" "fitted.values" "assign"
# [7] "qr" "df.residual" "xlevels" "call" "terms" "model"
basemodel$coefficients
# (Intercept) lstat
# 34.5538409 -0.9500494
#95% confidence interval
confint(basemodel)
# 2.5 % 97.5 %
# (Intercept) 33.448457 35.6592247
# lstat -1.026148 -0.8739505
#predict medv (response) for these 3 values of lstat (predictor).
#also show confidece intervals
predict(basemodel,data.frame(lstat=c(5,10,15)),interval="confidence")
# fit lwr upr
# 1 29.80359 29.00741 30.59978
# 2 25.05335 24.47413 25.63256
# 3 20.30310 19.73159 20.87461
# Skill-Up Scale-Up !!!
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get.R
\name{get}
\alias{get}
\title{Gets a Synapse entity}
\usage{
get(synid)
}
\arguments{
\item{synid}{Synapse Id of an entity}
}
\value{
Entity
}
\description{
Gets a Synapse entity
}
\examples{
library(synapserprototype)
entity <- get('syn123')
}
|
/man/get.Rd
|
no_license
|
thomasyu888/synapserprototype
|
R
| false | true | 329 |
rd
|
|
#### run this file for the pre-registered (confirmatory) analysis
#### the main outputs are the graphs
rm(list=ls())
library(dplyr)
library(ggplot2)
library(MatchIt)
library(multiwayvcov)
library(plm)
library(lmtest)
library(clubSandwich)
library(moments)
library(doParallel)
set.seed(123456789) #not needed for final version?
### this is executed in the /report subdirectory, need to ..
### set this switch to TRUE if you want to produce a final report - this will save results matrices in a static directory
final_verion_swith <- TRUE
RI_conf_switch <- TRUE
glob_repli <- 10000
glob_sig <- c(.025,.975) ### 5 percent conf intervals
path <- strsplit(getwd(), "/report")[[1]]
endline <- read.csv(paste(path,"data/public/endline.csv", sep="/"), stringsAsFactors = TRUE)
endline$a21 <- as.character(endline$region)
endline$region <- NULL
endline$interviewed <- TRUE
dta_plan <- read.csv(paste(path,"questionnaire/sampling_list_hh.csv", sep="/"), stringsAsFactors = TRUE)
endline <- merge(dta_plan, endline, by="hhid", all.x=T)
endline$interviewed[is.na(endline$interviewed)] <- FALSE
endline$a21 <- endline$a21.x
endline$a21.y <- NULL
endline$information <- NULL
endline$deliberation <- NULL
endline$district_baraza <- NULL
endline$attriter <- !endline$interviewed
########################################################### functions declarations #####################################################
trim <- function(var, dataset, trim_perc=.05) {
### function for trimming a variable in a dataset - replaces with NA
dataset[var][dataset[var] < quantile(dataset[var],c(trim_perc/2,1-(trim_perc/2)), na.rm=T)[1] | dataset[var] > quantile(dataset[var], c(trim_perc/2,1-(trim_perc/2)),na.rm=T)[2] ] <- NA
return(dataset)
}
FW_index <- function(indexer,revcols = NULL,data_orig) {
### function to make family wise index using covariance as weights (following http://cyrussamii.com/?p=2656)
### FW_index(c("baraza.B2","baraza.B3","baraza.B4.1","inputs","baraza.B5.2","baraza.B5.3"),data=endline)
##indexer <- c("baraza.B2","baraza.B3","baraza.B4.1","inputs","baraza.B5.2","baraza.B5.3")
##data_orig <- endline
data <- data_orig[complete.cases(data_orig[indexer]),]
x <- data[indexer]
if(length(revcols)>0){
x[,revcols] <- -1*x[,revcols]
}
for(j in 1:ncol(x)){
x[,j] <- (x[,j] - mean(x[,j]))/sd(x[,j])
}
i.vec <- as.matrix(rep(1,ncol(x)))
Sx <- cov(x)
data$index <- t(solve(t(i.vec)%*%solve(Sx)%*%i.vec)%*%t(i.vec)%*%solve(Sx)%*%t(x))
data <- merge(data_orig,data[c("hhid","index")], by="hhid",all.x=T)
return( data )
}
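### illustrative sketch only (not part of the analysis): FW_index() on a small fake data
### set, showing that it returns the input data with an extra inverse-covariance weighted
### "index" column; the variable names below are made up.
fake <- data.frame(hhid = 1:20,
                   out1 = sin(1:20),
                   out2 = cos(1:20),
                   out3 = sin(1:20) * cos(1:20))
fake_indexed <- FW_index(c("out1", "out2", "out3"), data_orig = fake)
summary(fake_indexed$index)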
### wrapper function to make summary forest plots
credplot.gg <- function(d,units, hypo, axlabs, lim){
# d is a data frame with 5 columns
# d$x gives variable names
# d$y gives center point
# d$ylo gives lower limits
# d$yhi gives upper limits
# d$grp gives the grouping variable used for colour
require(ggplot2)
p <- ggplot(d, aes(x=x, y=y, ymin=ylo, ymax=yhi, colour=as.factor(grp)))+
geom_pointrange(position=position_dodge(-.4), size=.5)+
geom_hline(yintercept = 0, linetype=2)+
coord_flip(ylim = c(-lim,lim))+
xlab('') + ylab(units)+ labs(title=hypo) + theme_minimal()+ theme(axis.text=element_text(size=18),
axis.title=element_text(size=14,face="bold"),legend.text=element_text(size=18), plot.title = element_text(size=22,hjust = 0.5), legend.title=element_blank())+
geom_errorbar(aes(ymin=ylo, ymax=yhi),position=position_dodge(-.4),width=0,cex=1.5) + scale_colour_manual(values = c("#CCCCCC", "#6E8DAB", "#104E8B", "#000000")) + scale_x_discrete(labels=axlabs)
return(p)
}
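### illustrative usage only (made-up effect sizes): the input format credplot.gg() expects
d_demo <- data.frame(x   = rep(c("outcome 1", "outcome 2"), 2),
                     y   = c(0.10, -0.05, 0.20, 0.00),
                     ylo = c(-0.10, -0.20, 0.05, -0.15),
                     yhi = c(0.30, 0.10, 0.35, 0.15),
                     grp = rep(c("model A", "model B"), each = 2))
# credplot.gg(d_demo, units = "effect size", hypo = "demo", axlabs = c("outcome 1", "outcome 2"), lim = 0.5)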
## function definitions
RI_conf_sc <- function(i,outcomes, baseline_outcomes, dta_sim , ctrls = NULL, nr_repl = 1000, sig = c(.025,.975)) {
### a function to estimate confidence intervals using randomization inference following Gerber and Green pages 66-71 and 83.
#RI_conf_dist(2,outcomes, baseline_outcomes, subset(dta, ((information == 1 & deliberation==1) | district_baraza == 1)) , ctrls = "a21", nr_repl = 1000, sig = c(.025,.975))
#dta_sim <- dta[dta$district_baraza == 0 ,]
#ctrls <- "a21"
#nr_repl <- 1000
#sig <- c(.025,.975)
if (is.null(baseline_outcomes)) {
formula1 <- as.formula(paste(outcomes[i],paste("information:deliberation",ctrls,sep="+"),sep="~"))
formula2 <- as.formula(paste(outcomes[i],paste("information*deliberation",ctrls,sep="+"),sep="~"))
} else {
formula1 <- as.formula(paste(paste(outcomes[i],paste("information:deliberation",ctrls,sep="+"),sep="~"),baseline_outcomes[i],sep="+"))
formula2 <- as.formula(paste(paste(outcomes[i],paste("information*deliberation",ctrls,sep="+"),sep="~"),baseline_outcomes[i],sep="+"))
}
dta_sim <- dta_sim %>% mutate(clusterID = group_indices(., district, subcounty))
### get ATEs for two different models
ols_1 <- lm(formula1, data=dta_sim[dta_sim$deliberation == dta_sim$information,])
ols_2 <- lm(formula2, data=dta_sim)
dta_sim$dep <- as.numeric(unlist(dta_sim[as.character(formula1[[2]])]))
treat_nrs <- table(data.frame(aggregate(dta_sim[c("information","deliberation")], list(dta_sim$clusterID),mean))[,2:3])
### calculate potential outcomes
### for model 1 (sc effect)
dta_sim$pot_out_0_1 <- NA
dta_sim$pot_out_0_1[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0] <- dta_sim$dep[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0]
dta_sim$pot_out_0_1[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1] <- dta_sim$dep[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1] - coef(ols_1)["information:deliberation"]
dta_sim$pot_out_1_1 <- NA
dta_sim$pot_out_1_1[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1] <- dta_sim$dep[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1]
dta_sim$pot_out_1_1[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0] <- dta_sim$dep[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0] + coef(ols_1)["information:deliberation"]
### for model 2 (factorial design)
### potential outcomes for I=0 D=0
dta_sim$pot_out_00_2 <- NA
dta_sim$pot_out_00_2[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0] <- dta_sim$dep[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0]
dta_sim$pot_out_00_2[dta_sim["information"] == 1 & dta_sim["deliberation"] == 0] <- dta_sim$dep[dta_sim["information"] == 1 & dta_sim["deliberation"] == 0] - coef(ols_2)["information"]
dta_sim$pot_out_00_2[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1] <- dta_sim$dep[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1] - coef(ols_2)["information"] - coef(ols_2)["deliberation"] - coef(ols_2)["information:deliberation"]
dta_sim$pot_out_00_2[dta_sim["information"] == 0 & dta_sim["deliberation"] == 1] <- dta_sim$dep[dta_sim["information"] == 0 & dta_sim["deliberation"] == 1] - coef(ols_2)["deliberation"]
### potential outcomes for I=1 and D=1
dta_sim$pot_out_11_2 <- NA
dta_sim$pot_out_11_2[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1] <- dta_sim$dep[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1]
dta_sim$pot_out_11_2[dta_sim["information"] == 0 & dta_sim["deliberation"] == 1] <- dta_sim$dep[dta_sim["information"] == 0 & dta_sim["deliberation"] == 1] + coef(ols_2)["information"]
dta_sim$pot_out_11_2[dta_sim["information"] == 1 & dta_sim["deliberation"] == 0] <- dta_sim$dep[dta_sim["information"] == 1 & dta_sim["deliberation"] == 0] + coef(ols_2)["deliberation"]
dta_sim$pot_out_11_2[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0] <- dta_sim$dep[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0] + coef(ols_2)["information"] + coef(ols_2)["deliberation"] + coef(ols_2)["information:deliberation"]
### potential outcomes for I=1 and D=0
dta_sim$pot_out_10_2 <- NA
dta_sim$pot_out_10_2[dta_sim["information"] == 1 & dta_sim["deliberation"] == 0] <- dta_sim$dep[dta_sim["information"] == 1 & dta_sim["deliberation"] == 0]
dta_sim$pot_out_10_2[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1] <- dta_sim$dep[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1] - coef(ols_2)["deliberation"] - coef(ols_2)["information:deliberation"]
dta_sim$pot_out_10_2[dta_sim["information"] == 0 & dta_sim["deliberation"] == 1] <- dta_sim$dep[dta_sim["information"] == 0 & dta_sim["deliberation"] == 1] + coef(ols_2)["information"] - coef(ols_2)["deliberation"]
dta_sim$pot_out_10_2[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0] <- dta_sim$dep[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0] + coef(ols_2)["information"]
### potential outcomes for I=0 and D=1
dta_sim$pot_out_01_2 <- NA
dta_sim$pot_out_01_2[dta_sim["information"] == 0 & dta_sim["deliberation"] == 1] <- dta_sim$dep[dta_sim["information"] == 0 & dta_sim["deliberation"] == 1]
dta_sim$pot_out_01_2[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1] <- dta_sim$dep[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1] - coef(ols_2)["information"] - coef(ols_2)["information:deliberation"]
dta_sim$pot_out_01_2[dta_sim["information"] == 1 & dta_sim["deliberation"] == 0] <- dta_sim$dep[dta_sim["information"] == 1 & dta_sim["deliberation"] == 0] - coef(ols_2)["information"] + coef(ols_2)["deliberation"]
dta_sim$pot_out_01_2[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0] <- dta_sim$dep[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0] + coef(ols_2)["deliberation"]
oper <- foreach (repl = 1:nr_repl,.combine=rbind) %dopar% {
#do the permutations
perm_treat <- data.frame(cbind(sample(c(rep("C",treat_nrs[1,1]), rep("D",treat_nrs[1,2]), rep("I",treat_nrs[2,1]),rep("B",treat_nrs[2,2]))),names(table(dta_sim$clusterID))))
names(perm_treat) <- c("perm_treat","clusterID")
dta_perm <- merge(dta_sim, perm_treat, by.x="clusterID", by.y="clusterID")
dta_perm$information <- ifelse(dta_perm$perm_treat %in% c("I","B"), 1, 0)
dta_perm$deliberation <- ifelse(dta_perm$perm_treat %in% c("D","B"), 1, 0)
dta_perm$dep_1 <- NA
dta_perm$dep_1[dta_perm["information"] ==1 & dta_perm["deliberation"] ==1] <- dta_perm$pot_out_1_1[dta_perm["information"] ==1 & dta_perm["deliberation"] ==1]
dta_perm$dep_1[dta_perm["information"] ==0 & dta_perm["deliberation"] ==0] <- dta_perm$pot_out_0_1[dta_perm["information"] ==0 & dta_perm["deliberation"] ==0]
dta_perm$dep_2 <- NA
dta_perm$dep_2[dta_perm["information"] ==1 & dta_perm["deliberation"] ==1] <- dta_perm$pot_out_11_2[dta_perm["information"] ==1 & dta_perm["deliberation"] ==1]
dta_perm$dep_2[dta_perm["information"] ==0 & dta_perm["deliberation"] ==0] <- dta_perm$pot_out_00_2[dta_perm["information"] ==0 & dta_perm["deliberation"] ==0]
dta_perm$dep_2[dta_perm["information"] ==1 & dta_perm["deliberation"] ==0] <- dta_perm$pot_out_10_2[dta_perm["information"] ==1 & dta_perm["deliberation"] ==0]
dta_perm$dep_2[dta_perm["information"] ==0 & dta_perm["deliberation"] ==1] <- dta_perm$pot_out_01_2[dta_perm["information"] ==0 & dta_perm["deliberation"] ==1]
### p-value
exceed1 <- abs(coef(lm(formula1, data=dta_perm))["information:deliberation"]) > abs(coef(ols_1)["information:deliberation"])
exceed2 <- abs(coef(lm(formula2, data=dta_perm))["information"]) > abs(coef(ols_2)["information"])
exceed3 <- abs(coef(lm(formula2, data=dta_perm))["deliberation"]) > abs(coef(ols_2)["deliberation"])
dta_perm[outcomes[i]] <- dta_perm$dep_1
r1 <-coef(lm(formula1, data=dta_perm[dta_perm$deliberation == dta_perm$information,]))["information:deliberation"]
dta_perm[outcomes[i]] <- dta_perm$dep_2
r2 <-coef(lm(formula2, data=dta_perm))["information"]
r3 <- coef(lm(formula2, data=dta_perm))["deliberation"]
oper <- return(c(r1,r2,r3, exceed1, exceed2, exceed3))
}
return(list(conf_1 = quantile(oper[,1],sig, na.rm=T),conf_2 = quantile(oper[,2],sig, na.rm=T),conf_3 = quantile(oper[,3],sig, na.rm=T), pval_1= (sum(oper[,4])/nr_repl), pval_2= (sum(oper[,5])/nr_repl), pval_3= (sum(oper[,6])/nr_repl)))
}
RI_conf_dist <- function(i,outcomes, baseline_outcomes, dta_sim , ctrls = NULL, nr_repl = 1000, sig = c(.025,.975)) {
#RI_conf_dist(2,outcomes, baseline_outcomes, subset(dta, ((information == 1 & deliberation==1) | district_baraza == 1)) , ctrls = "a21", nr_repl = 1000, sig = c(.025,.975))
#dta_sim <- subset(dta, ((information == 1 & deliberation==1) | district_baraza == 1))
#ctrls <- "a21"
#nr_repl <- 1000
#sig <- c(.025,.975)
### a function to estimate confidence intervals using randomization inference following Gerber and Green pages 66-71 and 83.
if (is.null(baseline_outcomes)) {
formula <- as.formula(paste(outcomes[i],paste("district_baraza",ctrls,sep="+"),sep="~"))
} else {
formula <- as.formula(paste(paste(outcomes[i],paste("district_baraza",ctrls,sep="+"),sep="~"),baseline_outcomes[i],sep="+"))
}
dta_sim <- dta_sim %>% mutate(clusterID = group_indices(., district))
### get ATEs for two different models
ols <- lm(formula, data=dta_sim)
dta_sim$dep <- as.numeric(unlist(dta_sim[as.character(formula[[2]])]))
treat_nrs <- table(data.frame(aggregate(dta_sim["district_baraza"], list(dta_sim$clusterID),mean)[,2]))
### calculate potential outcomes
### for the district baraza model
dta_sim$pot_out_0 <- NA
dta_sim$pot_out_0[dta_sim["district_baraza"] == 0 ] <- dta_sim$dep[dta_sim["district_baraza"] == 0 ]
dta_sim$pot_out_0[dta_sim["district_baraza"] == 1 ] <- dta_sim$dep[dta_sim["district_baraza"] == 1] - coef(ols)["district_baraza"]
dta_sim$pot_out_1 <- NA
dta_sim$pot_out_1[dta_sim["district_baraza"] == 0 ] <- dta_sim$dep[dta_sim["district_baraza"] == 0 ] + coef(ols)["district_baraza"]
dta_sim$pot_out_1[dta_sim["district_baraza"] == 1 ] <- dta_sim$dep[dta_sim["district_baraza"] == 1]
oper <- foreach (repl = 1:nr_repl,.combine=rbind) %dopar% {
#do the permutations
perm_treat <- data.frame(cbind(sample(c(rep("SC",treat_nrs[1]), rep("D",treat_nrs[2]))),names(table(dta_sim$clusterID))))
names(perm_treat) <- c("perm_treat","clusterID")
dta_perm <- merge(dta_sim, perm_treat, by.x="clusterID", by.y="clusterID")
dta_perm$district_baraza <- ifelse(dta_perm$perm_treat == "D", 1, 0)
dta_perm$dep <- NA
dta_perm$dep[dta_perm["district_baraza"] ==1 ] <- dta_perm$pot_out_1[dta_perm["district_baraza"] ==1 ]
dta_perm$dep[dta_perm["district_baraza"] ==0 ] <- dta_perm$pot_out_0[dta_perm["district_baraza"] ==0 ]
### p-value
exceed <- abs(coef(lm(formula, data=dta_perm))["district_baraza"]) > abs(coef(ols)["district_baraza"])
dta_perm[outcomes[i]] <- dta_perm$dep
res_list <- cbind(coef(lm(formula, data=dta_perm))["district_baraza"],"exceed" = exceed)
return(res_list)
}
return(list(conf = quantile(oper[,1],sig, na.rm=T),pval= (sum(oper[,2])/nr_repl)))
}
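### illustrative aside (simplified, not used in the analysis): the core randomization-
### inference idea behind the two functions above, for a plain two-group comparison.
### Under the sharp null the outcomes are fixed, so we repeatedly re-assign treatment at
### random and ask how often the permuted estimate is at least as large as the observed one.
ri_demo <- function(y, d, nr_repl = 2000) {
  obs <- mean(y[d == 1]) - mean(y[d == 0])            # observed difference in means
  perm <- replicate(nr_repl, {
    d_perm <- sample(d)                               # re-randomize treatment labels
    mean(y[d_perm == 1]) - mean(y[d_perm == 0])       # estimate under the permutation
  })
  c(estimate = obs, p_value = mean(abs(perm) >= abs(obs)))
}
### e.g. ri_demo(y = rnorm(100), d = rep(0:1, 50)) would return an estimate near 0 and a large p-value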
################################################################## end of functions declarations
#### for the mock report, I use a dummy endline - I read in a dummy endline of 3 households just to get the correct variable names
#endline <- read.csv("/home/bjvca/Dropbox (IFPRI)/baraza/Impact Evaluation Surveys/endline/data/public/endline.csv", stringsAsFactors = TRUE)[10:403]
treats <- read.csv(paste(path,"questionnaire/final_list_5.csv", sep ="/"), stringsAsFactors = TRUE)
endline <- merge(treats, endline, by.x=c("district","subcounty"), by.y=c("a22","a23"))
#create unique ID for clustering based on district and subcounty
endline <- endline %>% mutate(clusterID = group_indices(., district, subcounty))
endline <- endline %>% mutate(clusterID2 = group_indices(., district))
outcomes <- "attriter"
baseline_outcomes <- NULL
###init arrays to store results
df_ancova <- array(NA,dim=c(6,5,length(outcomes)))
df_averages <- array(NA,dim=c(2,length(outcomes)))
### parallel computing for RI
cl <- makeCluster(detectCores(all.tests = FALSE, logical = TRUE))
registerDoParallel(cl)
dta <- endline
for (i in 1:length(outcomes)) {
#print(i)
# i <- 1
df_averages[1,i] <- mean(as.matrix(endline[outcomes[i]]), na.rm=T)
df_averages[2,i] <- sd(as.matrix(endline[outcomes[i]]), na.rm=T)
##ancova
## merge in baseline
ols <- lm(as.formula(paste(outcomes[i],"information*deliberation+a21",sep="~")), data=dta[dta$district_baraza == 0,])
vcov_cluster <- vcovCR(ols, cluster = dta$clusterID[dta$district_baraza == 0], type = "CR0")
res <- coef_test(ols, vcov_cluster)
conf <- conf_int(ols, vcov_cluster)
if (RI_conf_switch) {
RI_store <- RI_conf_sc(i,outcomes, baseline_outcomes, subset(dta, district_baraza == 0) , ctrls = "a21", nr_repl = glob_repli, sig = glob_sig)
conf[2,4:5] <- RI_store$conf_2
conf[3,4:5] <- RI_store$conf_3
res[2,5] <- RI_store$pval_2
res[3,5] <- RI_store$pval_3
}
df_ancova[,2,i] <- c(res[2,1],res[2,2],res[2,5], conf[2,4],conf[2,5], nobs(ols))
df_ancova[,3,i] <- c(res[3,1],res[3,2],res[3,5], conf[3,4],conf[3,5], nobs(ols))
ols <- lm(as.formula(paste(outcomes[i],"information:deliberation+a21",sep="~")), data=dta[dta$district_baraza == 0 & (dta$information == dta$deliberation),])
vcov_cluster <- vcovCR(ols, cluster = dta$clusterID[dta$district_baraza == 0 & (dta$information == dta$deliberation)], type = "CR0")
res <- coef_test(ols, vcov_cluster)
conf <- conf_int(ols, vcov_cluster)
if (RI_conf_switch) {
conf[5,4:5] <- RI_store$conf_1
res[5,5] <- RI_store$pval_1
}
df_ancova[,1,i] <- c(res[5,1],res[5,2],res[5,5], conf[5,4],conf[5,5], nobs(ols))
ols <- lm(as.formula(paste(outcomes[i],"district_baraza+a21",sep="~")), data=dta[(dta$information == 0 & dta$deliberation==0) | dta$district_baraza == 1 ,])
vcov_cluster <- vcovCR(ols, cluster = dta$clusterID2[(dta$information == 0 & dta$deliberation==0) | dta$district_baraza == 1 ], type = "CR0")
res <- coef_test(ols, vcov_cluster)
conf <- conf_int(ols, vcov_cluster)
if (RI_conf_switch) {
RI_store <- RI_conf_dist(i,outcomes, baseline_outcomes, subset(dta, ((information == 0 & deliberation==0) | district_baraza == 1)) , ctrls = "a21", nr_repl = glob_repli, sig = glob_sig)
conf[2,4:5] <- RI_store$conf
res[2,5] <- RI_store$pval
}
df_ancova[,4,i] <- c(res[2,1],res[2,2],res[2,5], conf[2,4],conf[2,5], nobs(ols))
ols <- lm(as.formula(paste(outcomes[i],"district_baraza+a21",sep="~")), data=dta[(dta$information == 1 & dta$deliberation==1) | dta$district_baraza == 1 ,])
vcov_cluster <- vcovCR(ols, cluster = dta$clusterID2[(dta$information == 1 & dta$deliberation==1) | dta$district_baraza == 1 ], type = "CR0")
res <- coef_test(ols, vcov_cluster)
conf <- conf_int(ols, vcov_cluster)
if (RI_conf_switch) {
RI_store <- RI_conf_dist(i,outcomes, baseline_outcomes, subset(dta, ((information == 1 & deliberation==1) | district_baraza == 1)) , ctrls = "a21", nr_repl = glob_repli, sig = glob_sig)
conf[2,4:5] <- RI_store$conf
res[2,5] <- RI_store$pval
}
df_ancova[,5,i] <- c(res[2,1],res[2,2],res[2,5], conf[2,4],conf[2,5], nobs(ols))
}
### save results
save_path <- ifelse(final_verion_swith, paste(path,"report/results/final", sep = "/"), paste(path,"report/results/", sep = "/"))
save(df_ancova, file= paste(save_path,"df_ancova_attrition.Rd", sep="/"))
save(df_averages, file= paste(save_path,"df_averages_attrition.Rd", sep="/"))
|
/report/attrition.R
|
no_license
|
bjvca/baraza
|
R
| false | false | 18,950 |
r
|
#### run this file for the pre-registered (confirmatory) analysis
#### the main outputs are the graphs
rm(list=ls())
library(dplyr)
library(ggplot2)
library(MatchIt)
library(multiwayvcov)
library(plm)
library(lmtest)
library(clubSandwich)
library(moments)
library(doParallel)
set.seed(123456789) #not needed for final version?
### this is executed in the /report subdirectory, need to ..
### set this switch to TRUE if you want to produce a final report - this will save results matrices in a static directory
final_verion_swith <- TRUE
RI_conf_switch <- TRUE
glob_repli <- 10000
glob_sig <- c(.025,.975) ### 5 percent conf intervals
path <- strsplit(getwd(), "/report")[[1]]
endline <- read.csv(paste(path,"data/public/endline.csv", sep="/"), stringsAsFactors = TRUE)
endline$a21 <- as.character(endline$region)
endline$region <- NULL
endline$interviewed <- TRUE
dta_plan <- read.csv(paste(path,"questionnaire/sampling_list_hh.csv", sep="/"), stringsAsFactors = TRUE)
endline <- merge(dta_plan, endline, by="hhid", all.x=T)
endline$interviewed[is.na(endline$interviewed)] <- FALSE
endline$a21 <- endline$a21.x
endline$a21.y <- NULL
endline$information <- NULL
endline$deliberation <- NULL
endline$district_baraza <- NULL
endline$attriter <- !endline$interviewed
########################################################### functions declarations #####################################################
trim <- function(var, dataset, trim_perc=.05) {
### function for trimming a variable in a dataset - replaces with NA
dataset[var][dataset[var] < quantile(dataset[var],c(trim_perc/2,1-(trim_perc/2)), na.rm=T)[1] | dataset[var] > quantile(dataset[var], c(trim_perc/2,1-(trim_perc/2)),na.rm=T)[2] ] <- NA
return(dataset)
}
FW_index <- function(indexer,revcols = NULL,data_orig) {
### function to make family wise index using covariance as weights (following http://cyrussamii.com/?p=2656)
### FW_index(c("baraza.B2","baraza.B3","baraza.B4.1","inputs","baraza.B5.2","baraza.B5.3"),data=endline)
##indexer <- c("baraza.B2","baraza.B3","baraza.B4.1","inputs","baraza.B5.2","baraza.B5.3")
##data_orig <- endline
data <- data_orig[complete.cases(data_orig[indexer]),]
x <- data[indexer]
if(length(revcols)>0){
x[,revcols] <- -1*x[,revcols]
}
for(j in 1:ncol(x)){
x[,j] <- (x[,j] - mean(x[,j]))/sd(x[,j])
}
i.vec <- as.matrix(rep(1,ncol(x)))
Sx <- cov(x)
data$index <- t(solve(t(i.vec)%*%solve(Sx)%*%i.vec)%*%t(i.vec)%*%solve(Sx)%*%t(x))
data <- merge(data_orig,data[c("hhid","index")], by="hhid",all.x=T)
return( data )
}
### wrapper function to make summary forest plots
credplot.gg <- function(d,units, hypo, axlabs, lim){
# d is a data frame with 5 columns
# d$x gives variable names
# d$y gives center point
# d$ylo gives lower limits
# d$yhi gives upper limits
# d$grp gives the grouping variable used for colour
require(ggplot2)
p <- ggplot(d, aes(x=x, y=y, ymin=ylo, ymax=yhi, colour=as.factor(grp)))+
geom_pointrange(position=position_dodge(-.4), size=.5)+
geom_hline(yintercept = 0, linetype=2)+
coord_flip(ylim = c(-lim,lim))+
xlab('') + ylab(units)+ labs(title=hypo) + theme_minimal()+ theme(axis.text=element_text(size=18),
axis.title=element_text(size=14,face="bold"),legend.text=element_text(size=18), plot.title = element_text(size=22,hjust = 0.5), legend.title=element_blank())+
geom_errorbar(aes(ymin=ylo, ymax=yhi),position=position_dodge(-.4),width=0,cex=1.5) + scale_colour_manual(values = c("#CCCCCC", "#6E8DAB", "#104E8B", "#000000")) + scale_x_discrete(labels=axlabs)
return(p)
}
## function definitions
RI_conf_sc <- function(i,outcomes, baseline_outcomes, dta_sim , ctrls = NULL, nr_repl = 1000, sig = c(.025,.975)) {
### a function to estimate confidence intervals using randomization inference following Gerber and Green pages 66-71 and 83.
#RI_conf_dist(2,outcomes, baseline_outcomes, subset(dta, ((information == 1 & deliberation==1) | district_baraza == 1)) , ctrls = "a21", nr_repl = 1000, sig = c(.025,.975))
#dta_sim <- dta[dta$district_baraza == 0 ,]
#ctrls <- "a21"
#nr_repl <- 1000
#sig <- c(.025,.975)
if (is.null(baseline_outcomes)) {
formula1 <- as.formula(paste(outcomes[i],paste("information:deliberation",ctrls,sep="+"),sep="~"))
formula2 <- as.formula(paste(outcomes[i],paste("information*deliberation",ctrls,sep="+"),sep="~"))
} else {
formula1 <- as.formula(paste(paste(outcomes[i],paste("information:deliberation",ctrls,sep="+"),sep="~"),baseline_outcomes[i],sep="+"))
formula2 <- as.formula(paste(paste(outcomes[i],paste("information*deliberation",ctrls,sep="+"),sep="~"),baseline_outcomes[i],sep="+"))
}
dta_sim <- dta_sim %>% mutate(clusterID = group_indices(., district, subcounty))
### get ATEs for two different models
ols_1 <- lm(formula1, data=dta_sim[dta_sim$deliberation == dta_sim$information,])
ols_2 <- lm(formula2, data=dta_sim)
dta_sim$dep <- as.numeric(unlist(dta_sim[as.character(formula1[[2]])]))
treat_nrs <- table(data.frame(aggregate(dta_sim[c("information","deliberation")], list(dta_sim$clusterID),mean))[,2:3])
### calculate potential outcomes
### for model 1 (sc effect)
dta_sim$pot_out_0_1 <- NA
dta_sim$pot_out_0_1[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0] <- dta_sim$dep[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0]
dta_sim$pot_out_0_1[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1] <- dta_sim$dep[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1] - coef(ols_1)["information:deliberation"]
dta_sim$pot_out_1_1 <- NA
dta_sim$pot_out_1_1[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1] <- dta_sim$dep[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1]
dta_sim$pot_out_1_1[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0] <- dta_sim$dep[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0] + coef(ols_1)["information:deliberation"]
### for model 2 (factorial design)
### potential outcomes for I=0 D=0
dta_sim$pot_out_00_2 <- NA
dta_sim$pot_out_00_2[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0] <- dta_sim$dep[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0]
dta_sim$pot_out_00_2[dta_sim["information"] == 1 & dta_sim["deliberation"] == 0] <- dta_sim$dep[dta_sim["information"] == 1 & dta_sim["deliberation"] == 0] - coef(ols_2)["information"]
dta_sim$pot_out_00_2[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1] <- dta_sim$dep[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1] - coef(ols_2)["information"] - coef(ols_2)["deliberation"] - coef(ols_2)["information:deliberation"]
dta_sim$pot_out_00_2[dta_sim["information"] == 0 & dta_sim["deliberation"] == 1] <- dta_sim$dep[dta_sim["information"] == 0 & dta_sim["deliberation"] == 1] - coef(ols_2)["deliberation"]
### potential outcomes for I=1 and D=1
dta_sim$pot_out_11_2 <- NA
dta_sim$pot_out_11_2[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1] <- dta_sim$dep[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1]
dta_sim$pot_out_11_2[dta_sim["information"] == 0 & dta_sim["deliberation"] == 1] <- dta_sim$dep[dta_sim["information"] == 0 & dta_sim["deliberation"] == 1] + coef(ols_2)["information"]
dta_sim$pot_out_11_2[dta_sim["information"] == 1 & dta_sim["deliberation"] == 0] <- dta_sim$dep[dta_sim["information"] == 1 & dta_sim["deliberation"] == 0] + coef(ols_2)["deliberation"]
dta_sim$pot_out_11_2[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0] <- dta_sim$dep[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0] + coef(ols_2)["information"] + coef(ols_2)["deliberation"] + coef(ols_2)["information:deliberation"]
### potential outcomes for I=1 and D=0
dta_sim$pot_out_10_2 <- NA
dta_sim$pot_out_10_2[dta_sim["information"] == 1 & dta_sim["deliberation"] == 0] <- dta_sim$dep[dta_sim["information"] == 1 & dta_sim["deliberation"] == 0]
dta_sim$pot_out_10_2[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1] <- dta_sim$dep[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1] - coef(ols_2)["deliberation"] - coef(ols_2)["information:deliberation"]
dta_sim$pot_out_10_2[dta_sim["information"] == 0 & dta_sim["deliberation"] == 1] <- dta_sim$dep[dta_sim["information"] == 0 & dta_sim["deliberation"] == 1] + coef(ols_2)["information"] - coef(ols_2)["deliberation"]
dta_sim$pot_out_10_2[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0] <- dta_sim$dep[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0] + coef(ols_2)["information"]
### potential outcomes for I=0 and D=1
dta_sim$pot_out_01_2 <- NA
dta_sim$pot_out_01_2[dta_sim["information"] == 0 & dta_sim["deliberation"] == 1] <- dta_sim$dep[dta_sim["information"] == 0 & dta_sim["deliberation"] == 1]
dta_sim$pot_out_01_2[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1] <- dta_sim$dep[dta_sim["information"] == 1 & dta_sim["deliberation"] == 1] - coef(ols_2)["information"] - coef(ols_2)["information:deliberation"]
dta_sim$pot_out_01_2[dta_sim["information"] == 1 & dta_sim["deliberation"] == 0] <- dta_sim$dep[dta_sim["information"] == 1 & dta_sim["deliberation"] == 0] - coef(ols_2)["information"] + coef(ols_2)["deliberation"]
dta_sim$pot_out_01_2[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0] <- dta_sim$dep[dta_sim["information"] == 0 & dta_sim["deliberation"] == 0] + coef(ols_2)["deliberation"]
oper <- foreach (repl = 1:nr_repl,.combine=rbind) %dopar% {
#do the permutations
perm_treat <- data.frame(cbind(sample(c(rep("C",treat_nrs[1,1]), rep("D",treat_nrs[1,2]), rep("I",treat_nrs[2,1]),rep("B",treat_nrs[2,2]))),names(table(dta_sim$clusterID))))
names(perm_treat) <- c("perm_treat","clusterID")
dta_perm <- merge(dta_sim, perm_treat, by.x="clusterID", by.y="clusterID")
dta_perm$information <- ifelse(dta_perm$perm_treat %in% c("I","B"), 1, 0)
dta_perm$deliberation <- ifelse(dta_perm$perm_treat %in% c("D","B"), 1, 0)
dta_perm$dep_1 <- NA
dta_perm$dep_1[dta_perm["information"] ==1 & dta_perm["deliberation"] ==1] <- dta_perm$pot_out_1_1[dta_perm["information"] ==1 & dta_perm["deliberation"] ==1]
dta_perm$dep_1[dta_perm["information"] ==0 & dta_perm["deliberation"] ==0] <- dta_perm$pot_out_0_1[dta_perm["information"] ==0 & dta_perm["deliberation"] ==0]
dta_perm$dep_2 <- NA
dta_perm$dep_2[dta_perm["information"] ==1 & dta_perm["deliberation"] ==1] <- dta_perm$pot_out_11_2[dta_perm["information"] ==1 & dta_perm["deliberation"] ==1]
dta_perm$dep_2[dta_perm["information"] ==0 & dta_perm["deliberation"] ==0] <- dta_perm$pot_out_00_2[dta_perm["information"] ==0 & dta_perm["deliberation"] ==0]
dta_perm$dep_2[dta_perm["information"] ==1 & dta_perm["deliberation"] ==0] <- dta_perm$pot_out_10_2[dta_perm["information"] ==1 & dta_perm["deliberation"] ==0]
dta_perm$dep_2[dta_perm["information"] ==0 & dta_perm["deliberation"] ==1] <- dta_perm$pot_out_01_2[dta_perm["information"] ==0 & dta_perm["deliberation"] ==1]
### p-value
exceed1 <- abs(coef(lm(formula1, data=dta_perm))["information:deliberation"]) > abs(coef(ols_1)["information:deliberation"])
exceed2 <- abs(coef(lm(formula2, data=dta_perm))["information"]) > abs(coef(ols_2)["information"])
exceed3 <- abs(coef(lm(formula2, data=dta_perm))["deliberation"]) > abs(coef(ols_2)["deliberation"])
dta_perm[outcomes[i]] <- dta_perm$dep_1
r1 <-coef(lm(formula1, data=dta_perm[dta_perm$deliberation == dta_perm$information,]))["information:deliberation"]
dta_perm[outcomes[i]] <- dta_perm$dep_2
r2 <-coef(lm(formula2, data=dta_perm))["information"]
r3 <- coef(lm(formula2, data=dta_perm))["deliberation"]
oper <- return(c(r1,r2,r3, exceed1, exceed2, exceed3))
}
return(list(conf_1 = quantile(oper[,1],sig, na.rm=T),conf_2 = quantile(oper[,2],sig, na.rm=T),conf_3 = quantile(oper[,3],sig, na.rm=T), pval_1= (sum(oper[,4])/nr_repl), pval_2= (sum(oper[,5])/nr_repl), pval_3= (sum(oper[,6])/nr_repl)))
}
RI_conf_dist <- function(i,outcomes, baseline_outcomes, dta_sim , ctrls = NULL, nr_repl = 1000, sig = c(.025,.975)) {
#RI_conf_dist(2,outcomes, baseline_outcomes, subset(dta, ((information == 1 & deliberation==1) | district_baraza == 1)) , ctrls = "a21", nr_repl = 1000, sig = c(.025,.975))
#dta_sim <- subset(dta, ((information == 1 & deliberation==1) | district_baraza == 1))
#ctrls <- "a21"
#nr_repl <- 1000
#sig <- c(.025,.975)
### a function to estimate confidence intervals using randomization inference following Gerber and Green pages 66-71 and 83.
if (is.null(baseline_outcomes)) {
formula <- as.formula(paste(outcomes[i],paste("district_baraza",ctrls,sep="+"),sep="~"))
} else {
formula <- as.formula(paste(paste(outcomes[i],paste("district_baraza",ctrls,sep="+"),sep="~"),baseline_outcomes[i],sep="+"))
}
dta_sim <- dta_sim %>% mutate(clusterID = group_indices(., district))
### get ATEs for two different models
ols <- lm(formula, data=dta_sim)
dta_sim$dep <- as.numeric(unlist(dta_sim[as.character(formula[[2]])]))
treat_nrs <- table(data.frame(aggregate(dta_sim["district_baraza"], list(dta_sim$clusterID),mean)[,2]))
### calculate potential outcomes
### for the district baraza model
dta_sim$pot_out_0 <- NA
dta_sim$pot_out_0[dta_sim["district_baraza"] == 0 ] <- dta_sim$dep[dta_sim["district_baraza"] == 0 ]
dta_sim$pot_out_0[dta_sim["district_baraza"] == 1 ] <- dta_sim$dep[dta_sim["district_baraza"] == 1] - coef(ols)["district_baraza"]
dta_sim$pot_out_1 <- NA
dta_sim$pot_out_1[dta_sim["district_baraza"] == 0 ] <- dta_sim$dep[dta_sim["district_baraza"] == 0 ] + coef(ols)["district_baraza"]
dta_sim$pot_out_1[dta_sim["district_baraza"] == 1 ] <- dta_sim$dep[dta_sim["district_baraza"] == 1]
oper <- foreach (repl = 1:nr_repl,.combine=rbind) %dopar% {
#do the permutations
perm_treat <- data.frame(cbind(sample(c(rep("SC",treat_nrs[1]), rep("D",treat_nrs[2]))),names(table(dta_sim$clusterID))))
names(perm_treat) <- c("perm_treat","clusterID")
dta_perm <- merge(dta_sim, perm_treat, by.x="clusterID", by.y="clusterID")
dta_perm$district_baraza <- ifelse(dta_perm$perm_treat == "D", 1, 0)
dta_perm$dep <- NA
dta_perm$dep[dta_perm["district_baraza"] ==1 ] <- dta_perm$pot_out_1[dta_perm["district_baraza"] ==1 ]
dta_perm$dep[dta_perm["district_baraza"] ==0 ] <- dta_perm$pot_out_0[dta_perm["district_baraza"] ==0 ]
### p-value
exceed <- abs(coef(lm(formula, data=dta_perm))["district_baraza"]) > abs(coef(ols)["district_baraza"])
dta_perm[outcomes[i]] <- dta_perm$dep
res_list <- cbind(coef(lm(formula, data=dta_perm))["district_baraza"],"exceed" = exceed)
return(res_list)
}
return(list(conf = quantile(oper[,1],sig, na.rm=T),pval= (sum(oper[,2])/nr_repl)))
}
################################################################## end of function declarations
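### Illustrative sketch (not part of the estimation pipeline above): the
### randomization-inference p-values computed in RI_conf_sc and RI_conf_dist are
### the share of placebo assignments whose estimate exceeds the observed one in
### absolute value. The toy example below uses made-up data, a simple difference
### in means, and hypothetical demo_* names; it is wrapped in `if (FALSE)` so it
### never runs as part of this script.
if (FALSE) {
  demo_y <- c(rnorm(20, 0, 1), rnorm(20, 0.5, 1)) # fake outcomes
  demo_d <- rep(c(0, 1), each = 20)               # fake treatment assignment
  demo_ate <- mean(demo_y[demo_d == 1]) - mean(demo_y[demo_d == 0])
  demo_perm <- replicate(1000, {
    d_star <- sample(demo_d)                      # re-randomize under the sharp null
    mean(demo_y[d_star == 1]) - mean(demo_y[d_star == 0])
  })
  demo_pval <- mean(abs(demo_perm) > abs(demo_ate))
}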
#### for the mock report, I use a dummy endline - I read in a dummy endline of 3 households just to get the correct variable names
#endline <- read.csv("/home/bjvca/Dropbox (IFPRI)/baraza/Impact Evaluation Surveys/endline/data/public/endline.csv", stringsAsFactors = TRUE)[10:403]
treats <- read.csv(paste(path,"questionnaire/final_list_5.csv", sep ="/"), stringsAsFactors = TRUE)
endline <- merge(treats, endline, by.x=c("district","subcounty"), by.y=c("a22","a23"))
#create unique ID for clustering based on district and subcounty
endline <- endline %>% mutate(clusterID = group_indices(., district, subcounty))
endline <- endline %>% mutate(clusterID2 = group_indices(., district))
outcomes <- "attriter"
baseline_outcomes <- NULL
###init arrays to store results
df_ancova <- array(NA,dim=c(6,5,length(outcomes)))
df_averages <- array(NA,dim=c(2,length(outcomes)))
### parallel computing for RI
cl <- makeCluster(detectCores(all.tests = FALSE, logical = TRUE))
registerDoParallel(cl)
dta <- endline
for (i in 1:length(outcomes)) {
#print(i)
# i <- 1
df_averages[1,i] <- mean(as.matrix(endline[outcomes[i]]), na.rm=T)
df_averages[2,i] <- sd(as.matrix(endline[outcomes[i]]), na.rm=T)
##ancova
## merge in baseline
ols <- lm(as.formula(paste(outcomes[i],"information*deliberation+a21",sep="~")), data=dta[dta$district_baraza == 0,])
vcov_cluster <- vcovCR(ols, cluster = dta$clusterID[dta$district_baraza == 0], type = "CR0")
res <- coef_test(ols, vcov_cluster)
conf <- conf_int(ols, vcov_cluster)
if (RI_conf_switch) {
RI_store <- RI_conf_sc(i,outcomes, baseline_outcomes, subset(dta, district_baraza == 0) , ctrls = "a21", nr_repl = glob_repli, sig = glob_sig)
conf[2,4:5] <- RI_store$conf_2
conf[3,4:5] <- RI_store$conf_3
res[2,5] <- RI_store$pval_2
res[3,5] <- RI_store$pval_3
}
df_ancova[,2,i] <- c(res[2,1],res[2,2],res[2,5], conf[2,4],conf[2,5], nobs(ols))
df_ancova[,3,i] <- c(res[3,1],res[3,2],res[3,5], conf[3,4],conf[3,5], nobs(ols))
ols <- lm(as.formula(paste(outcomes[i],"information:deliberation+a21",sep="~")), data=dta[dta$district_baraza == 0 & (dta$information == dta$deliberation),])
vcov_cluster <- vcovCR(ols, cluster = dta$clusterID[dta$district_baraza == 0 & (dta$information == dta$deliberation)], type = "CR0")
res <- coef_test(ols, vcov_cluster)
conf <- conf_int(ols, vcov_cluster)
if (RI_conf_switch) {
conf[5,4:5] <- RI_store$conf_1
res[5,5] <- RI_store$pval_1
}
df_ancova[,1,i] <- c(res[5,1],res[5,2],res[5,5], conf[5,4],conf[5,5], nobs(ols))
ols <- lm(as.formula(paste(outcomes[i],"district_baraza+a21",sep="~")), data=dta[(dta$information == 0 & dta$deliberation==0) | dta$district_baraza == 1 ,])
vcov_cluster <- vcovCR(ols, cluster = dta$clusterID2[(dta$information == 0 & dta$deliberation==0) | dta$district_baraza == 1 ], type = "CR0")
res <- coef_test(ols, vcov_cluster)
conf <- conf_int(ols, vcov_cluster)
if (RI_conf_switch) {
RI_store <- RI_conf_dist(i,outcomes, baseline_outcomes, subset(dta, ((information == 0 & deliberation==0) | district_baraza == 1)) , ctrls = "a21", nr_repl = glob_repli, sig = glob_sig)
conf[2,4:5] <- RI_store$conf
res[2,5] <- RI_store$pval
}
df_ancova[,4,i] <- c(res[2,1],res[2,2],res[2,5], conf[2,4],conf[2,5], nobs(ols))
ols <- lm(as.formula(paste(outcomes[i],"district_baraza+a21",sep="~")), data=dta[(dta$information == 1 & dta$deliberation==1) | dta$district_baraza == 1 ,])
vcov_cluster <- vcovCR(ols, cluster = dta$clusterID2[(dta$information == 1 & dta$deliberation==1) | dta$district_baraza == 1 ], type = "CR0")
res <- coef_test(ols, vcov_cluster)
conf <- conf_int(ols, vcov_cluster)
if (RI_conf_switch) {
RI_store <- RI_conf_dist(i,outcomes, baseline_outcomes, subset(dta, ((information == 1 & deliberation==1) | district_baraza == 1)) , ctrls = "a21", nr_repl = glob_repli, sig = glob_sig)
conf[2,4:5] <- RI_store$conf
res[2,5] <- RI_store$pval
}
df_ancova[,5,i] <- c(res[2,1],res[2,2],res[2,5], conf[2,4],conf[2,5], nobs(ols))
}
### save results
save_path <- ifelse(final_verion_swith, paste(path,"report/results/final", sep = "/"), paste(path,"report/results/", sep = "/"))
save(df_ancova, file= paste(save_path,"df_ancova_attrition.Rd", sep="/"))
save(df_averages, file= paste(save_path,"df_averages_attrition.Rd", sep="/"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mark.R
\name{summary.bench_mark}
\alias{summary.bench_mark}
\title{Summarize \link[bench:mark]{bench::mark} results.}
\usage{
\method{summary}{bench_mark}(object, filter_gc = TRUE, relative = FALSE, time_unit = NULL, ...)
}
\arguments{
\item{object}{\link{bench_mark} object to summarize.}
\item{filter_gc}{If \code{TRUE} remove iterations that contained at least one
garbage collection before summarizing. If \code{TRUE} but an expression had
a garbage collection in every iteration, filtering is disabled, with a warning.}
\item{relative}{If \code{TRUE} all summaries are computed relative to the minimum
execution time rather than absolute time.}
\item{time_unit}{If \code{NULL} the times are reported in a human readable
fashion depending on each value. If one of 'ns', 'us', 'ms', 's', 'm', 'h',
'd', 'w' the time units are instead expressed as nanoseconds, microseconds,
milliseconds, seconds, minutes, hours, days or weeks respectively.}
\item{...}{Additional arguments ignored.}
}
\value{
A \link[tibble:tibble]{tibble} with the additional summary columns.
The following summary columns are computed
\itemize{
\item \code{expression} - \code{bench_expr} The deparsed expression that was evaluated
(or its name if one was provided).
\item \code{min} - \code{bench_time} The minimum execution time.
\item \code{median} - \code{bench_time} The sample median of execution time.
\item \code{itr/sec} - \code{double} The estimated number of executions performed per
second.
\item \code{mem_alloc} - \code{bench_bytes} Total amount of memory allocated by R while
running the expression. Memory allocated \emph{outside} the R heap, e.g. by
\code{malloc()} or \code{new} directly is \emph{not} tracked, take care to avoid
misinterpreting the results if running code that may do this.
\item \code{gc/sec} - \code{double} The number of garbage collections per second.
\item \code{n_itr} - \code{integer} Total number of iterations after filtering
garbage collections (if \code{filter_gc == TRUE}).
\item \code{n_gc} - \code{double} Total number of garbage collections performed over all
iterations. This is a pseudo-measure of the pressure on the garbage collector; if
it varies greatly between two alternatives, generally the one with fewer
collections will cause fewer allocations in real usage.
\item \code{total_time} - \code{bench_time} The total time to perform the benchmarks.
\item \code{result} - \code{list} A list column of the object(s) returned by the
evaluated expression(s).
\item \code{memory} - \code{list} A list column with results from \code{\link[=Rprofmem]{Rprofmem()}}.
\item \code{time} - \code{list} A list column of \code{bench_time} vectors for each evaluated
expression.
\item \code{gc} - \code{list} A list column with tibbles containing the level of
garbage collection (0-2, columns) for each iteration (rows).
}
}
\description{
Summarize \link[bench:mark]{bench::mark} results.
}
\details{
If \code{filter_gc == TRUE} (the default) runs that contain a garbage
collection will be removed before summarizing. This is most useful for fast
expressions when the majority of runs do not contain a gc. Call
\code{summary(filter_gc = FALSE)} if you would like to compute summaries \emph{with}
these times, such as expressions with lots of allocations when all or most
runs contain a gc.
}
\examples{
dat <- data.frame(x = runif(10000, 1, 1000), y=runif(10000, 1, 1000))
# `bench::mark()` implicitly calls summary() automatically
results <- bench::mark(
dat[dat$x > 500, ],
dat[which(dat$x > 500), ],
subset(dat, x > 500))
# However you can also do so explicitly to filter gc differently.
summary(results, filter_gc = FALSE)
# Or output relative times
summary(results, relative = TRUE)
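# Or, as documented for the `time_unit` argument above, report all
# times in a fixed unit (here milliseconds)
summary(results, time_unit = "ms")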
}
|
/man/summary.bench_mark.Rd
|
permissive
|
isabella232/bench-3
|
R
| false | true | 3,794 |
rd
|
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## -----------------------------------------------------------------------------
library(PointFore)
## -----------------------------------------------------------------------------
set.seed(1)
Y.mean <- rnorm(200,0,1)
Y <- rnorm(200,Y.mean,1)
X <- Y.mean
## -----------------------------------------------------------------------------
res.const <- estimate.functional(
iden.fct = expectiles,
model = constant,
Y = Y,
X = X,
theta0 = 0.5)
summary(res.const)
## -----------------------------------------------------------------------------
res.flexible <- estimate.functional(iden.fct = expectiles,
model = probit_linear,
Y = Y,X = X,
theta0 = c(0,0),
stateVariable = X)
summary(res.flexible)
## ---- fig.show="hold", fig.cap = "The results of the constant and flexible model plotted against the forecast."----
plot(res.const)
plot(res.flexible)
## -----------------------------------------------------------------------------
library(car)
## -----------------------------------------------------------------------------
linearHypothesis(res.const$gmm,"Theta[1]=0.5")
## -----------------------------------------------------------------------------
linearHypothesis(res.flexible$gmm,c("Theta[1]=0", "Theta[2]=0"))
linearHypothesis(res.flexible$gmm,"Theta[2]=0")
## -----------------------------------------------------------------------------
break_model <- function(stateVariable, theta)
{
if(length(theta)!=2)
stop("Wrong dimension of theta")
return(boot::inv.logit(theta[1]+theta[2]*(stateVariable>0)))
}
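## Any function of (stateVariable, theta) that returns a level in (0, 1), like
## `break_model` above, can in principle be passed as `model`. As a hedged
## illustration only (not used in this tutorial), a logit-linear specification
## in the spirit of `probit_linear` might look as follows; `demo_logit_linear`
## is a hypothetical name introduced here.
demo_logit_linear <- function(stateVariable, theta)
{
  if(length(theta)!=2)
    stop("Wrong dimension of theta")
  return(boot::inv.logit(theta[1]+theta[2]*stateVariable))
}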
## -----------------------------------------------------------------------------
res.break <- estimate.functional(iden.fct = expectiles,
model = break_model,
Y = Y,X = X,
theta0 = c(0,0),
stateVariable = X)
summary(res.break)
plot(res.break)
## -----------------------------------------------------------------------------
X <- Y.mean + 0.2 *(Y.mean>0)
res.break <- estimate.functional(iden.fct = expectiles,
model = break_model,
Y = Y,X = X,
theta0 = c(0,0),
stateVariable = X)
summary(res.break)
plot(res.break)
## -----------------------------------------------------------------------------
res.flexible <- estimate.functional(iden.fct = expectiles,
model = probit_linear,
Y = Y,X = X,
theta0 = c(0,0),
stateVariable = X)
summary(res.flexible)
plot(res.flexible)
## -----------------------------------------------------------------------------
res.flexible <- estimate.functional(iden.fct = expectiles,
model = probit_linear,
Y = Y,X = X,
theta0 = c(0,0),
instruments = c("lag(Y)","X","sign(X)*X^2"),
stateVariable = X)
summary(res.flexible)$Jtest
res.break <- estimate.functional(iden.fct = expectiles,
model = probit_break,
Y = Y,X = X,
theta0 = c(0,0),
instruments = c("lag(Y)","X","sign(X)*X^2"),
stateVariable = X)
summary(res.break)$Jtest
|
/doc/Tutorial.R
|
no_license
|
Schmidtpk/PointFore
|
R
| false | false | 4,014 |
r
|
library(TrenaProjectMouseMacrophage)
library(RUnit)
#------------------------------------------------------------------------------------------------------------------------
if(!exists("tProj")) {
message(sprintf("--- creating instance of TrenaProjectMouseMacrophage"))
tProj <- TrenaProjectMouseMacrophage();
}
#------------------------------------------------------------------------------------------------------------------------
runTests <- function()
{
test_constructor()
test_supportedGenes()
test_variants()
test_footprintDatabases()
test_expressionMatrices()
test_setTargetGene()
} # runTests
#------------------------------------------------------------------------------------------------------------------------
test_constructor <- function()
{
message(sprintf("--- test_constructor"))
checkTrue(all(c("TrenaProjectMouseMacrophage", "TrenaProject") %in% is(tProj)))
} # test_constructor
#------------------------------------------------------------------------------------------------------------------------
test_supportedGenes <- function()
{
message(sprintf("--- test_supportedGenes"))
subset.expected <- c("Abca1")
checkTrue(all(subset.expected %in% getSupportedGenes(tProj)))
} # test_supportedGenes
#------------------------------------------------------------------------------------------------------------------------
test_variants <- function()
{
message(sprintf("--- test_variants"))
checkEquals(getVariantDatasetNames(tProj), character(0))
} # test_variants
#------------------------------------------------------------------------------------------------------------------------
test_footprintDatabases <- function()
{
message(sprintf("--- test_footprintDatabases"))
expected <- c("extraembryonic_structure_wellington_16", "extraembryonic_structure_wellington_20",
"extraembryonic_structure_hint_16", "extraembryonic_structure_hint_20")
checkTrue(is.na(getFootprintDatabaseNames(tProj)))
checkTrue(is.na(getFootprintDatabaseHost(tProj)))
} # test_footprintDatabases
#------------------------------------------------------------------------------------------------------------------------
test_expressionMatrices <- function()
{
expected <- c("thioglycollate-elicited-peritoneal-macrophages")
checkTrue(all(expected %in% getExpressionMatrixNames(tProj)))
mtx <- getExpressionMatrix(tProj, expected[1])
checkEquals(dim(mtx), c(6890, 14))
} # test_expressionMatrices
#------------------------------------------------------------------------------------------------------------------------
# setting the target gene implies a few other assignments, all tested here:
#   geneInfo (temporarily also masquerading as tbl.transcripts)
#   geneRegion
#   geneEnhancersRegion (when available, defaults to geneRegion)
#
test_setTargetGene <- function()
{
message(sprintf("--- test_setTargetGene"))
setTargetGene(tProj, "Abca1")
checkEquals(getTargetGene(tProj), "Abca1")
message(sprintf(" transcripts"))
tbl.transcripts <- getTranscriptsTable(tProj)
checkTrue(nrow(tbl.transcripts) >= 1)
checkEquals(tbl.transcripts$chr, "chr4")
checkEquals(tbl.transcripts$start, 53030787)
checkEquals(tbl.transcripts$end , 53159895)
checkEquals(tbl.transcripts$tss, 53159895)
checkEquals(tbl.transcripts$strand, -1)
message(sprintf(" geneRegion"))
region <- getGeneRegion(tProj, flankingPercent=0)
checkTrue(all(c("chromLocString", "chrom", "start", "end") %in% names(region)))
checkEquals(region$chromLocString, "chr4:53030787-53159895")
message(sprintf(" enhancers"))
tbl.enhancers <- getEnhancers(tProj)
checkEquals(colnames(tbl.enhancers), c("chrom", "start", "end", "type", "combinedScore", "geneSymbol"))
checkTrue(nrow(tbl.enhancers) >= 0)
message(sprintf(" geneEnhancersRegion"))
region <- getGeneEnhancersRegion(tProj, flankingPercent=0)
checkTrue(all(c("chromLocString", "chrom", "start", "end") %in% names(region)))
checkEquals(region$chromLocString, "chr4:52901678-53289004")
message(sprintf(" encode DHS"))
tbl.dhs <- getEncodeDHS(tProj)
checkEquals(nrow(tbl.dhs), 0)
message(sprintf(" ChIP-seq"))
tbl.chipSeq <- getChipSeq(tProj, chrom=region$chrom, start=region$start, end=region$end, tfs=NA)
checkEquals(nrow(tbl.chipSeq), 0)
} # test_setTargetGene
#------------------------------------------------------------------------------------------------------------------------
if(!interactive())
runTests()
|
/inst/unitTests/test_TrenaProjectMouseMacrophage.R
|
permissive
|
PriceLab/TrenaProjectMouseMacrophage
|
R
| false | false | 4,520 |
r
|
context("Adding nodes and/or edges from a table to a graph")
test_that("adding nodes from a table to a graph is possible", {
library(tibble)
# Create a data frame for graph nodes
node_table <-
tibble::tribble(
~iso_4217_code, ~curr_number, ~exponent,
"AED", 784, 2,
"AFN", 971, 2,
"ALL", 8, 2,
"AMD", 51, 2,
"ANG", 532, 2,
"AOA", 973, 2,
"ARS", 32, 2,
"AUD", 36, 2,
"AWG", 533, 2,
"AZN", 944, 2,
"BAM", 977, 2,
"BBD", 52, 2,
"BDT", 50, 2,
"BGN", 975, 2,
"BHD", 48, 3,
"BIF", 108, 0,
"BMD", 60, 2,
"BND", 96, 2,
"BOB", 68, 2,
"BOV", 984, 2,
"BRL", 986, 2,
"BSD", 44, 2,
"BTN", 64, 2,
"BWP", 72, 2,
"BYR", 974, 0,
"BZD", 84, 2,
"CAD", 124, 2,
"CDF", 976, 2,
"CHE", 947, 2,
"CHF", 756, 2,
"CHW", 948, 2,
"CLF", 990, 4,
"CLP", 152, 0,
"CNY", 156, 2,
"COP", 170, 2,
"COU", 970, 2,
"CRC", 188, 2,
"CUC", 931, 2,
"CUP", 192, 2,
"CVE", 132, 0,
"CZK", 203, 2,
"DJF", 262, 0,
"DKK", 208, 2,
"DOP", 214, 2,
"DZD", 12, 2,
"EGP", 818, 2,
"ERN", 232, 2,
"ETB", 230, 2,
"EUR", 978, 2,
"FJD", 242, 2,
"FKP", 238, 2,
"GBP", 826, 2,
"GEL", 981, 2,
"GHS", 936, 2,
"GIP", 292, 2,
"GMD", 270, 2,
"GNF", 324, 0,
"GTQ", 320, 2,
"GYD", 328, 2,
"HKD", 344, 2,
"HNL", 340, 2,
"HRK", 191, 2,
"HTG", 332, 2,
"HUF", 348, 2,
"IDR", 360, 2,
"ILS", 376, 2,
"INR", 356, 2,
"IQD", 368, 3,
"IRR", 364, 2,
"ISK", 352, 0,
"JMD", 388, 2,
"JOD", 400, 3,
"JPY", 392, 0,
"KES", 404, 2,
"KGS", 417, 2,
"KHR", 116, 2,
"KMF", 174, 0,
"KPW", 408, 2,
"KRW", 410, 0,
"KWD", 414, 3,
"KYD", 136, 2,
"KZT", 398, 2,
"LAK", 418, 2,
"LBP", 422, 2,
"LKR", 144, 2,
"LRD", 430, 2,
"LSL", 426, 2,
"LYD", 434, 3,
"MAD", 504, 2,
"MDL", 498, 2,
"MGA", 969, 1,
"MKD", 807, 2,
"MMK", 104, 2,
"MNT", 496, 2,
"MOP", 446, 1,
"MRO", 478, 1,
"MUR", 480, 2,
"MVR", 462, 2,
"MWK", 454, 2,
"MXN", 484, 2,
"MXV", 979, 2,
"MYR", 458, 2,
"MZN", 943, 2,
"NAD", 516, 2,
"NGN", 566, 2,
"NIO", 558, 2,
"NOK", 578, 2,
"NPR", 524, 2,
"NZD", 554, 2,
"OMR", 512, 3,
"PAB", 590, 2,
"PEN", 604, 2,
"PGK", 598, 2,
"PHP", 608, 2,
"PKR", 586, 2,
"PLN", 985, 2,
"PYG", 600, 0,
"QAR", 634, 2,
"RON", 946, 2,
"RSD", 941, 2,
"RUB", 643, 2,
"RWF", 646, 0,
"SAR", 682, 2,
"SBD", 90, 2,
"SCR", 690, 2,
"SDG", 938, 2,
"SEK", 752, 2,
"SGD", 702, 2,
"SHP", 654, 2,
"SLL", 694, 2,
"SOS", 706, 2,
"SRD", 968, 2,
"SSP", 728, 2,
"STD", 678, 2,
"SYP", 760, 2,
"SZL", 748, 2,
"THB", 764, 2,
"TJS", 972, 2,
"TMT", 934, 2,
"TND", 788, 3,
"TOP", 776, 2,
"TRY", 949, 2,
"TTD", 780, 2,
"TWD", 901, 2,
"TZS", 834, 2,
"UAH", 980, 2,
"UGX", 800, 0,
"USD", 840, 2,
"USN", 997, 2,
"USS", 998, 2,
"UYI", 940, 0,
"UYU", 858, 2,
"UZS", 860, 2,
"VEF", 937, 2,
"VND", 704, 0,
"VUV", 548, 0,
"WST", 882, 2,
"XAF", 950, 0,
"XAG", 961, NA,
"XAU", 959, NA,
"XCD", 951, 2,
"XDR", 960, NA,
"XOF", 952, 0,
"XPD", 964, NA,
"XPF", 953, 0,
"XPT", 962, NA,
"XSU", 994, NA,
"XUA", 965, NA,
"YER", 886, 2,
"ZAR", 710, 2,
"ZMW", 967, 2)
# Convert to a data frame
node_table <-
as.data.frame(node_table, stringsAsFactors = FALSE)
# Add nodes directly from the df, calling the
# `add_nodes_from_table()` function with default
# options
graph_1_df <-
create_graph() %>%
add_nodes_from_table(node_table)
# Expect that the graph has the same number of nodes
# as there are rows in the table
expect_equal(
nrow(node_table), node_count(graph_1_df))
# Expect certain columns to exist in the graph's
# node data frame
expect_equal(
colnames(graph_1_df$nodes_df),
c("id", "type", "label", "iso_4217_code",
"curr_number", "exponent"))
# Add nodes from the df, but this time apply the
# `curr_number` column to the graph's `label` attribute
graph_2_df <-
create_graph() %>%
add_nodes_from_table(
node_table, label_col = "curr_number")
# Expect that there aren't any NA values in the
# graph's `label` column
expect_true(
all(!is.na(graph_2_df$nodes_df[, 3])))
# Expect that the values in the `label` are
# of the character class
expect_true(
is.character(graph_2_df$nodes_df[, 3]))
# Add nodes from the df; also apply a static value
# for `type` as `currency`
graph_3_df <-
create_graph() %>%
add_nodes_from_table(
node_table,
set_type = "currency",
label_col = "curr_number")
# Expect that all values set for the `type`
# attribute are `currency`
expect_true(
all(graph_3_df$nodes_df[, 2] == "currency"))
# Add nodes from the df; drop some of the
# incoming columns
graph_4_df <-
create_graph() %>%
add_nodes_from_table(
node_table,
set_type = "currency",
label_col = "curr_number",
drop_cols = c("exponent", "currency_name"))
# Expect that the node attributes `exponent`
# and `currency_name` do not appear in the graph's
# internal node data frame
expect_true(
!all(c("exponent", "currency_name") %in%
colnames(graph_4_df$nodes_df)))
# Add nodes from the df; assign a table column
# to the `type` attribute
graph_5_df <-
create_graph() %>%
add_nodes_from_table(
node_table,
set_type = "currency",
label_col = "curr_number",
type_col = "exponent")
# Expect that all values set for the `type`
# attribute have certain values (including NA)
expect_true(
all(graph_5_df$nodes_df[, 2] %in%
c("0", "1", "2", "3", "4", NA)))
})
test_that("adding edges from a table to a graph is possible", {
# Create a data frame for graph nodes
node_table <-
tibble::tribble(
~iso_4217_code, ~curr_number, ~exponent,
"AED", 784, 2,
"AFN", 971, 2,
"ALL", 8, 2,
"AMD", 51, 2,
"ANG", 532, 2,
"AOA", 973, 2,
"ARS", 32, 2,
"AUD", 36, 2,
"AWG", 533, 2,
"AZN", 944, 2,
"BAM", 977, 2,
"BBD", 52, 2,
"BDT", 50, 2,
"BGN", 975, 2,
"BHD", 48, 3,
"BIF", 108, 0,
"BMD", 60, 2,
"BND", 96, 2,
"BOB", 68, 2,
"BOV", 984, 2,
"BRL", 986, 2,
"BSD", 44, 2,
"BTN", 64, 2,
"BWP", 72, 2,
"BYR", 974, 0,
"BZD", 84, 2,
"CAD", 124, 2,
"CDF", 976, 2,
"CHE", 947, 2,
"CHF", 756, 2,
"CHW", 948, 2,
"CLF", 990, 4,
"CLP", 152, 0,
"CNY", 156, 2,
"COP", 170, 2,
"COU", 970, 2,
"CRC", 188, 2,
"CUC", 931, 2,
"CUP", 192, 2,
"CVE", 132, 0,
"CZK", 203, 2,
"DJF", 262, 0,
"DKK", 208, 2,
"DOP", 214, 2,
"DZD", 12, 2,
"EGP", 818, 2,
"ERN", 232, 2,
"ETB", 230, 2,
"EUR", 978, 2,
"FJD", 242, 2,
"FKP", 238, 2,
"GBP", 826, 2,
"GEL", 981, 2,
"GHS", 936, 2,
"GIP", 292, 2,
"GMD", 270, 2,
"GNF", 324, 0,
"GTQ", 320, 2,
"GYD", 328, 2,
"HKD", 344, 2,
"HNL", 340, 2,
"HRK", 191, 2,
"HTG", 332, 2,
"HUF", 348, 2,
"IDR", 360, 2,
"ILS", 376, 2,
"INR", 356, 2,
"IQD", 368, 3,
"IRR", 364, 2,
"ISK", 352, 0,
"JMD", 388, 2,
"JOD", 400, 3,
"JPY", 392, 0,
"KES", 404, 2,
"KGS", 417, 2,
"KHR", 116, 2,
"KMF", 174, 0,
"KPW", 408, 2,
"KRW", 410, 0,
"KWD", 414, 3,
"KYD", 136, 2,
"KZT", 398, 2,
"LAK", 418, 2,
"LBP", 422, 2,
"LKR", 144, 2,
"LRD", 430, 2,
"LSL", 426, 2,
"LYD", 434, 3,
"MAD", 504, 2,
"MDL", 498, 2,
"MGA", 969, 1,
"MKD", 807, 2,
"MMK", 104, 2,
"MNT", 496, 2,
"MOP", 446, 1,
"MRO", 478, 1,
"MUR", 480, 2,
"MVR", 462, 2,
"MWK", 454, 2,
"MXN", 484, 2,
"MXV", 979, 2,
"MYR", 458, 2,
"MZN", 943, 2,
"NAD", 516, 2,
"NGN", 566, 2,
"NIO", 558, 2,
"NOK", 578, 2,
"NPR", 524, 2,
"NZD", 554, 2,
"OMR", 512, 3,
"PAB", 590, 2,
"PEN", 604, 2,
"PGK", 598, 2,
"PHP", 608, 2,
"PKR", 586, 2,
"PLN", 985, 2,
"PYG", 600, 0,
"QAR", 634, 2,
"RON", 946, 2,
"RSD", 941, 2,
"RUB", 643, 2,
"RWF", 646, 0,
"SAR", 682, 2,
"SBD", 90, 2,
"SCR", 690, 2,
"SDG", 938, 2,
"SEK", 752, 2,
"SGD", 702, 2,
"SHP", 654, 2,
"SLL", 694, 2,
"SOS", 706, 2,
"SRD", 968, 2,
"SSP", 728, 2,
"STD", 678, 2,
"SYP", 760, 2,
"SZL", 748, 2,
"THB", 764, 2,
"TJS", 972, 2,
"TMT", 934, 2,
"TND", 788, 3,
"TOP", 776, 2,
"TRY", 949, 2,
"TTD", 780, 2,
"TWD", 901, 2,
"TZS", 834, 2,
"UAH", 980, 2,
"UGX", 800, 0,
"USD", 840, 2,
"USN", 997, 2,
"USS", 998, 2,
"UYI", 940, 0,
"UYU", 858, 2,
"UZS", 860, 2,
"VEF", 937, 2,
"VND", 704, 0,
"VUV", 548, 0,
"WST", 882, 2,
"XAF", 950, 0,
"XAG", 961, NA,
"XAU", 959, NA,
"XCD", 951, 2,
"XDR", 960, NA,
"XOF", 952, 0,
"XPD", 964, NA,
"XPF", 953, 0,
"XPT", 962, NA,
"XSU", 994, NA,
"XUA", 965, NA,
"YER", 886, 2,
"ZAR", 710, 2,
"ZMW", 967, 2)
# Convert to a data frame
node_table <-
as.data.frame(node_table, stringsAsFactors = FALSE)
# Create a data frame for graph edges
edge_table <-
tibble::tribble(
~from_currency, ~to_currency, ~cost_unit,
"USD","ADF", 0.1672,
"USD","ADP", 0.00659,
"USD","AED", 0.2723,
"USD","AFN", 0.01521,
"USD","ALL", 0.008055,
"USD","AMD", 0.002107,
"USD","ANG", 0.565,
"USD","AOA", 0.006058,
"USD","AON", 0.006058,
"USD","ARS", 0.06597,
"USD","ATS", 0.07969,
"USD","AUD", 0.7604,
"USD","AWG", 0.5587,
"USD","AZM", 0.0001221,
"USD","AZN", 0.6105,
"USD","BAM", 0.5607,
"USD","BBD", 0.5,
"USD","BDT", 0.01277,
"USD","BEF", 0.02718,
"USD","BGN", 0.5635,
"USD","BHD", 2.6699,
"USD","BIF", 0.000601,
"USD","BMD", 1,
"USD","BND", 0.7184,
"USD","BOB", 0.1456,
"USD","BRL", 0.3127,
"USD","BSD", 1,
"USD","BTN", 0.01496,
"USD","BWP", 0.097,
"USD","BYR", 4.995e-05,
"USD","BZD", 0.5051,
"USD","CAD", 0.7461,
"USD","CDF", 0.0009891,
"USD","CHF", 1.0116,
"USD","CLP", 0.001533,
"USD","CNY", 0.1476,
"USD","COP", 0.0003334,
"USD","CRC", 0.001827,
"USD","CUC", 1,
"USD","CUP", 0.045,
"USD","CVE", 0.00995,
"USD","CYP", 1.8735,
"USD","CZK", 0.04061,
"USD","DEM", 0.5606,
"USD","DJF", 0.005654,
"USD","DKK", 0.1474,
"USD","DOP", 0.02173,
"USD","DZD", 0.00912,
"USD","ECS", 4.15e-05,
"USD","EEK", 0.07008,
"USD","EGP", 0.1129,
"USD","ESP", 0.00659,
"USD","ETB", 0.04515,
"USD","EUR", 1.0965,
"USD","FIM", 0.1844,
"USD","FJD", 0.4929,
"USD","FKP", 1.2236,
"USD","FRF", 0.1672,
"USD","GBP", 1.2191,
"USD","GEL", 0.4151,
"USD","GHC", 2.515e-05,
"USD","GHS", 0.2515,
"USD","GIP", 1.2236,
"USD","GMD", 0.02365,
"USD","GNF", 0.0001105,
"USD","GRD", 0.003218,
"USD","GTQ", 0.1331,
"USD","GYD", 0.005083,
"USD","HKD", 0.129,
"USD","HNL", 0.04444,
"USD","HRK", 0.1461,
"USD","HTG", 0.01582,
"USD","HUF", 0.003549,
"USD","IDR", 7.74e-05,
"USD","IEP", 1.3923,
"USD","ILS", 0.2609,
"USD","INR", 0.015,
"USD","IQD", 0.0008729,
"USD","IRR", 3.328e-05,
"USD","ISK", 0.008882,
"USD","ITL", 0.0005663,
"USD","JMD", 0.007735,
"USD","JOD", 1.413,
"USD","JPY", 0.009536,
"USD","KES", 0.01003,
"USD","KGS", 0.01456,
"USD","KHR", 0.0002483,
"USD","KMF", 0.002281,
"USD","KPW", 0.007407,
"USD","KRW", 0.0008803,
"USD","KWD", 3.3113,
"USD","KYD", 1.2293,
"USD","KZT", 0.002981,
"USD","LAK", 0.0001229,
"USD","LBP", 0.0006638,
"USD","LKR", 0.006768,
"USD","LRD", 0.01111,
"USD","LSL", 0.073291,
"USD","LTL", 0.3176,
"USD","LUF", 0.02718,
"USD","LVL", 1.5602,
"USD","LYD", 0.7135,
"USD","MAD", 0.1015,
"USD","MDL", 0.05,
"USD","MGA", 0.0003137,
"USD","MGF", 0.0001093,
"USD","MKD", 0.01786,
"USD","MMK", 0.0007776,
"USD","MNT", 0.0004239,
"USD","MOP", 0.1285,
"USD","MRO", 0.002817,
"USD","MTL", 2.5541,
"USD","MUR", 0.028,
"USD","MVR", 0.06623,
"USD","MWK", 0.001404,
"USD","MXN", 0.05281,
"USD","MYR", 0.2385,
"USD","MZM", 1.299e-05,
"USD","MZN", 0.01299,
"USD","NAD", 0.073291,
"USD","NGN", 0.003284,
"USD","NIO", 0.03438,
"USD","NLG", 0.4976,
"USD","NOK", 0.1211,
"USD","NPR", 0.009368,
"USD","NZD", 0.7152,
"USD","OMR", 2.5974,
"USD","PAB", 1,
"USD","PEN", 0.2974,
"USD","PGK", 0.3222,
"USD","PHP", 0.02064,
"USD","PKR", 0.009545,
"USD","PLN", 0.2536,
"USD","PTE", 0.005469,
"USD","PYG", 0.0001751,
"USD","QAR", 0.2746,
"USD","ROL", 2.436e-05,
"USD","RON", 0.2436,
"USD","RSD", 0.008938,
"USD","RUB", 0.01587,
"USD","RWF", 0.001231,
"USD","SAR", 0.2668,
"USD","SBD", 0.1275,
"USD","SCR", 0.07586,
"USD","SDD", 0.001576,
"USD","SDG", 0.1576,
"USD","SDP", 0.0004423,
"USD","SEK", 0.1109,
"USD","SGD", 0.7184,
"USD","SHP", 1.8744,
"USD","SIT", 0.004576,
"USD","SKK", 0.0364,
"USD","SLL", 0.0001786,
"USD","SOS", 0.001799,
"USD","SRD", 0.146,
"USD","SRG", 0.000146,
"USD","STD", 4.454e-05,
"USD","SVC", 0.1147,
"USD","SYP", 0.004661,
"USD","SZL", 0.073291,
"USD","THB", 0.0286,
"USD","TJS", 0.127,
"USD","TMM", 5.7e-05,
"USD","TMT", 0.2857,
"USD","TND", 0.4441,
"USD","TOP", 0.4458,
"USD","TRL", 3.222e-07,
"USD","TRY", 0.3222,
"USD","TTD", 0.1497,
"USD","TWD", 0.03167,
"USD","TZS", 0.0004587,
"USD","UAH", 0.03922,
"USD","UGX", 0.0002929,
"USD","USD", 1,
"USD","UYU", 0.03524,
"USD","UZS", 0.0003225,
"USD","VEB", 0.0001003,
"USD","VEF", 0.1003,
"USD","VND", 4.48e-05,
"USD","VUV", 0.009526,
"USD","WST", 0.3951,
"USD","XAF", 0.001674,
"USD","XAG", 17.8532,
"USD","XAU", 1275.81,
"USD","XCD", 0.372,
"USD","XEU", 1.0965,
"USD","XOF", 0.001669,
"USD","XPD", 624,
"USD","XPF", 0.009255,
"USD","XPT", 983.5,
"USD","YER", 0.004001,
"USD","YUN", 0.008938,
"USD","ZAR", 0.07329,
"USD","ZMK", 0.0001932,
"USD","ZMW", 0.1037,
"USD","ZWD", 0.002679)
# Convert to a data frame
edge_table <-
as.data.frame(edge_table, stringsAsFactors = FALSE)
# Add nodes directly from the node table, calling the
# `add_nodes_from_table()` function with default
# options
graph <-
create_graph() %>%
add_nodes_from_table(node_table)
# Augment the graph by adding edges from a table
# with the `add_edges_from_table()` function
graph_nodes_edges <-
graph %>%
add_edges_from_table(
edge_table,
from_col = "from_currency",
to_col = "to_currency",
ndf_mapping = "iso_4217_code")
# Expect that the graph has a certain number of edges
expect_equal(edge_count(graph_nodes_edges), 157)
# Expect certain columns to exist in the graph's
# edge data frame
expect_equal(
colnames(graph_nodes_edges$edges_df),
c("id", "from", "to", "rel", "cost_unit"))
# Expect an error if value for `from_col` is
# not in the table
expect_error(
graph %>%
add_edges_from_table(
edge_table,
from_col = "from",
to_col = "to_currency",
ndf_mapping = "iso_4217_code"))
# Expect an error if value for `to_col` is
# not in the table
expect_error(
graph %>%
add_edges_from_table(
edge_table,
from_col = "from_currency",
to_col = "to",
ndf_mapping = "iso_4217_code"))
# Expect an error if value for `ndf_mapping` is
# not in the table
expect_error(
graph %>%
add_edges_from_table(
edge_table,
from_col = "from_currency",
to_col = "to_currency",
ndf_mapping = "iso_4217"))
})
test_that("adding nodes from several table columns to a graph is possible", {
# Create a simple graph
graph <-
create_graph() %>%
add_path(2)
# Create a data frame from which several
# columns have values designated as graph nodes
df <-
data.frame(
col_1 = c("f", "p", "q"),
col_2 = c("q", "x", "f"),
col_3 = c(1, 5, 3),
col_4 = c("a", "v", "h"),
stringsAsFactors = FALSE)
# Add nodes from columns `col_1` and `col_2`
# from the data frame to the graph object
graph <-
graph %>%
add_nodes_from_df_cols(
df = df,
columns = c("col_1", "col_2"))
# Expect a certain sequence of node `label` values
expect_equal(
graph %>% get_node_df() %>% .$label,
c("1", "2", "f", "p", "q", "x"))
# Add new nodes from columns 3 and 4; we are here
# specifying the columns by their numbers
graph <-
graph %>%
add_nodes_from_df_cols(
df = df,
columns = 3:4)
# Expect a certain sequence of node `label` values
expect_equal(
graph %>% get_node_df() %>% .$label,
c("1", "2", "f", "p", "q", "x",
"a", "v", "h"))
# Add column 4's values as labels/nodes again
graph <-
graph %>%
add_nodes_from_df_cols(
df = df,
columns = 4)
# Expect no change in the graph
expect_equal(
graph %>% get_node_df() %>% .$label,
c("1", "2", "f", "p", "q", "x",
"a", "v", "h"))
# Add column 4's values as labels/nodes except with
# the `keep_duplicates` argument set to TRUE
graph <-
graph %>%
add_nodes_from_df_cols(
df = df,
columns = 4,
keep_duplicates = TRUE)
# Expect duplicated labels in the graph
expect_equal(
graph %>% get_node_df() %>% .$label,
c("1", "2", "f", "p", "q", "x",
"a", "v", "h", "a", "v", "h"))
# Add column 4's values as labels/nodes with
# the `keep_duplicates = TRUE` and a `type` value
# of `new`
graph <-
graph %>%
add_nodes_from_df_cols(
df = df,
columns = 4,
type = "new",
keep_duplicates = TRUE)
# Expect more duplicated labels in the graph
expect_equal(
graph %>% get_node_df() %>% .$label,
c("1", "2", "f", "p", "q", "x",
"a", "v", "h", "a", "v", "h",
"a", "v", "h"))
# Expect the `type` value of `new` to appear
# for the last three nodes (others are not set)
expect_equal(
graph %>% get_node_df() %>% .$type,
c(rep(as.character(NA), 12),
rep("new", 3)))
})
|
/tests/testthat/test-add_nodes_edges_from_table.R
|
no_license
|
DataXujing/DiagrammeR
|
R
| false | false | 20,021 |
r
|
context("Adding nodes and/or edges from a table to a graph")
test_that("adding nodes from a table to a graph is possible", {
library(tibble)
# Create a data frame for graph nodes
node_table <-
tibble::tribble(
~iso_4217_code, ~curr_number, ~exponent,
"AED", 784, 2,
"AFN", 971, 2,
"ALL", 8, 2,
"AMD", 51, 2,
"ANG", 532, 2,
"AOA", 973, 2,
"ARS", 32, 2,
"AUD", 36, 2,
"AWG", 533, 2,
"AZN", 944, 2,
"BAM", 977, 2,
"BBD", 52, 2,
"BDT", 50, 2,
"BGN", 975, 2,
"BHD", 48, 3,
"BIF", 108, 0,
"BMD", 60, 2,
"BND", 96, 2,
"BOB", 68, 2,
"BOV", 984, 2,
"BRL", 986, 2,
"BSD", 44, 2,
"BTN", 64, 2,
"BWP", 72, 2,
"BYR", 974, 0,
"BZD", 84, 2,
"CAD", 124, 2,
"CDF", 976, 2,
"CHE", 947, 2,
"CHF", 756, 2,
"CHW", 948, 2,
"CLF", 990, 4,
"CLP", 152, 0,
"CNY", 156, 2,
"COP", 170, 2,
"COU", 970, 2,
"CRC", 188, 2,
"CUC", 931, 2,
"CUP", 192, 2,
"CVE", 132, 0,
"CZK", 203, 2,
"DJF", 262, 0,
"DKK", 208, 2,
"DOP", 214, 2,
"DZD", 12, 2,
"EGP", 818, 2,
"ERN", 232, 2,
"ETB", 230, 2,
"EUR", 978, 2,
"FJD", 242, 2,
"FKP", 238, 2,
"GBP", 826, 2,
"GEL", 981, 2,
"GHS", 936, 2,
"GIP", 292, 2,
"GMD", 270, 2,
"GNF", 324, 0,
"GTQ", 320, 2,
"GYD", 328, 2,
"HKD", 344, 2,
"HNL", 340, 2,
"HRK", 191, 2,
"HTG", 332, 2,
"HUF", 348, 2,
"IDR", 360, 2,
"ILS", 376, 2,
"INR", 356, 2,
"IQD", 368, 3,
"IRR", 364, 2,
"ISK", 352, 0,
"JMD", 388, 2,
"JOD", 400, 3,
"JPY", 392, 0,
"KES", 404, 2,
"KGS", 417, 2,
"KHR", 116, 2,
"KMF", 174, 0,
"KPW", 408, 2,
"KRW", 410, 0,
"KWD", 414, 3,
"KYD", 136, 2,
"KZT", 398, 2,
"LAK", 418, 2,
"LBP", 422, 2,
"LKR", 144, 2,
"LRD", 430, 2,
"LSL", 426, 2,
"LYD", 434, 3,
"MAD", 504, 2,
"MDL", 498, 2,
"MGA", 969, 1,
"MKD", 807, 2,
"MMK", 104, 2,
"MNT", 496, 2,
"MOP", 446, 1,
"MRO", 478, 1,
"MUR", 480, 2,
"MVR", 462, 2,
"MWK", 454, 2,
"MXN", 484, 2,
"MXV", 979, 2,
"MYR", 458, 2,
"MZN", 943, 2,
"NAD", 516, 2,
"NGN", 566, 2,
"NIO", 558, 2,
"NOK", 578, 2,
"NPR", 524, 2,
"NZD", 554, 2,
"OMR", 512, 3,
"PAB", 590, 2,
"PEN", 604, 2,
"PGK", 598, 2,
"PHP", 608, 2,
"PKR", 586, 2,
"PLN", 985, 2,
"PYG", 600, 0,
"QAR", 634, 2,
"RON", 946, 2,
"RSD", 941, 2,
"RUB", 643, 2,
"RWF", 646, 0,
"SAR", 682, 2,
"SBD", 90, 2,
"SCR", 690, 2,
"SDG", 938, 2,
"SEK", 752, 2,
"SGD", 702, 2,
"SHP", 654, 2,
"SLL", 694, 2,
"SOS", 706, 2,
"SRD", 968, 2,
"SSP", 728, 2,
"STD", 678, 2,
"SYP", 760, 2,
"SZL", 748, 2,
"THB", 764, 2,
"TJS", 972, 2,
"TMT", 934, 2,
"TND", 788, 3,
"TOP", 776, 2,
"TRY", 949, 2,
"TTD", 780, 2,
"TWD", 901, 2,
"TZS", 834, 2,
"UAH", 980, 2,
"UGX", 800, 0,
"USD", 840, 2,
"USN", 997, 2,
"USS", 998, 2,
"UYI", 940, 0,
"UYU", 858, 2,
"UZS", 860, 2,
"VEF", 937, 2,
"VND", 704, 0,
"VUV", 548, 0,
"WST", 882, 2,
"XAF", 950, 0,
"XAG", 961, NA,
"XAU", 959, NA,
"XCD", 951, 2,
"XDR", 960, NA,
"XOF", 952, 0,
"XPD", 964, NA,
"XPF", 953, 0,
"XPT", 962, NA,
"XSU", 994, NA,
"XUA", 965, NA,
"YER", 886, 2,
"ZAR", 710, 2,
"ZMW", 967, 2)
# Convert to a data frame
node_table <-
as.data.frame(node_table, stringsAsFactors = FALSE)
# Add nodes directly from the df, calling the
# `add_nodes_from_table()` function with default
# options
graph_1_df <-
create_graph() %>%
add_nodes_from_table(node_table)
# Expect that the graph has the same number of nodes
# as there are rows in the CSV
expect_equal(
nrow(node_table), node_count(graph_1_df))
# Expect certain columns to exist in the graph's
# node data frame
expect_equal(
colnames(graph_1_df$nodes_df),
c("id", "type", "label", "iso_4217_code",
"curr_number", "exponent"))
# Add nodes from the df, but this time apply the
# `curr_number` column to the graph's `label` attribute
graph_2_df <-
create_graph() %>%
add_nodes_from_table(
node_table, label_col = "curr_number")
# Expect that there aren't any NA values in the
# graph's `label` column
expect_true(
all(!is.na(graph_2_df$nodes_df[, 3])))
# Expect that the values in the `label` are
# of the character class
expect_true(
is.character(graph_2_df$nodes_df[, 3]))
# Add nodes from the df; also apply a static value
# for `type` as `currency`
graph_3_df <-
create_graph() %>%
add_nodes_from_table(
node_table,
set_type = "currency",
label_col = "curr_number")
# Expect that all values set for the `type`
# attribute are `currency`
expect_true(
all(graph_3_df$nodes_df[, 2] == "currency"))
# Add nodes from the df; drop some of the
# incoming columns
graph_4_df <-
create_graph() %>%
add_nodes_from_table(
node_table,
set_type = "currency",
label_col = "curr_number",
drop_cols = c("exponent", "currency_name"))
# Expect that the node attributes `exponent`
# and `currency_name` do not appear in the graph's
# internal node data frame
expect_true(
!all(c("exponent", "currency_name") %in%
colnames(graph_4_df$nodes_df)))
# Add nodes from the df; assign a table column
# to the `type` atttribute
graph_5_df <-
create_graph() %>%
add_nodes_from_table(
node_table,
set_type = "currency",
label_col = "curr_number",
type_col = "exponent")
# Expect that all values set for the `type`
# attribute are have certain values (including NA)
expect_true(
all(graph_5_df$nodes_df[, 2] %in%
c("0", "1", "2", "3", "4", NA)))
})
test_that("adding edges from a table to a graph is possible", {
# Create a data frame for graph nodes
node_table <-
tibble::tribble(
~iso_4217_code, ~curr_number, ~exponent,
"AED", 784, 2,
"AFN", 971, 2,
"ALL", 8, 2,
"AMD", 51, 2,
"ANG", 532, 2,
"AOA", 973, 2,
"ARS", 32, 2,
"AUD", 36, 2,
"AWG", 533, 2,
"AZN", 944, 2,
"BAM", 977, 2,
"BBD", 52, 2,
"BDT", 50, 2,
"BGN", 975, 2,
"BHD", 48, 3,
"BIF", 108, 0,
"BMD", 60, 2,
"BND", 96, 2,
"BOB", 68, 2,
"BOV", 984, 2,
"BRL", 986, 2,
"BSD", 44, 2,
"BTN", 64, 2,
"BWP", 72, 2,
"BYR", 974, 0,
"BZD", 84, 2,
"CAD", 124, 2,
"CDF", 976, 2,
"CHE", 947, 2,
"CHF", 756, 2,
"CHW", 948, 2,
"CLF", 990, 4,
"CLP", 152, 0,
"CNY", 156, 2,
"COP", 170, 2,
"COU", 970, 2,
"CRC", 188, 2,
"CUC", 931, 2,
"CUP", 192, 2,
"CVE", 132, 0,
"CZK", 203, 2,
"DJF", 262, 0,
"DKK", 208, 2,
"DOP", 214, 2,
"DZD", 12, 2,
"EGP", 818, 2,
"ERN", 232, 2,
"ETB", 230, 2,
"EUR", 978, 2,
"FJD", 242, 2,
"FKP", 238, 2,
"GBP", 826, 2,
"GEL", 981, 2,
"GHS", 936, 2,
"GIP", 292, 2,
"GMD", 270, 2,
"GNF", 324, 0,
"GTQ", 320, 2,
"GYD", 328, 2,
"HKD", 344, 2,
"HNL", 340, 2,
"HRK", 191, 2,
"HTG", 332, 2,
"HUF", 348, 2,
"IDR", 360, 2,
"ILS", 376, 2,
"INR", 356, 2,
"IQD", 368, 3,
"IRR", 364, 2,
"ISK", 352, 0,
"JMD", 388, 2,
"JOD", 400, 3,
"JPY", 392, 0,
"KES", 404, 2,
"KGS", 417, 2,
"KHR", 116, 2,
"KMF", 174, 0,
"KPW", 408, 2,
"KRW", 410, 0,
"KWD", 414, 3,
"KYD", 136, 2,
"KZT", 398, 2,
"LAK", 418, 2,
"LBP", 422, 2,
"LKR", 144, 2,
"LRD", 430, 2,
"LSL", 426, 2,
"LYD", 434, 3,
"MAD", 504, 2,
"MDL", 498, 2,
"MGA", 969, 1,
"MKD", 807, 2,
"MMK", 104, 2,
"MNT", 496, 2,
"MOP", 446, 1,
"MRO", 478, 1,
"MUR", 480, 2,
"MVR", 462, 2,
"MWK", 454, 2,
"MXN", 484, 2,
"MXV", 979, 2,
"MYR", 458, 2,
"MZN", 943, 2,
"NAD", 516, 2,
"NGN", 566, 2,
"NIO", 558, 2,
"NOK", 578, 2,
"NPR", 524, 2,
"NZD", 554, 2,
"OMR", 512, 3,
"PAB", 590, 2,
"PEN", 604, 2,
"PGK", 598, 2,
"PHP", 608, 2,
"PKR", 586, 2,
"PLN", 985, 2,
"PYG", 600, 0,
"QAR", 634, 2,
"RON", 946, 2,
"RSD", 941, 2,
"RUB", 643, 2,
"RWF", 646, 0,
"SAR", 682, 2,
"SBD", 90, 2,
"SCR", 690, 2,
"SDG", 938, 2,
"SEK", 752, 2,
"SGD", 702, 2,
"SHP", 654, 2,
"SLL", 694, 2,
"SOS", 706, 2,
"SRD", 968, 2,
"SSP", 728, 2,
"STD", 678, 2,
"SYP", 760, 2,
"SZL", 748, 2,
"THB", 764, 2,
"TJS", 972, 2,
"TMT", 934, 2,
"TND", 788, 3,
"TOP", 776, 2,
"TRY", 949, 2,
"TTD", 780, 2,
"TWD", 901, 2,
"TZS", 834, 2,
"UAH", 980, 2,
"UGX", 800, 0,
"USD", 840, 2,
"USN", 997, 2,
"USS", 998, 2,
"UYI", 940, 0,
"UYU", 858, 2,
"UZS", 860, 2,
"VEF", 937, 2,
"VND", 704, 0,
"VUV", 548, 0,
"WST", 882, 2,
"XAF", 950, 0,
"XAG", 961, NA,
"XAU", 959, NA,
"XCD", 951, 2,
"XDR", 960, NA,
"XOF", 952, 0,
"XPD", 964, NA,
"XPF", 953, 0,
"XPT", 962, NA,
"XSU", 994, NA,
"XUA", 965, NA,
"YER", 886, 2,
"ZAR", 710, 2,
"ZMW", 967, 2)
# Convert to a data frame
node_table <-
as.data.frame(node_table, stringsAsFactors = FALSE)
# Create a data frame for graph edges
edge_table <-
tibble::tribble(
~from_currency, ~to_currency, ~cost_unit,
"USD","ADF", 0.1672,
"USD","ADP", 0.00659,
"USD","AED", 0.2723,
"USD","AFN", 0.01521,
"USD","ALL", 0.008055,
"USD","AMD", 0.002107,
"USD","ANG", 0.565,
"USD","AOA", 0.006058,
"USD","AON", 0.006058,
"USD","ARS", 0.06597,
"USD","ATS", 0.07969,
"USD","AUD", 0.7604,
"USD","AWG", 0.5587,
"USD","AZM", 0.0001221,
"USD","AZN", 0.6105,
"USD","BAM", 0.5607,
"USD","BBD", 0.5,
"USD","BDT", 0.01277,
"USD","BEF", 0.02718,
"USD","BGN", 0.5635,
"USD","BHD", 2.6699,
"USD","BIF", 0.000601,
"USD","BMD", 1,
"USD","BND", 0.7184,
"USD","BOB", 0.1456,
"USD","BRL", 0.3127,
"USD","BSD", 1,
"USD","BTN", 0.01496,
"USD","BWP", 0.097,
"USD","BYR", 4.995e-05,
"USD","BZD", 0.5051,
"USD","CAD", 0.7461,
"USD","CDF", 0.0009891,
"USD","CHF", 1.0116,
"USD","CLP", 0.001533,
"USD","CNY", 0.1476,
"USD","COP", 0.0003334,
"USD","CRC", 0.001827,
"USD","CUC", 1,
"USD","CUP", 0.045,
"USD","CVE", 0.00995,
"USD","CYP", 1.8735,
"USD","CZK", 0.04061,
"USD","DEM", 0.5606,
"USD","DJF", 0.005654,
"USD","DKK", 0.1474,
"USD","DOP", 0.02173,
"USD","DZD", 0.00912,
"USD","ECS", 4.15e-05,
"USD","EEK", 0.07008,
"USD","EGP", 0.1129,
"USD","ESP", 0.00659,
"USD","ETB", 0.04515,
"USD","EUR", 1.0965,
"USD","FIM", 0.1844,
"USD","FJD", 0.4929,
"USD","FKP", 1.2236,
"USD","FRF", 0.1672,
"USD","GBP", 1.2191,
"USD","GEL", 0.4151,
"USD","GHC", 2.515e-05,
"USD","GHS", 0.2515,
"USD","GIP", 1.2236,
"USD","GMD", 0.02365,
"USD","GNF", 0.0001105,
"USD","GRD", 0.003218,
"USD","GTQ", 0.1331,
"USD","GYD", 0.005083,
"USD","HKD", 0.129,
"USD","HNL", 0.04444,
"USD","HRK", 0.1461,
"USD","HTG", 0.01582,
"USD","HUF", 0.003549,
"USD","IDR", 7.74e-05,
"USD","IEP", 1.3923,
"USD","ILS", 0.2609,
"USD","INR", 0.015,
"USD","IQD", 0.0008729,
"USD","IRR", 3.328e-05,
"USD","ISK", 0.008882,
"USD","ITL", 0.0005663,
"USD","JMD", 0.007735,
"USD","JOD", 1.413,
"USD","JPY", 0.009536,
"USD","KES", 0.01003,
"USD","KGS", 0.01456,
"USD","KHR", 0.0002483,
"USD","KMF", 0.002281,
"USD","KPW", 0.007407,
"USD","KRW", 0.0008803,
"USD","KWD", 3.3113,
"USD","KYD", 1.2293,
"USD","KZT", 0.002981,
"USD","LAK", 0.0001229,
"USD","LBP", 0.0006638,
"USD","LKR", 0.006768,
"USD","LRD", 0.01111,
"USD","LSL", 0.073291,
"USD","LTL", 0.3176,
"USD","LUF", 0.02718,
"USD","LVL", 1.5602,
"USD","LYD", 0.7135,
"USD","MAD", 0.1015,
"USD","MDL", 0.05,
"USD","MGA", 0.0003137,
"USD","MGF", 0.0001093,
"USD","MKD", 0.01786,
"USD","MMK", 0.0007776,
"USD","MNT", 0.0004239,
"USD","MOP", 0.1285,
"USD","MRO", 0.002817,
"USD","MTL", 2.5541,
"USD","MUR", 0.028,
"USD","MVR", 0.06623,
"USD","MWK", 0.001404,
"USD","MXN", 0.05281,
"USD","MYR", 0.2385,
"USD","MZM", 1.299e-05,
"USD","MZN", 0.01299,
"USD","NAD", 0.073291,
"USD","NGN", 0.003284,
"USD","NIO", 0.03438,
"USD","NLG", 0.4976,
"USD","NOK", 0.1211,
"USD","NPR", 0.009368,
"USD","NZD", 0.7152,
"USD","OMR", 2.5974,
"USD","PAB", 1,
"USD","PEN", 0.2974,
"USD","PGK", 0.3222,
"USD","PHP", 0.02064,
"USD","PKR", 0.009545,
"USD","PLN", 0.2536,
"USD","PTE", 0.005469,
"USD","PYG", 0.0001751,
"USD","QAR", 0.2746,
"USD","ROL", 2.436e-05,
"USD","RON", 0.2436,
"USD","RSD", 0.008938,
"USD","RUB", 0.01587,
"USD","RWF", 0.001231,
"USD","SAR", 0.2668,
"USD","SBD", 0.1275,
"USD","SCR", 0.07586,
"USD","SDD", 0.001576,
"USD","SDG", 0.1576,
"USD","SDP", 0.0004423,
"USD","SEK", 0.1109,
"USD","SGD", 0.7184,
"USD","SHP", 1.8744,
"USD","SIT", 0.004576,
"USD","SKK", 0.0364,
"USD","SLL", 0.0001786,
"USD","SOS", 0.001799,
"USD","SRD", 0.146,
"USD","SRG", 0.000146,
"USD","STD", 4.454e-05,
"USD","SVC", 0.1147,
"USD","SYP", 0.004661,
"USD","SZL", 0.073291,
"USD","THB", 0.0286,
"USD","TJS", 0.127,
"USD","TMM", 5.7e-05,
"USD","TMT", 0.2857,
"USD","TND", 0.4441,
"USD","TOP", 0.4458,
"USD","TRL", 3.222e-07,
"USD","TRY", 0.3222,
"USD","TTD", 0.1497,
"USD","TWD", 0.03167,
"USD","TZS", 0.0004587,
"USD","UAH", 0.03922,
"USD","UGX", 0.0002929,
"USD","USD", 1,
"USD","UYU", 0.03524,
"USD","UZS", 0.0003225,
"USD","VEB", 0.0001003,
"USD","VEF", 0.1003,
"USD","VND", 4.48e-05,
"USD","VUV", 0.009526,
"USD","WST", 0.3951,
"USD","XAF", 0.001674,
"USD","XAG", 17.8532,
"USD","XAU", 1275.81,
"USD","XCD", 0.372,
"USD","XEU", 1.0965,
"USD","XOF", 0.001669,
"USD","XPD", 624,
"USD","XPF", 0.009255,
"USD","XPT", 983.5,
"USD","YER", 0.004001,
"USD","YUN", 0.008938,
"USD","ZAR", 0.07329,
"USD","ZMK", 0.0001932,
"USD","ZMW", 0.1037,
"USD","ZWD", 0.002679)
# Convert to a data frame
edge_table <-
as.data.frame(edge_table, stringsAsFactors = FALSE)
# Add nodes directly from the CSV file, calling the
# `add_nodes_from_table()` function with default
# options
graph <-
create_graph() %>%
add_nodes_from_table(node_table)
# Augment the graph by adding edges from a table
# with the `add_edges_from_table()` function
graph_nodes_edges <-
graph %>%
add_edges_from_table(
edge_table,
from_col = "from_currency",
to_col = "to_currency",
ndf_mapping = "iso_4217_code")
# Expect that the graph has a certain number of edges
expect_equal(edge_count(graph_nodes_edges), 157)
# Expect certain columns to exist in the graph's
# edge data frame
expect_equal(
colnames(graph_nodes_edges$edges_df),
c("id", "from", "to", "rel", "cost_unit"))
# Expect an error if value for `from_col` is
# not in the table
expect_error(
graph %>%
add_edges_from_table(
edge_table,
from_col = "from",
to_col = "to_currency",
ndf_mapping = "iso_4217_code"))
# Expect an error if value for `to_col` is
# not in the table
expect_error(
graph %>%
add_edges_from_table(
edge_table,
from_col = "from_currency",
to_col = "to",
ndf_mapping = "iso_4217_code"))
# Expect an error if value for `ndf_mapping` is
# not in the table
expect_error(
graph %>%
add_edges_from_table(
edge_table,
from_col = "from_currency",
to_col = "to_currency",
ndf_mapping = "iso_4217"))
})
test_that("adding nodes from several table columns to a graph is possible", {
# Create a simple graph
graph <-
create_graph() %>%
add_path(2)
# Create a data frame from which several
# columns have values designated as graph nodes
df <-
data.frame(
col_1 = c("f", "p", "q"),
col_2 = c("q", "x", "f"),
col_3 = c(1, 5, 3),
col_4 = c("a", "v", "h"),
stringsAsFactors = FALSE)
# Add nodes from columns `col_1` and `col_2`
# from the data frame to the graph object
graph <-
graph %>%
add_nodes_from_df_cols(
df = df,
columns = c("col_1", "col_2"))
# Expect a certain sequence of node `label` values
expect_equal(
graph %>% get_node_df() %>% .$label,
c("1", "2", "f", "p", "q", "x"))
# Add new nodes from columns 3 and 4; we are here
# specifying the columns by their numbers
graph <-
graph %>%
add_nodes_from_df_cols(
df = df,
columns = 3:4)
# Expect a certain sequence of node `label` values
expect_equal(
graph %>% get_node_df() %>% .$label,
c("1", "2", "f", "p", "q", "x",
"a", "v", "h"))
# Add column 4's values as labels/nodes again
graph <-
graph %>%
add_nodes_from_df_cols(
df = df,
columns = 4)
# Expect no change in the graph
expect_equal(
graph %>% get_node_df() %>% .$label,
c("1", "2", "f", "p", "q", "x",
"a", "v", "h"))
# Add column 4's values as labels/nodes except with
# the `keep_duplicates` argument set to TRUE
graph <-
graph %>%
add_nodes_from_df_cols(
df = df,
columns = 4,
keep_duplicates = TRUE)
# Expect duplicated labels in the graph
expect_equal(
graph %>% get_node_df() %>% .$label,
c("1", "2", "f", "p", "q", "x",
"a", "v", "h", "a", "v", "h"))
# Add column 4's values as labels/nodes with
# the `keep_duplicates = TRUE` and a `type` value
# of `new`
graph <-
graph %>%
add_nodes_from_df_cols(
df = df,
columns = 4,
type = "new",
keep_duplicates = TRUE)
# Expect more duplicated labels in the graph
expect_equal(
graph %>% get_node_df() %>% .$label,
c("1", "2", "f", "p", "q", "x",
"a", "v", "h", "a", "v", "h",
"a", "v", "h"))
# Expect the `type` value of `new` to appear
# for the last three nodes (others are not set)
expect_equal(
graph %>% get_node_df() %>% .$type,
c(rep(as.character(NA), 12),
rep("new", 3)))
})
|
  # Show a confirmation message after submission
observeEvent(input$submit_oplsda, {
if (input$submit_oplsda>0) {
sendSweetAlert(
session = session,
title = "提交成功!",
text = "数据上传成功,参数设置正确",
type = "success")
}
})
  # Import the user's uploaded data
user_data_oplsda <- reactive({
table_in_test <- read.csv(input$data_input_oplsda$datapath,
header = T,
stringsAsFactors = TRUE,
encoding = 'UTF-8')
colnames(table_in_test)[1:2] <- c('sample','group')
table_in_test <<- table_in_test
})
  # Download the demo data
output$download_demo_data_oplsda <- downloadHandler(
filename = 'OPLS-DA示例数据.csv',
content = function(file){
file.copy('./demo_data/OPLS-DA示例数据.csv',file)
}
)
  # Fit the OPLS-DA model with ropls::opls()
oplsda_res <- reactive({
df_oplsda <- user_data_oplsda()
oplsda <- ropls::opls(df_oplsda[,3:ncol(df_oplsda)],
df_oplsda$group,
predI = 1,
orthoI = NA)
oplsda <<- oplsda
})
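  # --- Illustrative sketch (not part of the app, never executed) ---
  # A standalone example of how an opls object from the ropls package is
  # queried for the pieces this server uses (@vipVn, @scoreMN, @orthoScoreMN,
  # @modelDF). It runs on the `sacurine` demo data shipped with ropls and is
  # wrapped in `if (FALSE)` so sourcing this file does not trigger a model fit.
  if (FALSE) {
    library(ropls)
    data(sacurine)
    demo_fit <- ropls::opls(sacurine$dataMatrix,
                            sacurine$sampleMetadata$gender,
                            predI = 1, orthoI = NA)
    head(sort(demo_fit@vipVn, decreasing = TRUE))  # VIP value per variable
    head(demo_fit@scoreMN)                         # predictive scores (p1)
    head(demo_fit@orthoScoreMN)                    # orthogonal scores (to1)
    demo_fit@modelDF                               # R2X/R2Y/Q2 summary table
  }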
  # Display the statistical analysis results
output$taboutput_oplsda_view <- renderDataTable(df_table_oplsda(),
options = list(
pageLength = 4
))
df_table_oplsda <- eventReactive(input$submit_oplsda,{
if (input$submit_oplsda > 0) {
user_records <- data.table::fread('./www/user records.txt', encoding = 'UTF-8')
temp <- as.character(Sys.time())
temp <- stringr::str_split(temp, ' ')
date <- temp[[1]][1]
time <- temp[[1]][2]
user_record <- data.frame(Cat = 'oplsda',
Date = date,
Time = time)
user_records <- rbind(user_records, user_record)
data.table::fwrite(user_records,file = './www/user records.txt')
vip_score <- as.data.frame(oplsda_res()@vipVn)
colnames(vip_score) <- 'VIP值'
vip_score$metabolites <- rownames(vip_score)
vip_score <- vip_score[,c('metabolites','VIP值')]
vip_score <- vip_score[order(-vip_score$VIP值),]
      # Save the analysis results
write.csv(vip_score, file = './results/oplsda/rest_tab.csv',row.names = FALSE)
write.table(vip_score, file = './results/oplsda/rest_tab.txt',row.names = FALSE)
xlsx::write.xlsx(vip_score, file = './results/oplsda/rest_tab.xlsx',row.names = FALSE)
}
vip_score
})
  # Lollipop plot of VIP scores
output$lollipop_plot_oplsda <- renderPlot(plot_lollipop_res_oplsda(),
height = 350,
width = 600)
plot_lollipop_res_oplsda <- eventReactive(input$submit_oplsda,{
if (input$submit_oplsda > 0) {
vip_score <- as.data.frame(oplsda_res()@vipVn)
colnames(vip_score) <- 'VIP值'
vip_score$metabolites <- rownames(vip_score)
vip_score <- dplyr::select(vip_score,c('metabolites','VIP值'))
vip_score = vip_score[vip_score$VIP值 >= 1,]
vip_score <- vip_score[order(-vip_score$VIP值),][1:as.numeric(input$num_oplsda_show),]
vip_2 = vip_score[order(vip_score$VIP值),]
vip_2$metabolites = factor(vip_2$metabolites,levels = unique(vip_2$metabolites))
p_oplsda_lollipop = ggplot(vip_2, aes(metabolites, VIP值)) +
geom_segment(aes(x = metabolites,xend = metabolites,
y = 1, yend = VIP值)) +
geom_point(aes(size = VIP值), color = '#008000') +
geom_hline(yintercept = max(vip_2$VIP值*1.02), color = 'white') +
coord_flip() +
scale_y_continuous(expand = c(0,0)) +
labs(x = '',y = 'VIP value') +
theme_bw() +
theme(legend.title = element_blank(),
legend.text = element_text(color = 'black',size = 10, family = 'Arial', face = 'plain'),
panel.background = element_blank(),
panel.grid = element_blank(),
axis.text = element_text(color = 'black',size = 10, family = 'Arial', face = 'plain'),
axis.title = element_text(color = 'black',size = 10, family = 'Arial', face = 'plain'),
axis.ticks = element_line(color = 'black'),
axis.ticks.y = element_blank())
      # Save the figure
filename <- ifelse(input$oplsda_fig_res_filetype == '.pdf','res_fig.pdf',
ifelse(input$oplsda_fig_res_filetype == '.png','res_fig.png',
ifelse(input$oplsda_fig_res_filetype == '.jpg','res_fig.jpg',
ifelse(input$oplsda_fig_res_filetype == '.tiff','res_fig.tiff','res_fig.eps'))))
if (input$oplsda_fig_res_filetype == '.pdf') {
ggsave(p_oplsda_lollipop,
filename = paste('./results/oplsda/figures/', filename, sep = ''),
width = 8,
height = 5,
device = cairo_pdf)
}else{
ggsave(p_oplsda_lollipop,
filename = paste('./results/oplsda/figures/', filename, sep = ''),
width = 8,
height = 5)
}
}
    p_oplsda_lollipop # return the plot
})
# heatmap
output$heatmap_oplsda <- renderPlot(plot_heatmap_res_oplsda(),
height = 350,
width = 600)
plot_heatmap_res_oplsda <- eventReactive(input$submit_oplsda,{
if (input$submit_oplsda > 0) {
vip_score <- as.data.frame(oplsda_res()@vipVn)
colnames(vip_score) <- 'VIP值'
vip_score$metabolites <- rownames(vip_score)
vip_score <- vip_score[,c('metabolites','VIP值')]
vip_score <- vip_score[order(-vip_score$VIP值),]
vip_score <- vip_score[1:as.numeric(input$num_oplsda_show),]
df_sub <- user_data_oplsda()
rownames(df_sub) <- df_sub[,1]
df_sub <- df_sub[,-1]
df_sub <- df_sub[,-1]
      heat = df_sub[,vip_score$metabolites] %>% t() %>% as.data.frame()
p_heapmap <- pheatmap::pheatmap(heat,
scale = ifelse(input$oplsda_heatmap_scale == 'TRUE',
'row','none'),
color = colorRampPalette(colors = c("blue","white","red"))(100),
border_color = NA,
cluster_rows = ifelse(input$oplsda_heatmap_cluster == 'TRUE',
TRUE,FALSE),
cluster_cols = ifelse(input$oplsda_heatmap_cluster == 'TRUE',
TRUE,FALSE))
p_heapmap <- ggplotify::as.ggplot(p_heapmap)
      # Save the figure
filename <- ifelse(input$oplsda_fig_res_filetype == '.pdf','热图.pdf',
ifelse(input$oplsda_fig_res_filetype == '.png','热图.png',
ifelse(input$oplsda_fig_res_filetype == '.jpg','热图.jpg',
ifelse(input$oplsda_fig_res_filetype == '.tiff','热图.tiff','热图.eps'))))
if (input$oplsda_fig_res_filetype == '.pdf') {
ggsave(p_heapmap,
filename = paste('./results/oplsda/figures/', filename, sep = ''),
width = 6,
height = 5,
device = cairo_pdf)
}else{
ggsave(p_heapmap,
filename = paste('./results/oplsda/figures/', filename, sep = ''),
width = 6,
height = 5)
}
}
dev.off()
pheatmap::pheatmap(heat,
scale = ifelse(input$oplsda_heatmap_scale == 'TRUE',
'row','none'),
color = colorRampPalette(colors = c("blue","white","red"))(100),
border_color = NA,
cluster_rows = ifelse(input$oplsda_heatmap_cluster == 'TRUE',
TRUE,FALSE),
cluster_cols = ifelse(input$oplsda_heatmap_cluster == 'TRUE',
TRUE,FALSE))
}
)
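  # --- Illustrative sketch (not part of the app, never executed) ---
  # A small self-contained pheatmap example showing what the two UI options
  # above do: scale = "row" z-scores each metabolite across samples, and
  # cluster_rows/cluster_cols toggle the dendrograms. The matrix is random
  # demo data, not user input; the block is guarded by `if (FALSE)`.
  if (FALSE) {
    set.seed(1)
    demo_mat <- matrix(rnorm(60), nrow = 10,
                       dimnames = list(paste0("met", 1:10), paste0("s", 1:6)))
    pheatmap::pheatmap(demo_mat,
                       scale = "row",
                       color = colorRampPalette(colors = c("blue","white","red"))(100),
                       border_color = NA,
                       cluster_rows = TRUE,
                       cluster_cols = FALSE)
  }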
# sample scores plot
output$oplsda_plot <- renderPlot(plot_res_oplsda(),
height = 350,
width = 600)
plot_res_oplsda <- eventReactive(input$submit_oplsda,{
if (input$submit_oplsda > 0) {
      sacurine_model.df <- oplsda_res()@modelDF # extract the explained variance
sample.score = oplsda_res()@scoreMN %>%
as.data.frame() %>%
mutate(group.3 = user_data_oplsda()$group,
o1 = oplsda_res()@orthoScoreMN[,1])
p_score = ggplot(sample.score, aes(p1, o1, color = group.3)) +
geom_hline(yintercept = 0, linetype = 'dashed', size = 0.5) +
geom_vline(xintercept = 0, linetype = 'dashed', size = 0.5) +
geom_point(size = 2) +
labs(x = paste('P1(',round( sacurine_model.df["p1", "R2X(cum)"]*100, 2),'%)',sep = ''),
y = 'to1') +
stat_ellipse(level = 0.95, linetype = 'solid',
size = 1, show.legend = FALSE) +
scale_color_aaas() +
theme_bw() +
theme(legend.title = element_blank(),
legend.text = element_text(color = 'black',size = 10, family = 'Arial', face = 'plain'),
panel.background = element_blank(),
panel.grid = element_blank(),
axis.text = element_text(color = 'black',size = 10, family = 'Arial', face = 'plain'),
axis.title = element_text(color = 'black',size = 10, family = 'Arial', face = 'plain'),
axis.ticks = element_line(color = 'black'))
      # Save the figure
filename <- ifelse(input$oplsda_fig_res_filetype == '.pdf','oplsda图.pdf',
ifelse(input$oplsda_fig_res_filetype == '.png','oplsda图.png',
ifelse(input$oplsda_fig_res_filetype == '.jpg','oplsda图.jpg',
ifelse(input$oplsda_fig_res_filetype == '.tiff','oplsda图.tiff','oplsda图.eps'))))
if (input$oplsda_fig_res_filetype == '.pdf') {
ggsave(p_score,
filename = paste('./results/oplsda/figures/', filename, sep = ''),
width = 5,
height = 4,
device = cairo_pdf)
}else{
ggsave(p_score,
filename = paste('./results/oplsda/figures/', filename, sep = ''),
width = 5,
height = 4)
}
}
p_score
})
  # Download the analysis results
output$taboutput_oplsda_download <- downloadHandler(
filename <- function(){
paste(stringr::str_sub(input$data_input_oplsda$name,
1,
(nchar(input$data_input_oplsda$name) - 4)),
'_统计分析结果',input$oplsda_stat_res_filetype,sep = ''
)
},
content <- function(file){
if (input$oplsda_stat_res_filetype == '.csv') {
file.copy('./results/oplsda/rest_tab.csv',file)
}else if (input$oplsda_stat_res_filetype == '.txt') {
file.copy('./results/oplsda/rest_tab.txt',file)
}else{
#return(NULL)
file.copy('./results/oplsda/rest_tab.xlsx',file)
}
}
)
  # Download the figures
output$download_figure__oplsda <- downloadHandler(
filename <- function(){
paste(stringr::str_sub(input$data_input_oplsda$name,
1,
(nchar(input$data_input_oplsda$name) - 4)),
'oplsda图片结果.zip',sep = '')
},
content <- function(file){
zip(file,'./results/oplsda/figures/')
}
)
|
/main/oplsda_server.R
|
no_license
|
lixiang117423/Tools4You
|
R
| false | false | 11,269 |
r
|
t=seq(0,2*pi,0.1)
y=sin(t)
plot(t,y,type="l", xlab="time", ylab="Sine wave", main="Graphe de la fonction sinus")
plot(dnorm,-4,4)
#abline(v=0.2)
#abline(v=3)
abline(h=0)
#segments(x0=c(-2), y0=c(0.0), x1=c(3), y1=c(0.3),col="pink")
#segments(x0=c(-2), y0=c(0.0), x1=c(3), y1=c(0.5),col="red")
#segments(x0=c(-2), y0=c(0.0), x1=c(3), y1=c(0.9),col="blue")
segments(0,0,0,dnorm(0), lty=2)
curve(dt(x, df=5),add=TRUE, col=2)
curve(dt(x, df=30),add=TRUE, col=3)
legend("topleft", legend=c("Normale", "Student(5)", "Student(30)"), col=1:3, lty=1)
#Exercise 3
ozone <- read.table("ozone.txt", header=TRUE)
ozone
plot(maxO3~T12, data=ozone) # to join the points, just use the argument type="l"; this plot is not readable, the data must be sorted first
plot(maxO3~T12, data=ozone, type="l")
ordon <- order(ozone[,"T12"])
plot(maxO3~T12, data=ozone[ordon,], type="b")
#Law of large numbers
#In statistics, the law of large numbers expresses the fact that the characteristics of a random sample get closer to the characteristics of the whole population, provided the sample is large enough
set.seed(123)
X <- rbinom(1000, size=1, prob=0.6)
S1 <- cumsum(X) # the cumsum function builds the vector of cumulative sums
M1<-S1/(1:1000)
plot(M1, type="l")
abline(h=0.6, col=2)
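#The same convergence holds for other distributions: here the running mean of
#exponential draws with rate 2 (true mean = 1/2) stabilises around 0.5 as the
#sample size grows.
Y <- rexp(1000, rate=2)
M2 <- cumsum(Y)/(1:1000)
plot(M2, type="l")
abline(h=0.5, col=2)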
|
/semestre2/OutilsDonnees/TP2.R
|
no_license
|
bistendope/Cours_M1
|
R
| false | false | 1,331 |
r
|
# server.R
library(tuneR)
library(seewave)
library(dplyr)
library(stringr)
library(tidyr)
library(ggplot2)
library(gridExtra)
library(shinyBS)  # provides bsButton()/updateButton() used below (may also be attached in ui.R)
source("functions.R")
shinyServer(
function(input, output, session) {
###############
## Defaults
###############
## First look for defaults
if(file.exists("./Data/defaults.txt")) {
if(file.info("./Data/defaults.txt")$size > 0) {
defaults <- read.table("./Data/defaults.txt", header = T)
} else defaults <- NULL
} else{
defaults <- NULL
}
## Load progress to date
if(file.exists("./Data/progress.txt")) {
p <- read.csv("./Data/progress.txt")
} else {
p <- NULL
}
## Load song list
if(file.exists("./Data/song_list.txt")) {
sl <- read.csv("./Data/song_list.txt")
} else {
sl <- NULL
}
## Setup file structure
if(!dir.exists("./Data/")) dir.create("./Data/")
if(!dir.exists("./Data/Times/")) dir.create("./Data/Times/")
if(!dir.exists("./Data/Images/")) dir.create("./Data/Images/")
###############
### General
###############
## reactive data
data <- reactiveValues(song_list = sl,
progress = p,
times = NULL,
write = NULL,
status_error = "",
a.null = data.frame(type = "song",
time.P1 = NA,
time.P2 = NA,
freq.P1 = NA,
freq.P2 = NA,
freq.M = NA,
freq.dom.spec = NA),
a = data.frame(type = "song",
time.P1 = NA,
time.P2 = NA,
freq.P1 = NA,
freq.P2 = NA,
freq.M = NA,
freq.dom.spec = NA),
measures = NULL)
## Delay some things until we're ready to go
session$onFlushed(function() {
data$starting <- FALSE
})
## Reactive ID of the currently active song
current <- reactive({
if(!is.null(data$song_list)){
if(nrow(data$song_list[data$song_list$time.done == FALSE,]) > 0) {
## Sort so not done is at the top but descending
data$song_list <- data$song_list[order(data$song_list$time.done, data$song_list$ID),]
data$song_list[data$song_list$time.done==FALSE,][1,]
} else {
NULL
}
} else {
NULL
}
})
current.song <- reactive({
if(!is.null(current())) as.character(current()$file)
})
current.ID <- reactive({
if(!is.null(current())) current()$ID
})
current.n <- reactive({
if(!is.null(current())) current()$n
})
current.IDs <- reactive({
if(!is.null(data$song_list)) {
unique(as.character(data$song_list$ID))
} else NULL
})
###############
### Setup
###############
if(is.null(defaults)){
data$defaults <- list(
songs_location = "/home/steffi/Data.big/Testing",
pattern_ID = "([BCMC]{2}[ABC]{1}[0-9]{2})",
pattern_n = "([0-9]{3}).wav",
notes = c(1,3),
collevels_min = -35,
collevels_bin = 3,
wl = 512,
save_plot = TRUE
)
} else {
## Grab the defaults from the file, remove any non-unique values (based on some defaults having min/max)
data$defaults <- lapply(as.list(defaults), "unique")
isolate(data$defaults[grep("pattern|songs_location", names(data$defaults))] <- lapply(data$defaults[grep("pattern|songs_location", names(data$defaults))], "as.character"))
## Make sure there are always two notes (otherwise don't get a proper slider)
isolate(if(length(data$defaults$notes) < 2) data$defaults$notes[2] <- data$defaults$notes[1])
}
## Get the defaults and create the UIs
output$UI_setup_songs_location <- renderUI({
textInput("default_songs_location", label = "", value = data$defaults$songs_location)
})
output$UI_setup_pattern_ID <- renderUI({
textInput("default_pattern_ID", label = "", value = data$defaults$pattern_ID)
})
output$UI_setup_pattern_n <- renderUI({
textInput("default_pattern_n", label = "", value = data$defaults$pattern_n)
})
output$UI_setup_notes <- renderUI({
sliderInput("default_notes", label = strong("Assign the min and max number of notes you reasonably expect to see"), min = 1, max = 10, value = data$defaults$notes, step = 1)
})
output$UI_setup_collevels_min <- renderUI({
sliderInput("default_collevels_min",
label = "Default minimum dB to show",
min = -100, max = 0, step = 1, value = data$defaults$collevels_min)
})
output$UI_setup_collevels_bin <- renderUI({
sliderInput("default_collevels_bin",
label = "Default # amplitude bins per colour",
min = 1, max = 20, step = 1, value = data$defaults$collevels_bin)
})
output$UI_setup_wl <- renderUI({
radioButtons("default_wl",
label = "Default window Length",
choices = list(128, 256, 512, 1024, 2048),
selected = data$defaults$wl)
})
output$UI_setup_save_plot<- renderUI({
radioButtons("default_save_plot",
label = "Should a PNG of each plot (with min/max times, freq etc. be saved?",
choices = list("TRUE","FALSE"),
selected = data$defaults$save_plot)
})
## Save and Set Defaults when buttom pressed
observeEvent(input$setup_save1, {
## Grab all the setup input and save to the defaults list (d)
temp <- reactiveValuesToList(input)
temp <- temp[grep("default_",names(temp))]
names(temp) <- gsub("default_(.+)", "\\1", names(temp))
data$defaults <- temp
write.table(as.data.frame(do.call(cbind, data$defaults)), "./Data/defaults.txt", row.names = F, sep = "\t")
})
## Render values for showing:
output$setup_folders <- renderPrint({
if(!is.null(data$sl)) {
sl$file[1:10]
} else list.files(data$defaults$songs_location)[1:10]
})
## Render examples for showing:
output$setup_ID_eg <- renderTable({
      data.frame(Original = list.dirs(data$defaults$songs_location)[2:4],
                 ID = gsub(paste0(".*",data$defaults$pattern_ID,".*"), "\\1", list.dirs(data$defaults$songs_location)[2:4]))
}, include.rownames = F)
output$setup_n_eg <- renderTable({
data.frame(Original = list.files(list.dirs(data$defaults$songs_location)[2])[1:3],
n = gsub(paste0(".*",data$defaults$pattern_n,".*"), "\\1", list.files(list.dirs(data$defaults$songs_location)[2])[1:3]))
}, include.rownames = F)
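    ## --- Illustrative sketch (not executed) ---
    ## How the two default regex patterns pull the bird ID and song number out
    ## of a path; the path and ID below are made up for illustration only.
    if (FALSE) {
      p <- "/home/steffi/Data.big/Testing/MCA01/MCA01_023.wav"
      gsub(".*([BCMC]{2}[ABC]{1}[0-9]{2}).*", "\\1", p)   # "MCA01"
      as.integer(gsub(".*([0-9]{3}).wav.*", "\\1", p))    # 23
    }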
###############
### Status
###############
#volumes <- c(wd = "/home/steffi/Data.big")
#shinyDirChoose(input, 'dir', session = session, roots = volumes)
output$status_error <- renderText({
data$status_error
})
observeEvent(input$progress_update, {
## Songs total
l.s <- list.files(data$defaults$songs_location, pattern = data$defaults$pattern_n, recursive = TRUE, include.dirs = TRUE, full.names = T)
## Progress
sl <- data.frame(file = l.s,
ID = gsub(paste0(".*",data$defaults$pattern_ID,".*"), "\\1", l.s),
n = as.integer(gsub(paste0(".*",data$defaults$pattern_n,".*"), "\\1", l.s)))
sl <- sl[!is.na(sl$n) & !is.na(sl$ID),]
p <- sl %>%
group_by(ID) %>%
summarise(total = length(file))
      ## Combine any per-ID time files already saved to disk (there may be none yet)
      times <- do.call("rbind", lapply(list.files("./Data/Times/", pattern = "*.csv$", full.names = T), read.csv))
      if(!is.null(times)){
        times <- unique(times[,c("ID","n")])
times$time.done <- TRUE
sl <- merge(sl, times, by = c("ID","n"), all.x = T)
sl$time.done[is.na(sl$time.done)] <- FALSE
## Summarize progress
times <- times %>%
group_by(ID) %>%
summarize(time.n = length(n))
p <- merge(p, times, by = c("ID"), all.x = T)
p$time.n[is.na(p$time.n)] <- 0
} else {
sl$time.done <- FALSE
p$time.n <- 0
}
p$time.done <- p$time.n == p$total
## Save to disk
write.csv(p, "./Data/progress.txt", row.names = F)
write.csv(sl, "./Data/song_list.txt", row.names = F)
## Save to reactive variables
data$song_list <- sl
data$progress <- p
})
## Load list of songs from directory
output$progress <- renderDataTable({
if(!is.null(data$progress)){
temp <- data$progress
temp$time.n <- as.integer(temp$time.n)
names(temp) <- c("ID","Total # Vocs","# Extracted", "Individual Complete?")
temp
}
}, options = list(pageLength = 10))
output$current_song <- renderText({
if(!is.null(current())) {
current.song()
      } else {
        "No vocalization active (no vocalizations in queue)"
}
})
output$progress_select_list<- renderUI({
if(!is.null(current.IDs())){
selectInput("songlistID",
label = h4("Select ID"),
choices = current.IDs(), selected = current.IDs()[1])
}
})
## Load current time data table
output$data_song_list <- renderDataTable({
if(!is.null(data$song_list) & !is.null(input$songlistID)){
validate(need(!is.null(input$songlistID) & nrow(data$song_list[data$song_list$ID == input$songlistID,]) > 0, "No data for this Individual"))
data$song_list[data$song_list$ID == input$songlistID,]
}
}, options = list(pageLength = 10, columnDefs = list(list(width = "30em", targets = 0))))
###############
## Data Tables
###############
output$select_data <- renderUI({
temp <- as.list(unique(as.character(data$song_list$ID)))
names(temp) <- unique(as.character(data$song_list$ID))
selectInput("dataID",
label = h4("Select ID (default is active ID)"),
choices = temp, selected = current.ID())
})
## Load current time data table
output$data_time <- renderDataTable({
validate(need(!is.null(input$dataID) & file.exists(paste0("./Data/Times/",input$dataID,"_times.csv")), "No data for this Individual"))
read.csv(paste0("./Data/Times/",input$dataID,"_times.csv"))
}, options = list(pageLength = 50))
###############
## Time extraction
###############
## Prep measures for saving, whether or not we have them
measures <- reactive({
temp <- data$a %>%
gather(key = "measure", value = "value", -type) %>%
spread(key = "measure", value = "value")
names(temp) <- gsub("_", ".", names(temp))
temp <- cbind(ID = current.ID(),
n = current.n(),
temp,
fraction = input$time_fraction,
extracted = Sys.time())
})
## Load wave
wave <- reactive({
validate(need(file.exists(current.song()), paste0("Song file ", current.song(), " couldn't be found, consider using the 'Update Status' button on the 'Status' tab.")))
readWave(current.song())
})
## Save clicks as time data
observeEvent(input$click, {
data$times <- rbind(data$times, data.frame(xmin = input$click$xmin, xmax = input$click$xmax, ymin = input$click$ymin, ymax = input$click$ymax))
data$times <- data$times[order(data$times$xmin),]
res <- as.integer(input$time_wl) / wave()@samp.rate
data$times$xmin[data$times$xmin < res] <- res
data$times$xmax[data$times$xmax >= length(wave())/wave()@samp.rate] <- length(wave())/wave()@samp.rate - res*2
})
## Change button depending on times
## Disable the buttons if no song present
buttonDisabled <- observe({
min.n <- data$defaults$notes[1]
max.n <- data$defaults$notes[2]
if(is.null(current())) {
updateButton(session, "calc", disabled = TRUE, style = "default")
updateButton(session, "reset", disabled = TRUE)
updateButton(session, "nogood", disabled = TRUE)
} else if (length(data$times$xmin) >= min.n & length(data$times$xmin) <= max.n) {
updateButton(session, "calc", disabled = FALSE, style = "success")
updateButton(session, "reset", disabled = FALSE)
updateButton(session, "nogood", disabled = FALSE)
} else {
updateButton(session, "calc", disabled = TRUE, style = "default")
updateButton(session, "reset", disabled = FALSE)
updateButton(session, "nogood", disabled = FALSE)
}
})
## Reset times if reset button clicked
observeEvent(input$reset, {
data$times <- NULL
data$a <- data$a.null
})
## Calculate Times if Calc button pressed
observeEvent(input$calc, {
ck <- check_times(times = data$times, notes = data$defaults$notes)
if(ck != TRUE) {
output$time_error <- renderText(ck)
data$times <- NULL
} else {
output$time_error <- renderText("")
withProgress(message = "Calculating...", expr = {
prog_inc <- 1 / nrow(data$times)
## Acoustat
data$a <- data$a.null
res <- as.integer(input$time_wl) / wave()@samp.rate
for(i in 1:nrow(data$times)){
temp <- data$times[i,]
#browser()
setProgress(value = prog_inc * (i-1), detail = paste0("Note ", i, " of ", nrow(data$times), ": Acoustat"))
acous <- c(
acoustat(wave(), wl = 256, fraction = input$time_fraction, tlim=c(temp$xmin,temp$xmax), flim = c(temp$ymin,temp$ymax), plot = F)[c("time.P1", "time.P2")],
acoustat(wave(), wl = 1024, fraction = input$time_fraction, tlim=c(temp$xmin,temp$xmax), flim = c(temp$ymin,temp$ymax), plot = F)[c("freq.P1", "freq.M", "freq.P2")])
temp <- data.frame(note = i,
acous)
temp[, c("time.P1", "time.P2")] <- temp[, c("time.P1", "time.P2")] + data$times[i, 'xmin']
temp <- temp[, names(temp) %in% names(data$a.null)]
if(i == 1) data$a <- temp else data$a <- rbind(data$a, temp)
}
## Dom freqs
for(i in 1:nrow(data$times)){
setProgress(value = prog_inc * (i-1), detail = paste0("Note ", i, " of ", nrow(data$times), ": Dom Freq"))
temp <- data$times[i,]
wave2 <- cutw(wave(), from = temp$xmin, to = temp$xmax, output = "Wave") %>%
fir(wave = ., wl = 512, from = temp$ymin*1000, to = temp$ymax*1000, output = "Wave")
temp.spec <- as.data.frame(meanspec(wave2,
wl = 512,
fftw = FALSE,
plot = FALSE,
norm = FALSE)) %>%
dplyr::filter(y == max(y, na.rm = TRUE)) %>%
rename(freq = x, amp = y)
data$a$freq.dom.spec[i] = temp.spec$freq
}
## Identify notes
data$a <- data$a[order(data$a$time.P1),]
data$a$type <- paste0("note", 1:nrow(data$a))
}) #End of withProgress
}
})
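    ## --- Illustrative sketch (not executed) ---
    ## Standalone seewave::acoustat() call on the `tico` recording shipped with
    ## seewave, showing the percentile fields stored above (time.P1, time.P2,
    ## freq.P1, freq.M, freq.P2). fraction = 90 mirrors a typical value of
    ## input$time_fraction; wrapped in `if (FALSE)` so it never runs in-session.
    if (FALSE) {
      data(tico, package = "seewave")
      a_demo <- acoustat(tico, wl = 512, fraction = 90, plot = FALSE)
      a_demo[c("time.P1", "time.P2", "freq.P1", "freq.M", "freq.P2")]
    }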
## Write null data if button "nogood" pressed, change the value of data$a AND SAVE
observeEvent(input$nogood, {
data$a <- data$a.null
data$write <- measures()
})
## Write data for saving when SAVE Button pressed
observeEvent(input$save, {
output$time_error <- renderText("")
data$write <- measures()
})
## When data saved for writing, write it!
observeEvent(data$write, {
if(data$defaults$save_plot == TRUE & data$write$type[1] != "song"){
if(!dir.exists(paste0("./Data/Images/",current.ID()))) dir.create(paste0("./Data/Images/",current.ID()))
        png(paste0("./Data/Images/",current.ID(),"/",current.ID(),"_",current.n(),".png"), width = 600, height = 480)
spectro(wave(), wl = as.integer(input$time_wl), flim = c(input$time_flim[1], input$time_flim[2]), tlim = c(input$time_tlim[1], input$time_tlim[2]), scale = F, collevels = seq(input$time_collevels_min, 0, by = input$time_collevels_bin), fftw = FALSE)
title(paste0(current.ID()," - ", current.n()))
rect(xright = data$a$time.P1, xleft = data$a$time.P2, ybottom = data$a$freq.P1, ytop = data$a$freq.P2, col = "#0000FF30")
segments(x0 = data$a$time.P1, x1 = data$a$time.P2, y0 = data$a$freq.M, y1 = data$a$freq.M, col = "red", lwd = 3)
segments(x0 = data$a$time.P1, x1 = data$a$time.P2, y0 = data$a$freq.dom.spec, y1 = data$a$freq.dom.spec, col = "green", lwd = 3)
dev.off()
}
append <- file.exists(paste0("./Data/Times/",current.ID(),"_times.csv"))
write.table(x = data$write,
append = append,
col.names = !append,
file = paste0("./Data/Times/",current.ID(),"_times.csv"),
row.names = FALSE,
sep = ",", quote = FALSE)
data$times <- NULL
data$a <- data$a.null
data$write <- NULL
data$progress <- update.progress(data$progress, type = "time")
data$song_list[data$song_list$ID == current.ID() & data$song_list$n == current.n(),'time.done'] <- TRUE
write.csv(data$progress, "./Data/progress.txt", row.names = F)
write.csv(data$song_list, "./Data/song_list.txt", row.names = F)
})
## Time limits slider based on song length
output$UI_time_tlim <- renderUI({
if(!is.null(current())){
t <- c(0,length(wave())/wave()@samp.rate)
} else if(!is.null(input$time_tlim)){
t <- c(input$time_tlim[1], input$time_tlim[2])
} else {
t <- c(0,2.5)
}
if(!is.null(current())){
t.max <- floor(length(wave())/wave()@samp.rate/0.001)*0.001
} else {
t.max <- 2.5
}
sliderInput("time_tlim",
label = "Time Limits",
min = 0, max = t.max, value = t, step = 0.05)
})
## Spectrogram UI Settings
output$UI_time_collevels_min <- renderUI({
sliderInput("time_collevels_min",
label = "Minimum dB to show",
min = -100, max = 0, step = 1, value = data$defaults$collevels_min)
})
output$UI_time_collevels_bin <- renderUI({
sliderInput("time_collevels_bin",
label = "# Amplitude bins per colour",
min = 1, max = 20, step = 1, value = data$defaults$collevels_bin)
})
output$UI_time_wl <- renderUI({
radioButtons("time_wl",
label = "Window Length",
choices = list(128, 256, 512, 1024, 2048),
selected = data$defaults$wl)
})
## If data$a exists, allow saving
output$UI_time_save <- renderUI({
if(data$a$type[1]!="song") bsButton("save", "Save data", style = "default")
})
## Spectrogram
output$song <- renderPlot({
if(!is.null(current()) & !is.null(input$time_wl)){
if(input$time_tlim[2] <= length(wave())/wave()@samp.rate){
spectro(wave(), wl = as.integer(input$time_wl), flim = c(input$time_flim[1], input$time_flim[2]), tlim = c(input$time_tlim[1], input$time_tlim[2]), scale = F, collevels = seq(input$time_collevels_min, 0, by = input$time_collevels_bin), fftw = FALSE)
title(current.song())
if(data$a$type[1]!="song") {
rect(xright = data$a$time.P1, xleft = data$a$time.P2, ybottom = data$a$freq.P1, ytop = data$a$freq.P2, col = "#0000FF30")
segments(x0 = data$a$time.P1, x1 = data$a$time.P2, y0 = data$a$freq.M, y1 = data$a$freq.M, col = "red", lwd = 3)
segments(x0 = data$a$time.P1, x1 = data$a$time.P2, y0 = data$a$freq.dom.spec, y1 = data$a$freq.dom.spec, col = "green", lwd = 3)
#segments(x0 = data$a$time.P1, x1 = data$a$time.P2, y0 = data$a$freq.max.spec, y1 = data$a$freq.max.spec, col = "blue", lwd = 3)
#segments(x0 = data$a$time.at.max, x1 = data$a$time.at.max, y0 = data$a$freq.P1, y1 = data$a$freq.P2, col = "black", lwd = 3)
#segments(x0 = data$times$xmin, x1 = data$times$xmin+abs(input$time_skip[1]), y0 = data$times$ymin, y1 = data$times$ymin, col = "black", lwd = 7)
#segments(x0 = data$times$xmax, x1 = data$times$xmax-abs(input$time_skip[2]), y0 = data$times$ymin, y1 = data$times$ymin, col = "black", lwd = 7)
}
} else {
plot(0:10, 0:10, type = "n", axes = F, xlab = "", ylab = "")
text(x = 5, y = 5, labels = "No vocalization active", cex = 3)
}
} else {
plot(0:10, 0:10, type = "n", axes = F, xlab = "", ylab = "")
text(x = 5, y = 5, labels = "No vocalization active", cex = 3)
}
})
## Get current coordinates
output$hovertext <- renderText({
if(is.null(input$hover)) "( , )" else paste0("(",round(input$hover$x,2),", ",round(input$hover$y,2),")")
})
## Populate data frame with clicks
output$data <- renderText({if(is.null(data$times)) "No times selected" else round(data$times$xmin, 3)})
observeEvent(input$pause, {
browser()
})
}
)
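## --- Illustrative sketch (not part of the app, never executed) ---
## The dominant-frequency chain used in the calc handler (cutw -> fir band-pass
## -> meanspec, then keep the frequency at maximum amplitude), shown on
## seewave's `tico` example Wave. The 0.5-0.9 s / 2-6 kHz selection is an
## assumed example, not a value taken from the app.
if (FALSE) {
  library(seewave)
  data(tico, package = "seewave")
  note_demo <- cutw(tico, from = 0.5, to = 0.9, output = "Wave") %>%
    fir(wave = ., wl = 512, from = 2000, to = 6000, output = "Wave")
  dom_demo <- as.data.frame(meanspec(note_demo, wl = 512, plot = FALSE, norm = FALSE)) %>%
    dplyr::filter(y == max(y, na.rm = TRUE)) %>%
    dplyr::rename(freq = x, amp = y)
  dom_demo$freq  # dominant frequency in kHz
}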
|
/server.R
|
no_license
|
steffilazerte/song-extract
|
R
| false | false | 25,237 |
r
|
#----Data Frame----
#We can create a data frame by combining variables of the same length.
# Create a, b, c, d variables
a <- c(10,20,30,40)
b <- c('book', 'pen', 'textbook', 'pencil_case')
c <- c(TRUE,FALSE,TRUE,FALSE)
d <- c(2.5, 8, 10, 7)
# Join the variables to create a data frame
df <- data.frame(a,b,c,d)
df
#We can see that the column headers have the same names as the variables.
#We can change the column names with the names() function
names(df) <- c('ID', 'items', 'store', 'price')
df
# Print the structure
str(df)
# Note: before R 4.0.0, data.frame() converted character columns to factors by default (stringsAsFactors = TRUE).
str(df) #structure of DF
head(df) #top 6 rows
head(df,n=3) #top 3 rows
tail(df) #last 6 rows
View(df)
class(df) # DF
summary(df) #summary
#Slice Data Frame:
#We select rows and columns with square brackets after the name of the data frame: df[rows, columns]
## Select row 1 in column 2
df[1,2]
## Select Rows 1 to 2
df[1:2,]
## Select Columns 1
df[,1]
## Select Rows 1 to 3 and columns 3 to 4
df[1:3, 3:4]
#It is also possible to select the columns with their names.
## Slice with columns name
df[, c('ID', 'store')]
#Append a Column to Data Frame (use the symbol $ to append a new variable.)
# Create a new vector
quantity <- c(10, 35, 40, 5)
# Add `quantity` to the `df` data frame
df$quantity <- quantity
df
#Note: the number of elements in the vector has to equal the number of rows in the data frame.
#Hence, executing the statement below will give an error
quantity <- c(10, 35, 40)
# Add `quantity` to the `df` data frame
df$quantity <- quantity
#Selecting a Column of a Data Frame.
# Select the column ID
df$ID
#Subsetting a Data Frame based on some condition (we will use the subset() function for this)
# Select price above 5
subset(df, subset = price > 5)
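#Illustrative extension (not part of the original script): subset() can also
#restrict the returned columns via its select argument, and rows can be
#reordered with order(). Both calls use the df built above.
subset(df, subset = price > 5, select = c('items', 'price'))
# Sort the data frame by price, ascending
df[order(df$price), ]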
|
/3 DataFrame-DataStructuresinR.R
|
no_license
|
rajanimishra128/Data-Analytics
|
R
| false | false | 1,815 |
r
|
RetrieveTWCoBaseURL <- function(API.Location, API) {
# **********************************Header***********************************
# FUNCTION NAME: RetrieveTWCoBaseURL
  # DESCRIPTION: Returns the base Weather Company (TWCo) API URL template for
  # the requested location type and API, with <placeholder> tokens that are
  # filled in later by other functions and scripts.
#
# Args:
  #   API.Location: (string) How the location is specified: 'geocode'
  #   (lat/long), 'postalcode', or 'stationid'.
  #   API: Which TWCo API is being called (e.g. 'History-Site' or
  #   'Location-Point'), depending on the information that needs to be returned.
#
# Returns:
  #   The base URL template, to be completed later by other functions and scripts.
#
# TODO(Grant Case): Add Current and Forecast URL strings
# **********************************Header***********************************
if (API.Location == "geocode" && API == "History-Site") {
BaseURL <- "https://api.weather.com/v1/geocode/<latitude>/<longitude>/observations/historical.json?language=<language>&units=<units>&apiKey=<api.key>&startDate=<start.date>&endDate=<end.date>"
} else if (API.Location == "postalcode" && API == "History-Site") {
BaseURL <- "https://api.weather.com/v1/location/<postal.code>:4:<country>/observations/historical.json?language=<language>&units=<units>&apiKey=<api.key>&startDate=<start.date>&endDate=<end.date>"
} else if (API.Location == "stationid" && API == "History-Site") {
BaseURL <- "https://api.weather.com/v1/location/<station.id>:4:<country>/observations/historical.json?language=<language>&units=<units>&apiKey=<api.key>&startDate=<start.date>&endDate=<end.date>"
} else if (API.Location == "geocode" && API == "Location-Point") {
BaseURL <- "https://api.weather.com/v3/location/point?geocode=<latitudecommalongitude>&language=<language>&format=json&apiKey=<api.key>"
} else if (API.Location == "postalcode" && API == "Location-Point") {
BaseURL <- "https://api.weather.com/v3/location/point?postalKey=<postal.key>&language=<language>&format=json&apiKey=<api.key>"
} else {
BaseURL = ""
}
return(BaseURL)
} ### END RetrieveTWCoBaseURL
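## Illustrative usage sketch (not from the original project), wrapped in
## `if (FALSE)` so sourcing this file still only defines the function. The
## gsub() calls are an assumption about how downstream code fills in the
## <placeholder> tokens; the real project does this elsewhere.
if (FALSE) {
  base.url <- RetrieveTWCoBaseURL("geocode", "History-Site")
  url <- gsub("<latitude>", "33.749", base.url, fixed = TRUE)
  url <- gsub("<longitude>", "-84.388", url, fixed = TRUE)
  # Remaining tokens (<language>, <units>, <api.key>, <start.date>, <end.date>)
  # would be substituted the same way before the request is issued.
}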
|
/Code/Functions/RetrieveTWCoBaseURL.R
|
no_license
|
hangtime79/TWCo_History_On_Demand
|
R
| false | false | 2,188 |
r
|
# Author: TW
#require(testthat)
context("plotFingerPrint")
if (!exists("Example_DETha98")) load("data/Example_DETha98.RData")
EddyData.F <- Example_DETha98
#Include POSIX time stamp column
EddyDataWithPosix.F <- suppressMessages(fConvertTimeToPosix(
EddyData.F, 'YDH', Year = 'Year', Day = 'DoY', Hour = 'Hour'))
# construct multiyear dataset
EddyData99.F <- EddyData.F
EddyData99.F$Year <- 1999
EddyDataWithPosix2yr.F <- suppressMessages(fConvertTimeToPosix(rbind(
EddyData.F, EddyData99.F), 'YDH', Year = 'Year', Day = 'DoY', Hour = 'Hour'))
rm( EddyData99.F )
EProc <- sEddyProc$new('DE-Tha', EddyDataWithPosix.F, c('NEE','Rg', 'Tair', 'VPD'))
data <- cbind( EProc$sDATA, EProc$sTEMP)
dts <- EProc$sINFO$DTS
test_that("plotting NEE with class method",{
EProc$sPlotFingerprintY("NEE", Year = 1998)
})
test_that("plotting NEE with different range",{
EProc$sPlotFingerprintY(
"NEE", Year = 1998,
valueLimits = quantile(EProc$sDATA$NEE,
prob = c( 0.05, 0.99), na.rm = TRUE))
})
test_that("plotting legend only",{
EProc$sPlotFingerprintY("NEE", Year = 1998, onlyLegend = TRUE)
})
test_that("plotting NEE",{
sEddyProc_sPlotFingerprintY("NEE", Year = 1998, data = data, dts = dts)
})
test_that("plotting NEE with Inf-values",{
data2 <- data
data2$NEE[5:10][is.finite(data2$NEE[5:10])] <- Inf
sEddyProc_sPlotFingerprintY("NEE", Year = 1998, data = data2, dts = dts)
})
test_that("plotting NEE to pdf",{
skip_on_cran()
EProc$sPlotFingerprint("NEE", Dir = tempdir())
})
test_that("plot diurnal cycle of NEE to pdf",{
skip_on_cran()
EProc$sPlotDiurnalCycle("NEE", Dir = tempdir())
})
test_that("sPlotHHFluxes",{
skip_on_cran()
EProc$sPlotHHFluxes("NEE", Dir = tempdir())
})
test_that("compute_daily_mean",{
nday = 5
nRecInDay = 48
x0 = 1.2 # mumol CO2 / s
x0_sd = 0.1*x0
x = rep(x0, nday*nRecInDay)
x_sd = rep(x0_sd, nday*nRecInDay)
# no noise,
res = REddyProc:::compute_daily_mean(x, x_sd, nRecInDay, 1, 1)
expect_equal(res$x, rep(x0,5))
expect_equal(res$x_sd, rep(x0_sd,5)) # no uncertainty decrease: correlated
# convert to mumol CO2 per day
timeFactor = 3600 * 24
res = REddyProc:::compute_daily_mean(x, x_sd, nRecInDay, timeFactor, 1)
expect_equal(res$x, rep(x0,5)*timeFactor)
expect_equal(res$x_sd, rep(x0_sd,5)*timeFactor) # correlated
# convert to gCO2 per second: (g CO2/mumol CO2) * (gC/gCO2)
massFactor = (44.0096 / 1e6) * (12.011 / 44.0096)
## conversion factor with default from mumol CO2 to g C
res = REddyProc:::compute_daily_mean(x, x_sd, nRecInDay, 1, massFactor)
expect_equal(res$x, rep(x0,5)*massFactor)
expect_equal(res$x_sd, rep(x0_sd,5)*massFactor) # correlated
})
test_that("sPlotDailySums",{
skip_on_cran()
df = cbind(EProc$sDATA, EProc$sTEMP)
REddyProc:::sEddyProc_sPlotDailySumsY("NEE", Year=1998, data=df, dts=48)
#
EProc$sPlotDailySums("NEE", Dir = tempdir())
})
test_that("sPlotDailySums",{
skip_on_cran()
df = cbind(EProc$sDATA, EProc$sTEMP)
REddyProc:::sEddyProc_sPlotDailySumsY("NEE", Year=1998, data=df, dts=48)
#
EProc$sPlotDailySums("NEE", Dir = tempdir())
})
test_that("sPlotFingerprintY with all missing: error caught",{
data2 <- EddyDataWithPosix2yr.F
data2$NEE[data2$Year == 1999] <- NA
EProc2 <- sEddyProc$new('DE-Tha', data2, c('NEE','Rg', 'Tair', 'VPD'))
EProc2$sPlotFingerprintY("NEE", Year = 1999)
EProc2$sPlotFingerprint("NEE", Dir=tempdir())
})
|
/tests/testthat/test_plotFingerprint.R
|
no_license
|
bgctw/REddyProc
|
R
| false | false | 3,440 |
r
|
test_that("ci", {
skip_if_not_installed("lme4")
model <- lm(mpg ~ wt, data = mtcars)
expect_equal(suppressMessages(ci(model))[1, 3], 33.4505, tolerance = 0.01)
expect_equal(suppressMessages(ci(model, ci = c(0.7, 0.8)))[1, 3], 35.30486, tolerance = 0.01)
model <- glm(vs ~ wt, family = "binomial", data = mtcars)
expect_equal(suppressMessages(ci(model))[1, 3], 1.934013, tolerance = 0.01)
model <- lme4::lmer(wt ~ cyl + (1 | gear), data = mtcars)
expect_equal(suppressMessages(ci(model, method = "normal"))[1, 3], -0.335063, tolerance = 0.01)
model <- lme4::lmer(wt ~ cyl + (1 | gear), data = mtcars)
expect_equal(ci(model)[1, 3], -0.3795646, tolerance = 0.01)
set.seed(1)
val <- ci(model, method = "boot")[1, 3]
expect_equal(val, -0.555424, tolerance = 0.01)
model <- lme4::glmer(vs ~ cyl + (1 | gear), data = mtcars, family = "binomial")
expect_equal(ci(model)[1, 3], -0.7876679, tolerance = 0.01)
model <- lme4::glmer(vs ~ drat + cyl + (1 | gear), data = mtcars, family = "binomial")
expect_equal(ci(model)[1, 3], -48.14195, tolerance = 0.01)
})
test_that("vs. sandwich & lmtest", {
skip_if_not_installed("sandwich")
skip_if_not_installed("lmtest")
model <- lm(mpg ~ wt, data = mtcars)
known <- lmtest::coefci(model, vcov = sandwich::vcovHC)
unknown <- ci(model, vcov = sandwich::vcovHC)
expect_equal(unknown[["CI_low"]], known[, "2.5 %"], ignore_attr = TRUE)
expect_equal(unknown[["CI_high"]], known[, "97.5 %"], ignore_attr = TRUE)
model <- glm(am ~ wt, data = mtcars, family = binomial)
known <- lmtest::coefci(model, vcov = sandwich::vcovHC)
unknown <- ci(model, vcov = sandwich::vcovHC, method = "wald")
expect_equal(unknown[["CI_low"]], known[, "2.5 %"], ignore_attr = TRUE)
expect_equal(unknown[["CI_high"]], known[, "97.5 %"], ignore_attr = TRUE)
suppressMessages(
expect_message(ci(model, vcov = sandwich::vcovHC), regexp = "vcov.*are not available with.*profile")
)
})
|
/tests/testthat/test-ci.R
|
no_license
|
cran/parameters
|
R
| false | false | 2,007 |
r
|
test_that("ci", {
skip_if_not_installed("lme4")
model <- lm(mpg ~ wt, data = mtcars)
expect_equal(suppressMessages(ci(model))[1, 3], 33.4505, tolerance = 0.01)
expect_equal(suppressMessages(ci(model, ci = c(0.7, 0.8)))[1, 3], 35.30486, tolerance = 0.01)
model <- glm(vs ~ wt, family = "binomial", data = mtcars)
expect_equal(suppressMessages(ci(model))[1, 3], 1.934013, tolerance = 0.01)
model <- lme4::lmer(wt ~ cyl + (1 | gear), data = mtcars)
expect_equal(suppressMessages(ci(model, method = "normal"))[1, 3], -0.335063, tolerance = 0.01)
model <- lme4::lmer(wt ~ cyl + (1 | gear), data = mtcars)
expect_equal(ci(model)[1, 3], -0.3795646, tolerance = 0.01)
set.seed(1)
val <- ci(model, method = "boot")[1, 3]
expect_equal(val, -0.555424, tolerance = 0.01)
model <- lme4::glmer(vs ~ cyl + (1 | gear), data = mtcars, family = "binomial")
expect_equal(ci(model)[1, 3], -0.7876679, tolerance = 0.01)
model <- lme4::glmer(vs ~ drat + cyl + (1 | gear), data = mtcars, family = "binomial")
expect_equal(ci(model)[1, 3], -48.14195, tolerance = 0.01)
})
test_that("vs. sandwich & lmtest", {
skip_if_not_installed("sandwich")
skip_if_not_installed("lmtest")
model <- lm(mpg ~ wt, data = mtcars)
known <- lmtest::coefci(model, vcov = sandwich::vcovHC)
unknown <- ci(model, vcov = sandwich::vcovHC)
expect_equal(unknown[["CI_low"]], known[, "2.5 %"], ignore_attr = TRUE)
expect_equal(unknown[["CI_high"]], known[, "97.5 %"], ignore_attr = TRUE)
model <- glm(am ~ wt, data = mtcars, family = binomial)
known <- lmtest::coefci(model, vcov = sandwich::vcovHC)
unknown <- ci(model, vcov = sandwich::vcovHC, method = "wald")
expect_equal(unknown[["CI_low"]], known[, "2.5 %"], ignore_attr = TRUE)
expect_equal(unknown[["CI_high"]], known[, "97.5 %"], ignore_attr = TRUE)
suppressMessages(
expect_message(ci(model, vcov = sandwich::vcovHC), regexp = "vcov.*are not available with.*profile")
)
})
|
library(base)
library(utils)
library(data.table)
# The function downloads the Samsung data and extracts it
download.data <- function () {
zip.url <- 'https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip'
zip.file <- 'dataset.zip'
download.file(zip.url, destfile = zip.file)
unzip(zip.file)
}
# The function loads and processes either the train or the test data set,
# given that the current directory is the Samsung data set directory.
load.dataset <- function (set, features, labels) {
  # Construct the relative paths of the data files
prefix <- paste(set, '/', sep = '')
file.data <- paste(prefix, 'X_', set, '.txt', sep = '')
file.label <- paste(prefix, 'y_', set, '.txt', sep = '')
file.subject <- paste(prefix, 'subject_', set, '.txt',
sep = '')
# Read the data into a data.frame
# then transform it into data.table
data <- read.table(file.data)[, features$index]
names(data) <- features$name
label.set <- read.table(file.label)[, 1]
data$label <- factor(label.set, levels=labels$level,
labels=labels$label)
subject.set <- read.table(file.subject)[, 1]
data$subject <- factor(subject.set)
# convert to data table
data.table(data)
}
run.analysis <- function () {
setwd('UCI HAR Dataset/')
# Get the features
feature.set <- read.table('features.txt',
col.names = c('index', 'name'))
features <- subset(feature.set, grepl('-(mean|std)[(]',
feature.set$name))
# Get the labels
label.set <- read.table('activity_labels.txt',
col.names = c('level', 'label'))
# Read train and test data sets
train.set <- load.dataset('train', features, label.set)
test.set <- load.dataset('test', features, label.set)
# The raw data set
dataset <- rbind(train.set, test.set)
# Generate the tidy data set
tidy.dataset <- dataset[, lapply(.SD, mean),
by=list(label, subject)]
# Fix the variable names
names <- names(tidy.dataset)
# Replace `-mean' by `Mean'
names <- gsub('-mean', 'Mean', names)
# Replace `-std' by 'Std'
names <- gsub('-std', 'Std', names)
# Remove the parenthesis and dashes
names <- gsub('[()-]', '', names)
# Replace `BodyBody' by `Body'
names <- gsub('BodyBody', 'Body', names)
setnames(tidy.dataset, names)
# Write the raw and the tidy data sets to files
setwd('..')
write.csv(dataset, file = 'rawdata.csv', row.names = FALSE)
write.csv(tidy.dataset, file = 'tidydata.csv',
row.names = FALSE, quote = FALSE)
write.table(dataset, file = 'rawdata.txt', row.names = FALSE)
write.table(tidy.dataset, file = 'tidydata.txt',
row.names = FALSE, quote = FALSE)
# Return the tidy data set
tidy.dataset
}
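# Illustrative driver (not part of the original file), wrapped in `if (FALSE)`
# so sourcing the script still only defines the functions. download.data()
# fetches the archive into the working directory before run.analysis() is called.
if (FALSE) {
  download.data()
  tidy <- run.analysis()
  head(tidy)
}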
|
/run_analysis.R
|
no_license
|
Beassoum/getdata-Project
|
R
| false | false | 3,277 |
r
|
# Simple function to generate the filename of a csv report in the desired format.
# Note: relies on a `device_name` variable defined in the enclosing environment.
generate_filename <- function(report, date){
  # Put the generated file in a "reports" folder in the home directory; build the
  # filename from the report name, the device name and the supplied date.
  filename <- paste("~/reports/", report, device_name, "_", date, ".csv", sep = "")
  return(filename)
}
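# Illustrative usage (not in the original file), wrapped in `if (FALSE)` so the
# file still only defines the function when sourced. generate_filename() reads
# `device_name` from the enclosing environment, so it must be defined first;
# the values below are made up for the example.
if (FALSE) {
  device_name <- "kolibri01"
  generate_filename("usage_summary_", "2024-01-31")
  # -> "~/reports/usage_summary_kolibri01_2024-01-31.csv"
}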
|
/reporting/generate_filename.R
|
no_license
|
L3Vyt/edulution_scripts
|
R
| false | false | 325 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GenFactorMatrix.R
\name{GenFactorMatrix}
\alias{GenFactorMatrix}
\title{Generate Factor Loadings Matrix}
\usage{
GenFactorMatrix(nfactors = 5, items = c(5, 5, 5, 5, 5), itemsR = c(2, 2,
2, 2, 2), loading = 0.5, loading_norm = FALSE, loading_norm_sd = 0.025)
}
\arguments{
\item{nfactors}{Number of factors}
\item{items}{vector of total number of items per factor}
\item{itemsR}{Number of items per factor that are reverse scored}
\item{loading}{Factor loading magnitude. Default is .5}
\item{loading_norm}{If TRUE, generate factor loadings that average to \code{loading}}
\item{loading_norm_sd}{If \code{loading_norm} is TRUE, standard deviation of loadings.}
}
\value{
\code{matrix} of n factors by i items
}
\description{
\code{GenFactorMatrix} returns a matrix of factor loadings, based on number of items, factors, and loadings
}
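% Illustrative call (not part of the generated documentation), based only on
% the \usage{} signature above:
% GenFactorMatrix(nfactors = 2, items = c(6, 6), itemsR = c(2, 2), loading = 0.6)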
|
/man/GenFactorMatrix.Rd
|
no_license
|
R-Computing-Lab/enumR
|
R
| false | true | 919 |
rd
|
N0 <- 10000
lambda <- 1.5
f <- function(t) lambda * exp(-lambda*t)
F <- function(t) 1 - exp(-lambda * t)
inv_F <- function(t) exp(-lambda * t)
reverse_exp_function <- function(x) -1 / lambda * log(x)
sequence <- sort(sapply(runif(N0, 0, 1), reverse_exp_function))
max_t <- max(sequence)
pdf("out.pdf")
N <- sapply(c(0:(N0-1)), function(x) (N0 - x) / N0)
plot(sequence, N, type="l", col="blue", lwd = 10, main="rel(t)", ylab="rel(t)", xlab="t")
plot(inv_F, 0, max_t, col="red", lwd = 4, add=TRUE)
legend("topright", NULL, c("theory rel", "expiremental rel"), fill=c("red", "blue"))
rev_N <- sapply(c(0:(N0-1)), function(x) x / N0)
plot(sequence, rev_N, type="l", col="blue", lwd = 10, main="unrel(t)", ylab="unrel(t)", xlab="t")
plot(F, 0, max_t, col="red", lwd = 4, add=TRUE)
legend("bottomright", NULL, c("theory unrel", "expiremental unrel"), fill=c("red", "blue"))
histogram <- hist(sequence, breaks=200, plot=FALSE)
exp_f <- histogram$density
exp_f_xs <- histogram$mids
plot(exp_f_xs, exp_f, type="l", col="blue", lwd = 10, main="f(t)", ylab="f(t)", xlab="t")
plot(f, 0, max(exp_f_xs), col="red", lwd = 4, add=TRUE)
legend("topright", NULL, c("theory f(t)", "expiremental f(t)"), fill=c("red", "blue"))
intence <- function(t) f(t) / inv_F(t)
counts <- histogram$counts
breaks <- histogram$breaks
breaks <- breaks[2:length(breaks)]
print(counts)
print(sapply(1:length(counts), function(i) (N0 - findInterval(breaks[i], sequence))))
exp_intence <- sapply(1:length(counts), function(i) counts[i] / (N0 - findInterval(breaks[i], sequence)) / diff(breaks)[1])
plot(exp_f_xs, exp_intence, type="l", col="blue", lwd = 10, main="lambda(t)", ylab="lambda(t)", xlab="t")
plot(intence, 0, max_t, col="red", lwd = 4, add=TRUE)
legend("topleft", NULL, c("theory lambda(t)", "expiremental lambda(t)"), fill=c("red", "blue"))
|
/8sem/SR/lab3/lab3.R
|
no_license
|
azhi/BSUIR_labs
|
R
| false | false | 1,820 |
r
|
## original code was designed to detect segfaults/hangs from error handling
library(lmeAddSigma)
set.seed(101)
d <- expand.grid(block=LETTERS[1:26],rep=1:100)
d$x <- runif(nrow(d))
reff_f <- rnorm(length(levels(d$block)),sd=1)
## need intercept large enough to avoid negative values
d$eta0 <- 4+3*d$x ## version without random effects
d$eta <- d$eta0+reff_f[d$block]
## inverse link
d$mu <- 1/d$eta
d$y <- rgamma(nrow(d),scale=d$mu/2,shape=2)
## update: these all work now (2013 May), but compDev is ignored
gm1 <- glmer(y ~ 1|block, d, Gamma, nAGQ=25L)
gm1 <- glmer(y ~ 1|block, d, Gamma, nAGQ=25L, compDev=FALSE)
gm1 <- glmer(y ~ 1|block, d, Gamma, nAGQ=25L, compDev=FALSE,
optimizer="Nelder_Mead")
gm2 <- glmer(y ~ 1|block, d, Gamma, nAGQ=25L)
gm3 <- glmer(y ~ 1|block, d, Gamma, nAGQ=25L)
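## Illustrative sanity check (not in the original test): gm2 and gm3 come from
## identical calls, so their fixed effects should match. Assumes the lme4-style
## accessor fixef() is available for these fitted objects.
stopifnot(isTRUE(all.equal(fixef(gm2), fixef(gm3))))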
|
/tests/throw.R
|
no_license
|
naef-lab/lmeAddSigma
|
R
| false | false | 814 |
r
|
boxplot(sphere10_32particles_cpu_multi_runs$V11, sphere10_32particles_gpu_multi_runs$V11, names=c("CPU", "GPU"), main="SPHERE(10) - 32 particles - 51 runs", ylab="Fitness value")
boxplot(sphere10_32particles_cpu_time_multi_runs$V1, sphere10_32particles_gpu_time_multi_runs$V1, names=c("CPU", "GPU"), main="SPHERE(10) - 32 particles - 51 runs", ylab="Execution time (s)")
|
/scripts/sphere10_32particles_multi_runs.r
|
no_license
|
AOE-khkhan/CUDA-final-project
|
R
| false | false | 373 |
r
|
# Import libraries
library(readr)
library(ggplot2)
library(dplyr)
library(mvtnorm)
# Parms
n <- 100 # number data points per centroid
sd <- 1 # standard deviation for each centroid
# Define our four centroids
centroid_1 <- c(0, 0)
centroid_2 <- c(10, 10)
centroid_3 <- c(0, 10)
centroid_4 <- c(10, 0)
# Make the covariance matrix passed to rmvnorm (its sigma argument is a
# covariance matrix, so the diagonal entries are variances; with sd = 1 the two coincide)
sd_mat <- matrix(0, 2, 2)
sd_mat[1, 1] <- sd
sd_mat[2, 2] <- sd
# Generate 100 data points per centroid. Bivariate Normal, sd = 1
data_1 <- rmvnorm(n = n, mean = centroid_1, sigma = sd_mat)
data_2 <- rmvnorm(n = n, mean = centroid_2, sigma = sd_mat)
data_3 <- rmvnorm(n = n, mean = centroid_3, sigma = sd_mat)
data_4 <- rmvnorm(n = n, mean = centroid_4, sigma = sd_mat)
# Classify our data into 4 classes
data_1 <- cbind(data_1, rep(1, 100))
data_2 <- cbind(data_2, rep(2, 100))
data_3 <- cbind(data_3, rep(3, 100))
data_4 <- cbind(data_4, rep(4, 100))
# Bind our data together
sep_data <- as.data.frame(rbind(data_1, data_2, data_3, data_4))
sep_data %>% ggplot() +
  geom_point(mapping = aes(x = V1, y = V2, color = factor(V3))) # class label plotted as a discrete colour
# Write out data
write_csv(sep_data, path = "/Users/henryneeb/CME-research/sep_data.txt")
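# Illustrative check (not in the original script): each class should contribute
# n = 100 points, and the per-class sample means should sit near the chosen centroids.
table(sep_data$V3)
aggregate(cbind(V1, V2) ~ V3, data = sep_data, FUN = mean)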
|
/KNN_data_script.R
|
no_license
|
CME323-isomap/CME-research
|
R
| false | false | 1,147 |
r
|
library(jpeg)
jpg<-readJPEG(source="getdata-jeff.jpg",native=TRUE)
quantile(jpg,probs = c(.3,.8))
|
/02_Getting_and_Cleaning_Data/week3/q2.R
|
no_license
|
fhyme/Coursera_Data_Science
|
R
| false | false | 97 |
r
|
# Useful Functions
source("functions_out.R")
source("functions_plot.R")
source("functions_power.R")
source("functions_string.R")
source("functions_tstats.R")
source("functions_wavelets.R")
source("functions_SPS.R")
quad.area <- function(x1, x2, y1, y2) {
t1 <- tri.area(x1, x2, y1)
t2 <- tri.area(x2, y1, y2)
area <- t1 + t2
return(area)
}
tri.area <- function(x, y, z) {
area <- 0.5 * abs((x[1] - z[1]) * (y[2] - x[2]) - (x[1] - y[1]) * (z[2] - x[2]))
return(area)
}
boot.test <- function(x, y, f, num.perm = 1000, diag = FALSE, exact = TRUE) {
    # Runs a BOOTSTRAP test for a given function.
    # Takes as input f, a function that returns a test statistic (not a p-value).
require(gtools)
lenx <- length(x)
leny <- length(y)
# First Step, combine into one dataset
z <- c(x, y)
lenz <- length(z)
# Calculate TS for the ACTUAL data
ts.obs <- f(x, y)
ts.random <- c(NULL)
###
if (lenz < 10 & exact == TRUE) {
all.perm <- permutations(n = lenz, r = lenz, v = z, repeats.allowed = FALSE,
set = FALSE)
all.permx <- all.perm[, 1:lenx]
all.permy <- all.perm[, (lenx + 1):lenz]
exact.perm <- dim(all.perm)[1]
for (i in 1:exact.perm) {
ts.random[i] <- f(all.permx[i, ], all.permy[i, ])
}
p.val <- sum(abs(ts.random) >= abs(ts.obs))/exact.perm
c.val <- quantile(ts.random, probs = 0.95)
} else {
for (i in 1:num.perm) {
z1 <- sample(z, size = lenz, replace = TRUE)
a <- z1[1:lenx]
b <- z1[(lenx + 1):lenz]
ts.random[i] <- f(a, b)
}
p.val <- sum(abs(ts.random) >= abs(ts.obs))/num.perm
c.val <- quantile(ts.random, probs = 0.95)
}
# 1st value of output is p value, 2nd is 95% critical value, 3rd is the actual test
# statistic
if (diag == TRUE)
return(list(`p-value` = p.val, `95% crit val` = c.val, `Obs. TS` = ts.obs, ts.dist = ts.random)) else {
return(list(`p-value` = p.val, `95% crit val` = c.val, `Obs. TS` = ts.obs))
}
}
perm.test <- function(x, y, distops = NULL, f, fops = NULL, num.perm = 2001, diag = FALSE,
exact = FALSE, out=FALSE, do.plot=FALSE, ...) {
#Args:
# x: numeric vector
    #   y: numeric vector or the name of a quantile function (e.g. qgamma, qnorm)
    #   distops: list of quantile-function parameters (e.g. for qunif, list(min=0, max=2))
# f: function outputting a test statistic
# fops: if the test statistic has options, put them here. (e.g. for myts.out, size=.2)
# num.perm: number of permutations to assess p-values
#
    # Output:
    #  list containing the p-value, the 95% critical value of the permutation
    #  distribution, and the observed test statistic
if (is.null(distops)==FALSE){
if(is.list(distops)==FALSE) stop("distops must be a list")
}
if (is.null(fops)==FALSE){
if(is.list(fops)==FALSE) stop("fops must be a list")
}
if (out==TRUE){
res_out <- perm.test.out(x,y,distops,f,fops,num.perm,diag,exact)
return(res_out)
}
lenx <- length(x)
# Handling function inputs for y
if (is.function(y))
y <- as.character(substitute(y))
# Calculating observed test statistic
# One Sample
if (is.character(y)) {
y <- chartoli(y)
if (length(fops) == 0)
fops <- NULL
if (length(distops) == 0)
distops <- NULL
ts.obs <- do.call(f, c(list(x), list(names(y)), distops, fops))
}
# Two sample
if (is.numeric(y)){
if (length(fops)== 0)
fops <- NULL
if (is.null(fops[[1]])) # MUST BE EDITED ASAP. ONLY LETS ONE OPTION IN FOPS
fops <- NULL
ts.obs <- do.call(f, c(list(x,y), fops))
}
ts.random <- vector(mode = "numeric", length = num.perm)
# Two sample
if (is.numeric(y)) {
z <- c(x, y)
lenz <- length(z)
if (lenz < 11 & exact == TRUE) {
require(gtools)
all.perm <- permutations(n = lenz, r = lenz, v = z, repeats.allowed = FALSE,
set = FALSE)
all.permx <- all.perm[, 1:lenx]
all.permy <- all.perm[, (lenx + 1):lenz]
exact.perm <- dim(all.perm)[1]
for (i in 1:exact.perm) {
ts.random[i] <- f(all.permx[i, ], all.permy[i, ])
}
p.val <- sum(abs(ts.random) >= abs(ts.obs))/exact.perm
c.val <- quantile(ts.random, probs = 0.95)
} else {
for (i in 1:num.perm) {
z1 <- sample(z, size = lenz, replace = FALSE)
a <- z1[1:lenx]
b <- z1[(lenx + 1):lenz]
ts.random[i] <- do.call(f, c(list(a,b), fops))
}
p.val <- sum(abs(ts.random) >= abs(ts.obs)) / num.perm
c.val <- quantile(ts.random, probs = 0.95)
}
if (do.plot == TRUE){
hplot <- hist(ts.random, prob=TRUE)
hplot
segments(ts.obs, 0, x1=ts.obs, y1=max(hplot$density))
}
if (diag == TRUE) {
return(list("p-value" = p.val, "95% crit val" = c.val, "Obs. TS" = ts.obs,
"ts.dist" = ts.random))
} else {
return(list("p-value" = p.val, "95% crit val" = c.val, "Obs. TS" = ts.obs))
}
}
# One Sample
if (is.list(y)) {
ry <- dist.conv(funname = names(y), type = "r")
fy <- get(names(y), mode = "function", envir = parent.frame())
for (i in 1:num.perm) {
z <- do.call(ry, c(list(lenx), distops)) #(lenx,...)
ts.random[i] <- do.call(f, c(list(z), list(names(y)), distops, fops))
}
p.val <- sum(abs(ts.random) >= abs(ts.obs)) / num.perm
c.val <- quantile(ts.random, probs = 0.95)
if (do.plot == TRUE){
hplot <- hist(ts.random, prob=TRUE, main="Histogram of Permuted Test Statistic (line=Observed TS)")
hplot
segments(ts.obs,0,x1=ts.obs,y1=max(hplot$density))
}
if (diag == TRUE) {
# 1st value of output is p value, 2nd is 95% critical value, 3rd is the actual test
# statistic
return(list("p-value" = p.val, "95% crit val" = c.val, "Obs. TS" = ts.obs,
"ts.dist" = ts.random))
} else {
return(list("p-value" = p.val, "95% crit val" = c.val, "Obs. TS" = ts.obs))
}
}
}
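# Illustrative call (not part of the original file), wrapped in `if (FALSE)` so
# sourcing still only defines functions. Uses a throwaway difference-in-means
# statistic; the project's real test statistics live in functions_tstats.R.
if (FALSE) {
  mean_diff <- function(a, b) mean(a) - mean(b)
  set.seed(1)
  perm.test(rnorm(20), rnorm(20, mean = 1), f = mean_diff, num.perm = 501)
}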
power.res.onesamp <- function(x, y, distops = NULL, f, fops = NULL, g = perm.test, ...) {
# Args:
# x: numeric matrix
# y: either: function name, character naming a function, or
# list with the format list(qnorm=qnorm)
dim.x <- dim(x)
fun <- f
if (is.character(f))
fun <- get(fun, mode = "function", envir = parent.frame())
if (is.function(y))
y <- as.character(substitute(y))
if (is.character(y))
y <- chartoli(y)
pv <- vector(mode = "numeric", length = dim.x[1])
for (i in 1:dim.x[1]) {
a <- g(x[i, ], y, distops, f = fun, fops)
pv[i] <- a[[1]]
}
return(pv)
}
power.res.twosamp <- function(x, y, distops = NULL, f, fops = NULL, g = perm.test, num.perm = 1001, ...) {
# Args:
# x: numeric matrix
# y: either: function name, character naming a function, or
# list with the format list(qnorm=qnorm)
dim.x <- dim(x)
fun <- f
if (is.character(f))
fun <- get(fun, mode = "function", envir = parent.frame())
pv <- vector(mode = "numeric", length = dim.x[1])
for (i in 1:dim.x[1]) {
a <- g(x[i, ], y[i, ], distops, f = fun, fops, num.perm=num.perm )
pv[i] <- a[[1]]
}
return(pv)
}
Make_Inter_CDF <- function(x,y,interp=4){
z <-c(x,y)
z <- sort(z)
lenz <- length(z)
z1 <- seq(1/(lenz+1),lenz/(lenz+1), length.out=lenz)
#q1 <- quantile(z, probs = z1, type = interp)
#q_inter <- approxfun(z1, q1, yleft = min(q1), yright = max(q1))
cdf_inter <- approxfun(z,z1, yleft=0, yright=1)
return(cdf_inter)
}
Bi_Var_PIT <- function(x,y){
#com_QFun <- Make_Inter_QFun(x,y)
inter_CDF <- Make_Inter_CDF(x,y)
U_x <- inter_CDF(x)
U_y <- inter_CDF(y)
return(list(U_x,U_y))
}
Bi_Var_PIT_ks <- function(x,y, alpha=.05){
# Calculates Bivariate PIT using KS
a <- Bi_Var_PIT(x,y)
pval_x <- ks.test(a[[1]],punif,0,1)$p.value
pval_y <- ks.test(a[[2]],punif,0,1)$p.value
FWER <- alpha/2
pvals <- c(pval_x,pval_y)
if(pval_x < FWER & pval_y < FWER){
reject <- TRUE
}else{
reject <- FALSE
}
names(reject) <- "REJECT NULL?"
return(list("Reject Null?"=reject, "Pvals"=c(pval_x,pval_y)))
}
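# Illustrative call (not part of the original file): apply the bivariate PIT
# KS check to two samples from the same distribution; with alpha = .05 the
# null should usually not be rejected. Wrapped in `if (FALSE)` so sourcing
# this file still only defines functions.
if (FALSE) {
  set.seed(42)
  Bi_Var_PIT_ks(rnorm(50), rnorm(50), alpha = .05)
}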
|
/Power Simulations (ISERC Paper)/functions.R
|
no_license
|
morndorff/GoF-Test
|
R
| false | false | 8,193 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ancova.h.R
\name{ancova}
\alias{ancova}
\title{ANCOVA}
\usage{
ancova(data, dep, factors = NULL, covs = NULL, effectSize = NULL,
modelTest = FALSE, modelTerms = NULL, ss = "3", homo = FALSE,
norm = FALSE, qq = FALSE, contrasts = NULL, postHoc = NULL,
postHocCorr = list("tukey"), postHocES = list(),
postHocEsCi = FALSE, postHocEsCiWidth = 95, emMeans = list(list()),
emmPlots = TRUE, emmPlotData = FALSE, emmPlotError = "ci",
emmTables = FALSE, emmWeights = TRUE, ciWidthEmm = 95, formula)
}
\arguments{
\item{data}{the data as a data frame}
\item{dep}{the dependent variable from \code{data}, variable must be
numeric (not necessary when providing a formula, see examples)}
\item{factors}{the explanatory factors in \code{data} (not necessary when
providing a formula, see examples)}
\item{covs}{the explanatory covariates (not necessary when providing a
formula, see examples)}
\item{effectSize}{one or more of \code{'eta'}, \code{'partEta'}, or
\code{'omega'}; use eta², partial eta², and omega² effect sizes,
respectively}
\item{modelTest}{\code{TRUE} or \code{FALSE} (default); perform an overall
model test}
\item{modelTerms}{a formula describing the terms to go into the model (not
necessary when providing a formula, see examples)}
\item{ss}{\code{'1'}, \code{'2'} or \code{'3'} (default), the sum of
squares to use}
\item{homo}{\code{TRUE} or \code{FALSE} (default), perform homogeneity
tests}
\item{norm}{\code{TRUE} or \code{FALSE} (default), perform Shapiro-Wilk
tests of normality}
\item{qq}{\code{TRUE} or \code{FALSE} (default), provide a Q-Q plot of
residuals}
\item{contrasts}{a list of lists specifying the factor and type of contrast
to use, one of \code{'deviation'}, \code{'simple'}, \code{'difference'},
\code{'helmert'}, \code{'repeated'} or \code{'polynomial'}}
\item{postHoc}{a formula containing the terms to perform post-hoc tests on
(see the examples)}
\item{postHocCorr}{one or more of \code{'none'}, \code{'tukey'},
\code{'scheffe'}, \code{'bonf'}, or \code{'holm'}; provide no, Tukey,
Scheffe, Bonferroni, and Holm Post Hoc corrections respectively}
\item{postHocES}{a possible value of \code{'d'}; provide cohen's d measure
of effect size for the post-hoc tests}
\item{postHocEsCi}{\code{TRUE} or \code{FALSE} (default), provide
confidence intervals for the post-hoc effect sizes}
\item{postHocEsCiWidth}{a number between 50 and 99.9 (default: 95), the
width of confidence intervals for the post-hoc effect sizes}
\item{emMeans}{a formula containing the terms to estimate marginal means
for (see the examples)}
\item{emmPlots}{\code{TRUE} (default) or \code{FALSE}, provide estimated
marginal means plots}
\item{emmPlotData}{\code{TRUE} or \code{FALSE} (default), plot the data on
top of the marginal means}
\item{emmPlotError}{\code{'none'}, \code{'ci'} (default), or \code{'se'}.
Use no error bars, use confidence intervals, or use standard errors on the
marginal mean plots, respectively}
\item{emmTables}{\code{TRUE} or \code{FALSE} (default), provide estimated
marginal means tables}
\item{emmWeights}{\code{TRUE} (default) or \code{FALSE}, weigh each cell
equally or weigh them according to the cell frequency}
\item{ciWidthEmm}{a number between 50 and 99.9 (default: 95) specifying the
confidence interval width for the estimated marginal means}
\item{formula}{(optional) the formula to use, see the examples}
}
\value{
A results object containing:
\tabular{llllll}{
\code{results$main} \tab \tab \tab \tab \tab a table of ANCOVA results \cr
\code{results$model} \tab \tab \tab \tab \tab The underlying \code{aov} object \cr
\code{results$assump$homo} \tab \tab \tab \tab \tab a table of homogeneity tests \cr
\code{results$assump$norm} \tab \tab \tab \tab \tab a table of normality tests \cr
\code{results$assump$qq} \tab \tab \tab \tab \tab a q-q plot \cr
\code{results$contrasts} \tab \tab \tab \tab \tab an array of contrasts tables \cr
\code{results$postHoc} \tab \tab \tab \tab \tab an array of post-hoc tables \cr
\code{results$emm} \tab \tab \tab \tab \tab an array of the estimated marginal means plots + tables \cr
\code{results$residsOV} \tab \tab \tab \tab \tab an output \cr
}
Tables can be converted to data frames with \code{asDF} or \code{\link{as.data.frame}}. For example:
\code{results$main$asDF}
\code{as.data.frame(results$main)}
}
\description{
The Analysis of Covariance (ANCOVA) is used to explore the relationship
between a continuous dependent variable, one or more categorical
explanatory variables, and one or more continuous explanatory variables
(or covariates). It is essentially the same analysis as ANOVA, but
with the addition of covariates.
}
\examples{
data('ToothGrowth')
ancova(formula = len ~ supp + dose, data = ToothGrowth)
#
# ANCOVA
#
# ANCOVA
# -----------------------------------------------------------------------
# Sum of Squares df Mean Square F p
# -----------------------------------------------------------------------
# supp 205 1 205.4 11.4 0.001
# dose 2224 1 2224.3 124.0 < .001
# Residuals 1023 57 17.9
# -----------------------------------------------------------------------
#
ancova(
formula = len ~ supp + dose,
data = ToothGrowth,
postHoc = ~ supp,
emMeans = ~ supp)
}
|
/man/ancova.Rd
|
no_license
|
cran/jmv
|
R
| false | true | 5,477 |
rd
|
context("Model Fitting")
source("generate_test_datasets.R")
# Generate data sets and compare results of fitDRModel to the result of nls and
# lm for AIC function (if these are consistent parameter estimates, residual
# sum of square and degrees of freedom are consistent) and the vcov function
# (if these are consistent parameter estimates, RSS, df and gradient are
# consistent)
# TODO:
# * Against what do we compare the following things from testsFitting.R?
# - predict(fit0, predType="effect-curve", se.fit=TRUE)
# - predict(fit0, predType="full-model", se.fit=TRUE)
# - TD(fit0, Delta = 1)
# * Using `unname` to make all.equal shut up about unequal dimnames is a bit ugly
# * exponential model with covariates
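# (Re the `unname` point above: a sketch, assuming testthat 3e / waldo were available
#   -- which this suite may not require -- the same comparison could drop unname, e.g.
#   expect_equal(coef(fit0), coef(fitnls)[ord], tolerance = 1e-4, ignore_attr = TRUE).)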
# beta model -------------------------------------------------------------------
set.seed(2000)
ll <- getDosSampSiz()
datset <- getDFdataSet(ll$doses, ll$n)
bnds <- matrix(c(0.05, 0.05, 6, 6), nrow=2)
test_that("the beta model can be fitted (without covariates)", {
fit0 <- fitMod(x, y, datset, model = "betaMod", addCovars = ~1,
addArgs=list(scal=1.2*max(datset$x)), bnds=bnds, start=c(0.6, 0.6))
fitnls <- nls(y~betaMod(x, e0, eMax, delta1, delta2, 1.2*max(datset$x)),
start=c(e0=15, eMax=14, delta1=0.8, delta2=0.5), data=datset)
expect_equal(AIC(fit0), AIC(fitnls), tolerance = 0.0001)
expect_equal(fit0$df, summary(fitnls)$df[2], tolerance = 0.0001)
expect_equal(coef(fit0), coef(fitnls), tolerance = 0.0001)
expect_equal(vcov(fit0), vcov(fitnls), tolerance = 0.0001)
})
test_that("the beta model can be fitted (with covariates)", {
fit0 <- fitMod(x, y, datset, model="betaMod", addCovars = ~age+center,
addArgs=list(scal=1.2*max(datset$x)), bnds=bnds)
XX <- model.matrix(~center+age, data=datset)
scl <- 1.2*max(datset$x)
fitnls <- nls(y~cbind(XX, betaMod(x, 0, 1, delta1, delta2, scl)),
data=datset, start=c(delta1=1, delta2=0.2),
algorithm = "plinear")
expect_equal(AIC(fit0), AIC(fitnls), tolerance = 0.0001)
expect_equal(fit0$df, summary(fitnls)$df[2], tolerance = 0.0001)
ord <- c(3, 9, 1, 2, 8, 4, 5, 6, 7)
expect_equal(unname(coef(fit0)), unname(coef(fitnls))[ord], tolerance = 0.0001)
expect_equal(unname(vcov(fit0)), unname(vcov(fitnls))[ord, ord], tolerance = 0.0001)
})
# emax model -------------------------------------------------------------------
set.seed(1)
ll <- getDosSampSiz()
datset <- getDFdataSet(ll$doses, ll$n)
bnds <- c(1e-5, max(datset$x))
test_that("the emax model can be fitted (without covariates)", {
fit0 <- fitMod(x,y, datset, model="emax", addCovars = ~1, bnds=bnds)
fitnls <- nls(y~emax(x, e0, eMax, ed50), start=c(e0=-1, eMax=1.3, ed50=0.1), data=datset)
expect_equal(AIC(fit0), AIC(fitnls), tolerance = 0.0001)
expect_equal(fit0$df, summary(fitnls)$df[2], tolerance = 0.0001)
expect_equal(coef(fit0), coef(fitnls), tolerance = 0.0001)
expect_equal(vcov(fit0), vcov(fitnls), tolerance = 0.0001)
})
test_that("the emax model can be fitted (with covariates)", {
fit0 <- fitMod(x,y, datset, model="emax", addCovars = ~age+center, bnds=bnds)
XX <- model.matrix(~center+age, data=datset)
fitnls <- nls(y~cbind(XX, emax(x, 0, 1, ed50)),
data=datset, start=list(ed50=1), algorithm = "plinear")
expect_equal(AIC(fit0), AIC(fitnls), tolerance = 0.0001)
expect_equal(fit0$df, summary(fitnls)$df[2], tolerance = 0.0001)
ord <- c(2, 8, 1, 7, 3, 4, 5, 6)
expect_equal(unname(coef(fit0)), unname(coef(fitnls))[ord], tolerance = 0.0001)
expect_equal(unname(vcov(fit0)), unname(vcov(fitnls))[ord, ord], tolerance = 0.0001)
})
# sigEmax model ----------------------------------------------------------------
set.seed(13)
ll <- getDosSampSiz()
datset <- getDFdataSet(ll$doses, ll$n)
bnds <- matrix(c(1e-5, 1e-5, max(datset$x), 30), nrow=2)
test_that("the sigEmax model can be fitted (without covariates)", {
fit0 <- fitMod(x,y, datset, model = "sigEmax", addCovars = ~1, bnds=bnds)
fitnls <- nls(y~sigEmax(x, e0, eMax, ed50, h),
start=c(e0=6, eMax=17, ed50=240, h=2), data=datset)
expect_equal(AIC(fit0), AIC(fitnls), tolerance = 0.0001)
expect_equal(fit0$df, summary(fitnls)$df[2], tolerance = 0.0001)
expect_equal(coef(fit0), coef(fitnls), tolerance = 0.0001)
expect_equal(vcov(fit0), vcov(fitnls), tolerance = 0.0001)
})
test_that("the sigEmax model can be fitted (with covariates)", {
fit0 <- fitMod(x,y, datset, model="sigEmax", addCovars = ~age+center, bnds=bnds)
XX <- model.matrix(~center+age, data=datset)
fitnls <- nls(y~cbind(XX, sigEmax(x, 0, 1, ed50, h)),
data=datset, start=list(ed50=368, h=2), algorithm = "plinear")
expect_equal(AIC(fit0), AIC(fitnls), tolerance = 0.0001)
expect_equal(fit0$df, summary(fitnls)$df[2], tolerance = 0.0001)
ord <- c(3, 9, 1, 2, 8, 4, 5, 6, 7)
expect_equal(unname(coef(fit0)), unname(coef(fitnls))[ord], tolerance = 0.0001)
expect_equal(unname(vcov(fit0)), unname(vcov(fitnls))[ord, ord], tolerance = 0.0001)
})
# logistic model ---------------------------------------------------------------
set.seed(200)
ll <- getDosSampSiz()
datset <- getDFdataSet(ll$doses, ll$n)
bnds <- matrix(c(1e-5, 1e-5, max(datset$x), max(datset$x)/2), nrow=2)
test_that("the logistic model can be fitted (without covariates)", {
fit0 <- fitMod(x,y, datset, model="logistic", addCovars = ~1, bnds=bnds)
fitnls <- nls(y~logistic(x, e0, eMax, ed50, delta),
start=c(e0=0, eMax=16, ed50=250, delta=90), data=datset)
expect_equal(AIC(fit0), AIC(fitnls), tolerance = 0.0001)
expect_equal(fit0$df, summary(fitnls)$df[2], tolerance = 0.0001)
expect_equal(coef(fit0), coef(fitnls), tolerance = 0.0001)
expect_equal(vcov(fit0), vcov(fitnls), tolerance = 0.0001)
})
test_that("the logistic model can be fitted (with covariates)", {
fit0 <- fitMod(x,y, datset, model="logistic", addCovars = ~age+center, bnds=bnds)
XX <- model.matrix(~center+age, data=datset)
fitnls <- nls(y~cbind(XX, logistic(x, 0, 1, ed50, delta)),
data=datset, start=list(ed50=220, delta=48), algorithm = "plinear")
expect_equal(AIC(fit0), AIC(fitnls), tolerance = 0.0001)
expect_equal(fit0$df, summary(fitnls)$df[2], tolerance = 0.0001)
ord <- c(3, 9, 1, 2, 8, 4, 5, 6, 7)
expect_equal(unname(coef(fit0)), unname(coef(fitnls))[ord], tolerance = 0.0001)
expect_equal(unname(vcov(fit0)), unname(vcov(fitnls))[ord, ord], tolerance = 0.0001)
})
# exponential model ------------------------------------------------------------
set.seed(104)
ll <- getDosSampSiz()
datset <- getDFdataSet(ll$doses, ll$n)
bnds <- c(0.1, 2)*max(datset$x)
test_that("the exponential model can be fitted (without covariates)", {
fit0 <- fitMod(x,y, datset, model = "exponential", addCovars = ~1, bnds=bnds)
fitnls <- nls(y~exponential(x, e0, e1, delta), start=coef(fit0), data=datset)
expect_equal(AIC(fit0), AIC(fitnls), tolerance = 0.0001)
expect_equal(fit0$df, summary(fitnls)$df[2], tolerance = 0.0001)
expect_equal(coef(fit0), coef(fitnls), tolerance = 0.0001)
expect_equal(vcov(fit0), vcov(fitnls), tolerance = 0.0001)
})
test_that("the exponential model can be fitted (with covariates)", {
fit0 <- fitMod(x,y, datset, model = "exponential", addCovars = ~age+center,
bnds=bnds)
XX <- model.matrix(~center+age, data=datset)
fitnls <- nls(y~cbind(XX, exponential(x, 0, 1, delta)),
data=datset, start=c(delta=450), algorithm = "plinear")
expect_equal(AIC(fit0), AIC(fitnls), tolerance = 0.0001)
expect_equal(fit0$df, summary(fitnls)$df[2], tolerance = 0.0001)
ord <- c(2, 8, 1, 7, 3, 4, 5, 6)
expect_equal(unname(coef(fit0)), unname(coef(fitnls))[ord], tolerance = 0.0001)
expect_equal(unname(vcov(fit0)), unname(vcov(fitnls))[ord, ord], tolerance = 0.0001)
})
# linear model -----------------------------------------------------------------
ll <- getDosSampSiz()
datset <- getDFdataSet(ll$doses, ll$n)
test_that("the linear model can be fitted (without covariates)", {
fit0 <- fitMod(x,y, datset, model = "linear", addCovars = ~1)
fitlm <- lm(y~x, data=datset)
expect_equal(AIC(fit0), AIC(fitlm))
expect_equal(fit0$df, summary(fitlm)$df[2])
expect_equal(unname(coef(fit0)), unname(coef(fitlm)))
expect_equal(unname(vcov(fit0)), unname(vcov(fitlm)))
})
test_that("the linear model can be fitted (with covariates)", {
fit0 <- fitMod(x,y, datset, model = "linear", addCovars = ~age+center)
fitlm <- lm(y~x+age+center, data=datset)
expect_equal(AIC(fit0), AIC(fitlm))
expect_equal(fit0$df, summary(fitlm)$df[2])
expect_equal(unname(coef(fit0)), unname(coef(fitlm)))
expect_equal(unname(vcov(fit0)), unname(vcov(fitlm)))
})
# linlog model -----------------------------------------------------------------
ll <- getDosSampSiz()
datset <- getDFdataSet(ll$doses, ll$n)
off <- 0.05*max(datset$x)
test_that("the linlog model can be fitted (without covariates)", {
fit0 <- fitMod(x,y, datset, model = "linlog", addCovars = ~1,addArgs=list(off=off))
fitlm <- lm(y~log(x+off), data=datset)
expect_equal(AIC(fit0), AIC(fitlm))
expect_equal(fit0$df, summary(fitlm)$df[2])
expect_equal(unname(coef(fit0)), unname(coef(fitlm)))
expect_equal(unname(vcov(fit0)), unname(vcov(fitlm)))
})
test_that("the linlog model can be fitted (with covariates)", {
fit0 <- fitMod(x,y, datset, model = "linlog", addCovars = ~age+center,
addArgs=list(off=off))
fitlm <- lm(y~log(x+off)+age+center, data=datset)
expect_equal(AIC(fit0), AIC(fitlm))
expect_equal(fit0$df, summary(fitlm)$df[2])
expect_equal(unname(coef(fit0)), unname(coef(fitlm)))
expect_equal(unname(vcov(fit0)), unname(vcov(fitlm)))
})
# quadratic model --------------------------------------------------------------
ll <- getDosSampSiz()
datset <- getDFdataSet(ll$doses, ll$n)
test_that("the quadratic model can be fitted (without covariates)", {
fit0 <- fitMod(x,y, datset, model = "quadratic", addCovars = ~1)
fitlm <- lm(y~x+I(x^2), data=datset)
expect_equal(AIC(fit0), AIC(fitlm))
expect_equal(fit0$df, summary(fitlm)$df[2])
expect_equal(unname(coef(fit0)), unname(coef(fitlm)))
expect_equal(unname(vcov(fit0)), unname(vcov(fitlm)))
})
test_that("the quadratic model can be fitted (with covariates)", {
fit0 <- fitMod(x,y, datset, model = "quadratic", addCovars = ~age+center)
fitlm <- lm(y~x+I(x^2)+age+center, data=datset)
expect_equal(AIC(fit0), AIC(fitlm))
expect_equal(fit0$df, summary(fitlm)$df[2])
expect_equal(unname(coef(fit0)), unname(coef(fitlm)))
expect_equal(unname(vcov(fit0)), unname(vcov(fitlm)))
})
# ------------------------------------------------------------------------------
# ensure that predict with no argument uses the original data not the sorted
# data that were used for fitting
test_that("predict with no argument uses the original data", {
data(IBScovars)
ff <- fitMod(dose, resp, data=IBScovars, model="quadratic", addCovars = ~gender)
expect_equal(predict(ff, predType = "ls-means"),
predict(ff, predType = "ls-means", doseSeq = IBScovars[,3]))
expect_equal(predict(ff, predType = "full-model"),
predict(ff, predType = "full-model", newdata = IBScovars[,-2]))
expect_equal(predict(ff, predType = "effect-curve"),
predict(ff, predType = "effect-curve", doseSeq = IBScovars[,3]))
ff2 <- fitMod(dose, resp, data=IBScovars, model="quadratic")
expect_equal(predict(ff2, predType = "ls-means"),
predict(ff2, predType = "ls-means", doseSeq = IBScovars[,3]))
expect_equal(predict(ff2, predType = "full-model"),
predict(ff2, predType = "full-model", newdata = IBScovars[,-2]))
expect_equal(predict(ff2, predType = "effect-curve"),
predict(ff2, predType = "effect-curve", doseSeq = IBScovars[,3]))
dose <- unique(IBScovars$dose)
ord <- c(2,4,1,3,5)
mns <- as.numeric(tapply(IBScovars$resp, IBScovars$dose, mean)[ord])
ff3 <- fitMod(dose, mns, S=diag(5), model="quadratic", type = "general")
expect_equal(predict(ff3, predType = "ls-means"),
predict(ff3, predType = "ls-means", doseSeq = dose))
expect_equal(predict(ff3, predType = "effect-curve"),
predict(ff3, predType = "effect-curve", doseSeq = dose))
})
# ------------------------------------------------------------------------------
# ensure that S is also sorted when the dose is not entered sorted
test_that("S is also sorted when the dose is not entered sorted", {
data(IBScovars)
dose <- sort(unique(IBScovars$dose))
mns <- as.numeric(tapply(IBScovars$resp, IBScovars$dose, mean))
S <- c(1000,1,1,1,1)*diag(5)
ff1 <- fitMod(dose, mns, S = S, model="linear", type="general")
dose <- unique(IBScovars$dose)
ord <- c(2,4,1,3,5)
mns <- as.numeric(tapply(IBScovars$resp, IBScovars$dose, mean)[ord])
ff2 <- fitMod(dose, mns, S = S, model="linear", type="general")
ff3 <- fitMod(dose, mns, S = S[ord,ord], model="linear", type="general")
expect_equal(coef(ff1), coef(ff3))
})
test_that("fitMod complains if `resp` is a row-vector", {
doses <- seq(0, 100, length.out=5)
resp_col <- emax(doses, 2, 8, 50)
resp_row <- t(resp_col)
cov_mat <- diag(0.5, 5)
fit <- fitMod(doses, resp_col, model = "emax", S = cov_mat,
type = "general", bnds = defBnds(max(doses))$emax)
coefs <- unname(coef(fit))
expect_equal(coefs, c(2, 8, 50), tolerance = 1e-5)
expect_warning(fitMod(doses, resp_row, model = "emax", S = cov_mat,
type = "general", bnds = defBnds(max(doses))$emax),
"resp_row is not a numeric but a matrix, converting with as.numeric()")
})
|
/tests/testthat/test-fitMod.R
|
no_license
|
bbnkmp/DoseFinding
|
R
| false | false | 13,655 |
r
|
## Copyright (c) 2016, James P. Howard, II <jh@jameshoward.us>
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
##
## Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
##
## Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#' @title Computational Methods for Numerical Analysis
#' @docType package
#'
#' @description
#' Provides the source and examples for \emph{Computational Methods for
#' Numerical Analysis with R}.
#'
#' @details
#' This package provides a suite of simple implementations of standard
#' methods from numerical analysis. The collection is designed to
#' accompany \emph{Computational Methods for Numerical Analysis with R}
#' by James P. Howard, II. Together, these functions provide methods to
#' support linear algebra, interpolation, integration, root finding,
#' optimization, and differential equations.
#'
#' @author
#' James P. Howard, II <jh@jameshoward.us>
#'
"_PACKAGE"
#> [1] "_PACKAGE"
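## Illustration only (not a cmna export): a minimal bisection root finder of the kind
## this package collects, written inline so that no specific cmna signature is assumed.
bisect_example <- function(f, a, b, tol = 1e-8, maxiter = 100L) {
  stopifnot(f(a) * f(b) < 0)            # the root must be bracketed by [a, b]
  for (i in seq_len(maxiter)) {
    m <- (a + b) / 2
    if (abs(f(m)) < tol || (b - a) / 2 < tol) return(m)
    if (sign(f(m)) == sign(f(a))) a <- m else b <- m   # keep the sign change
  }
  m
}
## e.g. bisect_example(function(x) x^2 - 2, 0, 2) is approximately sqrt(2)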
|
/R/cmna.R
|
no_license
|
helixcn/cmna
|
R
| false | false | 2,085 |
r
|
summary(iris)
install.packages("sampling")
library(sampling)
# generating the strata (data set, vector of stratification columns, and vector with the size of each stratum)
amostra_estrat = strata(iris, c("Species"), size=c(25,25,25), method="srswor")
summary(amostra_estrat)
summary(infert)
# proportional allocation: share of each education level in infert (12, 120 and 116 of the 248 records)
round(12 / 248 * 100)
round(120 / 248 * 100)
round(116 / 248 * 100)
# stratified sample of infert by education, with stratum sizes following those shares
amostra_infert = strata(infert, c("education"), size=c(5,48,47), method="srswor")
summary(amostra_infert)
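# Optional follow-up (sketch): getdata() from the 'sampling' package attaches the
# sampled rows to the sample design; the object names below are only illustrative.
dados_amostra_iris = getdata(iris, amostra_estrat)
dados_amostra_infert = getdata(infert, amostra_infert)
head(dados_amostra_iris)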
|
/R/amostragem_estratificada.R
|
no_license
|
vinibfranc/CursoDataScience
|
R
| false | false | 444 |
r
|
imputeCensored <-
function(data=NULL, estimator=makeLearner("regr.lm"), epsilon=0.1, maxit=1000) {
if(!testClass(estimator, "Learner")) {
stop("Need regressor to impute values!")
}
assertClass(data, "llama.data")
if(is.null(data$success)) {
stop("Need successes to impute censored values!")
}
if(epsilon <= 0) {
stop("Epsilon must be > 0!")
}
data$original_data = data$data
i = 0
for(i in 1:length(data$success)) {
s = data$success[i]
p = data$performance[i]
if(!any(data$data[[s]])) {
stop(paste("Cannot impute for ", p, ", no non-censored values!"), sep="")
}
if(!all(data$data[[s]])) {
splits = split(1:nrow(data$data), data$data[[s]])
haveind = splits$`TRUE`
wantind = splits$`FALSE`
if(is.null(data$algorithmFeatures)) {
task = makeRegrTask(id="imputation", target="target", data=cbind(data.frame(target=data$data[haveind,p]), data$data[haveind,][data$features]))
model = train(estimator, task=task)
data$data[wantind,p] = predict(model, newdata=data$data[wantind,][data$features])$data$response
} else {
task = makeRegrTask(id="imputation", target="target", data=cbind(data.frame(target=data$data[haveind,p]), data$data[haveind,][c(data$features, data$algorithmFeatures)]))
model = train(estimator, task=task)
data$data[wantind,p] = predict(model, newdata=data$data[wantind,][c(data$features, data$algorithmFeatures)])$data$response
}
diff = Inf
it = 1
while(diff > epsilon) {
if(is.null(data$algorithmFeatures)) {
task = makeRegrTask(id="imputation", target="target", data=cbind(data.frame(target=data$data[[p]]), data$data[data$features]))
model = train(estimator, task=task)
preds = predict(model, newdata=data$data[wantind,][data$features])$data$response
} else {
task = makeRegrTask(id="imputation", target="target", data=cbind(data.frame(target=data$data[[p]]), data$data[c(data$features, data$algorithmFeatures)]))
model = train(estimator, task=task)
preds = predict(model, newdata=data$data[wantind,][c(data$features, data$algorithmFeatures)])$data$response
}
diff = max(abs(preds - data$data[wantind,p]))
data$data[wantind,p] = preds
it = it + 1
if(it > maxit) {
warning(paste("Did not reach convergence within ", maxit, " iterations for ", p, ".", sep=""))
break
}
}
data$data[[s]] = rep.int(T, nrow(data$data))
}
}
return(data)
}
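## Usage sketch (illustration only, not part of the package source): impute censored
## runs in the 'satsolvers' example scenario shipped with llama, using an rpart
## regressor from mlr; the data set and learner names are assumptions for demonstration.
if (FALSE) {
    library(llama)
    library(mlr)
    data(satsolvers)
    imputed = imputeCensored(satsolvers, estimator = makeLearner("regr.rpart"),
                             epsilon = 0.1, maxit = 100)
    summary(imputed$data)
}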
|
/R/imputeCensored.R
|
no_license
|
cran/llama
|
R
| false | false | 2,901 |
r
|
# --- Effect of French govt spending shock on Italian exports
# Data period: 1980q1-2018q4
# 95% and 68% confidence intervals
# h = 4, 8 and 12
# OLS with left-hand side in growth rates and 4 lags of x(t-1)
source('~/Studie/MSc ECO/Period 5-6 MSc thesis/MSc thesis RStudio project/Scripts/Spillovers FR and IT v4 1.R')
# Load packages
library(ggplot2)
library(gridExtra)
library(dplyr)
library(data.table)
library(lmtest)
library(sandwich)
rmIT.l <- data.frame(shift(data2$rmIT, n = 1:12, type = "lead"))
names(rmIT.l) = c("rmIT.l1", "rmIT.l2", "rmIT.l3", "rmIT.l4", "rmIT.l5", "rmIT.l6",
"rmIT.l7", "rmIT.l8", "rmIT.l9", "rmIT.l10", "rmIT.l11", "rmIT.l12")
rxIT.l <- data.frame(shift(data2$rxIT, n = 1:12, type = "lead"))
names(rxIT.l) = c("rxIT.l1", "rxIT.l2", "rxIT.l3", "rxIT.l4", "rxIT.l5", "rxIT.l6",
"rxIT.l7", "rxIT.l8", "rxIT.l9", "rxIT.l10", "rxIT.l11", "rxIT.l12")
l.rmIT <- data.frame(shift(data2$rmIT, n = 1:4, type = "lag"))
names(l.rmIT) = c("l1.rmIT", "l2.rmIT", "l3.rmIT", "l4.rmIT")
l.rxIT <- data.frame(shift(data2$rxIT, n = 1:4, type = "lag"))
names(l.rxIT) = c("l1.rxIT", "l2.rxIT", "l3.rxIT", "l4.rxIT")
rmFR.l <- data.frame(shift(data2$rmFR, n = 1:12, type = "lead"))
names(rmFR.l) = c("rmFR.l1", "rmFR.l2", "rmFR.l3", "rmFR.l4", "rmFR.l5", "rmFR.l6",
"rmFR.l7", "rmFR.l8", "rmFR.l9", "rmFR.l10", "rmFR.l11", "rmFR.l12")
rxFR.l <- data.frame(shift(data2$rxFR, n = 1:12, type = "lead"))
names(rxFR.l) = c("rxFR.l1", "rxFR.l2", "rxFR.l3", "rxFR.l4", "rxFR.l5", "rxFR.l6",
"rxFR.l7", "rxFR.l8", "rxFR.l9", "rxFR.l10", "rxFR.l11", "rxFR.l12")
l.rmFR <- data.frame(shift(data2$rmFR, n = 1:4, type = "lag"))
names(l.rmFR) = c("l1.rmFR", "l2.rmFR", "l3.rmFR", "l4.rmFR")
l.rxFR <- data.frame(shift(data2$rxFR, n = 1:4, type = "lag"))
names(l.rxFR) = c("l1.rxFR", "l2.rxFR", "l3.rxFR", "l4.rxFR")
data3$shockFR2 <- (data3$shockFR / unlist(l.rxFR[1])) / sd((data3$shockFR / unlist(l.rxFR[1])), na.rm = TRUE)
data3$shockFR3 <- data3$shockFR2 / 100
data3$shockIT2 <- (data3$shockIT / unlist(l.rxIT[1])) / sd((data3$shockIT / unlist(l.rxIT[1])), na.rm = TRUE)
data3$shockIT3 <- data3$shockIT2 / 100
shockFR5 <- data.frame(data3$shockFR2 * 100); names(shockFR5) <- "shockFR5"
shockNL5 <- data.frame(data3$shockNL2 * 100); names(shockNL5) <- "shockNL5"
shockDE5 <- data.frame(data3$shockDE2 * 100); names(shockDE5) <- "shockDE5"
shockES5 <- data.frame(data3$shockES2 * 100); names(shockES5) <- "shockES5"
shockIT5 <- data.frame(data3$shockIT2 * 100); names(shockIT5) <- "shockIT5"
data4 <- cbind(data3, shockFR5, shockNL5, shockDE5, shockIT5, shockES5,
l.rmIT, l.rxIT, rmIT.l, rxIT.l, l.rmFR, l.rxFR, rmFR.l, rxFR.l)
data5 <- subset(data4, select = -c(30:32, 35:37, 153:204))
h <- 12
# -- OLS regressions
# -- Equation 5
lhsFRIT50 <- (data5$rxIT - data5$l1.rxIT) / data5$l1.rxIT
lhsFRIT5 <- lapply(1:h, function(x) (data5[, 171+x] - data5$l1.rxIT) / data5$l1.rxIT)
lhsFRIT5 <- data.frame(lhsFRIT5)
names(lhsFRIT5) = paste("lhsFRIT5", 1:h, sep = "")
data6 <- cbind(data5, lhsFRIT50, lhsFRIT5)
FRIT5 <- lapply(1:13, function(x) lm(data6[, 215+x] ~ shockFR2 + l1.debtIT + l1.intIT + l1.lrtrIT + l1.lrgIT + l1.lryITc + l2.debtIT + l2.intIT + l2.lrtrIT + l2.lrgIT + l2.lryITc + l3.debtIT + l3.intIT + l3.lrtrIT + l3.lrgIT + l3.lryITc + l4.debtIT + l4.intIT + l4.lrtrIT + l4.lrgIT + l4.lryITc + shockDE2 + shockIT2 + shockES2 + shockNL2, data = data6))
summariesFRIT5 <- lapply(FRIT5, summary)
FRIT5conf95 <- lapply(FRIT5, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.95)
FRIT5conf68 <- lapply(FRIT5, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.68)
FRIT5up95 <- lapply(1:13, function(x) FRIT5conf95[[x]][2,2])
FRIT5low95 <- lapply(1:13, function(x) FRIT5conf95[[x]][2,1])
FRIT5up68 <- lapply(1:13, function(x) FRIT5conf68[[x]][2,2])
FRIT5low68 <- lapply(1:13, function(x) FRIT5conf68[[x]][2,1])
betaFRITt <- lapply(summariesFRIT5, function(x) x$coefficients[2,1])
names(betaFRITt) <- paste("betaFRITt", 0:h, sep = "")
# -- Equation 6
lhsFRIT60 <- (data6$rgFR - data6$l1.rgFR) / data6$l1.rxIT
lhsFRIT6 <- lapply(1:h, function(x) (data6[, 96+x] - data6$l1.rgFR) / data6$l1.rxIT)
lhsFRIT6 <- data.frame(lhsFRIT6)
names(lhsFRIT6) = paste("lhsFRIT6", 1:h, sep = "")
data6 <- cbind(data6, lhsFRIT60, lhsFRIT6)
FRIT6 <- lapply(1:13, function(x) lm(data6[, 228+x] ~ shockFR3 + l1.debtFR + l1.intFR + l1.lrtrFR + l1.lrgFR + l1.lryFRc + l2.debtFR + l2.intFR + l2.lrtrFR + l2.lrgFR + l2.lryFRc + l3.debtFR + l3.intFR + l3.lrtrFR + l3.lrgFR + l3.lryFRc + l4.debtFR + l4.intFR + l4.lrtrFR + l4.lrgFR + l4.lryFRc + shockDE3 + shockNL3 + shockES3 + shockIT3, data = data6))
summariesFRIT6 <- lapply(FRIT6, summary)
FRIT6conf95 <- lapply(FRIT6, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.95)
FRIT6conf68 <- lapply(FRIT6, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.68)
FRIT6up95 <- lapply(1:13, function(x) FRIT6conf95[[x]][2,2])
FRIT6low95 <- lapply(1:13, function(x) FRIT6conf95[[x]][2,1])
FRIT6up68 <- lapply(1:13, function(x) FRIT6conf68[[x]][2,2])
FRIT6low68 <- lapply(1:13, function(x) FRIT6conf68[[x]][2,1])
gammaFRITt <- lapply(summariesFRIT6, function(x) x$coefficients[2,1])
names(gammaFRITt) <- paste("gammaFRITt", 0:h, sep = "")
# -- Cumulative multiplier
mFRITtc <- cumsum(as.numeric(betaFRITt)) / cumsum(as.numeric(gammaFRITt)); as.numeric(mFRITtc)
# --- Effect of Italian govt spending shock on French exports
# -- Equation 5
lhsITFR50 <- (data6$rxFR - data6$l1.rxFR) / data6$l1.rxFR
lhsITFR5 <- lapply(1:h, function(x) (data6[, 203+x] - data6$l1.rxFR) / data6$l1.rxFR)
lhsITFR5 <- data.frame(lhsITFR5)
names(lhsITFR5) = paste("lhsITFR5", 1:h, sep = "")
data6 <- cbind(data6, lhsITFR50, lhsITFR5)
ITFR5 <- lapply(1:13, function(x) lm(data6[, 241+x] ~ shockIT2 + l1.debtFR + l1.intFR + l1.lrtrFR + l1.lrgFR + l1.lryFRc + l2.debtFR + l2.intFR + l2.lrtrFR + l2.lrgFR + l2.lryFRc + l3.debtFR + l3.intFR + l3.lrtrFR + l3.lrgFR + l3.lryFRc + l4.debtFR + l4.intFR + l4.lrtrFR + l4.lrgFR + l4.lryFRc + shockFR2 + shockDE2 + shockNL2 + shockES2, data = data6))
summariesITFR5 <- lapply(ITFR5, summary)
ITFR5conf95 <- lapply(ITFR5, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.95)
ITFR5conf68 <- lapply(ITFR5, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.68)
ITFR5up95 <- lapply(1:13, function(x) ITFR5conf95[[x]][2,2])
ITFR5low95 <- lapply(1:13, function(x) ITFR5conf95[[x]][2,1])
ITFR5up68 <- lapply(1:13, function(x) ITFR5conf68[[x]][2,2])
ITFR5low68 <- lapply(1:13, function(x) ITFR5conf68[[x]][2,1])
betaITFRt <- lapply(summariesITFR5, function(x) x$coefficients[2,1])
names(betaITFRt) <- paste("betaITFRt", 0:h, sep = "")
# -- Equation 6
lhsITFR60 <- (data6$rgIT - data6$l1.rgIT) / data6$l1.rxFR
lhsITFR6 <- lapply(1:h, function(x) (data6[, 84+x] - data6$l1.rgIT) / data6$l1.rxFR)
lhsITFR6 <- data.frame(lhsITFR6)
names(lhsITFR6) = paste("lhsITFR6", 1:h, sep = "")
data6 <- cbind(data6, lhsITFR60, lhsITFR6)
ITFR6 <- lapply(1:13, function(x) lm(data6[, 254+x] ~ shockIT3 + l1.debtIT + l1.intIT + l1.lrtrIT + l1.lrgIT + l1.lryITc + l2.debtIT + l2.intIT + l2.lrtrIT + l2.lrgIT + l2.lryITc + l3.debtIT + l3.intIT + l3.lrtrIT + l3.lrgIT + l3.lryITc + l4.debtIT + l4.intIT + l4.lrtrIT + l4.lrgIT + l4.lryITc + shockDE3 + shockNL3 + shockFR3 + shockES3, data = data6))
summariesITFR6 <- lapply(ITFR6, summary)
ITFR6conf95 <- lapply(ITFR6, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.95)
ITFR6conf68 <- lapply(ITFR6, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.68)
ITFR6up95 <- lapply(1:13, function(x) ITFR6conf95[[x]][2,2])
ITFR6low95 <- lapply(1:13, function(x) ITFR6conf95[[x]][2,1])
ITFR6up68 <- lapply(1:13, function(x) ITFR6conf68[[x]][2,2])
ITFR6low68 <- lapply(1:13, function(x) ITFR6conf68[[x]][2,1])
gammaITFRt <- lapply(summariesITFR6, function(x) x$coefficients[2,1])
names(gammaITFRt) <- paste("gammaITFRt", 0:h, sep = "")
# -- Cumulative multiplier
mITFRtc <- cumsum(as.numeric(betaITFRt)) / cumsum(as.numeric(gammaITFRt)); as.numeric(mITFRtc)
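# -- Optional plotting sketch (illustration only): ggplot2/gridExtra are loaded above
# but not used in this file; one way to turn the point estimates and Newey-West bands
# computed above into an impulse-response figure, using only objects defined above.
irfFRIT <- data.frame(h = 0:h,
                      beta = unlist(betaFRITt),
                      low95 = unlist(FRIT5low95), up95 = unlist(FRIT5up95),
                      low68 = unlist(FRIT5low68), up68 = unlist(FRIT5up68))
pFRIT <- ggplot(irfFRIT, aes(x = h, y = beta)) +
  geom_ribbon(aes(ymin = low95, ymax = up95), fill = "grey85") +
  geom_ribbon(aes(ymin = low68, ymax = up68), fill = "grey65") +
  geom_line() +
  geom_hline(yintercept = 0, linetype = "dashed") +
  labs(x = "Horizon (quarters)", y = "Response of Italian exports",
       title = "French spending shock on Italian exports")
pFRIT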
|
/trade spillovers/Trade spillovers IT and FR v3 1.R
|
no_license
|
mdg9709/spilloversNL
|
R
| false | false | 8,213 |
r
|
########################################################################
## Description: Loads functions used by mir_prep script
## Input(s)/Output(s): see individual functions
## How To Use: must be sourced by mir_prep script
########################################################################
library(here)
if (!exists("code_repo")) {
code_repo <- sub("cancer_estimation.*", 'cancer_estimation', here())
if (!grepl("cancer_estimation", code_repo)) code_repo <- file.path(code_repo, 'cancer_estimation')
}
require(data.table)
library(plyr)
library(dplyr)
source(get_path("r_test_utilities", process='common'))
source(get_path('cdb_utils_r'))
################################################################################
## Define 'Set' and 'Get' Functions
################################################################################
mir_prep.get_uidColumns <- function(){
    ## Returns the fixed list of UID columns that uniquely identify a row of
    ##      model input data
##
uid_columns <- c('NID', 'location_id', 'year_id', 'sex_id',
'acause', 'age_group_id')
return(uid_columns)
}
mir_prep.get_causeConfig <- function(){
## Loads and returns the cause_config for the current model input
##
if (!exists("mir_prep.__causeConfigurationDF")) {
model_input_id <- mir.get_mir_version_id()
this_config <- as.numeric(subset(mir.get_inputConfig(),
mir_model_version_id == model_input_id,
'mir_cause_config_id'))
cause_config <- cdb.get_table("mir_cause_config")
this_config_loc = (cause_config$mir_cause_config_id == this_config &
!is.na(cause_config$mir_cause_config_id) )
cause_config <- cause_config[this_config_loc,
c('mir_cause_config_id', 'acause','year_agg','age_agg', 'min_cases')]
cause_config[is.na(cause_config$age_agg), 'age_agg'] <- 1
mir_prep.__causeConfigurationDF <<- cause_config
}
return(mir_prep.__causeConfigurationDF)
}
mir_prep.get_minCasesMap <- function(which_type){
## Loads and returns a map from the cause_config and input_config which
    ##      dictates the minimum number of cases by which to restrict input data
##
if (!(which_type %in% c("data", "lower_cap", "upper_cap"))) {
        stop("wrong type sent to mir_prep.get_minCasesMap")
}
if (!exists("mir_prep.__minCasesMap")) {
input_config <- mir.get_inputConfig()
cause_config <- mir_prep.get_causeConfig()
# subset cause_config, keeping only the most recent entry for each acause
minimum_cases_map <- cause_config[, c('acause', 'min_cases')]
min_cases_name_all = names(minimum_cases_map) %in% c('min_cases')
names(minimum_cases_map)[min_cases_name_all] <- 'min_cases'
input_min <- minimum_cases_map
# for each map, if no cause-specific minimum requirement is set, replace
# the missing requirement with the general minimum_cases_requirement
set_data = (input_min$min_cases == 0 | is.na(input_min$min_cases))
input_min[set_data, 'min_cases'] <- input_config$min_cases_data
lower_cap_min <- minimum_cases_map
set_lower = (lower_cap_min$min_cases == 0 | is.na(lower_cap_min$min_cases))
lower_cap_min[set_lower, 'min_cases'] <- input_config$min_cases_lower_caps
# For upper caps use same minimum-cases requirement
upper_cap_min <- minimum_cases_map
set_upper = (upper_cap_min$min_cases == 0 | is.na(upper_cap_min$min_cases))
upper_cap_min[set_upper, 'min_cases'] <- input_config$min_cases_upper_caps
mir_prep.__minCasesMap <<- list(
"data" = input_min,
"lower_cap" = lower_cap_min,
"upper_cap" = upper_cap_min)
}
return(as.data.frame(mir_prep.__minCasesMap[[which_type]]))
}
mir_prep.load_aggregationConfig <- function(aggregate_type){
## Loads and returns an aggregate map from the cause_config which dictates
## by which year or age data are to be aggregated
##
cause_config <- mir_prep.get_causeConfig()
if (!(aggregate_type %in% c('year_id', 'age_group_id'))) {
stop(print("cannot aggregate by", var_to_aggregate))
} else if (aggregate_type == 'year_id') {
aggregate_map <- subset(cause_config,
cause_config$year_agg > 1,
c('acause', 'year_agg'))
setnames(aggregate_map, old='year_agg', new='agg')
} else if (aggregate_type == "age_group_id") {
aggregate_map <- cause_config[, c('acause', 'age_agg')]
setnames(aggregate_map, old='age_agg', new='agg')
}
return(aggregate_map)
}
mir_prep.get_new_causes <- function(){
## Loads and returns the causes that are new for the current gbd round
current_gbd <- as.integer(get_gbd_parameter("current_gbd_round"))
previous_gbd <- current_gbd - 1
mir_causes <- cdb.get_table("mir_model_entity")
mir_causes <- subset(mir_causes, is_active == 1 & gbd_round_id == current_gbd | gbd_round_id == previous_gbd)
mir_causes <- mir_causes[!duplicated(mir_causes$acause), ]
new_mir_causes <- subset(mir_causes, gbd_round_id == current_gbd)
return(new_mir_causes)
}
########################################################################
## Pipeline Functions
########################################################################
mir_prep.aggData <- function(input_df, var_to_aggregate, mor_type){
## Loads and returns an aggregation config, then aggregates data per the
## agg assigned in that config
##
uid_cols <- mir_prep.get_uidColumns()
# load cause_config to create a map of data that should be aggregated to bins
aggregate_map <- mir_prep.load_aggregationConfig(var_to_aggregate)
print(paste0('Aggregating by ', var_to_aggregate, '...'))
# create lists of data to be aggregated or kept
if (mor_type == "CR") {
cols_to_aggregate <- c('cases', 'deaths', 'seq')
} else {
cols_to_aggregate <- c('cases', 'deaths')
}
if ('pop' %in% colnames(input_df)) {cols_to_aggregate = c(cols_to_aggregate, 'pop')}
# subset data by aggregation requirement (saves time/memory)
to_ag <- merge(input_df, aggregate_map, by = 'acause', all=FALSE)
no_ag <- subset(input_df,
!(input_df$acause %in% unique(aggregate_map$acause)),
c(uid_cols, cols_to_aggregate))
# Replace values for the var_to_aggregate
if (var_to_aggregate == 'year_id') {
to_ag$origYear <- to_ag$year_id
# subtract modulus of bin to set new estimation year
to_ag$year_id <- to_ag$origYear - to_ag$origYear%%to_ag$agg
} else if (var_to_aggregate == "age_group_id"){
# adjust age groups to enable aggregation
tooYoung = to_ag$age_group_id <= to_ag$agg
to_ag[tooYoung, 'age_group_id'] <- to_ag[tooYoung, 'agg']
} else {
stop(paste(
"Error: var_to_aggregate must be 'year_id' or 'age_group_id'. You sent",
var_to_aggregate))
}
if (mor_type == "CR") {
to_ag <- to_ag %>% group_by(NID, location_id, year_id, sex_id, acause, age_group_id) %>% mutate(min_seq = min(seq))
to_ag$seq[which(to_ag$seq!=to_ag$min_seq)]<- 0
# aggregate data
aggregated <- aggregate(cbind(to_ag$cases, to_ag$deaths, to_ag$seq),
by = to_ag[,uid_cols],
FUN = sum)
setnames(aggregated, old = c('V1', 'V2', 'V3'), new=cols_to_aggregate)
test_cols <- uid_cols[uid_cols %ni% "NID"]
if ( nrow(aggregated[duplicated(aggregated[,test_cols]),]) > 0) {
generic_nid = get_gbd_parameter("generic_cancer_nid")
aggregated[duplicated(aggregated[,test_cols]),'NID'] <- generic_nid
aggregated <- aggregate(cbind(to_ag$cases, to_ag$deaths, to_ag$seq),
by = to_ag[,uid_cols],
FUN = sum)
setnames(aggregated, old = c('V1', 'V2','V3'), new=cols_to_aggregate)
}
} else {
# aggregate data
aggregated <- aggregate(cbind(to_ag$cases, to_ag$deaths),
by = to_ag[,uid_cols],
FUN = sum)
setnames(aggregated, old = c('V1', 'V2'), new=cols_to_aggregate)
test_cols <- uid_cols[uid_cols %ni% "NID"]
if ( nrow(aggregated[duplicated(aggregated[,test_cols]),]) > 0) {
generic_nid = get_gbd_parameter("generic_cancer_nid")
aggregated[duplicated(aggregated[,test_cols]),'NID'] <- generic_nid
aggregated <- aggregate(cbind(to_ag$cases, to_ag$deaths),
by = to_ag[,uid_cols],
FUN = sum)
setnames(aggregated, old = c('V1', 'V2'), new=cols_to_aggregate)
}
}
# Re-attach aggregated data to the data that was left aside
outputDf <- rbind(no_ag, aggregated)
## Calculate the mi ratio, dropping undefined values
outputDf$mi_ratio <- outputDf$deaths/outputDf$cases
notUndefined = which(!is.na(outputDf$mi_ratio) &
!is.nan(outputDf$mi_ratio) &
!is.infinite(outputDf$mi_ratio))
outputDf <- outputDf[notUndefined, ]
mir_prep.test_aggData(outputDf,
input_df,
var_to_aggregate,
columnsAggregated=cols_to_aggregate)
return(outputDf)
}
mir_prep.applyRestrictions <- function(mi_df, restriction_type) {
## Applies restrictions by type, then applies restrictions by HAQ value
##
print("Applying input restrictions...")
restricted_data <- mir_prep.restrictByConfigSettings(mi_df, restriction_type)
restricted_data <- mir_prep.restrictByHAQ(restricted_data)
return(restricted_data)
}
mir_prep.restrictByConfigSettings <- function(mi_df, config_type) {
## Apply standard restrictions as well as those dictated by the input_config
## and the cause_config
## Note: min_cases_map is a dataframe mapping each cause to its minimum-cases
## requirement
##
print(" applying config restrictions...")
    input_cols = unique(c(colnames(mi_df), 'mi_ratio'))
# Recalculate and restrict by mi_ratio
mi_df$mi_ratio <- mi_df$deaths/mi_df$cases
input_config <- mir.get_inputConfig()
max_mi_accepted = as.integer(input_config$max_mi_input_accepted)
new_mir_causes <- mir_prep.get_new_causes()
mi_df_new_causes <- mi_df[mi_df$acause %in% new_mir_causes$acause,]
acceptable_mi_new_causes = (mi_df_new_causes$mi_ratio <= max_mi_accepted &
mi_df_new_causes$cases > 0)
mi_df_new_causes <- mi_df_new_causes[acceptable_mi_new_causes ,]
mi_df_old_causes <- mi_df[!mi_df$acause %in% new_mir_causes$acause,]
acceptable_mi_old_causes = (mi_df_old_causes$mi_ratio <= max_mi_accepted &
mi_df_old_causes$mi_ratio > 0 &
mi_df_old_causes$deaths > 1 &
mi_df_old_causes$cases > 0)
mi_df_old_causes <- mi_df_old_causes[acceptable_mi_old_causes ,]
mi_df <- rbind(mi_df_old_causes, mi_df_new_causes)
# Keep only data with cases/deaths greater than an optional minimum threshold
# If doing so would drop all data for a cause-age pair, reset the minimum
min_cases_map <- mir_prep.get_minCasesMap(config_type)
# Set minimum cases to zero for new causes
min_cases_map$min_cases[min_cases_map$acause %in% new_mir_causes$acause] <- 0
mi_df <- merge(mi_df, min_cases_map, by="acause", all.x=TRUE)
meets_minCasesReq = (mi_df$cases >= mi_df$min_cases)
mi_df <- mi_df[meets_minCasesReq, input_cols]
## return restricted data
mir_prep.test_applyRestrictions(mi_df)
return(mi_df)
}
mir_prep.restrictByHAQ <- function(mi_df){
## Drops data based on haq quantile comparisons.
## Drops all data for quintile 1-4 that are > quartile 3 by haq quintile, age,
## sex_id, cancer, then collapse to max value by haq_quintile, age, sex_id, cancer
##
# Attach current best version of HAQ values to the data
print(" applying HAQ restrictions...")
## Load HAQ values
input_config <- mir.get_inputConfig()
gbd_round_id <- input_config$gbd_round_id
haq <- mir.get_HAQValues(gbd_round_id)
mi_df <- merge(mi_df,
haq,
by=c('location_id', 'year_id'),
all.x=TRUE)
# Determine median of high haq by age, sex, cancer, then collapse
high_haq_data <- mi_df[mi_df$haq_quintile == 5,]
high_haq_median <- aggregate(mi_ratio~age_group_id+sex_id+acause,
data=high_haq_data,
FUN = median)
names(high_haq_median)[names(high_haq_median)=='mi_ratio'] <- 'high_haq_median'
## Merge high_haq_median onto full dataset for comparison
cancer_data <- as.data.table(merge(mi_df,
high_haq_median,
by = c('age_group_id', 'sex_id', 'acause')))
## Drop all data points for quintile 1-4 that are < high_haq_median
cancer_data <- cancer_data[haq_quintile <= 4 & mi_ratio < high_haq_median,
drop := 1]
cancer_data <- cancer_data[is.na(drop), drop := 0]
cancer_data <- cancer_data[drop!=1, ]
# Drop
Q1 <- quantile(mi_df$mi_ratio, probs = .25)
Q3 <- quantile(mi_df$mi_ratio, probs = .75)
quartile_three <- cancer_data[,
list(quartile_three = ((Q3-Q1)*1.5 + Q3)),
by = c('age_group_id', 'sex_id', 'acause', 'haq_quintile')]
## Merge onto full dataset
haqUID = c('age_group_id', 'sex_id', 'acause', 'haq_quintile')
cancer_data <- merge(cancer_data, quartile_three, by=haqUID)
## Drop all data points for quintile 1-4 that are > quartile 3
cancer_data <- cancer_data[haq_quintile <= 4 & mi_ratio > quartile_three,
drop := 1]
cancer_data <- cancer_data[is.na(drop), drop := 0]
cancer_data <- cancer_data[drop!=1, ]
## revert to data.frame
output_df <- as.data.frame(cancer_data)
output_df <- output_df[, !(names(output_df) %in% c('drop', 'high_haq_median',
'quartile_three'))]
mir_prep.test_applyRestrictions(output_df)
return(output_df)
}
mir_prep.generateCaps <- function(type, mir_model_version_id, input_data, pctile,mor_type) {
## Generate "caps", the Winsorization values used to convert data to logit space
##
print(paste(" aggregating to", type, "caps..."))
## Define function to generate upper caps
if (type == "upper") {
caps_table = "mir_upper_cap"
required_columns = c("mir_model_version_id", "age_group_id")
these_caps <- aggregate(mi_ratio~age_group_id,
data=input_data,
FUN = quantile,
probs=pctile)
} else if (type == "lower") {
caps_table = "mir_lower_cap"
required_columns = c("mir_model_version_id", "age_group_id", "acause")
these_caps <- aggregate(mi_ratio~age_group_id+acause,
data=input_data,
FUN=quantile,
probs=pctile)
}
names(these_caps)[names(these_caps)%in%'mi_ratio'] <- paste0(type, "_cap")
which_model_input <- mir.get_mir_version_id()
these_caps$mir_model_version_id <- which_model_input
# Update caps record
existing_record <- mir.load_mi_caps(type, mir_model_version_id)
other_records = (existing_record$mir_model_version_id != which_model_input)
existing_record <- existing_record[other_records, ]
mir_prep.test_generateCaps(newMapOfCaps=rbind(these_caps, existing_record, fill=TRUE),
capType=type, required_columns, existing_record)
# Delete previous versions of this record, then attach the current version
if (mor_type == "CR") {
delete_old = paste( "DELETE FROM", caps_table,
"WHERE mir_model_version_id =", which_model_input)
cdb.run_query(delete_old)
cdb.append_to_table(caps_table, these_caps)
}
return(these_caps)
}
mir_prep.addVariance <-function(df, run_logit_models, is_logit_model=TRUE){
## scales MIRs using the upper caps and the offset, then
## calculates variance using either the delta method or
## the variance using the binomial distribution
if (run_logit_models) offset <- mir.get_inputConfig()$offset
print('adding variance...')
## calculates scaled MIRs
if (is_logit_model) {
## convert data to probability space using the upper cap
## and winsorize to the offset
df$data <- df$mi_ratio / df$upper_cap
df$data[df$data >= (1 - offset)] <- 1 - offset
df$data[df$data <= offset] <- offset
} else {
df$data <- df$mi_ratio
}
## calculate variance
if (run_logit_models == TRUE) {
df$variance <- df$data*(1 - df$data)/df$cases
} else {
sd <- sd(df$mi_ratio)
avg <- mean(df$mi_ratio)
df$variance <- (((exp(1)^(sd^2))-1)*exp(1)^(2*avg + sd^2)) / df$cases
}
## ensure variance for causes which allow MIR = 0 to pass variance test
new_mir_causes <- mir_prep.get_new_causes()
df$variance[df$acause %in% new_mir_causes$acause] <- 0.000001
mir_prep.test_addVariance(df)
return(df)
}
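## Worked example of the scaling above (illustrative numbers, not pipeline defaults): with
## mi_ratio = 0.4, upper_cap = 1.2, offset = 0.01 and cases = 50, the scaled value is
## data = 0.4/1.2 = 0.333 (inside [offset, 1 - offset], so no winsorizing is applied) and the
## binomial variance is 0.333*(1 - 0.333)/50 = 0.0044.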
mir_prep.addSTGPR_requirements <- function(df){
## re-formats and renames columns, and generates required columns for
## integration with shared st-gpr modeling function
##
print("adding stgpr requirements...")
final_data <- df
if (class(final_data$nid) == "character"){
final_data[final_data$nid==".", 'nid'] = ""
final_data$nid <- as.numeric(final_data$nid)
}
generic_nid = get_gbd_parameter("generic_cancer_nid")
    final_data[is.na(final_data$nid), 'nid'] <- generic_nid
final_data$sample_size <- final_data$cases
final_data$mir_model_version_id <- mir.get_mir_version_id()
final_data$me_name = paste0(final_data$acause, '_mi_ratio')
setnames(final_data,
old = c('nid', 'cases'),
new = c('nid', 'incident_cases' ))
return(final_data)
}
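## Hypothetical sketch of how the mir_prep script might chain the pipeline functions above; the
## object names, mor_type and percentiles are illustrative assumptions, not the canonical call order.
# df <- mir_prep.aggData(raw_input, 'year_id', mor_type = "CR")
# df <- mir_prep.aggData(df, 'age_group_id', mor_type = "CR")
# df <- mir_prep.applyRestrictions(df, restriction_type = "data")
# upper_caps <- mir_prep.generateCaps("upper", mir.get_mir_version_id(), df, pctile = 0.95, mor_type = "CR")
# lower_caps <- mir_prep.generateCaps("lower", mir.get_mir_version_id(), df, pctile = 0.05, mor_type = "CR")
# df <- merge(df, upper_caps[, c('age_group_id', 'upper_cap')], by = 'age_group_id')
# df <- mir_prep.addVariance(df, run_logit_models = TRUE, is_logit_model = TRUE)
# output <- mir_prep.addSTGPR_requirements(df)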
mir_prep.updateOutliers <- function(new_mir_input, me_name) {
## updates any missing or generic nid entries with whatever nid is in the
## current input
##
# determine if an update is necessary
outlier_dir = get_path("stgpr_outlier_dir", process="cancer_model")
generic_nid = get_gbd_parameter("generic_cancer_nid")
outlier_file <- paste0(outlier_dir, "/", me_name, "/outlier_db.csv")
outliers <- read.csv(outlier_file)
outlier_cols = colnames(outliers)
unassigned_otlr = (outliers$nid %in% c(0, generic_nid) |
is.na(outliers$nid))
to_update <- outliers[unassigned_otlr,]
if (nrow(to_update) > 0) {
# archive the old version
archive_folder <- get_path("mi_model", base_folder="workspace")
a_file <- paste0("outlier_db_", gsub(" ", "_", Sys.time()), ".csv")
archive_file <- file.path(archive_folder, me_name, a_file)
ensure_dir(archive_file)
write.csv(outliers, archive_file, row.names=FALSE)
# update missing or generic nids
setnames(new_mir_input, old="nid", new="new_nid")
updated <- merge(to_update, new_mir_input, all.x=TRUE, all.y=FALSE)
has_replacement = (!is.na(updated$new_nid) &
updated$new_nid != updated$nid)
updated[has_replacement, 'nid'] <- updated[has_replacement, 'new_nid']
# re-combine with non-missing data and save
no_update = outliers[!unassigned_otlr,]
corrected <- rbind(no_update, updated[,outlier_cols])
corrected <- corrected[!duplicated(corrected),]
write.csv(corrected, outlier_file, row.names=FALSE) #new data
}
}
## -------------------------------------------------- ##
## test functions
## -------------------------------------------------- ##
mir_prep.test_inputData <- function(df){
results = list('missing columns'=c(), 'duplicates check'=c(),
'missing data from previous round'=data.frame())
# Run standard tests
uidCols = mir_prep.get_uidColumns()
required_columns = unique(c(uidCols, 'age_group_id', 'year_id', 'cases', 'deaths'))
results <- mir_prep.standardTests(results, df, required_columns)
test_utils.checkTestResults(results, "inputData")
}
mir_prep.test_inputConfig <- function(df){
results = list('missing columns'=c(), 'duplicates check'=c())
required_columns = c('mir_model_version_id', 'mir_model_input_version',
'max_mi_input_accepted', 'offset',
'upper_cap_percentile', 'lower_cap_percentile')
results['missing columns'] <- test_utils.findMissingColumns(df,
required_columns)
results['duplicates check'] <- test_utils.duplicateChecker(df,
uniqueidentifiers=c('mir_model_version_id'))
test_utils.checkTestResults(results, "inputConfig")
}
mir_prep.test_causeConfig <- function(df){
results = list('no data' =c(), 'missing columns'=c(), 'duplicates check'=c())
required_columns = c('acause', 'mir_cause_config_id')
if (nrow(df) == 0) {
results['no data'] = "no entries present in the cause config for this model number"
} else results['no data'] = "passed"
results['missing columns'] <- test_utils.findMissingColumns(df,
required_columns)
results['duplicates check'] <- test_utils.duplicateChecker(df,
uniqueidentifiers=required_columns)
test_utils.checkTestResults(results, "causeconfig")
}
mir_prep.test_applyRestrictions <- function(df) {
results = list('missing columns'=c(), 'duplicates check'=c())
required_columns = mir_prep.get_uidColumns()
results <- mir_prep.standardTests(results, df, required_columns)
test_utils.checkTestResults(results, "applyrestrictions")
}
mir_prep.test_aggData <- function(df, input_df, arUpdated, columnsAggregated){
results = list('missing columns'=c(), 'duplicates check'=c())
uidCols = mir_prep.get_uidColumns()
required_columns = unique(c(uidCols, columnsAggregated))
results <- mir_prep.standardTests(results, df, required_columns)
test_utils.checkTestResults(results, "aggData")
}
mir_prep.test_generateCaps <- function(newMapOfCaps, capType,
required_columns, existingCapMap){
results = list('replacement error' = "", 'dropping data'="",
'missing columns'=c(), 'duplicates check'=c())
if (length(unique(newMapOfCaps$mir_model_version_id)) < length(unique(existingCapMap$mir_model_version_id))) {
replacement_message = paste("generateCaps is trying to delete",
capType, "cap history")
results['replacement error'] <- replacement_message
}
for (c in required_columns) {
if (c %in% c("uppder_cap", "lower_cap")) next
existing_values <-unique(existingCapMap[[c]])
new_values <- unique(newMapOfCaps[[c]])
if (!all(existing_values %in% new_values)) {
this_deletion_message = paste0("generateCaps is trying to delete ",
capType, " caps ", c,
" from cap history. If this is an ",
"intentional change, update the ",
"test_generateCaps function.")
if (results['dropping data']=="") {
results['dropping data']= this_deletion_message
} else {
results['dropping data'] = c(results['dropping data'], this_deletion_message)
}
}
}
results['missing columns'] <- test_utils.findMissingColumns(newMapOfCaps,
required_columns)
results['duplicates check'] <- test_utils.duplicateChecker(newMapOfCaps,
uniqueIdentifiers=required_columns)
testName <- paste("generateCaps,", capType)
test_utils.checkTestResults(results, testName)
}
mir_prep.test_cleanedData <- function(df){
results = list('missing columns'=c(), 'duplicates check'=c())
uidCols = mir_prep.get_uidColumns()
required_columns = c(uidCols, 'mi_ratio')
    results <- mir_prep.standardTests(results, df, requiredColList = required_columns)
test_utils.checkTestResults(results, "cleanedData")
}
mir_prep.test_addVariance <- function(df){
results = list('missing columns'=c(), 'duplicates check'=c(),
'null variance entries'=c(),
'some variance outside acceptable range'=c())
uidCols = mir_prep.get_uidColumns()
required_columns = c(uidCols, 'variance', 'mi_ratio')
results <- mir_prep.standardTests(results, df, requiredColList = required_columns)
    results['null variance entries'] <- test_utils.booleanTest(!nrow(df[is.na(df$variance),]))
results['some variance outside acceptable range'] <- test_utils.booleanTest(
!nrow(df[df$variance <= 0,]) &
!nrow(df[df$variance >= 1,]))
test_utils.checkTestResults(results, "addVariance")
}
mir_prep.test_outputData <- function(df, input_df){
results = list('missing columns'=c(), 'duplicates check'=c())
uidCols = c((mir_prep.get_uidColumns()), 'nid')
results <- mir_prep.standardTests(results, df, requiredColList = uidCols)
test_utils.checkTestResults(results, "outputData")
}
mir_prep.standardTests <- function(results, testDf, requiredColList){
    ## Manages tests that are run for every function affecting the main
## dataset: missing columns and duplicates
uidCols = mir_prep.get_uidColumns()
results['duplicates check'] <- test_utils.duplicateChecker(testDf, uidCols)
results['missing columns'] <- test_utils.findMissingColumns(testDf,
requiredColList)
results['columns missing values'] <- test_utils.findMissingValues(testDf,
requiredColList)
return(results)
}
|
/gbd_2019/cod_code/cancer/c_models/a_mi_ratio/mir_prep_functions.r
|
no_license
|
Nermin-Ghith/ihme-modeling
|
R
| false | false | 27,161 |
r
|
|
library(phylosim)
### Name: getRateParamList.T92
### Title: Get the rate parameters
### Aliases: getRateParamList.T92 T92.getRateParamList
### getRateParamList,T92-method
### ** Examples
# create a T92 object
p<-T92()
# set/get rate parameters
setRateParamList(p,list(
"Alpha"=1,
"Beta"=0.5
))
getRateParamList(p)
# set/get rate parameters via virtual field
p$rateParamList<-list(
"Alpha"=1,
"Beta"=3
)
p$rateParamList
# get object summary
summary(p)
|
/data/genthat_extracted_code/phylosim/examples/getRateParamList.T92.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 607 |
r
|
|
# TODO: Add comment
#
# Author: FWang9
###############################################################################
############Using ASCAT
############segmentation and estimate DNA copy number based on germline mutation
############the format of the input data is: chr, position, ref-allele counts in tumor, alt-allele counts in tumor, ref-allele counts in normal, alt-allele counts in normal
############tab-separated and without a header line
############load the ASCAT functions (ascat.loadData, ascat.aspcf, ascat.runAscat) before running part 1
############install the bbmle and emdbook packages; the foreach and doMC packages are also needed for the parallel resampling in part 2
##############################################################################################################
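## Example (hypothetical sketch): reading the tab-separated, header-less input described above and
## deriving the per-site fractions used by the functions below. The column names, the logR definition
## and the file name are illustrative assumptions, not part of the original pipeline.
# input <- read.table("sample.counts.txt", sep = "\t", header = FALSE,
#                     col.names = c("chr", "pos", "refNumT", "altNumT", "refNumN", "altNumN"))
# input$tfrac <- input$altNumT / (input$refNumT + input$altNumT)                  # tumor B-allele fraction
# input$nfrac <- input$altNumN / (input$refNumN + input$altNumN)                  # normal B-allele fraction
# input$TlogR <- log2((input$refNumT + input$altNumT) / (input$refNumN + input$altNumN))  # assumed tumor/normal logR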
####part1.run ASCAT to get segmentation and allelic copy number in DNA level
###ASCAT in: get the inputdata of ASCAT
ASCATin<-function(data,sample){
Me=median(data$TlogR)
M=max(data$TlogR)
m=min(data$TlogR)
data$Tlog[data$TlogR>=Me]=(data$TlogR[data$TlogR>=Me]-Me)/(M-Me)
data$Tlog[data$TlogR<Me]=(data$TlogR[data$TlogR<Me]-Me)/(Me-m)
data$Nlog=1
tumor_logR=data.frame(SNP=paste("SNP",c(1:dim(data)[1]),sep=""),chr=as.character(data$chr),pos=data$pos,sample=data$Tlog)
tumor_BAF=data.frame(SNP=paste("SNP",c(1:dim(data)[1]),sep=""),chr=as.character(data$chr),pos=data$pos,sample=data$tfrac)
normal_logR=data.frame(SNP=paste("SNP",c(1:dim(data)[1]),sep=""),chr=as.character(data$chr),pos=data$pos,sample=data$Nlog)
normal_BAF=data.frame(SNP=paste("SNP",c(1:dim(data)[1]),sep=""),chr=as.character(data$chr),pos=data$pos,sample=data$nfrac)
colnames(tumor_logR)=c("SNP","chr","pos",sample)
colnames(tumor_BAF)=c("SNP","chr","pos",sample)
colnames(normal_logR)=c("SNP","chr","pos",sample)
colnames(normal_BAF)=c("SNP","chr","pos",sample)
tumor_logRname=paste(sample,"_tumorlogR.txt",sep="")
tumor_BAFname=paste(sample,"_tumorBAF.txt",sep="")
normal_logRname=paste(sample,"_normallogR.txt",sep="")
normal_BAFname=paste(sample,"_normalBAF.txt",sep="")
write.table(tumor_logR,tumor_logRname,sep="\t",col.names=TRUE,row.names = FALSE,quote = FALSE)
write.table(tumor_BAF,tumor_BAFname,sep="\t",col.names=TRUE,row.names = FALSE,quote = FALSE)
write.table(normal_logR,normal_logRname,sep="\t",col.names=TRUE,row.names = FALSE,quote = FALSE)
write.table(normal_BAF,normal_BAFname,sep="\t",col.names=TRUE,row.names = FALSE,quote = FALSE)
ASCATdata=c(tumor_logRname,tumor_BAFname,normal_logRname,normal_BAFname)
return (ASCATdata)
}
###ASCATout: get the output of ASCAT
ASCATout<-function(ASCATdata,sample,chromosome){
tumor_logRname=ASCATdata[1]
tumor_BAFname=ASCATdata[2]
normal_logRname=ASCATdata[3]
normal_BAFname=ASCATdata[4]
ascat.bc = ascat.loadData(tumor_logRname,tumor_BAFname,normal_logRname,normal_BAFname,chrs=chromosome)
ascat.bc = ascat.aspcf(ascat.bc)
ascat.output = ascat.runAscat(ascat.bc)
alpha=ascat.output$aberrantcellfraction
segment=ascat.output$segments
ACN=data.frame(nMajor=segment$nMajor,nMinor=segment$nMinor)
segment$nMajor=apply(ACN,1,max)
segment$nMinor=apply(ACN,1,min)
segment=data.frame(chr=segment$chr,startpos=segment$startpos,endpos=segment$endpos,nMajor=segment$nMajor,nMinor=segment$nMinor)
alpha=ascat.output$aberrantcellfraction
ploidy=ascat.output$ploidy
DNAout=list(alpha=alpha,segment=segment,ploidy=ploidy)
#save(DNAout,file=paste(sample,".somaticCN.Rdata",sep=""))
return(DNAout)
}
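## Example (hypothetical usage sketch for part 1; the sample name and chromosome set are assumptions):
# ASCATdata <- ASCATin(input, sample = "sample1")
# DNAout    <- ASCATout(ASCATdata, sample = "sample1", chromosome = paste0("chr", 1:22))
# DNAout$alpha    # aberrant cell fraction (tumor purity) estimated by ASCAT
# DNAout$segment  # per-segment major/minor allele copy numbers
# DNAout$ploidy   # ASCAT ploidy estimate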
####part 1 end
#############################################################################################################
#############################################################################################################
####part 2: estimate the copy number of somatic mutation and iterative optimization
#MCN: get the integer copy number of somatic mutation
MCN<-function(alpha,segment,somatic){
for (j in 1:dim(segment)[1]){
tempsomatic=somatic[somatic$chr==as.character(segment$chr[j])&somatic$pos>=segment$startpos[j]&somatic$pos<=segment$endpos[j],]
tempsomatic$SMCN=tempsomatic$tfrac*(2*(1-alpha)+alpha*(segment$nMajor[j]+segment$nMinor[j]))/alpha
if (j==1){
somaticnew=tempsomatic
}else{
somaticnew=rbind(somaticnew,tempsomatic)
}
}
somaticnew$SACN=round(somaticnew$SMCN)
return(somaticnew)
}
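## Worked example of the multiplicity formula used in MCN() (illustrative numbers): with tumor purity
## alpha = 0.5, a segment with nMajor = 1 and nMinor = 1 (total copy number 2) and a somatic variant
## allele fraction tfrac = 0.25, SMCN = 0.25*(2*(1-0.5) + 0.5*2)/0.5 = 1, i.e. the mutation sits on one copy.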
#MutPlot: plot the copy number of somatic mutation
MutPlot<-function(segment,data,plotname,index){
pdf(plotname,width=20,height=7)
d=sum(segment$endpos-segment$startpos)
nMajor=max(segment$nMajor,segment$nMinor,data$SMCN)
plot(0,0,col="white",xlim=c(0,d),ylim=c(0,nMajor),xlab="",ylab="Absolute copy number",main="",axes=FALSE)
chro=unique(segment$chr)
axisindex=0
for (j in 1:length(chro)){
subseg=segment[segment$chr==chro[j],]
subd=sum(subseg$endpos-subseg$startpos)
subsomatic=data[data$chr==chro[j],]
for (k in 1:dim(subseg)[1]){
subsom=subsomatic[subsomatic$pos>=subseg$startpos[k]&subsomatic$pos<=subseg$endpos[k],]
subsom$index=subsom$pos-subseg$startpos[k]
if (index==1){
points(subsom$index+axisindex,subsom$SACN,pch=20,col=rgb(0,0,1,alpha=0.7),cex=1.2)
}
else{
points(subsom$index+axisindex,subsom$SMCN,pch=20,col=rgb(0,0,1,alpha=0.7),cex=1.2)
}
if (subseg$nMajor[k]==subseg$nMinor[k]){
x=c(axisindex,axisindex+subseg$endpos[k]-subseg$startpos[k])
y1=rep(subseg$nMajor[k]-0.1,length(x))
lines(x,y1,col="purple",lwd=2)
y2=rep(subseg$nMajor[k]+0.1,length(x))
lines(x,y2,col="purple",lwd=2)
z=c(subseg$nMajor[k]-0.1,subseg$nMajor[k]+0.1)
x1=c(axisindex,axisindex)
lines(x1,z,col="purple",lwd=2)
x2=c(axisindex+subseg$endpos[k]-subseg$startpos[k],axisindex+subseg$endpos[k]-subseg$startpos[k])
lines(x2,z,col="purple",lwd=2)
}else{
x=c(axisindex,axisindex+subseg$endpos[k]-subseg$startpos[k])
y1=rep(subseg$nMajor[k]-0.1,length(x))
lines(x,y1,col="red",lwd=2)
y2=rep(subseg$nMajor[k]+0.1,length(x))
lines(x,y2,col="red",lwd=2)
z=c(subseg$nMajor[k]-0.1,subseg$nMajor[k]+0.1)
x1=c(axisindex,axisindex)
lines(x1,z,col="red",lwd=2)
x2=c(axisindex+subseg$endpos[k]-subseg$startpos[k],axisindex+subseg$endpos[k]-subseg$startpos[k])
lines(x2,z,col="red",lwd=2)
y11=rep(subseg$nMinor[k]-0.1,length(x))
lines(x,y11,col="green",lwd=2)
y21=rep(subseg$nMinor[k]+0.1,length(x))
lines(x,y21,col="green",lwd=2)
z=c(subseg$nMinor[k]-0.1,subseg$nMinor[k]+0.1)
lines(x1,z,col="green",lwd=2)
lines(x2,z,col="green",lwd=2)
}
axisindex=axisindex+subseg$endpos[k]-subseg$startpos[k]+1
}
abline(v=axisindex-1,col="gray")
text((axisindex-1-sum(subseg$endpos-subseg$startpos)+axisindex-1)/2,nMajor,substr(chro[j],4,nchar(chro[j])))
}
axis(side=2)
dev.off()
}
SNPplot<-function(segment,data,plotname,index,alpha){
pdf(plotname,width=20,height=7)
d=sum(segment$endpos-segment$startpos)
nMajor=max(segment$nMajor,segment$nMinor,data$SMCN)
plot(0,0,col="white",xlim=c(0,d),ylim=c(0,nMajor),xlab="",ylab="Absolute copy number",main="",axes=FALSE)
chro=unique(segment$chr)
axisindex=0
for (j in 1:length(chro)){
subseg=segment[segment$chr==chro[j],]
subd=sum(subseg$endpos-subseg$startpos)
subsomatic=data[data$chr==chro[j],]
for (k in 1:dim(subseg)[1]){
subsom=subsomatic[subsomatic$pos>=subseg$startpos[k]&subsomatic$pos<=subseg$endpos[k],]
TCN=subseg$nMajor[k]+subseg$nMinor[k]
subsom$SMCN=(subsom$tfrac*(2*(1-alpha)+alpha*TCN)-(1-alpha))/alpha
subsom$SACN=round(subsom$SMCN)
subsom$index=subsom$pos-subseg$startpos[k]
if (index==1){
points(subsom$index+axisindex,subsom$SACN,pch=20,col=rgb(0,0,1,alpha=0.7),cex=1.2)
}
else{
points(subsom$index+axisindex,subsom$SMCN,pch=20,col=rgb(0,0,1,alpha=0.7),cex=1.2)
}
if (subseg$nMajor[k]==subseg$nMinor[k]){
x=c(axisindex,axisindex+subseg$endpos[k]-subseg$startpos[k])
y1=rep(subseg$nMajor[k]-0.1,length(x))
lines(x,y1,col="purple",lwd=2)
y2=rep(subseg$nMajor[k]+0.1,length(x))
lines(x,y2,col="purple",lwd=2)
z=c(subseg$nMajor[k]-0.1,subseg$nMajor[k]+0.1)
x1=c(axisindex,axisindex)
lines(x1,z,col="purple",lwd=2)
x2=c(axisindex+subseg$endpos[k]-subseg$startpos[k],axisindex+subseg$endpos[k]-subseg$startpos[k])
lines(x2,z,col="purple",lwd=2)
}else{
x=c(axisindex,axisindex+subseg$endpos[k]-subseg$startpos[k])
y1=rep(subseg$nMajor[k]-0.1,length(x))
lines(x,y1,col="red",lwd=2)
y2=rep(subseg$nMajor[k]+0.1,length(x))
lines(x,y2,col="red",lwd=2)
z=c(subseg$nMajor[k]-0.1,subseg$nMajor[k]+0.1)
x1=c(axisindex,axisindex)
lines(x1,z,col="red",lwd=2)
x2=c(axisindex+subseg$endpos[k]-subseg$startpos[k],axisindex+subseg$endpos[k]-subseg$startpos[k])
lines(x2,z,col="red",lwd=2)
y11=rep(subseg$nMinor[k]-0.1,length(x))
lines(x,y11,col="green",lwd=2)
y21=rep(subseg$nMinor[k]+0.1,length(x))
lines(x,y21,col="green",lwd=2)
z=c(subseg$nMinor[k]-0.1,subseg$nMinor[k]+0.1)
lines(x1,z,col="green",lwd=2)
lines(x2,z,col="green",lwd=2)
}
axisindex=axisindex+subseg$endpos[k]-subseg$startpos[k]+1
}
abline(v=axisindex-1,col="gray")
text((axisindex-1-sum(subseg$endpos-subseg$startpos)+axisindex-1)/2,nMajor,substr(chro[j],4,nchar(chro[j])))
}
axis(side=2)
dev.off()
}
#randomBinom: random sampling based on binomial distribution
randomBinom<-function(y,times){
return(rbinom(times,y[1],y[2]))
}
#ransomSample: random sample based on times
randomSample<-function(data,times){
if (dim(data)[1]>0){
random=apply(data,1,randomBinom,times=times)
return(t(random))
}
}
#RMCN: get the allele copy number of random sampling
RMCN<-function(R,somatic,alpha,segment){
randomsomatic=data.frame(chr=somatic$chr,pos=somatic$pos,refNumT=somatic$refNumT,altNumT=R)
randomsomatic$tfrac=randomsomatic$altNumT/(randomsomatic$altNumT+randomsomatic$refNumT)
randomres=MCN(alpha,segment,somatic=randomsomatic)
return(randomres$SMCN)
}
#difCN: calculated the difference between allelic copy number and integer copy number
difCN<-function(x){
dif=abs(x-round(x))
return(dif)
}
####calculated the fraction of integer copy number = 0
#subclone: calculated the number of somatic mutation with copy number = 0
subclone<-function(x){
return(sum(round(x)==0))
}
###somaticPurity: calculated the tumor purity based on the copy number of somatic mutation
somaticPurity<-function(somatic,segment,DNAalpha){
alpha_up=c()
for (j in 1:dim(segment)[1]){
subdata=somatic[somatic$chr==as.character(segment$chr[j])&somatic$pos>=segment$startpos[j]&somatic$pos<=segment$endpos[j],]
if (dim(subdata)[1]>0){
TCN=segment$nMajor[j]+segment$nMinor[j]
alpha1=2*subdata$tfrac/(2*subdata$tfrac+subdata$SACN-subdata$tfrac*TCN)
alpha_up=c(alpha_up,alpha1)
}
}
alpha_up=alpha_up[!is.na(alpha_up)&alpha_up!=Inf]
if (length(alpha_up)<=1){
if (length(alpha_up)==0){
alpha1=DNAalpha
}else{
if (alpha_up>=0 & alpha_up <=1){
alpha1=alpha_up
}else{
alpha1=DNAalpha
}
}
}else{
alpha1=density(alpha_up)$x[which.max(density(alpha_up)$y)]
if (alpha1 > 1 | alpha1 < 0){
alpha_up=alpha_up[alpha_up >=0 & alpha_up <= 1]
if (length(alpha_up)>0){
if (length(alpha_up)==1){
alpha1=alpha_up
}else{
alpha1=density(alpha_up)$x[which.max(density(alpha_up)$y)]
}
}else{
alpha1=DNAalpha
}
}
}
return(alpha1)
}
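## Note on the purity update above: solving the MCN() relation
##   tfrac = alpha*SACN / (2*(1-alpha) + alpha*TCN)
## for alpha gives alpha = 2*tfrac / (2*tfrac + SACN - tfrac*TCN), which is the per-mutation
## estimate that somaticPurity() aggregates via the mode of its density.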
##CNpeak: find the peaks (candidate copy numbers) in the density of per-site copy-number estimates
CNpeak<-function(CN){
peak=c()
peakvalue=c()
if (max(CN)<=0){
CN=CN+abs(min(CN))
}
CN=CN[CN>0]
if (length(CN)>3){
k=1
x=density(CN)$x
y=density(CN)$y
for (i in 2:(length(y)-1)){
if (y[i]>=y[i-1]&y[i]>=y[i+1]){
peak[k]=x[i]
peakvalue[k]=y[i]
k=k+1
}
}
peak=peak[peakvalue> (max(peakvalue)/10)]
}else{
peak=unique(CN)
}
return(peak)
}
##CNopt: beta-binomial likelihood of the observed counts given an expected BAF (dbetabinom is from the
##       'emdbook' package; 'theta', the overdispersion parameter, must exist in the calling environment)
CNopt<-function(data,expBAF){
	dbetabinom(data$y,prob=expBAF,size=data$N,theta=theta,log=FALSE)
}
##upACN: update the copy number of segment and ploidy based on germline mutation
upACN<-function(data,segment,alpha){
ploidy=c()
for (j in 1:dim(segment)[1]){
subseg=segment[j,]
		subdata=data[data$chr==as.character(segment$chr[j])&data$pos>=segment$startpos[j]&data$pos<=segment$endpos[j],]
if (dim(subdata)[1]>=10){
TCN=round((2*subdata$Ratio-2*(1-alpha))/alpha)
canTCN=round(CNpeak(TCN))
lCN=Inf
sCN=c()
if (length(canTCN)!=0){
for (i in 1:length(canTCN)){
nMajor=round((2*subdata$BAF*(1-alpha)+alpha*subdata$BAF*canTCN[i]-(1-alpha))/alpha)
canMajor=round(CNpeak(nMajor))
if (length(canMajor)>0){
for (k in 1:length(canMajor)){
expBAF=((1-alpha)+alpha*canMajor[k])/(2*(1-alpha)+alpha*canTCN[i])
if (expBAF <= 1 & expBAF >=0){
lCN1=-sum(dbinom(round(subdata$BAF*(subdata$refNumT+subdata$altNumT)),(subdata$refNumT+subdata$altNumT),expBAF,log=TRUE))
if (lCN1 < lCN){
sCN=c(canMajor[k],canTCN[i])
lCN=lCN1
}
}
}
}
}
if (length(sCN)!=0){
subseg$nMajor=sCN[1]
if (sCN[2]-sCN[1]<0){
subseg$nMinor=0
}else{
subseg$nMinor=sCN[2]-sCN[1]
}
}
}
}
if (j==1){
upsegment=subseg
}else{
upsegment=rbind(upsegment,subseg)
}
ploidy=c(ploidy,rep(subseg$nMajor+subseg$nMinor,length=dim(subdata)[1]))
}
upres=list(upsegment=upsegment,ploidy=mean(ploidy))
return(upres)
}
###iterOPT: output optimal result based on Iterative optimization
iterOPT<-function(SNP,somatic,segment,realDIF,realsub,randomdata,times,PD,PS,DNAalpha,cutoff){
out=list()
out1=list()
out2=list()
run=1
PD2=PD
PS2=PS
while(run < cutoff){
alpha1=somaticPurity(somatic=somatic,segment=segment,DNAalpha=DNAalpha)
res1=upACN(data=SNP,segment=segment,alpha=alpha1)
ploidy1=res1$ploidy
segment1=res1$upsegment
somaticres1=MCN(alpha=alpha1,segment=segment1,somatic=somatic)
somaticres1=somaticres1[match(paste(somatic$chr,somatic$pos,sep=":"),paste(somaticres1$chr,somaticres1$pos,sep=":")),]
realDIF1=sum(abs(somaticres1$SMCN-somaticres1$SACN))/dim(somaticres1)[1]
registerDoMC(cores = 4)
randomout1=foreach (j = 1:dim(randomdata)[2], .combine=cbind) %dopar% RMCN(R=randomdata[,j],somatic=somaticres1,alpha=alpha1,segment=segment1)
randomDIF1=apply(randomout1,2,difCN)
aveDIF1=apply(randomDIF1,2,sum)/dim(somaticres1)[1]
realDIF1=sum(abs(somaticres1$SMCN-somaticres1$SACN))/dim(somaticres1)[1]
realsub1=sum(somaticres1$SACN==0)/dim(somaticres1)[1]
if (realDIF1<realDIF | realsub1<realsub){
out$segment=segment1
out$somatic=somaticres1
out$alpha=alpha1
out$ploidy=ploidy1
}
randomsub1=apply(randomout1,2,subclone)
PD1=1-length(aveDIF1[aveDIF1>=realDIF1])/length(aveDIF1)
PS1=length(randomsub1[randomsub1<sum(somaticres1$SACN==0)])/times
if (PD1 < 0.05 & PS1 < 0.05){
out1$segment=segment1
out1$somatic=somaticres1
out1$alpha=alpha1
out1$ploidy=ploidy1
print.noquote(paste("iteration times = ",run,sep=""))
p=PD1+PS1
print.noquote(paste("p value = ",p,sep=""))
return(out1)
break
}
if ((PD1 + PS1) < (PD2+PS2)){
out2$segment=segment1
out2$somatic=somaticres1
out2$alpha=alpha1
out2$ploidy=ploidy1
PD2=PD1
PS2=PS1
}
realDIF=realDIF1
realsub=realsub1
somatic=somaticres1
segment=segment1
if(dim(somatic)[1]>=100){
if (abs(alpha1-DNAalpha)<0.01){
if (length(out2)!=0){
print.noquote(paste("iteration times = ",run,sep=""))
p=PD1+PS1
print.noquote(paste("p value = ",p,sep=""))
return(out2)
}else{
print.noquote(paste("iteration times = ",run,sep=""))
p=PD1+PS1
print.noquote(paste("p value = ",p,sep=""))
return(out)
}
}else{
DNAalpha=alpha1
run=run+1
}
}else if (dim(somatic)[1]<100&dim(somatic)[1]>=20){
if (abs(alpha1-DNAalpha)<0.001){
if (length(out2)!=0){
print.noquote(paste("iteration times = ",run,sep=""))
p=PD1+PS1
print.noquote(paste("p value = ",p,sep=""))
return(out2)
}else{
print.noquote(paste("iteration times = ",run,sep=""))
p=PD1+PS1
print.noquote(paste("p value = ",p,sep=""))
return(out)
}
}else{
DNAalpha=alpha1
run=run+1
}
}else{
if (abs(alpha1-DNAalpha)<0.0001){
if (length(out2)!=0){
print.noquote(paste("iteration times = ",run,sep=""))
p=PD1+PS1
print.noquote(paste("p value = ",p,sep=""))
return(out2)
}else{
print.noquote(paste("iteration times = ",run,sep=""))
p=PD1+PS1
print.noquote(paste("p value = ",p,sep=""))
return(out)
}
}else{
DNAalpha=alpha1
run=run+1
}
}
}
if (run==cutoff){
if (length(out2)!=0){
print.noquote(paste("iteration times = ",run,sep=""))
p=PD1+PS1
print.noquote(paste("p value = ",p,sep=""))
return(out2)
}else{
print.noquote(paste("iteration times = ",run,sep=""))
p=PD1+PS1
print.noquote(paste("p value = ",p,sep=""))
return(out)
}
}
}
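## Hypothetical call sketch: iterOPT() is invoked from DNArun() below with objects produced there
## (DNAinput, somaticres, segment, randomdata, PD, PS); the literal values here are placeholders.
# iterOPT(SNP=DNAinput,somatic=somaticres,segment=segment,realDIF=realDIF,realsub=realsub,
#         randomdata=randomdata,times=1000,PD=PD,PS=PS,DNAalpha=DNAalpha,cutoff=50)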
###sequenzRun: build a seqz-style table from the germline counts and run sequenza to estimate purity, ploidy and allelic segments
sequenzRun<-function(data,sample,chromosome){
outdata=data.frame(chromosome=data$chr,position=data$pos,base.ref=data$ref,depth.normal=data$Nsum,depth.tumor=data$Tsum,depth.ratio=(data$Nsum)/(data$Tsum),Af=1-data$tfrac,Bf=data$tfrac)
outdata$zygosity.normal[data$nfrac>=0.2&data$nfrac<=0.8]="het"
outdata$zygosity.normal[data$nfrac<0.2|data$nfrac>0.8]="hom"
outdata$GC.percent=50
outdata$good.reads=outdata$depth.tumor
genotype=data.frame(ref=data$ref[outdata$zygosity.normal=="het"],alt=data$alt[outdata$zygosity.normal=="het"])
genotype=t(apply(genotype,1,sort))
AB.normal=paste(genotype[,1],genotype[,2],sep="")
outdata$AB.normal[outdata$zygosity.normal=="het"]=AB.normal
genotype=data.frame(ref=data$ref[outdata$zygosity.normal=="hom"],alt=data$alt[outdata$zygosity.normal=="hom"])
subdata=data[outdata$zygosity.normal=="hom",]
if (dim(subdata)[1]>0){
subdata$genotype[subdata$nfrac<0.2]=as.character(subdata$ref[subdata$nfrac<0.2])
subdata$genotype[subdata$nfrac>0.8]=as.character(subdata$alt[subdata$nfrac>0.8])
outdata$AB.normal[outdata$zygosity.normal=="hom"]=subdata$genotype
}
AB.tumor <- rep(".", length(outdata$AB.normal))
outdata$AB.tumor=AB.tumor
strand <- AB.tumor
outdata$tumor.strand=strand
normal.pos <- outdata$zygosity.normal == "hom" & outdata$AB.tumor == "."
if (sum(normal.pos)>0){
outdata <-outdata[outdata$depth.ratio > 0 & !is.infinite(outdata$depth.ratio) & !normal.pos, ]
}
outname=paste(sample,".seqz",sep="")
write.table(outdata, outname, col.names = TRUE, row.names = FALSE, sep = "\t")
seqz.data<-read.seqz(outname,gz=FALSE)
##GC correction and depth-ratio normalization
print.noquote("GC correction")
gc.stats<-gc.sample.stats(seqz.data)
#gc.stats <- gc.norm(x = seqz.data$depth.ratio,gc = seqz.data$GC.percent)
gc.vect <- setNames(gc.stats$raw.mean, gc.stats$gc.values)
seqz.data$adjusted.ratio <- seqz.data$depth.ratio / gc.vect[as.character(seqz.data$GC.percent)]
##extract information of sequenza input
print.noquote("extract sequenza input")
seqz.data$chr=seqz.data$chromosome
test <- sequenza.extract(seqz.data,gc.stats,chroso=intersect(chromosome,seqz.data$chromosome))
##infer tumor purity and ploidy
print.noquote("sequenza infering")
CP.example <- sequenza.fit(test,mc.cores = getOption("mc.cores", 1L),female=FALSE)
cint <- get.ci(CP.example)
alpha <- cint$max.cellularity
ploidy <- cint$max.ploidy
avg.depth.ratio <- mean(test$gc$adj[, 2])
seg.tab <- na.exclude(do.call(rbind, test$segments))
print.noquote("integrating result")
cn.alleles <- baf.bayes(Bf = seg.tab$Bf, depth.ratio = seg.tab$depth.ratio,cellularity = alpha, ploidy = ploidy,avg.depth.ratio = avg.depth.ratio)
seg.tab <- cbind(seg.tab, cn.alleles)
segment=data.frame(chr=seg.tab$chromosome,startpos=seg.tab$start.pos,endpos=seg.tab$end.pos,nMajor=seg.tab$A,nMinor=seg.tab$B)
index=which(segment$nMajor==0&segment$nMinor==0)
rowindex=setdiff(c(1:dim(segment)[1]),index)
segment=segment[rowindex,]
DNAout=list(alpha=alpha,ploidy=ploidy,segment=segment)
return(DNAout)
}
###outputTitanSegments/getMajorMinorCN: convert TITAN per-position results into segment-level allelic copy-number calls
outputTitanSegments <- function(results, id, convergeParams, filename = NULL, igvfilename = NULL){
# get all possible states in this set of results
stateTable <- unique(results[, c("TITANstate", "TITANcall")])
rownames(stateTable) <- stateTable[, 1]
rleResults <- t(sapply(unique(results$Chr), function(x){
ind <- results$Chr == x
r <- rle(results$TITANstate[ind])
}))
rleLengths <- unlist(rleResults[, "lengths"])
rleValues <- unlist(rleResults[, "values"])
numSegs <- length(rleLengths)
# convert allelic ratio to symmetric ratios #
results$AllelicRatio <- apply(cbind(results$AllelicRatio, 1-results$AllelicRatio), 1, max, na.rm = TRUE)
segs <- as.data.frame(matrix(NA, ncol = 14, nrow = numSegs,
dimnames = list(c(), c("Sample", "Chromosome", "Start_Position.bp.", "End_Position.bp.",
"Length.snp.", "Median_Ratio", "Median_logR", "TITAN_state", "TITAN_call", "Copy_Number",
"MinorCN", "MajorCN", "Clonal_Cluster", "Cellular_Frequency"))))
segs$Sample <- id
colNames <- c("Chr", "Position", "TITANstate", "AllelicRatio", "LogRatio")
prevInd <- 0
for (j in 1:numSegs){
start <- prevInd + 1
end <- prevInd + rleLengths[j]
segDF <- results[start:end, ]
prevInd <- end
numR <- nrow(segDF)
segs[j, "Chromosome"] <- as.character(segDF[1, "Chr"])
segs[j, "Start_Position.bp."] <- segDF[1, "Position"]
segs[j, "TITAN_state"] <- rleValues[j]
segs[j, "TITAN_call"] <- segDF[1, "TITANcall"]#stateTable[as.character(rleValues[j]), 2]
segs[j, "Copy_Number"] <- segDF[1, "CopyNumber"]
segs[j, "Median_Ratio"] <- round(median(segDF$AllelicRatio, na.rm = TRUE), digits = 6)
segs[j, "Median_logR"] <- round(median(segDF$LogRatio, na.rm = TRUE), digits = 6)
segs[j, "MinorCN"] <- getMajorMinorCN(rleValues[j], convergeParams$symmetric)$majorCN
segs[j, "MajorCN"] <- getMajorMinorCN(rleValues[j], convergeParams$symmetric)$minorCN
segs[j, "Clonal_Cluster"] <- segDF[1, "ClonalCluster"]
segs[j, "Cellular_Frequency"] <- segDF[1, "CellularPrevalence"]
if (segDF[1, "Chr"] == segDF[numR, "Chr"]){
segs[j, "End_Position.bp."] <- segDF[numR, "Position"]
segs[j, "Length.snp."] <- numR
}else{ # segDF contains 2 different chromosomes
print(j)
}
}
if (!is.null(filename)){
# write out detailed segment file #
write.table(segs, file = filename, col.names = TRUE, row.names = FALSE, quote = FALSE, sep = "\t")
}
# write out IGV seg file #
if (!is.null(igvfilename)){
igv <- segs[, c("Sample", "Chromosome", "Start_Position.bp.",
"End_Position.bp.", "Length.snp.", "Median_logR")]
colnames(igv) <- c("sample", "chr", "start", "end", "num.snps", "median.logR")
write.table(igv, file = igvfilename, col.names = TRUE, row.names = FALSE, quote = FALSE, sep = "\t")
}
return(segs)
}
getMajorMinorCN <- function(state, symmetric = TRUE){
majorCN <- NA
minorCN <- NA
if (symmetric){
if (state==0){
majorCN = 0; minorCN = 0;
}else if (state==1){
majorCN = 0; minorCN = 1;
}else if(state==2){
majorCN = 0; minorCN = 2;
}else if (state==3){
majorCN = 1; minorCN = 1;
}else if (state==4){
majorCN = 0; minorCN = 3;
}else if (state==5){
majorCN = 1; minorCN = 2;
}else if (state==6){
majorCN = 0; minorCN = 4;
}else if (state==7){
majorCN = 1; minorCN = 3;
}else if (state==8){
majorCN = 2; minorCN = 2;
}else if (state==9){
majorCN = 0; minorCN = 5;
}else if (state==10){
majorCN = 1; minorCN = 4;
}else if (state==11){
majorCN = 2; minorCN = 3;
}else if (state==12){
majorCN = 0; minorCN = 6;
}else if (state==13){
majorCN = 1; minorCN = 5;
}else if (state==14){
majorCN = 2; minorCN = 4;
}else if (state==15){
majorCN = 3; minorCN = 3;
}else if (state==16){
majorCN = 0; minorCN = 7;
}else if (state==17){
majorCN = 1; minorCN = 6;
}else if (state==18){
majorCN = 2; minorCN = 5;
}else if (state==19){
majorCN = 3; minorCN = 4;
}else if (state==20){
majorCN = 0; minorCN = 8;
}else if (state==21){
majorCN = 1; minorCN = 7;
}else if (state==22){
majorCN = 2; minorCN = 6;
}else if (state==23){
majorCN = 3; minorCN = 5;
}else if (state==24){
majorCN = 4; minorCN = 4;
}
}else{
#stop("symmetric=FALSE not yet supported.")
}
return(list(majorCN = majorCN, minorCN = minorCN))
}
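## Illustrative check (state value chosen arbitrarily): TITAN state 5 is an unbalanced 3-copy state.
## Note that the smaller allele count is stored in $majorCN, which outputTitanSegments() above then
## writes into the MinorCN column (and vice versa), so the segment table ends up with MajorCN >= MinorCN.
# getMajorMinorCN(5)   # list(majorCN=1, minorCN=2)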
###TITANout: run TitanCNA on heterozygous germline SNPs to estimate purity, ploidy and segment copy numbers
TITANout<-function(DNAinput,chromosome){
DNAinput=DNAinput[DNAinput$nfrac>=0.2&DNAinput$nfrac<=0.8,]
rlength=nchar(as.character(DNAinput$ref))
alength=nchar(as.character(DNAinput$alt))
DNAinput=DNAinput[rlength==1&alength==1,]
titandata=data.frame(chr=DNAinput$chr,pos=DNAinput$pos,ref=DNAinput$ref,refCount=DNAinput$refNumT,Nref=DNAinput$alt,NrefCount=DNAinput$altNumT)
chro=intersect(chromosome,names(table(titandata$chr))[table(titandata$chr)>1])
index=match(as.character(titandata$chr),chro)
titandata=titandata[!is.na(index),]
DNAinput=DNAinput[!is.na(index),]
write.table(titandata,"TITAN.input",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
numClusters <- 2
params <- loadDefaultParameters(copyNumber=5,numberClonalClusters=numClusters)
data <- loadAlleleCounts("TITAN.input")
data$logR=log((DNAinput$refNumT+DNAinput$altNumT)/(DNAinput$refNumN+DNAinput$altNumN),base=10)
convergeParams <- runEMclonalCN(data,gParams=params$genotypeParams,nParams=params$normalParams,
pParams=params$ploidyParams,sParams=params$cellPrevParams,maxiter=20,maxiterUpdate=1500,
txnExpLen=1e15,txnZstrength=1e5,useOutlierState=FALSE,normalEstimateMethod="map",
estimateS=TRUE,estimatePloidy=TRUE)
optimalPath <- viterbiClonalCN(data,convergeParams)
results <- outputTitanResults(data,convergeParams,optimalPath,filename=NULL,posteriorProbs=F)
results$AllelicRatio=as.numeric(results$AllelicRatio)
results$LogRatio=as.numeric(results$LogRatio)
segs <- outputTitanSegments(results, id = "test", convergeParams,filename=NULL)
ploidy <- tail(convergeParams$phi, 1)
normal <- tail(convergeParams$n, 1)
#mean(as.numeric(segs$Cellular_Frequency[!is.na(segs$Cellular_Frequency)])*(1-normal))
DNAout=list(segment=segs,ploidy=ploidy,alpha=1-normal)
return(DNAout)
}
###FACETSout: run FACETS on the germline allele counts to estimate purity, ploidy and allelic segments
FACETSout<-function(DNAinput){
facetsinput=data.frame(chr=DNAinput$chr,pos=DNAinput$pos,Nsum=DNAinput$Nsum,NAP=DNAinput$altNumN,Tsum=DNAinput$Tsum,TAP=DNAinput$altNumT)
write.table(facetsinput,"FACETS.input",sep="\t",col.names=FALSE,row.names=FALSE,quote=FALSE)
set.seed(1234)
xx=preProcSample(file="FACETS.input")
oo=procSample(xx,cval=150)
fit<-try(emcncf(oo),silent=TRUE)
alpha=fit$purity
if (!is.na(alpha)){
ploidy=fit$ploidy
segment=data.frame(chr=fit$cncf$chrom,startpos=fit$cncf$start,endpos=fit$cncf$end,nMajor=(fit$cncf$tcn.em-fit$cncf$lcn.em),nMinor=fit$cncf$lcn.em)
segment=segment[!is.na(segment$nMajor)&!is.na(segment$nMinor)&segment$nMajor!=0&segment$nMinor!=0,]
DNAout=list(alpha=alpha,segment=segment,ploidy=ploidy)
return (DNAout)
}
}
ASCATrun<-function(DNAinput,sample,chromosome){
ASCATdata=ASCATin(data=DNAinput,sample=sample)
##run ASCAT
ASCATres<-try(ASCATout(ASCATdata=ASCATdata,sample=sample,chromosome=chromosome),silent=TRUE)
if (mode(ASCATres)=="list"){
return(ASCATres)
}
}
###DNACNA: dispatch to ASCAT (i=1), TITAN (i=2), FACETS (i=3) or sequenza (otherwise); each caller is wrapped in try()
DNACNA<-function(DNAinput,sample,tempth,chromosome,i){
if (i==1){
print.noquote("Run germline data")
ASCATres<-try(ASCATrun(DNAinput,sample,chromosome),silent=TRUE)
return(ASCATres)
}else if (i==2){
registerDoMC(cores = 4)
TITANres<-try(TITANout(DNAinput=DNAinput,chromosome=chromosome),silent=TRUE)
return (TITANres)
}else if (i ==3){
FACETSres<-try(FACETSout(DNAinput=DNAinput),silent=TRUE)
return (FACETSres)
}else{
sequenzres<-try(sequenzRun(data=DNAinput,sample=sample,chromosome=chromosome),silent=TRUE)
return (sequenzres)
}
}
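## Hypothetical dispatch sketch mirroring the foreach() call in DNArun() below; "toySample" and
## tempdir() are placeholders. Each branch is wrapped in try(), so a failing caller returns a
## "try-error" object instead of aborting the whole run.
# res=lapply(1:4,function(i) DNACNA(DNAinput=DNAinput,sample="toySample",tempth=tempdir(),
#                                   chromosome=paste("chr",c(1:22,"X","Y"),sep=""),i=i))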
###somaticCN: compute somatic mutation copy numbers from the i-th caller's purity and segments
somaticCN<-function(res,i,somaticdata,method){
if (mode(res[[i]])=="list"){
if (method[i]=="TITAN" & !is.null(res[[i]])&!is.null(res[[i]]$alpha)){
DNAalpha=res[[i]]$alpha
DNAploidy=res[[i]]$ploidy
segment=res[[i]]$segment
segment=data.frame(chr=paste("chr",segment$Chromosome,sep=""),startpos=as.numeric(as.character(segment$Start_Position.bp.)),endpos=as.numeric(as.character(segment$End_Position.bp.)),nMajor=segment$MajorCN,nMinor=segment$MinorCN)
somaticres=MCN(alpha=DNAalpha,segment=segment,somatic=somaticdata)
return (somaticres)
}else if (method[i]=="FACETS" & !is.null(res[[i]])&!is.null(res[[i]]$alpha)){
DNAalpha=res[[i]]$alpha
DNAploidy=res[[i]]$ploidy
segment=res[[i]]$segment
segment=data.frame(chr=paste("chr",segment$chr,sep=""),startpos=segment$startpos,endpos=segment$endpos,nMajor=segment$nMajor,nMinor=segment$nMinor)
somaticres=MCN(alpha=DNAalpha,segment=segment,somatic=somaticdata)
return (somaticres)
}else if (!is.null(res[[i]])&!is.null(res[[i]]$alpha)){
DNAalpha=res[[i]]$alpha
DNAploidy=res[[i]]$ploidy
segment=res[[i]]$segment
somaticres=MCN(alpha=DNAalpha,segment=segment,somatic=somaticdata)
return (somaticres)
}
}
}
###randomCN: recompute mutation copy numbers on binomially re-sampled alternate counts (null model)
randomCN<-function(res,i,randomdata,somaticres,method){
if (mode(res[[i]])=="list"){
if (method[i]=="TITAN" & !is.null(res[[i]])&!is.null(res[[i]]$alpha)&!is.null(somaticres[[i]])){
if (dim(somaticres[[i]])[1]>0){
DNAalpha=res[[i]]$alpha
segment=res[[i]]$segment
segment=data.frame(chr=paste("chr",segment$Chromosome,sep=""),startpos=as.numeric(as.character(segment$Start_Position.bp.)),endpos=as.numeric(as.character(segment$End_Position.bp.)),nMajor=segment$MajorCN,nMinor=segment$MinorCN)
registerDoMC(cores = 4)
randomout=foreach (j = 1:dim(randomdata[[i]])[2], .combine=cbind) %dopar% RMCN(R=randomdata[[i]][,j],somatic=somaticres[[i]],alpha=DNAalpha,segment=segment)
#randomout=apply(randomdata[[i]],2,RMCN,somatic=somaticres[[i]],alpha=DNAalpha,segment=segment)
return(randomout)
}
}else if (method[i]=="FACETS" & !is.null(res[[i]])&!is.null(res[[i]]$alpha)&!is.null(somaticres[[i]])){
if (dim(somaticres[[i]])[1]>0){
DNAalpha=res[[i]]$alpha
segment=res[[i]]$segment
segment=data.frame(chr=paste("chr",segment$chr,sep=""),startpos=segment$startpos,endpos=segment$endpos,nMajor=segment$nMajor,nMinor=segment$nMinor)
registerDoMC(cores = 4)
randomout=foreach (j = 1:dim(randomdata[[i]])[2], .combine=cbind) %dopar% RMCN(R=randomdata[[i]][,j],somatic=somaticres[[i]],alpha=DNAalpha,segment=segment)
#randomout=apply(randomdata[[i]],2,RMCN,somatic=somaticres[[i]],alpha=DNAalpha,segment=segment)
return(randomout)
}
}else if (!is.null(res[[i]])&!is.null(res[[i]]$alpha)&!is.null(somaticres[[i]])){
if (dim(somaticres[[i]])[1]>0){
DNAalpha=res[[i]]$alpha
segment=res[[i]]$segment
registerDoMC(cores = 4)
randomout=foreach (j = 1:dim(randomdata[[i]])[2], .combine=cbind) %dopar% RMCN(R=randomdata[[i]][,j],somatic=somaticres[[i]],alpha=DNAalpha,segment=segment)
return(randomout)
}
}
}
}
###randomDIF/realDiff/realSub/subcloneP: summary statistics used to derive the permutation p-values PD and PS
randomDIF<-function(randomout,somaticres,i){
if (!is.null(randomout[[i]])){
if (dim(somaticres[[i]])>1){
randomDIF=apply(randomout[[i]],2,difCN)
aveDIF=apply(randomDIF,2,sum)/dim(somaticres[[i]])[1]
return(aveDIF)
}
}
}
realDiff<-function(somaticres,i){
if (!is.null(somaticres[[i]])){
realDIF=sum(abs(somaticres[[i]]$SMCN-somaticres[[i]]$SACN))/dim(somaticres[[i]])[1]
return(realDIF)
}
}
realSub<-function(somaticres,i){
if (!is.null(somaticres[[i]])){
realsub=sum(somaticres[[i]]$SACN==0)/dim(somaticres[[i]])[1]
return(realsub)
}
}
subcloneP<-function(x){
sub0<-function(y){
return(sum(round(y)==0))
}
if (!is.null(x)){
randomsub=apply(x,2,sub0)
return(randomsub)
}
}
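## Sketch of the permutation test assembled from the helpers above (toy numbers, not real output):
## PD compares the observed mean |SMCN - round(SMCN)| with the same statistic on binomially
## re-sampled counts, and PS does the analogous comparison for the fraction of mutations with SACN == 0.
# aveDIF_toy=c(0.30,0.28,0.35,0.40); realDIF_toy=0.25
# 1-length(aveDIF_toy[aveDIF_toy>=realDIF_toy])/length(aveDIF_toy)   # PD = 0 in this toy case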
###DNArun: get final result at the DNA level by running all four callers, selecting the best-supported one and, if needed, refining it with iterOPT()
DNArun<-function(SNPinput,somaticinput,sample,temppath){
DNAinput=read.csv(SNPinput,sep="\t",header=TRUE)
colnames(DNAinput)=c("chr","pos","ref","alt","refNumN","altNumN","refNumT","altNumT")
chromosome=paste("chr",c(1:22,"X","Y"),sep="")
if (length(levels(factor(DNAinput$chr)))>5){
DNAinput$Tsum=DNAinput$refNumT+DNAinput$altNumT
DNAinput$Nsum=DNAinput$refNumN+DNAinput$altNumN
DNAinput=DNAinput[DNAinput$Tsum!=0,]
DNAinput$tfrac=(DNAinput$altNumT)/(DNAinput$Tsum)
DNAinput$nfrac=(DNAinput$altNumN)/(DNAinput$Nsum)
DNAinput$TlogR=log2(DNAinput$Tsum/DNAinput$Nsum)
DNAinput$BAF[DNAinput$tfrac>=0.5]=DNAinput$tfrac[DNAinput$tfrac>=0.5]
DNAinput$BAF[DNAinput$tfrac<0.5]=1-DNAinput$tfrac[DNAinput$tfrac<0.5]
DNAinput$Ratio=(DNAinput$altNumT+DNAinput$refNumT)/(DNAinput$altNumN+DNAinput$refNumN)
DNAinput=DNAinput[DNAinput$Tsum>=10&DNAinput$Nsum>=10,]
DNAinput=DNAinput[sapply(as.character(DNAinput$chr),nchar)<=5,]
#rlength=nchar(as.character(DNAinput$ref))
#alength=nchar(as.character(DNAinput$alt))
#DNAinput=DNAinput[rlength==1&alength==1,]
setwd(temppath)
methods=c("ASCAT","TITAN","FACETS","sequenz")
somaticdata=read.csv(somaticinput,header=TRUE,sep="\t")
colnames(somaticdata)=c("chr","pos","ref","alt","refNumN","altNumN","refNumT","altNumT")
somaticdata$Tsum=somaticdata$altNumT+somaticdata$refNumT
somaticdata=somaticdata[somaticdata$Tsum>=10,]
somaticdata$tfrac=somaticdata$altNumT/somaticdata$Tsum
#rlength=nchar(as.character(somaticdata$ref))
#alength=nchar(as.character(somaticdata$alt))
#somaticdata=somaticdata[rlength==1&alength==1,]
registerDoMC(cores = 4)
times=1000
res=foreach(i = 1:4) %dopar% DNACNA(DNAinput=DNAinput,sample=sample,tempth=temppath,chromosome=chromosome,i)
print.noquote("Somatic mutation copy number")
somaticres<-foreach(i =1:length(res)) %dopar% somaticCN(res=res,i,somaticdata=somaticdata,method=methods)
somaticindex=0
for (i in 1:length(somaticres)){
if (!is.null(somaticres[[i]])){
if (dim(somaticres[[i]])[1]>0){
somaticindex=1
break
}
}
}
if (somaticindex!=0){
randomdata<-foreach(i=1:length(somaticres)) %dopar% randomSample(data=data.frame(Tsum=somaticres[[i]]$Tsum,tfrac=somaticres[[i]]$tfrac),times=times)
randomout<-foreach(i =1:length(somaticres)) %dopar% randomCN(res,i,randomdata,somaticres,method=methods)
aveDIF<-foreach(i=1:length(somaticres)) %dopar% randomDIF(randomout,somaticres,i)
realDIF<-foreach(i=1:length(somaticres)) %dopar% realDiff(somaticres,i)
realsub<-foreach(i=1:length(somaticres)) %dopar% realSub(somaticres,i)
randomsub<-foreach(i = 1:length(somaticres)) %dopar% subcloneP(x=randomout[[i]])
PDl=list()
PSl=list()
for (i in 1:length(realDIF)){
if (!is.null(realDIF[[i]])){
PDl[[i]]=1-length(aveDIF[[i]][aveDIF[[i]]>=realDIF[[i]]])/length(aveDIF[[i]])
PSl[[i]]=length(randomsub[[i]][randomsub[[i]]<sum(somaticres[[i]]$SACN==0)])/times
}
}
modelindex=c()
k=1
for (i in 1:length(PDl)){
if (!is.null(PDl[[i]])){
if(!is.na(PDl[[i]])){
modelindex[k]=i
k=k+1
}
}
}
PD=c()
PS=c()
for (k in 1:length(modelindex)){
PD[k]=PDl[[modelindex[k]]]
PS[k]=PSl[[modelindex[k]]]
}
minimum=min(PD+PS)
selmeth=methods[modelindex[which(PD+PS==minimum)]]
if ("ASCAT" %in% selmeth){
DNAout=res[[1]]
DNAalpha=res[[1]]$alpha
DNAploidy=res[[1]]$ploidy
segment=res[[1]]$segment
somaticres=somaticres[[1]]
realDIF=realDIF[[1]]
realsub=realsub[[1]]
randomdata=randomdata[[1]]
PD=PDl[[1]]
PS=PSl[[1]]
resmthod="ASCAT"
}else if ("FACETS" %in% selmeth){
DNAout=res[[3]]
DNAalpha=res[[3]]$alpha
DNAploidy=res[[3]]$ploidy
segment=res[[3]]$segment
segment=data.frame(chr=paste("chr",segment$chr,sep=""),startpos=segment$startpos,endpos=segment$endpos,nMajor=segment$nMajor,nMinor=segment$nMinor)
somaticres=somaticres[[3]]
realDIF=realDIF[[3]]
realsub=realsub[[3]]
randomdata=randomdata[[3]]
PD=PDl[[3]]
PS=PSl[[3]]
resmthod="FACETS"
}else if ("TITAN" %in% selmeth){
DNAout=res[[2]]
DNAalpha=res[[2]]$alpha
DNAploidy=res[[2]]$ploidy
segment=res[[2]]$segment
segment=data.frame(chr=paste("chr",segment$Chromosome,sep=""),startpos=as.numeric(as.character(segment$Start_Position.bp.)),endpos=as.numeric(as.character(segment$End_Position.bp.)),nMajor=segment$MajorCN,nMinor=segment$MinorCN)
somaticres=somaticres[[2]]
realDIF=realDIF[[2]]
realsub=realsub[[2]]
randomdata=randomdata[[2]]
PD=PDl[[2]]
PS=PSl[[2]]
resmthod="TITAN"
}else{
DNAout=res[[4]]
DNAalpha=res[[4]]$alpha
DNAploidy=res[[4]]$ploidy
segment=res[[4]]$segment
somaticres=somaticres[[4]]
realDIF=realDIF[[4]]
realsub=realsub[[4]]
randomdata=randomdata[[4]]
PD=PDl[[4]]
PS=PSl[[4]]
resmthod="sequenz"
}
DNAout=list()
DNAout$segment=segment
DNAout$alpha=DNAalpha
DNAout$ploidy=DNAploidy
DNAout$somatic=somaticres
DNAout$method=resmthod
if (PD >= 0.05 | PS >= 0.05){
print.noquote("Iter")
iterout=iterOPT(SNP=DNAinput,somatic=somaticres,segment=segment,realDIF=realDIF,realsub=realsub,randomdata=randomdata,times=times,PD=PD,PS=PS,DNAalpha=DNAalpha,cutoff=50)
if (length(iterout)==0){
return(DNAout)
}else{
iterout$method=resmthod
return(iterout)
}
}else{
return(DNAout)
}
}else{
DNAres=list()
k=1
for (i in 1:4){
if (mode(res[[i]])=="list"){
if (!is.null(res[[i]]$alpha)){
res[[i]]$method=methods[i]
DNAres[[k]]=res[[i]]
k=k+1
}
}
}
ll=c()
for (j in 1:length(DNAres)){
ll[j]=dim(DNAres[[j]]$segment)[1]
}
DNAout=DNAres[[which.max(ll)]]
return(DNAout)
}
}else{
print.noquote(paste(sample,":Germline data is insufficient",sep=""))
}
}
###DNArun1: same as DNArun() but returns the best-supported caller directly without the iterative refinement step
DNArun1<-function(SNPinput,somaticinput,sample,temppath){
DNAinput=read.csv(SNPinput,sep="\t",header=TRUE)
colnames(DNAinput)=c("chr","pos","ref","alt","refNumN","altNumN","refNumT","altNumT")
chromosome=paste("chr",c(1:22,"X","Y"),sep="")
if (length(levels(factor(DNAinput$chr)))>5){
DNAinput$Tsum=DNAinput$refNumT+DNAinput$altNumT
DNAinput$Nsum=DNAinput$refNumN+DNAinput$altNumN
DNAinput$tfrac=(DNAinput$altNumT)/(DNAinput$Tsum)
DNAinput$nfrac=(DNAinput$altNumN)/(DNAinput$Nsum)
DNAinput$TlogR=log2(DNAinput$Tsum/DNAinput$Nsum)
DNAinput=DNAinput[DNAinput$Tsum!=0,]
DNAinput$BAF[DNAinput$tfrac>=0.5]=DNAinput$tfrac[DNAinput$tfrac>=0.5]
DNAinput$BAF[DNAinput$tfrac<0.5]=1-DNAinput$tfrac[DNAinput$tfrac<0.5]
DNAinput$Ratio=(DNAinput$altNumT+DNAinput$refNumT)/(DNAinput$altNumN+DNAinput$refNumN)
DNAinput=DNAinput[DNAinput$Tsum>=10&DNAinput$Nsum>=10,]
DNAinput=DNAinput[sapply(as.character(DNAinput$chr),nchar)<=5,]
#rlength=nchar(as.character(DNAinput$ref))
#alength=nchar(as.character(DNAinput$alt))
#DNAinput=DNAinput[rlength==1&alength==1,]
setwd(temppath)
methods=c("ASCAT","TITAN","FACETS","sequenz")
somaticdata=read.csv(somaticinput,header=TRUE,sep="\t")
colnames(somaticdata)=c("chr","pos","ref","alt","refNumN","altNumN","refNumT","altNumT")
somaticdata$Tsum=somaticdata$altNumT+somaticdata$refNumT
somaticdata$tfrac=somaticdata$altNumT/somaticdata$Tsum
somaticdata=somaticdata[somaticdata$Tsum>=10,]
#rlength=nchar(as.character(somaticdata$ref))
#alength=nchar(as.character(somaticdata$alt))
#somaticdata=somaticdata[rlength==1&alength==1,]
registerDoMC(cores = 4)
times=1000
res=foreach(i = 1:4) %dopar% DNACNA(DNAinput=DNAinput,sample=sample,tempth=temppath,chromosome=chromosome,i)
newmethod=c()
newres=list()
k=1
DNAres=list()
for (i in 1:4){
if (!is.null(res[[i]])){
if (!is.null(res[[i]]$alpha)){
if (i ==1 ){
DNAres$ASCAT=res[[i]]
}else if (i ==2){
DNAres$TITAN=res[[i]]
}else if (i == 3){
DNAres$FACETS=res[[i]]
}else{
DNAres$sequenza=res[[i]]
}
newmethod[k]=methods[i]
newres[[k]]=res[[i]]
k=k+1
}
}
}
print.noquote("Somatic mutation copy number")
somaticres<-foreach(i =1:length(newmethod)) %dopar% somaticCN(res=newres,i,somaticdata=somaticdata,method=newmethod)
somaticindex=0
for (i in 1:length(somaticres)){
if (!is.null(somaticres[[i]])){
if (dim(somaticres[[i]])[1]>0){
somaticindex=1
break
}
}
}
if (somaticindex!=0){
randomdata<-foreach(i=1:length(newmethod)) %dopar% randomSample(data=data.frame(Tsum=somaticres[[i]]$Tsum,tfrac=somaticres[[i]]$tfrac),times=times)
randomout<-foreach(i =1:length(newmethod)) %dopar% randomCN(newres,i,randomdata,somaticres,method=newmethod)
aveDIF<-foreach(i=1:length(newmethod)) %dopar% randomDIF(randomout,somaticres,i)
realDIF<-foreach(i=1:length(newmethod)) %dopar% realDiff(somaticres,i)
realsub<-foreach(i=1:length(newmethod)) %dopar% realSub(somaticres,i)
randomsub<-foreach(i = 1:length(newmethod)) %dopar% subcloneP(x=randomout[[i]])
PDl=list()
PSl=list()
for (i in 1:length(newmethod)){
if (!is.null(realDIF[[i]])){
PDl[[i]]=1-length(aveDIF[[i]][aveDIF[[i]]>=realDIF[[i]]])/length(aveDIF[[i]])
PSl[[i]]=length(randomsub[[i]][randomsub[[i]]<sum(somaticres[[i]]$SACN==0)])/times
}
}
modelindex=c()
k=1
for (i in 1:length(newmethod)){
if (!is.null(PDl[[i]])){
if (!is.na(PDl[[i]])){
modelindex[k]=i
k=k+1
}
}
}
PD=c()
PS=c()
for (k in 1:length(modelindex)){
PD[k]=PDl[[modelindex[k]]]
PS[k]=PSl[[modelindex[k]]]
}
minimum=min(PD+PS)
selmeth=newmethod[modelindex[which(PD+PS==minimum)]]
if ("ASCAT" %in% selmeth){
DNAout=newres[[which(newmethod=="ASCAT")]]
DNAalpha=newres[[which(newmethod=="ASCAT")]]$alpha
DNAploidy=newres[[which(newmethod=="ASCAT")]]$ploidy
segment=newres[[which(newmethod=="ASCAT")]]$segment
somaticres=somaticres[[which(newmethod=="ASCAT")]]
realDIF=realDIF[[which(newmethod=="ASCAT")]]
realsub=realsub[[which(newmethod=="ASCAT")]]
randomdata=randomdata[[which(newmethod=="ASCAT")]]
PD=PDl[[which(newmethod=="ASCAT")]]
PS=PSl[[which(newmethod=="ASCAT")]]
resmthod="ASCAT"
}else if ("FACETS" %in% selmeth){
DNAout=newres[[which(newmethod=="FACETS")]]
DNAalpha=newres[[which(newmethod=="FACETS")]]$alpha
DNAploidy=newres[[which(newmethod=="FACETS")]]$ploidy
segment=newres[[which(newmethod=="FACETS")]]$segment
segment=data.frame(chr=paste("chr",segment$chr,sep=""),startpos=segment$startpos,endpos=segment$endpos,nMajor=segment$nMajor,nMinor=segment$nMinor)
somaticres=somaticres[[which(newmethod=="FACETS")]]
realDIF=realDIF[[which(newmethod=="FACETS")]]
realsub=realsub[[which(newmethod=="FACETS")]]
randomdata=randomdata[[which(newmethod=="FACETS")]]
PD=PDl[[which(newmethod=="FACETS")]]
PS=PSl[[which(newmethod=="FACETS")]]
resmthod="FACETS"
}else if ("TITAN" %in% selmeth){
DNAout=newres[[which(newmethod=="TITAN")]]
DNAalpha=newres[[which(newmethod=="TITAN")]]$alpha
DNAploidy=newres[[which(newmethod=="TITAN")]]$ploidy
segment=newres[[which(newmethod=="TITAN")]]$segment
segment=data.frame(chr=paste("chr",segment$Chromosome,sep=""),startpos=as.numeric(as.character(segment$Start_Position.bp.)),endpos=as.numeric(as.character(segment$End_Position.bp.)),nMajor=segment$MajorCN,nMinor=segment$MinorCN)
somaticres=somaticres[[which(newmethod=="TITAN")]]
realDIF=realDIF[[which(newmethod=="TITAN")]]
realsub=realsub[[which(newmethod=="TITAN")]]
randomdata=randomdata[[which(newmethod=="TITAN")]]
PD=PDl[[which(newmethod=="TITAN")]]
PS=PSl[[which(newmethod=="TITAN")]]
resmthod="TITAN"
}else{
DNAout=newres[[which(newmethod=="sequenz")]]
DNAalpha=newres[[which(newmethod=="sequenz")]]$alpha
DNAploidy=newres[[which(newmethod=="sequenz")]]$ploidy
segment=newres[[which(newmethod=="sequenz")]]$segment
somaticres=somaticres[[which(newmethod=="sequenz")]]
realDIF=realDIF[[which(newmethod=="sequenz")]]
realsub=realsub[[which(newmethod=="sequenz")]]
randomdata=randomdata[[which(newmethod=="sequenz")]]
PD=PDl[[which(newmethod=="sequenz")]]
PS=PSl[[which(newmethod=="sequenz")]]
resmthod="sequenz"
}
DNAout=list()
DNAout$segment=segment
DNAout$alpha=DNAalpha
DNAout$ploidy=DNAploidy
DNAout$somatic=somaticres
DNAout$method=resmthod
#DNAres$DNAout=DNAout
return(DNAout)
}else{
DNAres=list()
k=1
for (i in 1:length(res)){
if (mode(res[[i]])=="list"){
if (!is.null(res[[i]]$alpha)){
res[[i]]$method=methods[i]
DNAres[[k]]=res[[i]]
k=k+1
}
}
}
ll=c()
for (j in 1:length(DNAres)){
ll[j]=dim(DNAres[[j]]$segment)[1]
}
DNAout=DNAres[[which.max(ll)]]
return(DNAout)
}
}else{
print.noquote(paste(sample,":Germline data is insuccifient",sep=""))
}
}
###Consistent: overlap between the kernel densities of two copy-number vectors (via sfsmisc::integrate.xy)
Consistent<-function(GCN,MCN){
lower=min(c(GCN,MCN))-1
upper=max(c(GCN,MCN))+1
da=density(GCN,from=lower,to=upper)
db=density(MCN,from=lower,to=upper)
d=data.frame(x=da$x,a=da$y,b=db$y)
d$w=pmin(d$a,d$b)
total=integrate.xy(d$x,d$a)+integrate.xy(d$x,d$b)
intersection=integrate.xy(d$x,d$w)
overlap=2*intersection/total
diff=(integrate.xy(d$x,d$b)-intersection)/integrate.xy(d$x,d$b)
return(list(overlap=overlap,diff=diff))
}
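## Illustrative sketch (simulated input, an assumption): Consistent() measures the overlap of the
## two kernel densities with sfsmisc::integrate.xy (sfsmisc must be loaded by the calling script);
## feeding the same vector twice gives an overlap close to 1.
# set.seed(1); cn=rnorm(200,mean=2,sd=0.3)
# Consistent(GCN=cn,MCN=cn)$overlap   # ~1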
###Heterogeneity: 1 - overlap between segment allelic copy numbers and somatic mutation copy numbers
Heterogeneity<-function(DNAout){
if ("somatic" %in% names(DNAout)){
segment=DNAout$segment
somatic=DNAout$somatic
somatic1=c()
for (i in 1:dim(segment)[1]){
subdata=somatic[somatic$chr==as.character(segment$chr[i])&somatic$pos>=segment$startpos[i]&somatic$pos<=segment$endpos[i],]
if (dim(subdata)[1]>0){
subdata$Dmajor=segment$nMajor[i]
subdata$Dminor=segment$nMinor[i]
somatic1=rbind(somatic1,subdata)
}
}
DNAout$heterogeneity=1-Consistent(c(somatic1$Dmajor,somatic1$Dminor),somatic1$SMCN)$overlap
}else{
DNAout$heterogeneity=0
}
return(DNAout)
}
###############################################
###wildtype: collect the mutant-allele (altD/altR) and wild-type-allele (wildD/wildR) copy-number estimates for each variant within a segment
wildtype<-function(data,segment,type,resout){
for (i in 1:dim(segment)[1]){
subdata=data[data$chr==as.character(segment$chr[i])&data$pos>=segment$startpos[i]&data$pos<=segment$endpos[i],]
Dmajor=segment$nMajor[i]
Dminor=segment$nMinor[i]
if (dim(subdata)[1]>0){
subres=subdata[,1:6]
if (type == "somatic"){
index=which(is.na(subdata$BayesP))
if (length(index)>0){
subdata$RTCN[index]=(Dmajor+Dminor)*2^(subdata$ratio[index])
subdata$RMCN[index]=subdata$RTCN[index]*subdata$tfrac[index]
subdata$BayesP[index]=1-10^(-abs(subdata$RMCN[index]-subdata$DMCN[index]))
}
index=which(subdata$RTCN==Inf|subdata$RTCN==-Inf)
if (length(index)>0){
subdata$RTCN[index]=(Dmajor+Dminor)*2^(subdata$ratio[index])
subdata$RMCN[index]=subdata$RTCN[index]*subdata$tfrac[index]
}
}
if (type=="germline"){
altD=subdata$Dminor
wildD=subdata$Dmajor
altD[subdata$alt==subdata$DMajorAllele]=subdata$Dmajor[subdata$alt==subdata$DMajorAllele]
wildD[subdata$alt==subdata$DMajorAllele]=subdata$Dminor[subdata$alt==subdata$DMajorAllele]
altR=abs(subdata$Rminor)
wildR=subdata$Rmajor
altR[subdata$alt==subdata$RMajorAllele]=subdata$Rmajor[subdata$alt==subdata$RMajorAllele]
wildR[subdata$alt==subdata$RMajorAllele]=abs(subdata$Rminor[subdata$alt==subdata$RMajorAllele])
subres=cbind(subres,rep("Germline",length=dim(subres)[1]))
subres=cbind(subres,altD,altR,wildD,wildR)
subres=cbind(subres,subdata$BayesP)
}else{
altD=subdata$DMCN
altR=subdata$RMCN
wildD=ifelse(abs(altD-Dmajor)<abs(altD-Dminor),Dminor,Dmajor)
wildR=subdata$RTCN-subdata$RMCN
subres=cbind(subres,rep("Somatic",length=dim(subres)))
subres=cbind(subres,altD,altR,wildD,wildR)
subres=cbind(subres,subdata$BayesP)
}
names(subres)=c("chr","pos","ref","alt","refNum","altNum","type","altD","altR","wildD","wildR","BayesP")
resout=rbind(resout,subres)
}
}
return(resout)
}
###altalle: assign the alternate/reference labels to the two DNA copy numbers according to which is closer to the observed expression value
altalle<-function(x){
dif1=abs(x[2]-x[1])
dif2=abs(x[2]-x[3])
if (dif1<=dif2){
return(data.frame(alt=x[1],ref=x[3]))
}else{
return(data.frame(alt=x[3],ref=x[1]))
}
}
###WGD: fraction of the segmented genome length with total copy number above two (whole-genome-doubling indicator)
WGD<-function(segment,SNPinput){
DNAinput=read.csv(SNPinput,sep="\t",header=TRUE)
colnames(DNAinput)=c("chr","pos","ref","alt","refNumN","altNumN","refNumT","altNumT")
segment$TCN=segment$Dmajor+segment$Dminor
if (dim(segment)[1]==1){
subdata=DNAinput[DNAinput$chr==as.character(segment$chr)&DNAinput$pos>=segment$start&DNAinput$pos<=segment$end,]
ll=dim(DNAinput)[1]
gdouble=dim(subdata)[1]
}else{
ll=sum(as.numeric(segment$end-segment$start))
gdouble=sum(as.numeric(segment$end[segment$TCN>2]-segment$start[segment$TCN>2]))
}
return(gdouble/ll)
}
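## Illustrative sketch (toy segments, an assumption): WGD() reports the fraction of the segmented
## length with total copy number above 2; SNPinput must point to an existing germline allele-count
## file because it is read even though it is only used when a single segment is supplied.
# toy_seg=data.frame(chr=c("chr1","chr1"),start=c(1,61),end=c(60,100),Dmajor=c(2,1),Dminor=c(1,1))
# WGD(segment=toy_seg,SNPinput=SNPinput)   # 59/98 ~ 0.60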
###DACRE: dosage-adjusted difference between mutant- and wild-type-allele expression, plus the allelic expression imbalance (AEI)
DACRE<-function(resout){
germ=resout[resout$type=="Germline",]
germ1=do.call(rbind,apply(germ[,8:10],1,altalle))
germ1=cbind(germ$altR,germ$wildR,germ1,germ$BayesP)
germ1=as.data.frame(germ1)
names(germ1)=c("ASELm","ASELw","ASCNm","ASCNw","BayesP")
germ1$ASEmP=germ1$ASELm-exp(-abs(germ1$ASCNm-1))*germ1$ASCNm
germ1$ASEwP=germ1$ASELw-exp(-abs(germ1$ASCNw-1))*germ1$ASCNw
germ1$ASEm=germ1$ASELm-germ1$ASCNm
germ1$ASEw=germ1$ASELw-germ1$ASCNw
germ1$ASELm[germ1$ASELm==0]=0.01
germ1$ASELw[germ1$ASELw==0]=0.01
germ1$AEI=log(germ1$ASELm/germ1$ASELw,base=2)
germ1$DACRE=germ1$ASEmP-germ1$ASEwP
germres=cbind(germ,germ1$ASEm,germ1$AEI,germ1$DACRE)
names(germres)=c(names(germ),"eASEL","AEI","DACRE")
soma=resout[resout$type=="Somatic",]
if (dim(soma)[1]>0){
soma$wildR[soma$wildR<0]=soma$altR[soma$wildR<0]*soma$wildD[soma$wildR<0]/soma$altD[soma$wildR<0]
soma1=soma
soma1$ASEmP=soma1$altR-exp(-abs(soma1$altD-1))*soma1$altD
soma1$ASEwP=soma1$wildR-exp(-abs(soma1$wildD-1))*soma1$wildD
soma1$ASEm=soma1$altR-soma1$altD
soma1$ASEw=soma1$wildR-soma1$wildD
soma1$wildR[soma1$wildR==0]=0.01
soma1$altR[soma1$altR==0]=0.01
soma1$AEI=log(soma1$altR/soma1$wildR,base=2)
soma1$DACRE=soma1$ASEmP-soma1$ASEwP
somares=cbind(soma,soma1$ASEm,soma1$AEI,soma1$DACRE)
names(somares)=c(names(soma),"eASEL","AEI","DACRE")
res=rbind(germres,somares)
}else{
res=germres
}
return(res)
}
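## Worked example of the dosage-adjusted terms defined in DACRE() (values are assumptions):
## for a somatic site with altR=3, altD=2, wildR=1, wildD=1,
## ASEmP = 3 - exp(-|2-1|)*2 ~ 2.26 and ASEwP = 1 - exp(-|1-1|)*1 = 0, so DACRE ~ 2.26,
## i.e. the mutant allele is expressed above what its DNA copy number alone would suggest.
# 3-exp(-abs(2-1))*2
# 1-exp(-abs(1-1))*1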
|
/DNAfunction.R
|
no_license
|
xtmgah/Texomer
|
R
| false | false | 50,979 |
r
|
# TODO: Add comment
#
# Author: FWang9
###############################################################################
############Using ASCAT
############segmentation and estimate DNA copy number based on germline mutation
############the formate of inputdata were chr, position, refallele counts in tumor, altAllele counts in tumor, refallele counts in normal, altAllele counts in normal
############tab seprate and without headline
############load ASCAT function
############install bbmle and emdbook package
##############################################################################################################
####part1.run ASCAT to get segmentation and allelic copy number in DNA level
###ASCAT in: get the inputdata of ASCAT
ASCATin<-function(data,sample){
Me=median(data$TlogR)
M=max(data$TlogR)
m=min(data$TlogR)
data$Tlog[data$TlogR>=Me]=(data$TlogR[data$TlogR>=Me]-Me)/(M-Me)
data$Tlog[data$TlogR<Me]=(data$TlogR[data$TlogR<Me]-Me)/(Me-m)
data$Nlog=1
tumor_logR=data.frame(SNP=paste("SNP",c(1:dim(data)[1]),sep=""),chr=as.character(data$chr),pos=data$pos,sample=data$Tlog)
tumor_BAF=data.frame(SNP=paste("SNP",c(1:dim(data)[1]),sep=""),chr=as.character(data$chr),pos=data$pos,sample=data$tfrac)
normal_logR=data.frame(SNP=paste("SNP",c(1:dim(data)[1]),sep=""),chr=as.character(data$chr),pos=data$pos,sample=data$Nlog)
normal_BAF=data.frame(SNP=paste("SNP",c(1:dim(data)[1]),sep=""),chr=as.character(data$chr),pos=data$pos,sample=data$nfrac)
colnames(tumor_logR)=c("SNP","chr","pos",sample)
colnames(tumor_BAF)=c("SNP","chr","pos",sample)
colnames(normal_logR)=c("SNP","chr","pos",sample)
colnames(normal_BAF)=c("SNP","chr","pos",sample)
tumor_logRname=paste(sample,"_tumorlogR.txt",sep="")
tumor_BAFname=paste(sample,"_tumorBAF.txt",sep="")
normal_logRname=paste(sample,"_normallogR.txt",sep="")
normal_BAFname=paste(sample,"_normalBAF.txt",sep="")
write.table(tumor_logR,tumor_logRname,sep="\t",col.names=TRUE,row.names = FALSE,quote = FALSE)
write.table(tumor_BAF,tumor_BAFname,sep="\t",col.names=TRUE,row.names = FALSE,quote = FALSE)
write.table(normal_logR,normal_logRname,sep="\t",col.names=TRUE,row.names = FALSE,quote = FALSE)
write.table(normal_BAF,normal_BAFname,sep="\t",col.names=TRUE,row.names = FALSE,quote = FALSE)
ASCATdata=c(tumor_logRname,tumor_BAFname,normal_logRname,normal_BAFname)
return (ASCATdata)
}
###ASCATout: get the output of ASCAT
ASCATout<-function(ASCATdata,sample,chromosome){
tumor_logRname=ASCATdata[1]
tumor_BAFname=ASCATdata[2]
normal_logRname=ASCATdata[3]
normal_BAFname=ASCATdata[4]
ascat.bc = ascat.loadData(tumor_logRname,tumor_BAFname,normal_logRname,normal_BAFname,chrs=chromosome)
ascat.bc = ascat.aspcf(ascat.bc)
ascat.output = ascat.runAscat(ascat.bc)
alpha=ascat.output$aberrantcellfraction
segment=ascat.output$segments
ACN=data.frame(nMajor=segment$nMajor,nMinor=segment$nMinor)
segment$nMajor=apply(ACN,1,max)
segment$nMinor=apply(ACN,1,min)
segment=data.frame(chr=segment$chr,startpos=segment$startpos,endpos=segment$endpos,nMajor=segment$nMajor,nMinor=segment$nMinor)
alpha=ascat.output$aberrantcellfraction
ploidy=ascat.output$ploidy
DNAout=list(alpha=alpha,segment=segment,ploidy=ploidy)
#save(DNAout,file=paste(sample,".somaticCN.Rdata",sep=""))
return(DNAout)
}
####part 1 end
#############################################################################################################
#############################################################################################################
####part 2: estimate the copy number of somatic mutation and iterative optimization
#MCN: get the integer copy number of somatic mutation
MCN<-function(alpha,segment,somatic){
for (j in 1:dim(segment)[1]){
tempsomatic=somatic[somatic$chr==as.character(segment$chr[j])&somatic$pos>=segment$startpos[j]&somatic$pos<=segment$endpos[j],]
tempsomatic$SMCN=tempsomatic$tfrac*(2*(1-alpha)+alpha*(segment$nMajor[j]+segment$nMinor[j]))/alpha
if (j==1){
somaticnew=tempsomatic
}else{
somaticnew=rbind(somaticnew,tempsomatic)
}
}
somaticnew$SACN=round(somaticnew$SMCN)
return(somaticnew)
}
#MutPlot: plot the copy number of somatic mutation
MutPlot<-function(segment,data,plotname,index){
pdf(plotname,width=20,height=7)
d=sum(segment$endpos-segment$startpos)
nMajor=max(segment$nMajor,segment$nMinor,data$SMCN)
plot(0,0,col="white",xlim=c(0,d),ylim=c(0,nMajor),xlab="",ylab="Absolute copy number",main="",axes=FALSE)
chro=unique(segment$chr)
axisindex=0
for (j in 1:length(chro)){
subseg=segment[segment$chr==chro[j],]
subd=sum(subseg$endpos-subseg$startpos)
subsomatic=data[data$chr==chro[j],]
for (k in 1:dim(subseg)[1]){
subsom=subsomatic[subsomatic$pos>=subseg$startpos[k]&subsomatic$pos<=subseg$endpos[k],]
subsom$index=subsom$pos-subseg$startpos[k]
if (index==1){
points(subsom$index+axisindex,subsom$SACN,pch=20,col=rgb(0,0,1,alpha=0.7),cex=1.2)
}
else{
points(subsom$index+axisindex,subsom$SMCN,pch=20,col=rgb(0,0,1,alpha=0.7),cex=1.2)
}
if (subseg$nMajor[k]==subseg$nMinor[k]){
x=c(axisindex,axisindex+subseg$endpos[k]-subseg$startpos[k])
y1=rep(subseg$nMajor[k]-0.1,length(x))
lines(x,y1,col="purple",lwd=2)
y2=rep(subseg$nMajor[k]+0.1,length(x))
lines(x,y2,col="purple",lwd=2)
z=c(subseg$nMajor[k]-0.1,subseg$nMajor[k]+0.1)
x1=c(axisindex,axisindex)
lines(x1,z,col="purple",lwd=2)
x2=c(axisindex+subseg$endpos[k]-subseg$startpos[k],axisindex+subseg$endpos[k]-subseg$startpos[k])
lines(x2,z,col="purple",lwd=2)
}else{
x=c(axisindex,axisindex+subseg$endpos[k]-subseg$startpos[k])
y1=rep(subseg$nMajor[k]-0.1,length(x))
lines(x,y1,col="red",lwd=2)
y2=rep(subseg$nMajor[k]+0.1,length(x))
lines(x,y2,col="red",lwd=2)
z=c(subseg$nMajor[k]-0.1,subseg$nMajor[k]+0.1)
x1=c(axisindex,axisindex)
lines(x1,z,col="red",lwd=2)
x2=c(axisindex+subseg$endpos[k]-subseg$startpos[k],axisindex+subseg$endpos[k]-subseg$startpos[k])
lines(x2,z,col="red",lwd=2)
y11=rep(subseg$nMinor[k]-0.1,length(x))
lines(x,y11,col="green",lwd=2)
y21=rep(subseg$nMinor[k]+0.1,length(x))
lines(x,y21,col="green",lwd=2)
z=c(subseg$nMinor[k]-0.1,subseg$nMinor[k]+0.1)
lines(x1,z,col="green",lwd=2)
lines(x2,z,col="green",lwd=2)
}
axisindex=axisindex+subseg$endpos[k]-subseg$startpos[k]+1
}
abline(v=axisindex-1,col="gray")
text((axisindex-1-sum(subseg$endpos-subseg$startpos)+axisindex-1)/2,nMajor,substr(chro[j],4,nchar(chro[j])))
}
axis(side=2)
dev.off()
}
SNPplot<-function(segment,data,plotname,index,alpha){
pdf(plotname,width=20,height=7)
d=sum(segment$endpos-segment$startpos)
nMajor=max(segment$nMajor,segment$nMinor,data$SMCN)
plot(0,0,col="white",xlim=c(0,d),ylim=c(0,nMajor),xlab="",ylab="Absolute copy number",main="",axes=FALSE)
chro=unique(segment$chr)
axisindex=0
for (j in 1:length(chro)){
subseg=segment[segment$chr==chro[j],]
subd=sum(subseg$endpos-subseg$startpos)
subsomatic=data[data$chr==chro[j],]
for (k in 1:dim(subseg)[1]){
subsom=subsomatic[subsomatic$pos>=subseg$startpos[k]&subsomatic$pos<=subseg$endpos[k],]
TCN=subseg$nMajor[k]+subseg$nMinor[k]
subsom$SMCN=(subsom$tfrac*(2*(1-alpha)+alpha*TCN)-(1-alpha))/alpha
subsom$SACN=round(subsom$SMCN)
subsom$index=subsom$pos-subseg$startpos[k]
if (index==1){
points(subsom$index+axisindex,subsom$SACN,pch=20,col=rgb(0,0,1,alpha=0.7),cex=1.2)
}
else{
points(subsom$index+axisindex,subsom$SMCN,pch=20,col=rgb(0,0,1,alpha=0.7),cex=1.2)
}
if (subseg$nMajor[k]==subseg$nMinor[k]){
x=c(axisindex,axisindex+subseg$endpos[k]-subseg$startpos[k])
y1=rep(subseg$nMajor[k]-0.1,length(x))
lines(x,y1,col="purple",lwd=2)
y2=rep(subseg$nMajor[k]+0.1,length(x))
lines(x,y2,col="purple",lwd=2)
z=c(subseg$nMajor[k]-0.1,subseg$nMajor[k]+0.1)
x1=c(axisindex,axisindex)
lines(x1,z,col="purple",lwd=2)
x2=c(axisindex+subseg$endpos[k]-subseg$startpos[k],axisindex+subseg$endpos[k]-subseg$startpos[k])
lines(x2,z,col="purple",lwd=2)
}else{
x=c(axisindex,axisindex+subseg$endpos[k]-subseg$startpos[k])
y1=rep(subseg$nMajor[k]-0.1,length(x))
lines(x,y1,col="red",lwd=2)
y2=rep(subseg$nMajor[k]+0.1,length(x))
lines(x,y2,col="red",lwd=2)
z=c(subseg$nMajor[k]-0.1,subseg$nMajor[k]+0.1)
x1=c(axisindex,axisindex)
lines(x1,z,col="red",lwd=2)
x2=c(axisindex+subseg$endpos[k]-subseg$startpos[k],axisindex+subseg$endpos[k]-subseg$startpos[k])
lines(x2,z,col="red",lwd=2)
y11=rep(subseg$nMinor[k]-0.1,length(x))
lines(x,y11,col="green",lwd=2)
y21=rep(subseg$nMinor[k]+0.1,length(x))
lines(x,y21,col="green",lwd=2)
z=c(subseg$nMinor[k]-0.1,subseg$nMinor[k]+0.1)
lines(x1,z,col="green",lwd=2)
lines(x2,z,col="green",lwd=2)
}
axisindex=axisindex+subseg$endpos[k]-subseg$startpos[k]+1
}
abline(v=axisindex-1,col="gray")
text((axisindex-1-sum(subseg$endpos-subseg$startpos)+axisindex-1)/2,nMajor,substr(chro[j],4,nchar(chro[j])))
}
axis(side=2)
dev.off()
}
#randomBinom: random sampling based on binomial distribution
randomBinom<-function(y,times){
return(rbinom(times,y[1],y[2]))
}
#ransomSample: random sample based on times
randomSample<-function(data,times){
if (dim(data)[1]>0){
random=apply(data,1,randomBinom,times=times)
return(t(random))
}
}
#RMCN: get the allele copy number of random sampling
RMCN<-function(R,somatic,alpha,segment){
randomsomatic=data.frame(chr=somatic$chr,pos=somatic$pos,refNumT=somatic$refNumT,altNumT=R)
randomsomatic$tfrac=randomsomatic$altNumT/(randomsomatic$altNumT+randomsomatic$refNumT)
randomres=MCN(alpha,segment,somatic=randomsomatic)
return(randomres$SMCN)
}
#difCN: calculated the difference between allelic copy number and integer copy number
difCN<-function(x){
dif=abs(x-round(x))
return(dif)
}
####calculated the fraction of integer copy number = 0
#subclone: calculated the number of somatic mutation with copy number = 0
subclone<-function(x){
return(sum(round(x)==0))
}
###somaticPurity: calculated the tumor purity based on the copy number of somatic mutation
somaticPurity<-function(somatic,segment,DNAalpha){
alpha_up=c()
for (j in 1:dim(segment)[1]){
subdata=somatic[somatic$chr==as.character(segment$chr[j])&somatic$pos>=segment$startpos[j]&somatic$pos<=segment$endpos[j],]
if (dim(subdata)[1]>0){
TCN=segment$nMajor[j]+segment$nMinor[j]
alpha1=2*subdata$tfrac/(2*subdata$tfrac+subdata$SACN-subdata$tfrac*TCN)
alpha_up=c(alpha_up,alpha1)
}
}
alpha_up=alpha_up[!is.na(alpha_up)&alpha_up!=Inf]
if (length(alpha_up)<=1){
if (length(alpha_up)==0){
alpha1=DNAalpha
}else{
if (alpha_up>=0 & alpha_up <=1){
alpha1=alpha_up
}else{
alpha1=DNAalpha
}
}
}else{
alpha1=density(alpha_up)$x[which.max(density(alpha_up)$y)]
if (alpha1 > 1 | alpha1 < 0){
alpha_up=alpha_up[alpha_up >=0 & alpha_up <= 1]
if (length(alpha_up)>0){
if (length(alpha_up)==1){
alpha1=alpha_up
}else{
alpha1=density(alpha_up)$x[which.max(density(alpha_up)$y)]
}
}else{
alpha1=DNAalpha
}
}
}
return(alpha1)
}
##CNopt: determine the optimal copy number for each segment among mang candiate CNs
CNpeak<-function(CN){
peak=c()
peakvalue=c()
if (max(CN)<=0){
CN=CN+abs(min(CN))
}
CN=CN[CN>0]
if (length(CN)>3){
k=1
x=density(CN)$x
y=density(CN)$y
for (i in 2:(length(y)-1)){
if (y[i]>=y[i-1]&y[i]>=y[i+1]){
peak[k]=x[i]
peakvalue[k]=y[i]
k=k+1
}
}
peak=peak[peakvalue> (max(peakvalue)/10)]
}else{
peak=unique(CN)
}
return(peak)
}
CNopt<-function(data,expBAF){
dbetabinom(data$y,f1,data$N,theta,log=FALSE)
}
##upACN: update the copy number of segment and ploidy based on germline mutation
upACN<-function(data,segment,alpha){
ploidy=c()
for (j in 1:dim(segment)[1]){
subseg=segment[j,]
subdata=data[data$chr==as.character(segment$chr[j])&data$pos>=segment$startpos[j]&data$pos<=segment$endpo[j],]
if (dim(subdata)[1]>=10){
TCN=round((2*subdata$Ratio-2*(1-alpha))/alpha)
canTCN=round(CNpeak(TCN))
lCN=Inf
sCN=c()
if (length(canTCN)!=0){
for (i in 1:length(canTCN)){
nMajor=round((2*subdata$BAF*(1-alpha)+alpha*subdata$BAF*canTCN[i]-(1-alpha))/alpha)
canMajor=round(CNpeak(nMajor))
if (length(canMajor)>0){
for (k in 1:length(canMajor)){
expBAF=((1-alpha)+alpha*canMajor[k])/(2*(1-alpha)+alpha*canTCN[i])
if (expBAF <= 1 & expBAF >=0){
lCN1=-sum(dbinom(round(subdata$BAF*(subdata$refNumT+subdata$altNumT)),(subdata$refNumT+subdata$altNumT),expBAF,log=TRUE))
if (lCN1 < lCN){
sCN=c(canMajor[k],canTCN[i])
lCN=lCN1
}
}
}
}
}
if (length(sCN)!=0){
subseg$nMajor=sCN[1]
if (sCN[2]-sCN[1]<0){
subseg$nMinor=0
}else{
subseg$nMinor=sCN[2]-sCN[1]
}
}
}
}
if (j==1){
upsegment=subseg
}else{
upsegment=rbind(upsegment,subseg)
}
ploidy=c(ploidy,rep(subseg$nMajor+subseg$nMinor,length=dim(subdata)[1]))
}
upres=list(upsegment=upsegment,ploidy=mean(ploidy))
return(upres)
}
###iterOPT: output optimal result based on Iterative optimization
iterOPT<-function(SNP,somatic,segment,realDIF,realsub,randomdata,times,PD,PS,DNAalpha,cutoff){
out=list()
out1=list()
out2=list()
run=1
PD2=PD
PS2=PS
while(run < cutoff){
alpha1=somaticPurity(somatic=somatic,segment=segment,DNAalpha=DNAalpha)
res1=upACN(data=SNP,segment=segment,alpha=alpha1)
ploidy1=res1$ploidy
segment1=res1$upsegment
somaticres1=MCN(alpha=alpha1,segment=segment1,somatic=somatic)
somaticres1=somaticres1[match(paste(somatic$chr,somatic$pos,sep=":"),paste(somaticres1$chr,somaticres1$pos,sep=":")),]
realDIF1=sum(abs(somaticres1$SMCN-somaticres1$SACN))/dim(somaticres1)[1]
registerDoMC(cores = 4)
randomout1=foreach (j = 1:dim(randomdata)[2], .combine=cbind) %dopar% RMCN(R=randomdata[,j],somatic=somaticres1,alpha=alpha1,segment=segment1)
randomDIF1=apply(randomout1,2,difCN)
aveDIF1=apply(randomDIF1,2,sum)/dim(somaticres1)[1]
realDIF1=sum(abs(somaticres1$SMCN-somaticres1$SACN))/dim(somaticres1)[1]
realsub1=sum(somaticres1$SACN==0)/dim(somaticres1)[1]
if (realDIF1<realDIF | realsub1<realsub){
out$segment=segment1
out$somatic=somaticres1
out$alpha=alpha1
out$ploidy=ploidy1
}
randomsub1=apply(randomout1,2,subclone)
PD1=1-length(aveDIF1[aveDIF1>=realDIF1])/length(aveDIF1)
PS1=length(randomsub1[randomsub1<sum(somaticres1$SACN==0)])/times
if (PD1 < 0.05 & PS1 < 0.05){
out1$segment=segment1
out1$somatic=somaticres1
out1$alpha=alpha1
out1$ploidy=ploidy1
print.noquote(paste("iteration times = ",run,sep=""))
p=PD1+PS1
print.noquote(paste("p value = ",p,sep=""))
return(out1)
break
}
if ((PD1 + PS1) < (PD2+PS2)){
out2$segment=segment1
out2$somatic=somaticres1
out2$alpha=alpha1
out2$ploidy=ploidy1
PD2=PD1
PS2=PS1
}
realDIF=realDIF1
realsub=realsub1
somatic=somaticres1
segment=segment1
if(dim(somatic)[1]>=100){
if (abs(alpha1-DNAalpha)<0.01){
if (length(out2)!=0){
print.noquote(paste("iteration times = ",run,sep=""))
p=PD1+PS1
print.noquote(paste("p value = ",p,sep=""))
return(out2)
}else{
print.noquote(paste("iteration times = ",run,sep=""))
p=PD1+PS1
print.noquote(paste("p value = ",p,sep=""))
return(out)
}
}else{
DNAalpha=alpha1
run=run+1
}
}else if (dim(somatic)[1]<100&dim(somatic)[1]>=20){
if (abs(alpha1-DNAalpha)<0.001){
if (length(out2)!=0){
print.noquote(paste("iteration times = ",run,sep=""))
p=PD1+PS1
print.noquote(paste("p value = ",p,sep=""))
return(out2)
}else{
print.noquote(paste("iteration times = ",run,sep=""))
p=PD1+PS1
print.noquote(paste("p value = ",p,sep=""))
return(out)
}
}else{
DNAalpha=alpha1
run=run+1
}
}else{
if (abs(alpha1-DNAalpha)<0.0001){
if (length(out2)!=0){
print.noquote(paste("iteration times = ",run,sep=""))
p=PD1+PS1
print.noquote(paste("p value = ",p,sep=""))
return(out2)
}else{
print.noquote(paste("iteration times = ",run,sep=""))
p=PD1+PS1
print.noquote(paste("p value = ",p,sep=""))
return(out)
}
}else{
DNAalpha=alpha1
run=run+1
}
}
}
if (run==cutoff){
if (length(out2)!=0){
print.noquote(paste("iteration times = ",run,sep=""))
p=PD1+PS1
print.noquote(paste("p value = ",p,sep=""))
return(out2)
}else{
print.noquote(paste("iteration times = ",run,sep=""))
p=PD1+PS1
print.noquote(paste("p value = ",p,sep=""))
return(out)
}
}
}
sequenzRun<-function(data,sample,chromosome){
outdata=data.frame(chromosome=data$chr,position=data$pos,base.ref=data$ref,depth.normal=data$Nsum,depth.tumor=data$Tsum,depth.ratio=(data$Nsum)/(data$Tsum),Af=1-data$tfrac,Bf=data$tfrac)
outdata$zygosity.normal[data$nfrac>=0.2&data$nfrac<=0.8]="het"
outdata$zygosity.normal[data$nfrac<0.2|data$nfrac>0.8]="hom"
outdata$GC.percent=50
outdata$good.reads=outdata$depth.tumor
genotype=data.frame(ref=data$ref[outdata$zygosity.normal=="het"],alt=data$alt[outdata$zygosity.normal=="het"])
genotype=t(apply(genotype,1,sort))
AB.normal=paste(genotype[,1],genotype[,2],sep="")
outdata$AB.normal[outdata$zygosity.normal=="het"]=AB.normal
genotype=data.frame(ref=data$ref[outdata$zygosity.normal=="hom"],alt=data$alt[outdata$zygosity.normal=="hom"])
subdata=data[outdata$zygosity.normal=="hom",]
if (dim(subdata)[1]>0){
subdata$genotype[subdata$nfrac<0.2]=as.character(subdata$ref[subdata$nfrac<0.2])
subdata$genotype[subdata$nfrac>0.8]=as.character(subdata$alt[subdata$nfrac>0.8])
outdata$AB.normal[outdata$zygosity.normal=="hom"]=subdata$genotype
}
AB.tumor <- rep(".", length(outdata$AB.normal))
outdata$AB.tumor=AB.tumor
strand <- AB.tumor
outdata$tumor.strand=strand
normal.pos <- outdata$zygosity.normal == "hom" & outdata$AB.tumor == "."
if (sum(normal.pos)>0){
outdata <-outdata[outdata$depth.ratio > 0 & !is.infinite(outdata$depth.ratio) & !normal.pos, ]
}
outname=paste(sample,".seqz",sep="")
write.table(outdata, outname, col.names = TRUE, row.names = FALSE, sep = "\t")
seqz.data<-read.seqz(outname,gz=FALSE)
##GC correction and normalization depth ratio
print.noquote("GC correction")
gc.stats<-gc.sample.stats(seqz.data)
#gc.stats <- gc.norm(x = seqz.data$depth.ratio,gc = seqz.data$GC.percent)
gc.vect <- setNames(gc.stats$raw.mean, gc.stats$gc.values)
seqz.data$adjusted.ratio <- seqz.data$depth.ratio / gc.vect[as.character(seqz.data$GC.percent)]
##extract information of sequenza input
print.noquote("extract sequenza input")
seqz.data$chr=seqz.data$chromosome
test <- sequenza.extract(seqz.data,gc.stats,chroso=intersect(chromosome,seqz.data$chromosome))
##infer tumor purity and ploidy
print.noquote("sequenza infering")
CP.example <- sequenza.fit(test,mc.cores = getOption("mc.cores", 1L),female=FALSE)
cint <- get.ci(CP.example)
alpha <- cint$max.cellularity
ploidy <- cint$max.ploidy
avg.depth.ratio <- mean(test$gc$adj[, 2])
seg.tab <- na.exclude(do.call(rbind, test$segments))
print.noquote("integrating result")
cn.alleles <- baf.bayes(Bf = seg.tab$Bf, depth.ratio = seg.tab$depth.ratio,cellularity = alpha, ploidy = ploidy,avg.depth.ratio = avg.depth.ratio)
seg.tab <- cbind(seg.tab, cn.alleles)
segment=data.frame(chr=seg.tab$chromosome,startpos=seg.tab$start.pos,endpos=seg.tab$end.pos,nMajor=seg.tab$A,nMinor=seg.tab$B)
index=which(segment$nMajor==0&segment$nMinor==0)
rowindex=setdiff(c(1:dim(segment)[1]),index)
segment=segment[rowindex,]
DNAout=list(alpha=alpha,ploidy=ploidy,segment=segment)
return(DNAout)
}
###DNArun: get final result at the DNA level
outputTitanSegments <- function(results, id, convergeParams, filename = NULL, igvfilename = NULL){
# get all possible states in this set of results
stateTable <- unique(results[, c("TITANstate", "TITANcall")])
rownames(stateTable) <- stateTable[, 1]
rleResults <- t(sapply(unique(results$Chr), function(x){
ind <- results$Chr == x
r <- rle(results$TITANstate[ind])
}))
rleLengths <- unlist(rleResults[, "lengths"])
rleValues <- unlist(rleResults[, "values"])
numSegs <- length(rleLengths)
# convert allelic ratio to symmetric ratios #
results$AllelicRatio <- apply(cbind(results$AllelicRatio, 1-results$AllelicRatio), 1, max, na.rm = TRUE)
segs <- as.data.frame(matrix(NA, ncol = 14, nrow = numSegs,
dimnames = list(c(), c("Sample", "Chromosome", "Start_Position.bp.", "End_Position.bp.",
"Length.snp.", "Median_Ratio", "Median_logR", "TITAN_state", "TITAN_call", "Copy_Number",
"MinorCN", "MajorCN", "Clonal_Cluster", "Cellular_Frequency"))))
segs$Sample <- id
colNames <- c("Chr", "Position", "TITANstate", "AllelicRatio", "LogRatio")
prevInd <- 0
for (j in 1:numSegs){
start <- prevInd + 1
end <- prevInd + rleLengths[j]
segDF <- results[start:end, ]
prevInd <- end
numR <- nrow(segDF)
segs[j, "Chromosome"] <- as.character(segDF[1, "Chr"])
segs[j, "Start_Position.bp."] <- segDF[1, "Position"]
segs[j, "TITAN_state"] <- rleValues[j]
segs[j, "TITAN_call"] <- segDF[1, "TITANcall"]#stateTable[as.character(rleValues[j]), 2]
segs[j, "Copy_Number"] <- segDF[1, "CopyNumber"]
segs[j, "Median_Ratio"] <- round(median(segDF$AllelicRatio, na.rm = TRUE), digits = 6)
segs[j, "Median_logR"] <- round(median(segDF$LogRatio, na.rm = TRUE), digits = 6)
segs[j, "MinorCN"] <- getMajorMinorCN(rleValues[j], convergeParams$symmetric)$majorCN
segs[j, "MajorCN"] <- getMajorMinorCN(rleValues[j], convergeParams$symmetric)$minorCN
segs[j, "Clonal_Cluster"] <- segDF[1, "ClonalCluster"]
segs[j, "Cellular_Frequency"] <- segDF[1, "CellularPrevalence"]
if (segDF[1, "Chr"] == segDF[numR, "Chr"]){
segs[j, "End_Position.bp."] <- segDF[numR, "Position"]
segs[j, "Length.snp."] <- numR
}else{ # segDF contains 2 different chromosomes
print(j)
}
}
if (!is.null(filename)){
# write out detailed segment file #
write.table(segs, file = filename, col.names = TRUE, row.names = FALSE, quote = FALSE, sep = "\t")
}
# write out IGV seg file #
if (!is.null(igvfilename)){
igv <- segs[, c("Sample", "Chromosome", "Start_Position.bp.",
"End_Position.bp.", "Length.snp.", "Median_logR")]
colnames(igv) <- c("sample", "chr", "start", "end", "num.snps", "median.logR")
write.table(igv, file = igvfilename, col.names = TRUE, row.names = FALSE, quote = FALSE, sep = "\t")
}
return(segs)
}
getMajorMinorCN <- function(state, symmetric = TRUE){
majorCN <- NA
minorCN <- NA
if (symmetric){
if (state==0){
majorCN = 0; minorCN = 0;
}else if (state==1){
majorCN = 0; minorCN = 1;
}else if(state==2){
majorCN = 0; minorCN = 2;
}else if (state==3){
majorCN = 1; minorCN = 1;
}else if (state==4){
majorCN = 0; minorCN = 3;
}else if (state==5){
majorCN = 1; minorCN = 2;
}else if (state==6){
majorCN = 0; minorCN = 4;
}else if (state==7){
majorCN = 1; minorCN = 3;
}else if (state==8){
majorCN = 2; minorCN = 2;
}else if (state==9){
majorCN = 0; minorCN = 5;
}else if (state==10){
majorCN = 1; minorCN = 4;
}else if (state==11){
majorCN = 2; minorCN = 3;
}else if (state==12){
majorCN = 0; minorCN = 6;
}else if (state==13){
majorCN = 1; minorCN = 5;
}else if (state==14){
majorCN = 2; minorCN = 4;
}else if (state==15){
majorCN = 3; minorCN = 3;
}else if (state==16){
majorCN = 0; minorCN = 7;
}else if (state==17){
majorCN = 1; minorCN = 6;
}else if (state==18){
majorCN = 2; minorCN = 5;
}else if (state==19){
majorCN = 3; minorCN = 4;
}else if (state==20){
majorCN = 0; minorCN = 8;
}else if (state==21){
majorCN = 1; minorCN = 7;
}else if (state==22){
majorCN = 2; minorCN = 6;
}else if (state==23){
majorCN = 3; minorCN = 5;
}else if (state==24){
majorCN = 4; minorCN = 4;
}
}else{
#stop("symmetric=FALSE not yet supported.")
}
return(list(majorCN = majorCN, minorCN = minorCN))
}
TITANout<-function(DNAinput,chromosome){
DNAinput=DNAinput[DNAinput$nfrac>=0.2&DNAinput$nfrac<=0.8,]
rlength=nchar(as.character(DNAinput$ref))
alength=nchar(as.character(DNAinput$alt))
DNAinput=DNAinput[rlength==1&alength==1,]
titandata=data.frame(chr=DNAinput$chr,pos=DNAinput$pos,ref=DNAinput$ref,refCount=DNAinput$refNumT,Nref=DNAinput$alt,NrefCount=DNAinput$altNumT)
chro=intersect(chromosome,names(table(titandata$chr))[table(titandata$chr)>1])
index=match(as.character(titandata$chr),chro)
titandata=titandata[!is.na(index),]
DNAinput=DNAinput[!is.na(index),]
write.table(titandata,"TITAN.input",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
numClusters <- 2
params <- loadDefaultParameters(copyNumber=5,numberClonalClusters=numClusters)
data <- loadAlleleCounts("TITAN.input")
data$logR=log((DNAinput$refNumT+DNAinput$altNumT)/(DNAinput$refNumN+DNAinput$altNumN),base=10)
convergeParams <- runEMclonalCN(data,gParams=params$genotypeParams,nParams=params$normalParams,
pParams=params$ploidyParams,sParams=params$cellPrevParams,maxiter=20,maxiterUpdate=1500,
txnExpLen=1e15,txnZstrength=1e5,useOutlierState=FALSE,normalEstimateMethod="map",
estimateS=TRUE,estimatePloidy=TRUE)
optimalPath <- viterbiClonalCN(data,convergeParams)
results <- outputTitanResults(data,convergeParams,optimalPath,filename=NULL,posteriorProbs=F)
results$AllelicRatio=as.numeric(results$AllelicRatio)
results$LogRatio=as.numeric(results$LogRatio)
segs <- outputTitanSegments(results, id = "test", convergeParams,filename=NULL)
ploidy <- tail(convergeParams$phi, 1)
normal <- tail(convergeParams$n, 1)
#mean(as.numeric(segs$Cellular_Frequency[!is.na(segs$Cellular_Frequency)])*(1-normal))
DNAout=list(segment=segs,ploidy=ploidy,alpha=1-normal)
return(DNAout)
}
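# Example call to TITANout(); a hedged sketch only. "snp.df" is a hypothetical
# data.frame of germline heterozygous SNP counts with the columns used above
# (chr, pos, ref, alt, refNumN, altNumN, refNumT, altNumT, nfrac); the TitanCNA
# and doMC packages are assumed to be loaded.
if (FALSE) {
  titan.res <- TITANout(DNAinput = snp.df, chromosome = paste("chr", c(1:22, "X"), sep = ""))
  titan.res$alpha   # estimated tumour purity (1 - normal contamination)
  titan.res$ploidy  # estimated tumour ploidy
}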
FACETSout<-function(DNAinput){
facetsinput=data.frame(chr=DNAinput$chr,pos=DNAinput$pos,Nsum=DNAinput$Nsum,NAP=DNAinput$altNumN,Tsum=DNAinput$Tsum,TAP=DNAinput$altNumT)
write.table(facetsinput,"FACETS.input",sep="\t",col.names=FALSE,row.names=FALSE,quote=FALSE)
set.seed(1234)
xx=preProcSample(file="FACETS.input")
oo=procSample(xx,cval=150)
fit<-try(emcncf(oo),silent=TRUE)
alpha=fit$purity
if (!is.na(alpha)){
ploidy=fit$ploidy
    # segment start/end positions live in the cncf table returned by emcncf()
    segment=data.frame(chr=fit$cncf$chrom,startpos=fit$cncf$start,endpos=fit$cncf$end,nMajor=(fit$cncf$tcn.em-fit$cncf$lcn.em),nMinor=fit$cncf$lcn.em)
segment=segment[!is.na(segment$nMajor)&!is.na(segment$nMinor)&segment$nMajor!=0&segment$nMinor!=0,]
DNAout=list(alpha=alpha,segment=segment,ploidy=ploidy)
return (DNAout)
}
}
ASCATrun<-function(DNAinput,sample,chromosome){
ASCATdata=ASCATin(data=DNAinput,sample=sample)
##run ASCAT
ASCATres<-try(ASCATout(ASCATdata=ASCATdata,sample=sample,chromosome=chromosome),silent=TRUE)
if (mode(ASCATres)=="list"){
return(ASCATres)
}
}
DNACNA<-function(DNAinput,sample,tempth,chromosome,i){
if (i==1){
print.noquote("Run germline data")
ASCATres<-try(ASCATrun(DNAinput,sample,chromosome),silent=TRUE)
return(ASCATres)
}else if (i==2){
registerDoMC(cores = 4)
TITANres<-try(TITANout(DNAinput=DNAinput,chromosome=chromosome),silent=TRUE)
return (TITANres)
}else if (i ==3){
FACETSres<-try(FACETSout(DNAinput=DNAinput),silent=TRUE)
return (FACETSres)
}else{
sequenzres<-try(sequenzRun(data=DNAinput,sample=sample,chromosome=chromosome),silent=TRUE)
return (sequenzres)
}
}
somaticCN<-function(res,i,somaticdata,method){
if (mode(res[[i]])=="list"){
if (method[i]=="TITAN" & !is.null(res[[i]])&!is.null(res[[i]]$alpha)){
DNAalpha=res[[i]]$alpha
DNAploidy=res[[i]]$ploidy
segment=res[[i]]$segment
segment=data.frame(chr=paste("chr",segment$Chromosome,sep=""),startpos=as.numeric(as.character(segment$Start_Position.bp.)),endpos=as.numeric(as.character(segment$End_Position.bp.)),nMajor=segment$MajorCN,nMinor=segment$MinorCN)
somaticres=MCN(alpha=DNAalpha,segment=segment,somatic=somaticdata)
return (somaticres)
}else if (method[i]=="FACETS" & !is.null(res[[i]])&!is.null(res[[i]]$alpha)){
DNAalpha=res[[i]]$alpha
DNAploidy=res[[i]]$ploidy
segment=res[[i]]$segment
segment=data.frame(chr=paste("chr",segment$chr,sep=""),startpos=segment$startpos,endpos=segment$endpos,nMajor=segment$nMajor,nMinor=segment$nMinor)
somaticres=MCN(alpha=DNAalpha,segment=segment,somatic=somaticdata)
return (somaticres)
}else if (!is.null(res[[i]])&!is.null(res[[i]]$alpha)){
DNAalpha=res[[i]]$alpha
DNAploidy=res[[i]]$ploidy
segment=res[[i]]$segment
somaticres=MCN(alpha=DNAalpha,segment=segment,somatic=somaticdata)
return (somaticres)
}
}
}
randomCN<-function(res,i,randomdata,somaticres,method){
if (mode(res[[i]])=="list"){
if (method[i]=="TITAN" & !is.null(res[[i]])&!is.null(res[[i]]$alpha)&!is.null(somaticres[[i]])){
if (dim(somaticres[[i]])[1]>0){
DNAalpha=res[[i]]$alpha
segment=res[[i]]$segment
segment=data.frame(chr=paste("chr",segment$Chromosome,sep=""),startpos=as.numeric(as.character(segment$Start_Position.bp.)),endpos=as.numeric(as.character(segment$End_Position.bp.)),nMajor=segment$MajorCN,nMinor=segment$MinorCN)
registerDoMC(cores = 4)
randomout=foreach (j = 1:dim(randomdata[[i]])[2], .combine=cbind) %dopar% RMCN(R=randomdata[[i]][,j],somatic=somaticres[[i]],alpha=DNAalpha,segment=segment)
#randomout=apply(randomdata[[i]],2,RMCN,somatic=somaticres[[i]],alpha=DNAalpha,segment=segment)
return(randomout)
}
}else if (method[i]=="FACETS" & !is.null(res[[i]])&!is.null(res[[i]]$alpha)&!is.null(somaticres[[i]])){
if (dim(somaticres[[i]])[1]>0){
DNAalpha=res[[i]]$alpha
segment=res[[i]]$segment
segment=data.frame(chr=paste("chr",segment$chr,sep=""),startpos=segment$startpos,endpos=segment$endpos,nMajor=segment$nMajor,nMinor=segment$nMinor)
registerDoMC(cores = 4)
randomout=foreach (j = 1:dim(randomdata[[i]])[2], .combine=cbind) %dopar% RMCN(R=randomdata[[i]][,j],somatic=somaticres[[i]],alpha=DNAalpha,segment=segment)
#randomout=apply(randomdata[[i]],2,RMCN,somatic=somaticres[[i]],alpha=DNAalpha,segment=segment)
return(randomout)
}
}else if (!is.null(res[[i]])&!is.null(res[[i]]$alpha)&!is.null(somaticres[[i]])){
if (dim(somaticres[[i]])[1]>0){
DNAalpha=res[[i]]$alpha
segment=res[[i]]$segment
registerDoMC(cores = 4)
randomout=foreach (j = 1:dim(randomdata[[i]])[2], .combine=cbind) %dopar% RMCN(R=randomdata[[i]][,j],somatic=somaticres[[i]],alpha=DNAalpha,segment=segment)
return(randomout)
}
}
}
}
randomDIF<-function(randomout,somaticres,i){
if (!is.null(randomout[[i]])){
    if (dim(somaticres[[i]])[1]>1){
randomDIF=apply(randomout[[i]],2,difCN)
aveDIF=apply(randomDIF,2,sum)/dim(somaticres[[i]])[1]
return(aveDIF)
}
}
}
realDiff<-function(somaticres,i){
if (!is.null(somaticres[[i]])){
realDIF=sum(abs(somaticres[[i]]$SMCN-somaticres[[i]]$SACN))/dim(somaticres[[i]])[1]
return(realDIF)
}
}
realSub<-function(somaticres,i){
if (!is.null(somaticres[[i]])){
realsub=sum(somaticres[[i]]$SACN==0)/dim(somaticres[[i]])[1]
return(realsub)
}
}
subcloneP<-function(x){
sub0<-function(y){
return(sum(round(y)==0))
}
if (!is.null(x)){
randomsub=apply(x,2,sub0)
return(randomsub)
}
}
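# subcloneP() counts, per random replicate (column), how many mutation copy
# numbers round to zero (i.e. look subclonal). A minimal sketch, not run:
if (FALSE) {
  toy <- cbind(c(0.1, 0.9, 2.1), c(0.4, 0.2, 1.8))  # rows = mutations, columns = replicates
  subcloneP(toy)  # expected counts: 1 and 2
}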
DNArun<-function(SNPinput,somaticinput,sample,temppath){
DNAinput=read.csv(SNPinput,sep="\t",header=TRUE)
colnames(DNAinput)=c("chr","pos","ref","alt","refNumN","altNumN","refNumT","altNumT")
chromosome=paste("chr",c(1:22,"X","Y"),sep="")
if (length(levels(factor(DNAinput$chr)))>5){
DNAinput$Tsum=DNAinput$refNumT+DNAinput$altNumT
DNAinput$Nsum=DNAinput$refNumN+DNAinput$altNumN
DNAinput=DNAinput[DNAinput$Tsum!=0,]
DNAinput$tfrac=(DNAinput$altNumT)/(DNAinput$Tsum)
DNAinput$nfrac=(DNAinput$altNumN)/(DNAinput$Nsum)
DNAinput$TlogR=log2(DNAinput$Tsum/DNAinput$Nsum)
DNAinput$BAF[DNAinput$tfrac>=0.5]=DNAinput$tfrac[DNAinput$tfrac>=0.5]
DNAinput$BAF[DNAinput$tfrac<0.5]=1-DNAinput$tfrac[DNAinput$tfrac<0.5]
DNAinput$Ratio=(DNAinput$altNumT+DNAinput$refNumT)/(DNAinput$altNumN+DNAinput$refNumN)
DNAinput=DNAinput[DNAinput$Tsum>=10&DNAinput$Nsum>=10,]
DNAinput=DNAinput[sapply(as.character(DNAinput$chr),nchar)<=5,]
#rlength=nchar(as.character(DNAinput$ref))
#alength=nchar(as.character(DNAinput$alt))
#DNAinput=DNAinput[rlength==1&alength==1,]
setwd(temppath)
methods=c("ASCAT","TITAN","FACETS","sequenz")
somaticdata=read.csv(somaticinput,header=TRUE,sep="\t")
colnames(somaticdata)=c("chr","pos","ref","alt","refNumN","altNumN","refNumT","altNumT")
somaticdata$Tsum=somaticdata$altNumT+somaticdata$refNumT
somaticdata=somaticdata[somaticdata$Tsum>=10,]
somaticdata$tfrac=somaticdata$altNumT/somaticdata$Tsum
#rlength=nchar(as.character(somaticdata$ref))
#alength=nchar(as.character(somaticdata$alt))
#somaticdata=somaticdata[rlength==1&alength==1,]
registerDoMC(cores = 4)
times=1000
res=foreach(i = 1:4) %dopar% DNACNA(DNAinput=DNAinput,sample=sample,tempth=temppath,chromosome=chromosome,i)
print.noquote("Somatic mutation copy number")
somaticres<-foreach(i =1:length(res)) %dopar% somaticCN(res=res,i,somaticdata=somaticdata,method=methods)
somaticindex=0
for (i in 1:length(somaticres)){
if (!is.null(somaticres[[i]])){
if (dim(somaticres[[i]])[1]>0){
somaticindex=1
break
}
}
}
if (somaticindex!=0){
randomdata<-foreach(i=1:length(somaticres)) %dopar% randomSample(data=data.frame(Tsum=somaticres[[i]]$Tsum,tfrac=somaticres[[i]]$tfrac),times=times)
randomout<-foreach(i =1:length(somaticres)) %dopar% randomCN(res,i,randomdata,somaticres,method=methods)
aveDIF<-foreach(i=1:length(somaticres)) %dopar% randomDIF(randomout,somaticres,i)
realDIF<-foreach(i=1:length(somaticres)) %dopar% realDiff(somaticres,i)
realsub<-foreach(i=1:length(somaticres)) %dopar% realSub(somaticres,i)
randomsub<-foreach(i = 1:length(somaticres)) %dopar% subcloneP(x=randomout[[i]])
PDl=list()
PSl=list()
for (i in 1:length(realDIF)){
if (!is.null(realDIF[[i]])){
PDl[[i]]=1-length(aveDIF[[i]][aveDIF[[i]]>=realDIF[[i]]])/length(aveDIF[[i]])
PSl[[i]]=length(randomsub[[i]][randomsub[[i]]<sum(somaticres[[i]]$SACN==0)])/times
}
}
modelindex=c()
k=1
for (i in 1:length(PDl)){
if (!is.null(PDl[[i]])){
if(!is.na(PDl[[i]])){
modelindex[k]=i
k=k+1
}
}
}
PD=c()
PS=c()
for (k in 1:length(modelindex)){
PD[k]=PDl[[modelindex[k]]]
PS[k]=PSl[[modelindex[k]]]
}
minimum=min(PD+PS)
selmeth=methods[modelindex[which(PD+PS==minimum)]]
if ("ASCAT" %in% selmeth){
DNAout=res[[1]]
DNAalpha=res[[1]]$alpha
DNAploidy=res[[1]]$ploidy
segment=res[[1]]$segment
somaticres=somaticres[[1]]
realDIF=realDIF[[1]]
realsub=realsub[[1]]
randomdata=randomdata[[1]]
PD=PDl[[1]]
PS=PSl[[1]]
resmthod="ASCAT"
}else if ("FACETS" %in% selmeth){
DNAout=res[[3]]
DNAalpha=res[[3]]$alpha
DNAploidy=res[[3]]$ploidy
segment=res[[3]]$segment
segment=data.frame(chr=paste("chr",segment$chr,sep=""),startpos=segment$startpos,endpos=segment$endpos,nMajor=segment$nMajor,nMinor=segment$nMinor)
somaticres=somaticres[[3]]
realDIF=realDIF[[3]]
realsub=realsub[[3]]
randomdata=randomdata[[3]]
PD=PDl[[3]]
PS=PSl[[3]]
resmthod="FACETS"
}else if ("TITAN" %in% selmeth){
DNAout=res[[2]]
DNAalpha=res[[2]]$alpha
DNAploidy=res[[2]]$ploidy
segment=res[[2]]$segment
segment=data.frame(chr=paste("chr",segment$Chromosome,sep=""),startpos=as.numeric(as.character(segment$Start_Position.bp.)),endpos=as.numeric(as.character(segment$End_Position.bp.)),nMajor=segment$MajorCN,nMinor=segment$MinorCN)
somaticres=somaticres[[2]]
realDIF=realDIF[[2]]
realsub=realsub[[2]]
randomdata=randomdata[[2]]
PD=PDl[[2]]
PS=PSl[[2]]
resmthod="TITAN"
}else{
DNAout=res[[4]]
DNAalpha=res[[4]]$alpha
DNAploidy=res[[4]]$ploidy
segment=res[[4]]$segment
somaticres=somaticres[[4]]
realDIF=realDIF[[4]]
realsub=realsub[[4]]
randomdata=randomdata[[4]]
PD=PDl[[4]]
PS=PSl[[4]]
resmthod="sequenz"
}
DNAout=list()
DNAout$segment=segment
DNAout$alpha=DNAalpha
DNAout$ploidy=DNAploidy
DNAout$somatic=somaticres
DNAout$method=resmthod
if (PD >= 0.05 | PS >= 0.05){
print.noquote("Iter")
iterout=iterOPT(SNP=DNAinput,somatic=somaticres,segment=segment,realDIF=realDIF,realsub=realsub,randomdata=randomdata,times=times,PD=PD,PS=PS,DNAalpha=DNAalpha,cutoff=50)
if (length(iterout)==0){
return(DNAout)
}else{
iterout$method=resmthod
return(iterout)
}
}else{
return(DNAout)
}
}else{
DNAres=list()
k=1
for (i in 1:4){
if (mode(res[[i]])=="list"){
if (!is.null(res[[i]]$alpha)){
res[[i]]$method=methods[i]
DNAres[[k]]=res[[i]]
k=k+1
}
}
}
ll=c()
for (j in 1:length(DNAres)){
ll[j]=dim(DNAres[[j]]$segment)[1]
}
DNAout=DNAres[[which.max(ll)]]
return(DNAout)
}
}else{
    print.noquote(paste(sample,":Germline data is insufficient",sep=""))
}
}
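# Example invocation of the combined pipeline; a hedged sketch with hypothetical
# file names only. SNPinput and somaticinput are tab-separated allele-count
# tables whose eight columns are renamed as above, and temppath is a writable
# working directory used for the intermediate caller inputs.
if (FALSE) {
  dna.res <- DNArun(SNPinput = "sample1.snp.tsv", somaticinput = "sample1.somatic.tsv",
                    sample = "sample1", temppath = "work/sample1")
  dna.res$method  # copy-number caller selected by the PD + PS criterion
  dna.res$alpha   # tumour purity of the selected solution
}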
DNArun1<-function(SNPinput,somaticinput,sample,temppath){
DNAinput=read.csv(SNPinput,sep="\t",header=TRUE)
colnames(DNAinput)=c("chr","pos","ref","alt","refNumN","altNumN","refNumT","altNumT")
chromosome=paste("chr",c(1:22,"X","Y"),sep="")
if (length(levels(factor(DNAinput$chr)))>5){
DNAinput$Tsum=DNAinput$refNumT+DNAinput$altNumT
DNAinput$Nsum=DNAinput$refNumN+DNAinput$altNumN
DNAinput$tfrac=(DNAinput$altNumT)/(DNAinput$Tsum)
DNAinput$nfrac=(DNAinput$altNumN)/(DNAinput$Nsum)
DNAinput$TlogR=log2(DNAinput$Tsum/DNAinput$Nsum)
DNAinput=DNAinput[DNAinput$Tsum!=0,]
DNAinput$BAF[DNAinput$tfrac>=0.5]=DNAinput$tfrac[DNAinput$tfrac>=0.5]
DNAinput$BAF[DNAinput$tfrac<0.5]=1-DNAinput$tfrac[DNAinput$tfrac<0.5]
DNAinput$Ratio=(DNAinput$altNumT+DNAinput$refNumT)/(DNAinput$altNumN+DNAinput$refNumN)
DNAinput=DNAinput[DNAinput$Tsum>=10&DNAinput$Nsum>=10,]
DNAinput=DNAinput[sapply(as.character(DNAinput$chr),nchar)<=5,]
#rlength=nchar(as.character(DNAinput$ref))
#alength=nchar(as.character(DNAinput$alt))
#DNAinput=DNAinput[rlength==1&alength==1,]
setwd(temppath)
methods=c("ASCAT","TITAN","FACETS","sequenz")
somaticdata=read.csv(somaticinput,header=TRUE,sep="\t")
colnames(somaticdata)=c("chr","pos","ref","alt","refNumN","altNumN","refNumT","altNumT")
somaticdata$Tsum=somaticdata$altNumT+somaticdata$refNumT
somaticdata$tfrac=somaticdata$altNumT/somaticdata$Tsum
somaticdata=somaticdata[somaticdata$Tsum>=10,]
#rlength=nchar(as.character(somaticdata$ref))
#alength=nchar(as.character(somaticdata$alt))
#somaticdata=somaticdata[rlength==1&alength==1,]
registerDoMC(cores = 4)
times=1000
res=foreach(i = 1:4) %dopar% DNACNA(DNAinput=DNAinput,sample=sample,tempth=temppath,chromosome=chromosome,i)
newmethod=c()
newres=list()
k=1
DNAres=list()
for (i in 1:4){
if (!is.null(res[[i]])){
if (!is.null(res[[i]]$alpha)){
if (i ==1 ){
DNAres$ASCAT=res[[i]]
}else if (i ==2){
DNAres$TITAN=res[[i]]
}else if (i == 3){
DNAres$FACETS=res[[i]]
}else{
DNAres$sequenza=res[[i]]
}
newmethod[k]=methods[i]
newres[[k]]=res[[i]]
k=k+1
}
}
}
print.noquote("Somatic mutation copy number")
somaticres<-foreach(i =1:length(newmethod)) %dopar% somaticCN(res=newres,i,somaticdata=somaticdata,method=newmethod)
somaticindex=0
    for (i in 1:length(somaticres)){
if (!is.null(somaticres[[i]])){
if (dim(somaticres[[i]])[1]>0){
somaticindex=1
break
}
}
}
if (somaticindex!=0){
randomdata<-foreach(i=1:length(newmethod)) %dopar% randomSample(data=data.frame(Tsum=somaticres[[i]]$Tsum,tfrac=somaticres[[i]]$tfrac),times=times)
randomout<-foreach(i =1:length(newmethod)) %dopar% randomCN(newres,i,randomdata,somaticres,method=newmethod)
aveDIF<-foreach(i=1:length(newmethod)) %dopar% randomDIF(randomout,somaticres,i)
realDIF<-foreach(i=1:length(newmethod)) %dopar% realDiff(somaticres,i)
realsub<-foreach(i=1:length(newmethod)) %dopar% realSub(somaticres,i)
randomsub<-foreach(i = 1:length(newmethod)) %dopar% subcloneP(x=randomout[[i]])
PDl=list()
PSl=list()
for (i in 1:length(newmethod)){
if (!is.null(realDIF[[i]])){
PDl[[i]]=1-length(aveDIF[[i]][aveDIF[[i]]>=realDIF[[i]]])/length(aveDIF[[i]])
PSl[[i]]=length(randomsub[[i]][randomsub[[i]]<sum(somaticres[[i]]$SACN==0)])/times
}
}
modelindex=c()
k=1
for (i in 1:length(newmethod)){
if (!is.null(PDl[[i]])){
if (!is.na(PDl[[i]])){
modelindex[k]=i
k=k+1
}
}
}
PD=c()
PS=c()
for (k in 1:length(modelindex)){
PD[k]=PDl[[modelindex[k]]]
PS[k]=PSl[[modelindex[k]]]
}
minimum=min(PD+PS)
selmeth=newmethod[modelindex[which(PD+PS==minimum)]]
if ("ASCAT" %in% selmeth){
DNAout=newres[[which(newmethod=="ASCAT")]]
DNAalpha=newres[[which(newmethod=="ASCAT")]]$alpha
DNAploidy=newres[[which(newmethod=="ASCAT")]]$ploidy
segment=newres[[which(newmethod=="ASCAT")]]$segment
somaticres=somaticres[[which(newmethod=="ASCAT")]]
realDIF=realDIF[[which(newmethod=="ASCAT")]]
realsub=realsub[[which(newmethod=="ASCAT")]]
randomdata=randomdata[[which(newmethod=="ASCAT")]]
PD=PDl[[which(newmethod=="ASCAT")]]
PS=PSl[[which(newmethod=="ASCAT")]]
resmthod="ASCAT"
}else if ("FACETS" %in% selmeth){
DNAout=newres[[which(newmethod=="FACETS")]]
DNAalpha=newres[[which(newmethod=="FACETS")]]$alpha
DNAploidy=newres[[which(newmethod=="FACETS")]]$ploidy
segment=newres[[which(newmethod=="FACETS")]]$segment
segment=data.frame(chr=paste("chr",segment$chr,sep=""),startpos=segment$startpos,endpos=segment$endpos,nMajor=segment$nMajor,nMinor=segment$nMinor)
somaticres=somaticres[[which(newmethod=="FACETS")]]
realDIF=realDIF[[which(newmethod=="FACETS")]]
realsub=realsub[[which(newmethod=="FACETS")]]
randomdata=randomdata[[which(newmethod=="FACETS")]]
PD=PDl[[which(newmethod=="FACETS")]]
PS=PSl[[which(newmethod=="FACETS")]]
resmthod="FACETS"
}else if ("TITAN" %in% selmeth){
DNAout=newres[[which(newmethod=="TITAN")]]
DNAalpha=newres[[which(newmethod=="TITAN")]]$alpha
DNAploidy=newres[[which(newmethod=="TITAN")]]$ploidy
segment=newres[[which(newmethod=="TITAN")]]$segment
segment=data.frame(chr=paste("chr",segment$Chromosome,sep=""),startpos=as.numeric(as.character(segment$Start_Position.bp.)),endpos=as.numeric(as.character(segment$End_Position.bp.)),nMajor=segment$MajorCN,nMinor=segment$MinorCN)
somaticres=somaticres[[which(newmethod=="TITAN")]]
realDIF=realDIF[[which(newmethod=="TITAN")]]
realsub=realsub[[which(newmethod=="TITAN")]]
randomdata=randomdata[[which(newmethod=="TITAN")]]
PD=PDl[[which(newmethod=="TITAN")]]
PS=PSl[[which(newmethod=="TITAN")]]
resmthod="TITAN"
}else{
DNAout=newres[[which(newmethod=="sequenz")]]
DNAalpha=newres[[which(newmethod=="sequenz")]]$alpha
DNAploidy=newres[[which(newmethod=="sequenz")]]$ploidy
segment=newres[[which(newmethod=="sequenz")]]$segment
somaticres=somaticres[[which(newmethod=="sequenz")]]
realDIF=realDIF[[which(newmethod=="sequenz")]]
realsub=realsub[[which(newmethod=="sequenz")]]
randomdata=randomdata[[which(newmethod=="sequenz")]]
PD=PDl[[which(newmethod=="sequenz")]]
PS=PSl[[which(newmethod=="sequenz")]]
resmthod="sequenz"
}
DNAout=list()
DNAout$segment=segment
DNAout$alpha=DNAalpha
DNAout$ploidy=DNAploidy
DNAout$somatic=somaticres
DNAout$method=resmthod
#DNAres$DNAout=DNAout
return(DNAout)
}else{
DNAres=list()
k=1
for (i in 1:length(res)){
if (mode(res[[i]])=="list"){
if (!is.null(res[[i]]$alpha)){
res[[i]]$method=methods[i]
DNAres[[k]]=res[[i]]
k=k+1
}
}
}
ll=c()
for (j in 1:length(DNAres)){
ll[j]=dim(DNAres[[j]]$segment)[1]
}
DNAout=DNAres[[which.max(ll)]]
return(DNAout)
}
}else{
    print.noquote(paste(sample,":Germline data is insufficient",sep=""))
}
}
Consistent<-function(GCN,MCN){
lower=min(c(GCN,MCN))-1
upper=max(c(GCN,MCN))+1
da=density(GCN,from=lower,to=upper)
db=density(MCN,from=lower,to=upper)
d=data.frame(x=da$x,a=da$y,b=db$y)
d$w=pmin(d$a,d$b)
total=integrate.xy(d$x,d$a)+integrate.xy(d$x,d$b)
intersection=integrate.xy(d$x,d$w)
overlap=2*intersection/total
diff=(integrate.xy(d$x,d$b)-intersection)/integrate.xy(d$x,d$b)
return(list(overlap=overlap,diff=diff))
}
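# Consistent() measures how much the densities of two copy-number vectors
# overlap (and what fraction of the second density lies outside the overlap).
# A minimal sketch, not run; integrate.xy() comes from the sfsmisc package.
if (FALSE) {
  set.seed(1)
  a <- rnorm(500, mean = 2.0, sd = 0.3)  # e.g. DNA-derived copy numbers
  b <- rnorm(500, mean = 2.2, sd = 0.3)  # e.g. mutation copy numbers
  Consistent(a, b)$overlap  # near 1 for well-matched distributions, near 0 otherwise
}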
Heterogeneity<-function(DNAout){
if ("somatic" %in% names(DNAout)){
segment=DNAout$segment
somatic=DNAout$somatic
somatic1=c()
for (i in 1:dim(segment)[1]){
subdata=somatic[somatic$chr==as.character(segment$chr[i])&somatic$pos>=segment$startpos[i]&somatic$pos<=segment$endpos[i],]
if (dim(subdata)[1]>0){
subdata$Dmajor=segment$nMajor[i]
subdata$Dminor=segment$nMinor[i]
somatic1=rbind(somatic1,subdata)
}
}
DNAout$heterogeneity=1-Consistent(c(somatic1$Dmajor,somatic1$Dminor),somatic1$SMCN)$overlap
}else{
DNAout$heterogeneity=0
}
return(DNAout)
}
###############################################
wildtype<-function(data,segment,type,resout){
for (i in 1:dim(segment)[1]){
subdata=data[data$chr==as.character(segment$chr[i])&data$pos>=segment$startpos[i]&data$pos<=segment$endpos[i],]
Dmajor=segment$nMajor[i]
Dminor=segment$nMinor[i]
if (dim(subdata)[1]>0){
subres=subdata[,1:6]
if (type == "somatic"){
index=which(is.na(subdata$BayesP))
if (length(index)>0){
subdata$RTCN[index]=(Dmajor+Dminor)*2^(subdata$ratio[index])
subdata$RMCN[index]=subdata$RTCN[index]*subdata$tfrac[index]
subdata$BayesP[index]=1-10^(-abs(subdata$RMCN[index]-subdata$DMCN[index]))
}
index=which(subdata$RTCN==Inf|subdata$RTCN==-Inf)
if (length(index)>0){
subdata$RTCN[index]=(Dmajor+Dminor)*2^(subdata$ratio[index])
subdata$RMCN[index]=subdata$RTCN[index]*subdata$tfrac[index]
}
}
if (type=="germline"){
altD=subdata$Dminor
wildD=subdata$Dmajor
altD[subdata$alt==subdata$DMajorAllele]=subdata$Dmajor[subdata$alt==subdata$DMajorAllele]
wildD[subdata$alt==subdata$DMajorAllele]=subdata$Dminor[subdata$alt==subdata$DMajorAllele]
altR=abs(subdata$Rminor)
wildR=subdata$Rmajor
altR[subdata$alt==subdata$RMajorAllele]=subdata$Rmajor[subdata$alt==subdata$RMajorAllele]
wildR[subdata$alt==subdata$RMajorAllele]=abs(subdata$Rminor[subdata$alt==subdata$RMajorAllele])
subres=cbind(subres,rep("Germline",length=dim(subres)[1]))
subres=cbind(subres,altD,altR,wildD,wildR)
subres=cbind(subres,subdata$BayesP)
}else{
altD=subdata$DMCN
altR=subdata$RMCN
if (abs(altD-Dmajor)<abs(altD-Dminor)){
wildD=Dminor
}else{
wildD=Dmajor
}
wildR=subdata$RTCN-subdata$RMCN
subres=cbind(subres,rep("Somatic",length=dim(subres)))
subres=cbind(subres,altD,altR,wildD,wildR)
subres=cbind(subres,subdata$BayesP)
}
names(subres)=c("chr","pos","ref","alt","refNum","altNum","type","altD","altR","wildD","wildR","BayesP")
resout=rbind(resout,subres)
}
}
return(resout)
}
altalle<-function(x){
dif1=abs(x[2]-x[1])
dif2=abs(x[2]-x[3])
if (dif1<=dif2){
return(data.frame(alt=x[1],ref=x[3]))
}else{
return(data.frame(alt=x[3],ref=x[1]))
}
}
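# altalle() decides which DNA copy number (x[1] or x[3]) the expressed allele
# level x[2] is closest to, returning that one as "alt" and the other as "ref".
# A minimal sketch, not run:
if (FALSE) {
  altalle(c(2, 1.8, 1))  # expression near 2 -> alt = 2, ref = 1
  altalle(c(2, 0.9, 1))  # expression near 1 -> alt = 1, ref = 2
}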
WGD<-function(segment,SNPinput){
DNAinput=read.csv(SNPinput,sep="\t",header=TRUE)
colnames(DNAinput)=c("chr","pos","ref","alt","refNumN","altNumN","refNumT","altNumT")
segment$TCN=segment$Dmajor+segment$Dminor
if (dim(segment)[1]==1){
subdata=DNAinput[DNAinput$chr==as.character(segment$chr)&DNAinput$pos>=segment$start&DNAinput$pos<=segment$end,]
ll=dim(DNAinput)[1]
gdouble=dim(subdata)[1]
}else{
ll=sum(as.numeric(segment$end-segment$start))
gdouble=sum(as.numeric(segment$end[segment$TCN>2]-segment$start[segment$TCN>2]))
}
return(gdouble/ll)
}
DACRE<-function(resout){
germ=resout[resout$type=="Germline",]
germ1=do.call(rbind,apply(germ[,8:10],1,altalle))
germ1=cbind(germ$altR,germ$wildR,germ1,germ$BayesP)
germ1=as.data.frame(germ1)
names(germ1)=c("ASELm","ASELw","ASCNm","ASCNw","BayesP")
germ1$ASEmP=germ1$ASELm-exp(-abs(germ1$ASCNm-1))*germ1$ASCNm
germ1$ASEwP=germ1$ASELw-exp(-abs(germ1$ASCNw-1))*germ1$ASCNw
germ1$ASEm=germ1$ASELm-germ1$ASCNm
germ1$ASEw=germ1$ASELw-germ1$ASCNw
germ1$ASELm[germ1$ASELm==0]=0.01
  germ1$ASELw[germ1$ASELw==0]=0.01
germ1$AEI=log(germ1$ASELm/germ1$ASELw,base=2)
germ1$DACRE=germ1$ASEmP-germ1$ASEwP
germres=cbind(germ,germ1$ASEm,germ1$AEI,germ1$DACRE)
names(germres)=c(names(germ),"eASEL","AEI","DACRE")
soma=resout[resout$type=="Somatic",]
if (dim(soma)[1]>0){
soma$wildR[soma$wildR<0]=soma$altR[soma$wildR<0]*soma$wildD[soma$wildR<0]/soma$altD[soma$wildR<0]
soma1=soma
soma1$ASEmP=soma1$altR-exp(-abs(soma1$altD-1))*soma1$altD
soma1$ASEwP=soma1$wildR-exp(-abs(soma1$wildD-1))*soma1$wildD
soma1$ASEm=soma1$altR-soma1$altD
soma1$ASEw=soma1$wildR-soma1$wildD
soma1$wildR[soma1$wildR==0]=0.01
soma1$altR[soma1$altR==0]=0.01
soma1$AEI=log(soma1$altR/soma1$wildR,base=2)
soma1$DACRE=soma1$ASEmP-soma1$ASEwP
somares=cbind(soma,soma1$ASEm,soma1$AEI,soma1$DACRE)
names(somares)=c(names(soma),"eASEL","AEI","DACRE")
res=rbind(germres,somares)
}else{
res=germres
}
return(res)
}
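# Summary of the per-variant quantities computed by DACRE() above:
#   AEI   = log2(mutant-allele expression / wild-type-allele expression)
#   DACRE = (altR - exp(-|altD - 1|) * altD) - (wildR - exp(-|wildD - 1|) * wildD),
# i.e. each allele's expression relative to its DNA-copy-number expectation,
# differenced between the mutant and wild-type alleles (germline variants take
# their allele assignment from altalle()).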
|
# tools for text analysis of pubmed data
# source: pubmed abstracts and metadata
# objective - create tools to information extraction, visualization and knowledge creation
### packages
library(tidyverse)
library(rentrez)
library(tidytext)
library(XML)
### querying pubmed
# example: septic shock
# date: the month of may 2018
q <- '(septic shock AND ("2018/05/01"[PDAT] : "2018/05/31"[PDAT]))'
search_results <- entrez_search(db="pubmed", term = q, retmax = 1000, use_history = T)
# the search results
# search_results$ids
# getting the data from the search_results
q_summary <- entrez_summary(db="pubmed",web_history = search_results$web_history)
q_data <- entrez_fetch(db="pubmed", id = search_results$ids, rettype = "xml")
data_xml <- xmlParse(q_data)
xtop <- xmlRoot(data_xml)
#xtop
metadata_df <- data_frame(uid = sapply(q_summary, function(x) x$uid), title = sapply(q_summary, function(x) x$title))
#abstract_df <- data_frame(uid = xmlSApply(xtop, function(x) {x[[2]][['ArticleIdList']][[1]] %>% xmlValue()}),
# abstract = xmlSApply(xtop, function(x) xmlValue(x[[1]][['Article']][['Abstract']]) ))
# create the abstract data_frame
# abstract sections are collapsed with a single space; records without an abstract are flagged as "vazio"
abstract_df <- data_frame(uid = xmlSApply(xtop, function(x) {x[[2]][['ArticleIdList']][[1]] %>% xmlValue()}),
abstract = xmlSApply(xtop, function(x) {
if(is.na(xmlValue(x[[1]][['Article']][['Abstract']]))) { "vazio" }
else { getChildrenStrings(node = x[[1]][['Article']][['Abstract']]) %>% paste(collapse = " ") }
}
))
final_df <- inner_join(metadata_df,abstract_df)
# remove empty abstracts from final_df
final_df <- final_df[final_df$abstract !="vazio",]
#### remove all numbers from the abstracts
#a <- str_replace_all(string = final_df$abstract, pattern = "[0-9]+",replacement = "__NUMBER__")
#a <- str_replace_all(string = a, pattern = "__NUMBER__\\.__NUMBER__",replacement = "__NUMBER__")
#final_df$abstract <- str_replace_all(string = a, pattern = "__NUMBER__%",replacement = "__NUMBER__")
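### tokenize the abstracts with tidytext
# a hedged sketch of the next analysis step toward the stated objective;
# tidytext is loaded above but not yet used. One row per (uid, word), with
# standard English stop words removed.
tidy_abstracts <- final_df %>%
  unnest_tokens(word, abstract) %>%
  anti_join(stop_words, by = "word")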
| /start.R | no_license | gusmmm/entrez_critical_care | R | false | false | 2,119 | r |
## Below are two functions that are used to create a special
## object that stores a Matrix and cache's its inversion.
## This function creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
        get <- function() x
setinversion <- function(solve) i <<- solve
getinversion <- function() i
list(set = set, get = get,
setinversion = setinversion,
getinversion = getinversion)
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already
## been calculated (and the matrix has not changed), then the cachesolve
## should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
i <- x$getinversion()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
m <- solve(data, ...)
        x$setinversion(m)
        m
}
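## Example usage; a minimal sketch (not run automatically).
## The second cacheSolve() call should report "getting cached data".
if (FALSE) {
        cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
        cacheSolve(cm)  # computes solve() and caches it: diag(0.5, 2)
        cacheSolve(cm)  # returns the cached inverse without recomputing
}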
| /cachematrix.R | no_license | sjtuyanyan/ProgrammingAssignment2 | R | false | false | 965 | r |
estimate.psf <-
function (outenv=parent.env(environment()),n.bins=1,bloom.bin=FALSE,n.sources=5e2,onlyContams=TRUE,bin.type='SNR.quan',
lo=20,hi=200,type='num',check.one.sky=length(point.sources)>5*n.sources,blend.tolerance=0.5,
mask.tolerance=0.0,radial.tolerance=25,all.limit=0.15,env=NULL,plot=FALSE) {
message('--------------------------Estimate_PSF-------------------------------------')
# Load Parameter Space {{{
if(!is.null(env)) {
attach(env, warn.conflicts=FALSE)
}
if(is.null(outenv)&!is.null(env)) { outenv<-env }
else if (is.null(outenv)) {
warning("Output Environment cannot be NULL; using parent env")
outenv<-parent.env(environment())
}
#}}}
# Identify the point sources we want to try and stack
if (exists('sdfa') & exists('ssfa')) {
blendfrac<-1-sdfa/ssfa
if (onlyContams & filt.contam) {
point.sources<-which(cat.a==min.ap.rad & blendfrac <= blend.tolerance & contams==1)
} else {
point.sources<-which(cat.a==min.ap.rad & blendfrac <= blend.tolerance)
}
} else {
blendfrac<-rep(0,length(cat.a))
if (onlyContams & filt.contam) {
point.sources<-which(cat.a==min.ap.rad & contams==1)
} else {
point.sources<-which(cat.a==min.ap.rad)
}
}
#Remove things that are blended
if (exists('sdfa') & exists('ssfa')) {
if (length(point.sources) > 1) {
#Use pixel-space nearest neighbours
match<-nn2(data.frame(cat.x,cat.y)[point.sources,][which(blendfrac[point.sources]<=blend.tolerance),],data.frame(cat.x,cat.y)[point.sources,],searchtype='radius',
radius=radial.tolerance*2.0,k=min(10,length(which(blendfrac[point.sources]<=blend.tolerance))))
#Order by the nearest non-self match (2nd nnd column)
point.sources<-point.sources[order(match$nn.dists[,2],decreasing=TRUE)]
nn.dist<-match$nn.dists[order(match$nn.dists[,2],decreasing=TRUE),2]
#Reject sources that are, assuming at-least Nyquist sampling, within 3sigma overlap of the point source
if (length(which(nn.dist<1e4))/length(nn.dist)<all.limit) {
#Just remove all the blends
point.sources<-point.sources[-which(nn.dist<1e4)]
nn.dist<-nn.dist[-which(nn.dist<1e4)]
} else if (any(nn.dist<radial.tolerance)) {
point.sources<-point.sources[-which(nn.dist<radial.tolerance)]
nn.dist<-nn.dist[-which(nn.dist<radial.tolerance)]
}
} else {
nn.dist<-NULL
}
} else {
if (length(point.sources) > 0) {
#Use pixel-space nearest neighbours
match<-nn2(data.frame(cat.x,cat.y),data.frame(cat.x,cat.y)[point.sources,],searchtype='radius',radius=radial.tolerance*2.0,k=min(length(cat.x),10))
#Order by the nearest non-self match (2nd nnd column)
nn.dist<-match$nn.dists[order(match$nn.dists[,2],decreasing=TRUE),2]
point.sources<-point.sources[order(match$nn.dists[,2],decreasing=TRUE)]
#Reject sources that are, assuming at-least Nyquist sampling, within 3sigma overlap of the point source
if (length(which(nn.dist<1e4))/length(nn.dist)<all.limit) {
#Just remove all the blends
point.sources<-point.sources[-which(nn.dist<1e4)]
nn.dist<-nn.dist[-which(nn.dist<1e4)]
} else if (any(nn.dist<radial.tolerance)) {
point.sources<-point.sources[-which(nn.dist<radial.tolerance)]
nn.dist<-nn.dist[-which(nn.dist<radial.tolerance)]
}
} else {
nn.dist<-NULL
}
}
if (do.sky.est & exists('skylocal')) {
pixval.all<-pixval<-image.env$im[cbind(cat.x,cat.y)]-skylocal
} else if (do.sky.est) {
if (check.one.sky) {
#Remove things with pixel values far outside what is requested
if (length(image.env$imm)>1) {
skypix<-image.env$im
skypix[which(image.env$imm==0)]<-NA
skypix<-skypix[-(cat.x + nrow(image.env$im) * (cat.y - 1))]
skypix<-skypix[which(is.finite(skypix))]
} else {
skypix<-image.env$im[-(cat.x + nrow(image.env$im) * (cat.y - 1))]
}
skypix<-skypix[which(abs(skypix-median(skypix,na.rm=T))<10*mad(skypix,na.rm=T))]
onesky<-try(fit.gauss2low(skypix))
if (class(onesky)=='try-error') {
onesky<-data.frame(mu=median(skypix,na.rm=TRUE),sd=mad(skypix,na.rm=T))
}
pixval<-image.env$im[cbind(cat.x[point.sources],cat.y[point.sources])] - onesky$mu
if (grepl("SNR",bin.type)) {
pixval<-pixval/onesky$sd
}
if (grepl("quan",bin.type)) {
if (type=='quan') {
#quantile bin limits
bin.lim<-quantile(pixval[is.finite(pixval)],c(max(c(0,lo-0.1)),min(c(hi+0.1,1))))
} else {
#Absolute bins limits
bin.lim<-c(lo*0.9,hi*1.1)
}
} else {
if (type=='quan') {
#quantile bin limits
quans<-quantile(pixval[is.finite(pixval)],c(max(c(0,lo-0.1)),min(c(hi+0.1,1))))
bin.lim<-c(quans[1],quans[2])
} else {
#Absolute bins limits
bin.lim<-c(lo*0.9,hi*1.1)
}
}
if (bloom.bin) {
new.bin<-image.env$saturation
if (do.sky.est) { new.bin<-new.bin-onesky$mu }
if (grepl('SNR',bin.type)) { new.bin<-new.bin/onesky$sd }
bin.lim[2]<-new.bin
}
keep<-which(pixval >= bin.lim[1] & pixval <= bin.lim[2])
point.sources<-point.sources[keep]
nn.dist<-nn.dist[keep]
}
if (cutup) {
if (quick.sky) {
message("Perfoming Fast Sky Estimation")
skyest<-fast.sky.estimate(cat.x=cat.x,cat.y=cat.y,data.stamp.lims=data.stamp.lims,fit.gauss=fit.sky,saturate=image.env$saturation,
cutlo=(cat.a/arcsec.per.pix),cuthi=(cat.a/arcsec.per.pix)*5,data.stamp=data.stamp,mask.stamp=mask.stamp,
clipiters=sky.clip.iters,sigma.cut=sky.clip.prob,PSFFWHMinPIX=psffwhm, mpi.opts=mpi.opts,subset=point.sources)
skyest$sources<-cat.id[point.sources]
if (fit.sky) {
skylocal<-rep(NA,length(cat.x))
skylocal[point.sources]<-skyest[,'skyMu']
skyrms<-rep(NA,length(cat.x))
skyrms[point.sources]<-skyest[,'skySD']
} else {
skylocal<-rep(NA,length(cat.x))
skylocal[point.sources]<-skyest[,'skyMedian']
skyrms<-rep(NA,length(cat.x))
skyrms[point.sources]<-skyest[,'skyRMS']
}
} else {
message("Perfoming Sky Estimation")
skyest<-sky.estimate(cat.x=cat.x,cat.y=cat.y,data.stamp.lims=data.stamp.lims,saturate=image.env$saturation,
cutlo=(cat.a/arcsec.per.pix),cuthi=(cat.a/arcsec.per.pix)*5,data.stamp=data.stamp,mask.stamp=mask.stamp,
clipiters=sky.clip.iters,sigma.cut=sky.clip.prob,PSFFWHMinPIX=psffwhm, mpi.opts=mpi.opts,subset=point.sources)
skyest$sources<-cat.id[point.sources]
skylocal<-rep(NA,length(cat.x))
skylocal[point.sources]<-skyest[,'sky']
skyrms<-rep(NA,length(cat.x))
skyrms[point.sources]<-skyest[,'skyRMS']
}
} else {
if (quick.sky) {
message("Perfoming Fast Sky Estimation")
skyest<-fast.sky.estimate(cat.x=cat.x,cat.y=cat.y,data.stamp.lims=data.stamp.lims,fit.gauss=fit.sky,saturate=image.env$saturation,
cutlo=(cat.a/arcsec.per.pix),cuthi=(cat.a/arcsec.per.pix)*5,
data.stamp=image.env$im, mask.stamp=image.env$imm.dimim,
clipiters=sky.clip.iters,sigma.cut=sky.clip.prob,PSFFWHMinPIX=psffwhm, mpi.opts=mpi.opts,subset=point.sources)
skyest$sources<-cat.id[point.sources]
if (fit.sky) {
skylocal<-rep(NA,length(cat.x))
skylocal[point.sources]<-skyest[,'skyMu']
skyrms<-rep(NA,length(cat.x))
skyrms[point.sources]<-skyest[,'skySD']
} else {
skylocal<-rep(NA,length(cat.x))
skylocal[point.sources]<-skyest[,'skyMedian']
skyrms<-rep(NA,length(cat.x))
skyrms[point.sources]<-skyest[,'skyRMS']
}
} else {
message("Perfoming Sky Estimation")
skyest<-sky.estimate(cat.x=cat.x,cat.y=cat.y,data.stamp.lims=data.stamp.lims,saturate=image.env$saturation,
cutlo=(cat.a/arcsec.per.pix),cuthi=(cat.a/arcsec.per.pix)*5,
data.stamp=image.env$im, mask.stamp=image.env$imm.dimim,
clipiters=sky.clip.iters,sigma.cut=sky.clip.prob,PSFFWHMinPIX=psffwhm, mpi.opts=mpi.opts,subset=point.sources)
skyest$sources<-cat.id[point.sources]
skylocal<-rep(NA,length(cat.x))
skylocal[point.sources]<-skyest[,'sky']
skyrms<-rep(NA,length(cat.x))
skyrms[point.sources]<-skyest[,'skyRMS']
}
}
pixval.all<-image.env$im[cbind(cat.x,cat.y)]
pixval<-pixval.all-skylocal
pixval.all<-pixval.all-median(skylocal,na.rm=TRUE)
} else {
pixval.all<-pixval<-image.env$im[cbind(cat.x,cat.y)]
}
if (grepl('SNR',bin.type)) {
if (!(do.sky.est|get.sky.rms)) {
message("WARNING: cannot SNR bin without RMS estimate!
Using MAD of all pixels without a source centred on them (i.e. im[-(cat.x,cat.y)]) ")
tmprms<-mad(image.env$im[-(floor(cat.x)+nrow(image.env$im)*(floor(cat.y)-1))],na.rm=T)
pixval<-pixval/tmprms
pixval.all<-pixval.all/tmprms
} else {
pixval<-pixval/skyrms
pixval.all<-pixval.all/median(skyrms,na.rm=TRUE)
}
}
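  # Build the flux bins used for stacking. Depending on bin.type the binning
  # variable is either the raw central pixel value or its SNR, and the bin
  # edges are either quantiles of that variable ("quan") or equally spaced
  # between lo and hi; bin.zero is the lowest edge and bin.lim the upper edges.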
if (grepl("quan",bin.type)) {
if (type=='quan') {
#quantile bin limits
bin.lim<-quantile(pixval[is.finite(pixval)],seq(lo,hi,length=n.bins+1))
} else {
#Absolute bins limits
bin.lim<-quantile(pixval[which(pixval>=lo & pixval<=hi)],seq(0,1,length=n.bins+1))
}
bin.zero<-bin.lim[1]
bin.lim<-bin.lim[-1]
} else {
if (type=='quan') {
#quantile bin limits
quans<-quantile(pixval[is.finite(pixval)],c(lo,hi))
bin.lim<-seq(quans[1],quans[2],length=n.bins+1)
} else {
#Absolute bins limits
bin.lim<-seq(lo,hi,length=n.bins+1)
}
#Equal spaced bins
bin.zero<-bin.lim[1]
bin.lim<-bin.lim[-1]
}
#Do we want an additional blooming bin?
if (bloom.bin) {
new.bin<-image.env$saturation
if (do.sky.est) { new.bin<-new.bin-median(skylocal,na.rm=TRUE) }
if (grepl('SNR',bin.type) & (do.sky.est | get.sky.rms)) {
new.bin<-new.bin/median(skyrms,na.rm=TRUE)
} else if (grepl('SNR',bin.type) & !(do.sky.est | get.sky.rms)) {
new.bin<-new.bin/tmprms
}
bin.lim<-c(bin.lim,new.bin)
n.bins<-n.bins+1
}
#If the pixval is outside the bins, skip it
keep <- which(pixval[point.sources] >= bin.zero & pixval[point.sources] <= max(bin.lim))
point.sources<-point.sources[keep]
nn.dist<-nn.dist[keep]
#Assign the bins
bin<-rep(-1,length(pixval))
for (i in 1:n.bins) {
bin[point.sources][which(bin[point.sources]<0 & pixval[point.sources]<=bin.lim[i] & pixval[point.sources] >= bin.zero)]<-i
}
#Remove sources which have masked _datamaps_ (can create artefacts under convolution)
if (length(image.env$imm.orig) > 1) {
maskfrac<-rep(NA,length=length(cat.x))
for (i in point.sources) {
maskfrac[i]<-sum(image.env$imm.orig[ap.lims.mask.map[i,1]:ap.lims.mask.map[i,2],ap.lims.mask.map[i,3]:ap.lims.mask.map[i,4]]==0)
maskfrac[i]<-maskfrac[i]/length(image.env$imm.orig[ap.lims.mask.map[i,1]:ap.lims.mask.map[i,2],ap.lims.mask.map[i,3]:ap.lims.mask.map[i,4]])
}
} else if (length(image.env$imm) > 1) {
maskfrac<-rep(NA,length=length(cat.x))
for (i in point.sources) {
maskfrac[i]<-sum(image.env$imm[ap.lims.mask.map[i,1]:ap.lims.mask.map[i,2],ap.lims.mask.map[i,3]:ap.lims.mask.map[i,4]]==0)
maskfrac[i]<-maskfrac[i]/length(image.env$imm[ap.lims.mask.map[i,1]:ap.lims.mask.map[i,2],ap.lims.mask.map[i,3]:ap.lims.mask.map[i,4]])
}
}
if (exists('maskfrac')) {
keep<-which(maskfrac[point.sources]<=mask.tolerance)
point.sources<-point.sources[keep]
nn.dist<-nn.dist[keep]
}
# Initialise the arrays
im_psf.nomask<-im_psf<-weight<-nomask.n<-list()
for (i in 1:n.bins) {
if (length(which(bin[point.sources]==i))>0){
mat<-matrix(0,max(stamplen[point.sources][which(bin[point.sources]==i)]),max(stamplen[point.sources][which(bin[point.sources]==i)]))
} else {
mat<-matrix(0,min(stamplen),min(stamplen))
}
im_psf.nomask[[i]]<-im_psf[[i]]<-weight[[i]]<-mat
nomask.n[[i]]<-0
}
#Remove sources above and beyond what is requested
for (i in 1:n.bins) {
keep<-which(bin[point.sources]==i)
blend.tolerance.tmp<-blend.tolerance
mask.tolerance.tmp<-mask.tolerance
radial.tolerance.tmp<-radial.tolerance
    #Check whether the sample needs to be cleaned iteratively by tightening the selection tolerances
if (length(which(nn.dist[keep] > radial.tolerance*2.0 & maskfrac[point.sources[keep]] == 0 & blendfrac[point.sources[keep]] == 0)) < n.sources) {
radial.tolerance.use<-radial.tolerance
mask.tolerance.use<-mask.tolerance
blend.tolerance.use<-blend.tolerance
while (length(keep) > n.sources) {
#Grow the distance tolerance
radial.tolerance.use<-radial.tolerance.tmp
radial.tolerance.tmp<-radial.tolerance.tmp+0.5
#Reduce the masking tolerance
mask.tolerance.use<-mask.tolerance.tmp
mask.tolerance.tmp<-max(c(0,mask.tolerance.tmp-0.05))
#Reduce the blending tolerance
blend.tolerance.use<-blend.tolerance.tmp
blend.tolerance.tmp<-max(c(0,blend.tolerance.tmp-0.05))
#Calculate the new sample size
keep<-which(bin[point.sources]==i & nn.dist>=radial.tolerance.tmp & maskfrac[point.sources]<=mask.tolerance.tmp &
blendfrac[point.sources]<=blend.tolerance.tmp)
}
keep<-which(bin[point.sources]==i & nn.dist>=radial.tolerance.use & maskfrac[point.sources]<=mask.tolerance.use &
blendfrac[point.sources]<=blend.tolerance.use)
throw<-which(bin[point.sources]==i)
throw<-throw[which(!throw%in%keep)]
if (length(throw)!=0) {
point.sources<-point.sources[-throw]
nn.dist<-nn.dist[-throw]
}
} else {
      #There are more pristine PSF sources than requested; randomly keep n.sources of them
keep<-which(bin[point.sources]==i & nn.dist > radial.tolerance*2.0 & maskfrac[point.sources] == 0 & blendfrac[point.sources] == 0)
if (n.sources+1 < length(keep)) {
throw<-sample(keep,size=length(keep)-n.sources,replace=FALSE)
#throw<-keep[(n.sources+1):length(keep)]
point.sources<-point.sources[-throw]
nn.dist<-nn.dist[-throw]
} else {
point.sources<-point.sources[keep]
nn.dist<-nn.dist[keep]
}
}
}
if (plot & length(point.sources)>0) {
diagnostic<-TRUE
if (diagnostic) {
#show all of the PSFs used in the stack
nsamp<-length(point.sources)
sample<-point.sources
} else {
#show a sample of the PSFs used in the stack
nsamp<-min(24,length(point.sources))
sample=sample(point.sources,nsamp)
}
laymat<-(matrix(c(1:(ifelse(nsamp>12,12,nsamp)*4),rep(0,12*4-ifelse(nsamp>12,12,nsamp)*4)),ncol=8,byrow=T))
layout(laymat)
par(mar=c(0,0,0,0),oma=c(2,2,2,2))
} else {
sample<-NULL
}
#Loop through the sources remaining
im=image.env$im
if (length(image.env$imm.orig) > 1) {
mask=image.env$imm.orig
} else {
mask=image.env$imm
}
for (i in point.sources) {
xc=cat.x[i]
yc=cat.y[i]
xlo=ap.lims.data.map[i,1]
xup=ap.lims.data.map[i,2]
ylo=ap.lims.data.map[i,3]
yup=ap.lims.data.map[i,4]
xmlo=ap.lims.mask.map[i,1]
xmup=ap.lims.mask.map[i,2]
ymlo=ap.lims.mask.map[i,3]
ymup=ap.lims.mask.map[i,4]
if (do.sky.est) {
sky=skylocal[i]
rms=skyrms[i]
} else {
sky=0
rms=0
}
#Skip the source if it's saturated
if (any(im[xlo:xup,ylo:yup]>=image.env$saturation)) { next }
#Skip the source if there's a much brighter source that isn't this source
if (max(im[xlo:xup,ylo:yup])-sky > 5*pixval[i]) {
if (sqrt(sum(abs(which(im[xlo:xup,ylo:yup]==max(im[xlo:xup,ylo:yup],na.rm=T),arr.ind=T)-cbind(xc-xlo,yc-ylo))^2))>=3) { next }
}
#Remove any sub-pixel centroiding
#Make grid for psf at old pixel centres /*fold*/ {{{
im.obj<-list(x=xlo:xup, y=ylo:yup,z=im[xlo:xup,ylo:yup]-sky)
# /*fend*/ }}}
#Make expanded grid of new pixel centres /*fold*/ {{{
expanded<-expand.grid(xlo:xup,ylo:yup)
xnew<-expanded[,1]-xc%%1
ynew<-expanded[,2]-yc%%1
# /*fend*/ }}}
#Interpolate /*fold*/ {{{
im.cen<-matrix(interp.2d(xnew, ynew, im.obj)[,3], ncol=ncol(im.obj$z),nrow=nrow(im.obj$z))
# /*fend*/ }}}
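    # im.cen is the postage stamp resampled onto a grid offset by the
    # fractional part of the catalogue centroid, so that every source sits on
    # a pixel centre and the stamps can be co-added coherently.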
#conv<-array(0,dim=dim(im[xlo:xup,ylo:yup]))
#conv[dim(conv)[1]/2,dim(conv)[2]/2]<-1
#im.cen<-convolve.psf(im[xlo:xup,ylo:yup]-sky,conv)
#}
#Ensure that the central source is unmasked (except when blended)
#And add to the stack
if (length(mask)==1) {
if (mask==1) {
#Add the image to the stack
im_psf[[bin[i]]]<-(im_psf[[bin[i]]]+(im.cen))
weight[[bin[i]]]<-(weight[[bin[i]]])+1
}
} else {
if (exists('dfa')) {
mask[xmlo:xmup,ymlo:ymup][which(dfa[[i]]>= (1-sourcemask.conf.lim))]<-1
}
#Add the image to the stack
im_psf[[bin[i]]]<-(im_psf[[bin[i]]]+(im.cen)*(mask[xmlo:xmup,ymlo:ymup]))
weight[[bin[i]]]<-(weight[[bin[i]]]+(mask[xmlo:xmup,ymlo:ymup]))
}
im_psf.nomask[[bin[i]]]<-(im_psf.nomask[[bin[i]]]+(im.cen))
nomask.n[[bin[i]]]<-nomask.n[[bin[i]]]+1
if (plot & i%in%sample) {
if (length(mask)==1) {
capture=magimage(im[xlo:xup,ylo:yup],axes=FALSE)
label('topleft',paste0(i,': (1) raw (there is no mask)'),col='red',lwd=2)
magimage(im.cen,axes=FALSE)
label('topleft',paste0(i,': (2) recentred (no mask)'),col='red',lwd=2)
capture=magimage(im.cen-im[xlo:xup,ylo:yup],axes=FALSE)
label('topleft',paste0(i,': Raw-centred (no mask)'),col='red',lwd=2)
capture=magimage(im_psf[[bin[i]]]/weight[[bin[i]]],axes=FALSE)
label('topleft',paste0(i,': PSF-stack (no mask)'),col='red',lwd=2)
} else {
capture=magimage(im[xlo:xup,ylo:yup]*mask[xmlo:xmup,ymlo:ymup],axes=FALSE)
label('topleft',paste0(i,': (1) raw (masked)'),col='red',lwd=2)
magimage(im.cen*mask[xmlo:xmup,ymlo:ymup],axes=FALSE)
label('topleft',paste0(i,': (2) recentred (masked)'),col='red',lwd=2)
capture=magimage((im.cen-im[xlo:xup,ylo:yup])*mask[xmlo:xmup,ymlo:ymup],axes=FALSE)
label('topleft',paste0(i,': Raw-centred (masked)'),col='red',lwd=2)
capture=magimage(im_psf[[bin[i]]]/weight[[bin[i]]],axes=FALSE)
label('topleft',paste0(i,': PSF-stack (masked)'),col='red',lwd=2)
}
}
}
# Divide by the per-pixel weights
for (i in 1:n.bins) {
im_psf[[i]]<-im_psf[[i]]/weight[[i]]
    im_psf.nomask[[i]]<-im_psf.nomask[[i]]/nomask.n[[i]] #normalise the unmasked stack by its own source count
}
if (plot & length(point.sources) > 0) {
    mtext(side=3,outer=T,text=paste0("There are ",length(point.sources)," sources for ",n.bins," bins"))
layout(matrix(1:(n.bins*3),ncol=n.bins))
par(mar=c(0,0,0,0),oma=c(2,2,2,2))
for (j in 1:n.bins) {
if (!all(is.na(im_psf[[j]]))) {
catch=capture.output(magimage(im_psf[[j]],axes=FALSE))
} else {
image(axes=FALSE,matrix(1),col='white')
label('centre','NO ESTIMATE')
}
label('topleft',paste0('Final PSF estimate'),col='red',lwd=2)
if (!all(is.na(weight[[j]]))) {
catch=capture.output(magimage(weight[[j]],axes=FALSE))
} else {
image(axes=FALSE,matrix(1),col='white')
label('centre','NO ESTIMATE')
}
label('topleft',paste0('Masking (Weights)'),col='red',lwd=2)
if (!all(is.na(im_psf.nomask[[j]]))) {
catch=capture.output(magimage(im_psf.nomask[[j]],axes=FALSE))
} else {
image(axes=FALSE,matrix(1),col='white')
label('centre','NO ESTIMATE')
}
label('topleft',paste0('Without Mask Estimate'),col='red',lwd=2)
}
}
#Put the PSF onto the big stamp {{{
centre<-dim(im_psf[[1]])/2
#}}}
#Update bin definition {{{
#Used bin limits
bin<-rep(-1,length(pixval.all))
bin[which(pixval.all < bin.zero)]<-1
bin[which(pixval.all > max(bin.lim))]<-n.bins
for (i in 1:n.bins) {
bin[which(bin<0 & pixval.all < bin.lim[i])]<-i
}
#}}}
#Parse Parameter Space & Return{{{
if (!is.null(env)) { detach(env) }
if (exists('skyest')) {
assign("tmp.skyest" , skyest , envir = outenv)
}
assign("tmp.psf.id" , bin, envir = outenv)
assign("tmp.psfest.val",pixval.all, envir = outenv)
#assign("psfwidth" , psfwidth , envir = outenv)
#assign("sumpsf" , sumpsf , envir = outenv)
return=list(PSF=im_psf,WEIGHT=weight,NOMASK=im_psf.nomask,centre=centre)
#}}}
}
| /R/estimate.psf.R | no_license | AngusWright/LAMBDAR | R | false | false | 20,979 | r |
estimate.psf <-
function (outenv=parent.env(environment()),n.bins=1,bloom.bin=FALSE,n.sources=5e2,onlyContams=TRUE,bin.type='SNR.quan',
lo=20,hi=200,type='num',check.one.sky=length(point.sources)>5*n.sources,blend.tolerance=0.5,
mask.tolerance=0.0,radial.tolerance=25,all.limit=0.15,env=NULL,plot=FALSE) {
message('--------------------------Estimate_PSF-------------------------------------')
# Load Parameter Space {{{
if(!is.null(env)) {
attach(env, warn.conflicts=FALSE)
}
if(is.null(outenv)&!is.null(env)) { outenv<-env }
else if (is.null(outenv)) {
warning("Output Environment cannot be NULL; using parent env")
outenv<-parent.env(environment())
}
#}}}
# Identify the point sources we want to try and stack
if (exists('sdfa') & exists('ssfa')) {
blendfrac<-1-sdfa/ssfa
if (onlyContams & filt.contam) {
point.sources<-which(cat.a==min.ap.rad & blendfrac <= blend.tolerance & contams==1)
} else {
point.sources<-which(cat.a==min.ap.rad & blendfrac <= blend.tolerance)
}
} else {
blendfrac<-rep(0,length(cat.a))
if (onlyContams & filt.contam) {
point.sources<-which(cat.a==min.ap.rad & contams==1)
} else {
point.sources<-which(cat.a==min.ap.rad)
}
}
#Remove things that are blended
if (exists('sdfa') & exists('ssfa')) {
if (length(point.sources) > 1) {
#Use pixel-space nearest neighbours
match<-nn2(data.frame(cat.x,cat.y)[point.sources,][which(blendfrac[point.sources]<=blend.tolerance),],data.frame(cat.x,cat.y)[point.sources,],searchtype='radius',
radius=radial.tolerance*2.0,k=min(10,length(which(blendfrac[point.sources]<=blend.tolerance))))
#Order by the nearest non-self match (2nd nnd column)
point.sources<-point.sources[order(match$nn.dists[,2],decreasing=TRUE)]
nn.dist<-match$nn.dists[order(match$nn.dists[,2],decreasing=TRUE),2]
#Reject sources that are, assuming at-least Nyquist sampling, within 3sigma overlap of the point source
if (length(which(nn.dist<1e4))/length(nn.dist)<all.limit) {
#Just remove all the blends
point.sources<-point.sources[-which(nn.dist<1e4)]
nn.dist<-nn.dist[-which(nn.dist<1e4)]
} else if (any(nn.dist<radial.tolerance)) {
point.sources<-point.sources[-which(nn.dist<radial.tolerance)]
nn.dist<-nn.dist[-which(nn.dist<radial.tolerance)]
}
} else {
nn.dist<-NULL
}
} else {
if (length(point.sources) > 0) {
#Use pixel-space nearest neighbours
match<-nn2(data.frame(cat.x,cat.y),data.frame(cat.x,cat.y)[point.sources,],searchtype='radius',radius=radial.tolerance*2.0,k=min(length(cat.x),10))
#Order by the nearest non-self match (2nd nnd column)
nn.dist<-match$nn.dists[order(match$nn.dists[,2],decreasing=TRUE),2]
point.sources<-point.sources[order(match$nn.dists[,2],decreasing=TRUE)]
#Reject sources that are, assuming at-least Nyquist sampling, within 3sigma overlap of the point source
if (length(which(nn.dist<1e4))/length(nn.dist)<all.limit) {
#Just remove all the blends
point.sources<-point.sources[-which(nn.dist<1e4)]
nn.dist<-nn.dist[-which(nn.dist<1e4)]
} else if (any(nn.dist<radial.tolerance)) {
point.sources<-point.sources[-which(nn.dist<radial.tolerance)]
nn.dist<-nn.dist[-which(nn.dist<radial.tolerance)]
}
} else {
nn.dist<-NULL
}
}
if (do.sky.est & exists('skylocal')) {
pixval.all<-pixval<-image.env$im[cbind(cat.x,cat.y)]-skylocal
} else if (do.sky.est) {
if (check.one.sky) {
#Remove things with pixel values far outside what is requested
if (length(image.env$imm)>1) {
skypix<-image.env$im
skypix[which(image.env$imm==0)]<-NA
skypix<-skypix[-(cat.x + nrow(image.env$im) * (cat.y - 1))]
skypix<-skypix[which(is.finite(skypix))]
} else {
skypix<-image.env$im[-(cat.x + nrow(image.env$im) * (cat.y - 1))]
}
skypix<-skypix[which(abs(skypix-median(skypix,na.rm=T))<10*mad(skypix,na.rm=T))]
onesky<-try(fit.gauss2low(skypix))
if (class(onesky)=='try-error') {
onesky<-data.frame(mu=median(skypix,na.rm=TRUE),sd=mad(skypix,na.rm=T))
}
pixval<-image.env$im[cbind(cat.x[point.sources],cat.y[point.sources])] - onesky$mu
if (grepl("SNR",bin.type)) {
pixval<-pixval/onesky$sd
}
if (grepl("quan",bin.type)) {
if (type=='quan') {
#quantile bin limits
bin.lim<-quantile(pixval[is.finite(pixval)],c(max(c(0,lo-0.1)),min(c(hi+0.1,1))))
} else {
#Absolute bins limits
bin.lim<-c(lo*0.9,hi*1.1)
}
} else {
if (type=='quan') {
#quantile bin limits
quans<-quantile(pixval[is.finite(pixval)],c(max(c(0,lo-0.1)),min(c(hi+0.1,1))))
bin.lim<-c(quans[1],quans[2])
} else {
#Absolute bins limits
bin.lim<-c(lo*0.9,hi*1.1)
}
}
if (bloom.bin) {
new.bin<-image.env$saturation
if (do.sky.est) { new.bin<-new.bin-onesky$mu }
if (grepl('SNR',bin.type)) { new.bin<-new.bin/onesky$sd }
bin.lim[2]<-new.bin
}
keep<-which(pixval >= bin.lim[1] & pixval <= bin.lim[2])
point.sources<-point.sources[keep]
nn.dist<-nn.dist[keep]
}
if (cutup) {
if (quick.sky) {
message("Perfoming Fast Sky Estimation")
skyest<-fast.sky.estimate(cat.x=cat.x,cat.y=cat.y,data.stamp.lims=data.stamp.lims,fit.gauss=fit.sky,saturate=image.env$saturation,
cutlo=(cat.a/arcsec.per.pix),cuthi=(cat.a/arcsec.per.pix)*5,data.stamp=data.stamp,mask.stamp=mask.stamp,
clipiters=sky.clip.iters,sigma.cut=sky.clip.prob,PSFFWHMinPIX=psffwhm, mpi.opts=mpi.opts,subset=point.sources)
skyest$sources<-cat.id[point.sources]
if (fit.sky) {
skylocal<-rep(NA,length(cat.x))
skylocal[point.sources]<-skyest[,'skyMu']
skyrms<-rep(NA,length(cat.x))
skyrms[point.sources]<-skyest[,'skySD']
} else {
skylocal<-rep(NA,length(cat.x))
skylocal[point.sources]<-skyest[,'skyMedian']
skyrms<-rep(NA,length(cat.x))
skyrms[point.sources]<-skyest[,'skyRMS']
}
} else {
message("Perfoming Sky Estimation")
skyest<-sky.estimate(cat.x=cat.x,cat.y=cat.y,data.stamp.lims=data.stamp.lims,saturate=image.env$saturation,
cutlo=(cat.a/arcsec.per.pix),cuthi=(cat.a/arcsec.per.pix)*5,data.stamp=data.stamp,mask.stamp=mask.stamp,
clipiters=sky.clip.iters,sigma.cut=sky.clip.prob,PSFFWHMinPIX=psffwhm, mpi.opts=mpi.opts,subset=point.sources)
skyest$sources<-cat.id[point.sources]
skylocal<-rep(NA,length(cat.x))
skylocal[point.sources]<-skyest[,'sky']
skyrms<-rep(NA,length(cat.x))
skyrms[point.sources]<-skyest[,'skyRMS']
}
} else {
if (quick.sky) {
message("Perfoming Fast Sky Estimation")
skyest<-fast.sky.estimate(cat.x=cat.x,cat.y=cat.y,data.stamp.lims=data.stamp.lims,fit.gauss=fit.sky,saturate=image.env$saturation,
cutlo=(cat.a/arcsec.per.pix),cuthi=(cat.a/arcsec.per.pix)*5,
data.stamp=image.env$im, mask.stamp=image.env$imm.dimim,
clipiters=sky.clip.iters,sigma.cut=sky.clip.prob,PSFFWHMinPIX=psffwhm, mpi.opts=mpi.opts,subset=point.sources)
skyest$sources<-cat.id[point.sources]
if (fit.sky) {
skylocal<-rep(NA,length(cat.x))
skylocal[point.sources]<-skyest[,'skyMu']
skyrms<-rep(NA,length(cat.x))
skyrms[point.sources]<-skyest[,'skySD']
} else {
skylocal<-rep(NA,length(cat.x))
skylocal[point.sources]<-skyest[,'skyMedian']
skyrms<-rep(NA,length(cat.x))
skyrms[point.sources]<-skyest[,'skyRMS']
}
} else {
message("Perfoming Sky Estimation")
skyest<-sky.estimate(cat.x=cat.x,cat.y=cat.y,data.stamp.lims=data.stamp.lims,saturate=image.env$saturation,
cutlo=(cat.a/arcsec.per.pix),cuthi=(cat.a/arcsec.per.pix)*5,
data.stamp=image.env$im, mask.stamp=image.env$imm.dimim,
clipiters=sky.clip.iters,sigma.cut=sky.clip.prob,PSFFWHMinPIX=psffwhm, mpi.opts=mpi.opts,subset=point.sources)
skyest$sources<-cat.id[point.sources]
skylocal<-rep(NA,length(cat.x))
skylocal[point.sources]<-skyest[,'sky']
skyrms<-rep(NA,length(cat.x))
skyrms[point.sources]<-skyest[,'skyRMS']
}
}
pixval.all<-image.env$im[cbind(cat.x,cat.y)]
pixval<-pixval.all-skylocal
pixval.all<-pixval.all-median(skylocal,na.rm=TRUE)
} else {
pixval.all<-pixval<-image.env$im[cbind(cat.x,cat.y)]
}
if (grepl('SNR',bin.type)) {
if (!(do.sky.est|get.sky.rms)) {
message("WARNING: cannot SNR bin without RMS estimate!
Using MAD of all pixels without a source centred on them (i.e. im[-(cat.x,cat.y)]) ")
tmprms<-mad(image.env$im[-(floor(cat.x)+nrow(image.env$im)*(floor(cat.y)-1))],na.rm=T)
pixval<-pixval/tmprms
pixval.all<-pixval.all/tmprms
} else {
pixval<-pixval/skyrms
pixval.all<-pixval.all/median(skyrms,na.rm=TRUE)
}
}
if (grepl("quan",bin.type)) {
if (type=='quan') {
#quantile bin limits
bin.lim<-quantile(pixval[is.finite(pixval)],seq(lo,hi,length=n.bins+1))
} else {
#Absolute bins limits
bin.lim<-quantile(pixval[which(pixval>=lo & pixval<=hi)],seq(0,1,length=n.bins+1))
}
bin.zero<-bin.lim[1]
bin.lim<-bin.lim[-1]
} else {
if (type=='quan') {
#quantile bin limits
quans<-quantile(pixval[is.finite(pixval)],c(lo,hi))
bin.lim<-seq(quans[1],quans[2],length=n.bins+1)
} else {
#Absolute bins limits
bin.lim<-seq(lo,hi,length=n.bins+1)
}
#Equal spaced bins
bin.zero<-bin.lim[1]
bin.lim<-bin.lim[-1]
}
#Do we want an additional blooming bin?
if (bloom.bin) {
new.bin<-image.env$saturation
if (do.sky.est) { new.bin<-new.bin-median(skylocal,na.rm=TRUE) }
if (grepl('SNR',bin.type) & (do.sky.est | get.sky.rms)) {
new.bin<-new.bin/median(skyrms,na.rm=TRUE)
} else if (grepl('SNR',bin.type) & !(do.sky.est | get.sky.rms)) {
new.bin<-new.bin/tmprms
}
bin.lim<-c(bin.lim,new.bin)
n.bins<-n.bins+1
}
#If the pixval is outside the bins, skip it
keep <- which(pixval[point.sources] >= bin.zero & pixval[point.sources] <= max(bin.lim))
point.sources<-point.sources[keep]
nn.dist<-nn.dist[keep]
#Assign the bins
bin<-rep(-1,length(pixval))
for (i in 1:n.bins) {
bin[point.sources][which(bin[point.sources]<0 & pixval[point.sources]<=bin.lim[i] & pixval[point.sources] >= bin.zero)]<-i
}
#Remove sources which have masked _datamaps_ (can create artefacts under convolution)
if (length(image.env$imm.orig) > 1) {
maskfrac<-rep(NA,length=length(cat.x))
for (i in point.sources) {
maskfrac[i]<-sum(image.env$imm.orig[ap.lims.mask.map[i,1]:ap.lims.mask.map[i,2],ap.lims.mask.map[i,3]:ap.lims.mask.map[i,4]]==0)
maskfrac[i]<-maskfrac[i]/length(image.env$imm.orig[ap.lims.mask.map[i,1]:ap.lims.mask.map[i,2],ap.lims.mask.map[i,3]:ap.lims.mask.map[i,4]])
}
} else if (length(image.env$imm) > 1) {
maskfrac<-rep(NA,length=length(cat.x))
for (i in point.sources) {
maskfrac[i]<-sum(image.env$imm[ap.lims.mask.map[i,1]:ap.lims.mask.map[i,2],ap.lims.mask.map[i,3]:ap.lims.mask.map[i,4]]==0)
maskfrac[i]<-maskfrac[i]/length(image.env$imm[ap.lims.mask.map[i,1]:ap.lims.mask.map[i,2],ap.lims.mask.map[i,3]:ap.lims.mask.map[i,4]])
}
}
if (exists('maskfrac')) {
keep<-which(maskfrac[point.sources]<=mask.tolerance)
point.sources<-point.sources[keep]
nn.dist<-nn.dist[keep]
}
# Initialise the arrays
im_psf.nomask<-im_psf<-weight<-nomask.n<-list()
for (i in 1:n.bins) {
if (length(which(bin[point.sources]==i))>0){
mat<-matrix(0,max(stamplen[point.sources][which(bin[point.sources]==i)]),max(stamplen[point.sources][which(bin[point.sources]==i)]))
} else {
mat<-matrix(0,min(stamplen),min(stamplen))
}
im_psf.nomask[[i]]<-im_psf[[i]]<-weight[[i]]<-mat
nomask.n[[i]]<-0
}
#Remove sources above and beyond what is requested
for (i in 1:n.bins) {
keep<-which(bin[point.sources]==i)
blend.tolerance.tmp<-blend.tolerance
mask.tolerance.tmp<-mask.tolerance
radial.tolerance.tmp<-radial.tolerance
    #Check whether we can iteratively clean the sample
if (length(which(nn.dist[keep] > radial.tolerance*2.0 & maskfrac[point.sources[keep]] == 0 & blendfrac[point.sources[keep]] == 0)) < n.sources) {
radial.tolerance.use<-radial.tolerance
mask.tolerance.use<-mask.tolerance
blend.tolerance.use<-blend.tolerance
while (length(keep) > n.sources) {
#Grow the distance tolerance
radial.tolerance.use<-radial.tolerance.tmp
radial.tolerance.tmp<-radial.tolerance.tmp+0.5
#Reduce the masking tolerance
mask.tolerance.use<-mask.tolerance.tmp
mask.tolerance.tmp<-max(c(0,mask.tolerance.tmp-0.05))
#Reduce the blending tolerance
blend.tolerance.use<-blend.tolerance.tmp
blend.tolerance.tmp<-max(c(0,blend.tolerance.tmp-0.05))
#Calculate the new sample size
keep<-which(bin[point.sources]==i & nn.dist>=radial.tolerance.tmp & maskfrac[point.sources]<=mask.tolerance.tmp &
blendfrac[point.sources]<=blend.tolerance.tmp)
}
keep<-which(bin[point.sources]==i & nn.dist>=radial.tolerance.use & maskfrac[point.sources]<=mask.tolerance.use &
blendfrac[point.sources]<=blend.tolerance.use)
throw<-which(bin[point.sources]==i)
throw<-throw[which(!throw%in%keep)]
if (length(throw)!=0) {
point.sources<-point.sources[-throw]
nn.dist<-nn.dist[-throw]
}
} else {
      #There are more pure PSF sources than the number requested; randomly select n.sources of them...
keep<-which(bin[point.sources]==i & nn.dist > radial.tolerance*2.0 & maskfrac[point.sources] == 0 & blendfrac[point.sources] == 0)
if (n.sources+1 < length(keep)) {
throw<-sample(keep,size=length(keep)-n.sources,replace=FALSE)
#throw<-keep[(n.sources+1):length(keep)]
point.sources<-point.sources[-throw]
nn.dist<-nn.dist[-throw]
} else {
point.sources<-point.sources[keep]
nn.dist<-nn.dist[keep]
}
}
}
if (plot & length(point.sources)>0) {
diagnostic<-TRUE
if (diagnostic) {
#show all of the PSFs used in the stack
nsamp<-length(point.sources)
sample<-point.sources
} else {
#show a sample of the PSFs used in the stack
nsamp<-min(24,length(point.sources))
sample=sample(point.sources,nsamp)
}
laymat<-(matrix(c(1:(ifelse(nsamp>12,12,nsamp)*4),rep(0,12*4-ifelse(nsamp>12,12,nsamp)*4)),ncol=8,byrow=T))
layout(laymat)
par(mar=c(0,0,0,0),oma=c(2,2,2,2))
} else {
sample<-NULL
}
#Loop through the sources remaining
im=image.env$im
if (length(image.env$imm.orig) > 1) {
mask=image.env$imm.orig
} else {
mask=image.env$imm
}
for (i in point.sources) {
xc=cat.x[i]
yc=cat.y[i]
xlo=ap.lims.data.map[i,1]
xup=ap.lims.data.map[i,2]
ylo=ap.lims.data.map[i,3]
yup=ap.lims.data.map[i,4]
xmlo=ap.lims.mask.map[i,1]
xmup=ap.lims.mask.map[i,2]
ymlo=ap.lims.mask.map[i,3]
ymup=ap.lims.mask.map[i,4]
if (do.sky.est) {
sky=skylocal[i]
rms=skyrms[i]
} else {
sky=0
rms=0
}
#Skip the source if it's saturated
if (any(im[xlo:xup,ylo:yup]>=image.env$saturation)) { next }
#Skip the source if there's a much brighter source that isn't this source
if (max(im[xlo:xup,ylo:yup])-sky > 5*pixval[i]) {
if (sqrt(sum(abs(which(im[xlo:xup,ylo:yup]==max(im[xlo:xup,ylo:yup],na.rm=T),arr.ind=T)-cbind(xc-xlo,yc-ylo))^2))>=3) { next }
}
#Remove any sub-pixel centroiding
#Make grid for psf at old pixel centres /*fold*/ {{{
im.obj<-list(x=xlo:xup, y=ylo:yup,z=im[xlo:xup,ylo:yup]-sky)
# /*fend*/ }}}
#Make expanded grid of new pixel centres /*fold*/ {{{
expanded<-expand.grid(xlo:xup,ylo:yup)
xnew<-expanded[,1]-xc%%1
ynew<-expanded[,2]-yc%%1
# /*fend*/ }}}
#Interpolate /*fold*/ {{{
im.cen<-matrix(interp.2d(xnew, ynew, im.obj)[,3], ncol=ncol(im.obj$z),nrow=nrow(im.obj$z))
# /*fend*/ }}}
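    # Note: interp.2d() is supplied elsewhere in this package and is not shown here.
    # It presumably performs bilinear interpolation of im.obj$z at the requested
    # (xnew, ynew) positions, i.e. roughly
    #   z(x,y) ~ (1-dx)(1-dy)*z11 + dx(1-dy)*z21 + (1-dx)dy*z12 + dx*dy*z22
    # with dx, dy the fractional offsets inside the enclosing pixel. This is an
    # assumption about its behaviour, noted only to explain the recentring step.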
#conv<-array(0,dim=dim(im[xlo:xup,ylo:yup]))
#conv[dim(conv)[1]/2,dim(conv)[2]/2]<-1
#im.cen<-convolve.psf(im[xlo:xup,ylo:yup]-sky,conv)
#}
#Ensure that the central source is unmasked (except when blended)
#And add to the stack
if (length(mask)==1) {
if (mask==1) {
#Add the image to the stack
im_psf[[bin[i]]]<-(im_psf[[bin[i]]]+(im.cen))
weight[[bin[i]]]<-(weight[[bin[i]]])+1
}
} else {
if (exists('dfa')) {
mask[xmlo:xmup,ymlo:ymup][which(dfa[[i]]>= (1-sourcemask.conf.lim))]<-1
}
#Add the image to the stack
im_psf[[bin[i]]]<-(im_psf[[bin[i]]]+(im.cen)*(mask[xmlo:xmup,ymlo:ymup]))
weight[[bin[i]]]<-(weight[[bin[i]]]+(mask[xmlo:xmup,ymlo:ymup]))
}
im_psf.nomask[[bin[i]]]<-(im_psf.nomask[[bin[i]]]+(im.cen))
nomask.n[[bin[i]]]<-nomask.n[[bin[i]]]+1
if (plot & i%in%sample) {
if (length(mask)==1) {
capture=magimage(im[xlo:xup,ylo:yup],axes=FALSE)
label('topleft',paste0(i,': (1) raw (there is no mask)'),col='red',lwd=2)
magimage(im.cen,axes=FALSE)
label('topleft',paste0(i,': (2) recentred (no mask)'),col='red',lwd=2)
capture=magimage(im.cen-im[xlo:xup,ylo:yup],axes=FALSE)
label('topleft',paste0(i,': Raw-centred (no mask)'),col='red',lwd=2)
capture=magimage(im_psf[[bin[i]]]/weight[[bin[i]]],axes=FALSE)
label('topleft',paste0(i,': PSF-stack (no mask)'),col='red',lwd=2)
} else {
capture=magimage(im[xlo:xup,ylo:yup]*mask[xmlo:xmup,ymlo:ymup],axes=FALSE)
label('topleft',paste0(i,': (1) raw (masked)'),col='red',lwd=2)
magimage(im.cen*mask[xmlo:xmup,ymlo:ymup],axes=FALSE)
label('topleft',paste0(i,': (2) recentred (masked)'),col='red',lwd=2)
capture=magimage((im.cen-im[xlo:xup,ylo:yup])*mask[xmlo:xmup,ymlo:ymup],axes=FALSE)
label('topleft',paste0(i,': Raw-centred (masked)'),col='red',lwd=2)
capture=magimage(im_psf[[bin[i]]]/weight[[bin[i]]],axes=FALSE)
label('topleft',paste0(i,': PSF-stack (masked)'),col='red',lwd=2)
}
}
}
# Divide by the per-pixel weights
for (i in 1:n.bins) {
im_psf[[i]]<-im_psf[[i]]/weight[[i]]
    im_psf.nomask[[i]]<-im_psf.nomask[[i]]/nomask.n[[i]]
}
if (plot & length(point.sources) > 0) {
    mtext(side=3,outer=T,text=paste0("There are ",length(point.sources)," sources for ",n.bins," bins"))
layout(matrix(1:(n.bins*3),ncol=n.bins))
par(mar=c(0,0,0,0),oma=c(2,2,2,2))
for (j in 1:n.bins) {
if (!all(is.na(im_psf[[j]]))) {
catch=capture.output(magimage(im_psf[[j]],axes=FALSE))
} else {
image(axes=FALSE,matrix(1),col='white')
label('centre','NO ESTIMATE')
}
label('topleft',paste0('Final PSF estimate'),col='red',lwd=2)
if (!all(is.na(weight[[j]]))) {
catch=capture.output(magimage(weight[[j]],axes=FALSE))
} else {
image(axes=FALSE,matrix(1),col='white')
label('centre','NO ESTIMATE')
}
label('topleft',paste0('Masking (Weights)'),col='red',lwd=2)
if (!all(is.na(im_psf.nomask[[j]]))) {
catch=capture.output(magimage(im_psf.nomask[[j]],axes=FALSE))
} else {
image(axes=FALSE,matrix(1),col='white')
label('centre','NO ESTIMATE')
}
label('topleft',paste0('Without Mask Estimate'),col='red',lwd=2)
}
}
#Put the PSF onto the big stamp {{{
centre<-dim(im_psf[[1]])/2
#}}}
#Update bin definition {{{
#Used bin limits
bin<-rep(-1,length(pixval.all))
bin[which(pixval.all < bin.zero)]<-1
bin[which(pixval.all > max(bin.lim))]<-n.bins
for (i in 1:n.bins) {
bin[which(bin<0 & pixval.all < bin.lim[i])]<-i
}
#}}}
#Parse Parameter Space & Return{{{
if (!is.null(env)) { detach(env) }
if (exists('skyest')) {
assign("tmp.skyest" , skyest , envir = outenv)
}
assign("tmp.psf.id" , bin, envir = outenv)
assign("tmp.psfest.val",pixval.all, envir = outenv)
#assign("psfwidth" , psfwidth , envir = outenv)
#assign("sumpsf" , sumpsf , envir = outenv)
return=list(PSF=im_psf,WEIGHT=weight,NOMASK=im_psf.nomask,centre=centre)
#}}}
}
|
unzip("rprog_data_ProgAssignment3-data.zip", exdir = "hospitals")
dir('hospitals')
setwd('hospitals')
data <- read.csv("outcome-of-care-measures.csv", colClasses = 'character')
head(data)
ncol(data)
nrow(data)
names(data)
# 30-day mortality is in col 11
data[, 11] <- as.numeric(data[, 11]) # change from character to numeric
hist(data[, 11])
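# best.R / rankhospital.R / rankall.R are not included in this file. As a hedged
# illustration only, the sketch below shows roughly what best() is expected to do.
# The column numbers 11/17/23 for heart attack / heart failure / pneumonia are
# assumptions based on this dataset, and the helper name is hypothetical; the real
# best.R is sourced below and is what the tests actually call.
best_sketch <- function(state, outcome) {
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  cols <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
  if (!state %in% data$State) stop("invalid state")
  if (!outcome %in% names(cols)) stop("invalid outcome")
  sub <- data[data$State == state, ]
  rates <- suppressWarnings(as.numeric(sub[, cols[outcome]]))
  keep <- !is.na(rates)
  # ties are broken alphabetically by hospital name
  sub$Hospital.Name[keep][order(rates[keep], sub$Hospital.Name[keep])][1]
}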
#tests
source('best.R')
best("TX", "heart attack") # suggested answer is NA!
best("TX", "heart failure") #ok
best("MD", "heart attack") #ok
best("MD", "pneumonia") #ok
best("BB", "heart attack") #ok
best("NY", "hert attack") #ok
source('rankhospital.R')
rankhospital("TX", "heart failure", 4) # ok
rankhospital("MD", "heart attack", "worst") # ok
rankhospital("MN", "heart attack", 5000) # ok
source('rankall.R')
head(rankall("heart attack", 20), 10) # ok
tail(rankall("pneumonia", "worst"), 3) # doesn't work
tail(rankall("heart failure"), 10) # almost
|
/assignment 3.R
|
no_license
|
PawFran/r-programming
|
R
| false | false | 962 |
r
|
unzip("rprog_data_ProgAssignment3-data.zip", exdir = "hospitals")
dir('hospitals')
setwd('hospitals')
data <- read.csv("outcome-of-care-measures.csv", colClasses = 'character')
head(data)
ncol(data)
nrow(data)
names(data)
# 30-day mortality is in col 11
data[, 11] <- as.numeric(data[, 11]) # change from character to numeric
hist(data[, 11])
#tests
source('best.R')
best("TX", "heart attack") # suggested answer is NA!
best("TX", "heart failure") #ok
best("MD", "heart attack") #ok
best("MD", "pneumonia") #ok
best("BB", "heart attack") #ok
best("NY", "hert attack") #ok
source('rankhospital.R')
rankhospital("TX", "heart failure", 4) # ok
rankhospital("MD", "heart attack", "worst") # ok
rankhospital("MN", "heart attack", 5000) # ok
source('rankall.R')
head(rankall("heart attack", 20), 10) # ok
tail(rankall("pneumonia", "worst"), 3) # doesn't work
tail(rankall("heart failure"), 10) # almost
|
library(survcomp)
library(genefu)
# censor time in years#
censorTime <- 10
args <- (commandArgs(TRUE))
if(length(args)==0){
dataSets <- c('tothill2008')
}else{
dataSets <- NULL
for(i in 1:length(args)){
if(i == 1){
saveres <- args[[i]]
} else {
dataSets <- c( dataSets, args[[i]] )
}
}
}
setwd(sprintf("/common/projects/trisch/Ovarian_cancer/%s", dataSets))
load(sprintf("%s.RData", substring(dataSets, 1 , nchar(dataSets)-4 )))
#load(sprintf("%s_analyze.RData", substring(dataSets, 1 , (nchar(dataSets)-4) )))
setwd(sprintf("%s", saveres))
load("sig_stab_res.RData")
#survival data
#survd <- censor.time(surv.time=demo[ ,"t.os"] / 365, surv.event=demo[ ,"e.os"], time.cens=censorTime)
####### Subtype 1 ############
sig.size.s1 <- as.numeric(substring( names(which.max(sizeStab.s1$kuncheva[30:length(sizeStab.s1$kuncheva)])),6) )
sig.size.s1.k <- as.numeric(substring( names(which.max(sizeStab.s1$kuncheva[30:length(sizeStab.s1$kuncheva)])),6) )
sig.size.s1.d <- as.numeric(substring( names(which.max(sizeStab.s1$davis[30:length(sizeStab.s1$davis)])),6) )
pdf(sprintf("%s_sig_size_s1_k.pdf", dataSets ), width=10, height=10)
plot(y=sizeStab.s1$kuncheva, x=as.numeric(substring(names(sizeStab.s1$kuncheva), 6 )), col="blue", type = "o")
legend(x="topright", paste("Optimal signature size =",sig.size.s1.k, sep=" ") )
abline( v=sig.size.s1.k)
dev.off()
pdf(sprintf("%s_sig_size_s1_d.pdf", dataSets ), width=10, height=10)
plot(y=sizeStab.s1$davis, x=as.numeric(substring(names(sizeStab.s1$davis), 6 )), col="blue", type = "o")
legend(x="topright", paste("Optimal signature size =",sig.size.s1.d, sep=" ") )
abline( v=sig.size.s1.d)
dev.off()
sig.probes.s1 <- ranking.sel.full.s1[1:sig.size.s1, "probe"]
sig.s1 <- cbind("probe"=sig.probes.s1, "ensembl.id"=annot[sig.probes.s1, "ensembl.id"], "coefficient"=sign(as.numeric(ranking.sel.full.s1[1:sig.size.s1, "c.index"]) - 0.5))
sig.probes.s1.full <- ranking.sel.full.s1[ , "probe"]
sig.s1.full <- cbind("probe"=sig.probes.s1.full, "ensembl.id"=annot[sig.probes.s1.full, "ensembl.id"], "coefficient"=sign(as.numeric(ranking.sel.full.s1[ , "c.index"]) - 0.5))
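# Hedged illustration (not part of the original pipeline in this script): a
# GENIUS-style risk score is typically a (sign-)weighted average of standardised
# expression values over the signature probes. 'expr' is assumed to be a
# probes-by-samples matrix with rownames matching sig[, "probe"]; the helper name
# and exact scaling are illustrative only.
score_from_sig <- function(expr, sig) {
  probes <- intersect(rownames(expr), sig[, "probe"])
  w <- as.numeric(sig[match(probes, sig[, "probe"]), "coefficient"])
  x <- t(scale(t(expr[probes, , drop = FALSE]))) # standardise each probe across samples
  drop(crossprod(x, w)) / length(probes) # one score per sample
}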
####### Subtype 2 #############
sig.size.s2 <- as.numeric(substring( names(which.max(sizeStab.s2$kuncheva[30:length(sizeStab.s2$kuncheva)])),6) )
sig.size.s2.k <- as.numeric(substring( names(which.max(sizeStab.s2$kuncheva[30:length(sizeStab.s2$kuncheva)])),6) )
sig.size.s2.d <- as.numeric(substring( names(which.max(sizeStab.s2$davis[30:length(sizeStab.s2$davis)])),6) )
pdf(sprintf("%s_sig_size_s2_k.pdf", dataSets ), width=10, height=10)
plot(y=sizeStab.s2$kuncheva, x=as.numeric(substring(names(sizeStab.s2$kuncheva), 6 )), col="blue", type = "o")
legend(x="topright", paste("Optimal signature size =",sig.size.s2.k, sep=" ") )
abline( v=sig.size.s2.k)
dev.off()
pdf(sprintf("%s_sig_size_s2_d.pdf", dataSets ), width=10, height=10)
plot(y=sizeStab.s2$davis, x=as.numeric(substring(names(sizeStab.s2$davis), 6 )), col="blue", type = "o")
legend(x="topright", paste("Optimal signature size =",sig.size.s2.d, sep=" ") )
abline( v=sig.size.s2.d)
dev.off()
sig.probes.s2 <- ranking.sel.full.s2[1:sig.size.s2, "probe"]
sig.s2 <- cbind("probe"=sig.probes.s2, "ensembl.id"=annot[sig.probes.s2, "ensembl.id"], "coefficient"=sign(as.numeric(ranking.sel.full.s2[1:sig.size.s2, "c.index"]) - 0.5))
sig.probes.s2.full <- ranking.sel.full.s2[ , "probe"]
sig.s2.full <- cbind("probe"=sig.probes.s2.full, "ensembl.id"=annot[sig.probes.s2.full, "ensembl.id"], "coefficient"=sign(as.numeric(ranking.sel.full.s2[ , "c.index"]) - 0.5))
save(list=c("sig.s1", "sig.s2", "sig.s2.full", "sig.s1.full"), compress=TRUE, file="gene_sigs.RData")
|
/r_code/stem_like/GENIUS_gene_sig_extract.R
|
no_license
|
xulijunji/GENIUS_ovarian
|
R
| false | false | 3,720 |
r
|
library(survcomp)
library(genefu)
# censor time in years#
censorTime <- 10
args <- (commandArgs(TRUE))
if(length(args)==0){
dataSets <- c('tothill2008')
}else{
dataSets <- NULL
for(i in 1:length(args)){
if(i == 1){
saveres <- args[[i]]
} else {
dataSets <- c( dataSets, args[[i]] )
}
}
}
setwd(sprintf("/common/projects/trisch/Ovarian_cancer/%s", dataSets))
load(sprintf("%s.RData", substring(dataSets, 1 , nchar(dataSets)-4 )))
#load(sprintf("%s_analyze.RData", substring(dataSets, 1 , (nchar(dataSets)-4) )))
setwd(sprintf("%s", saveres))
load("sig_stab_res.RData")
#survival data
#survd <- censor.time(surv.time=demo[ ,"t.os"] / 365, surv.event=demo[ ,"e.os"], time.cens=censorTime)
####### Subtype 1 ############
sig.size.s1 <- as.numeric(substring( names(which.max(sizeStab.s1$kuncheva[30:length(sizeStab.s1$kuncheva)])),6) )
sig.size.s1.k <- as.numeric(substring( names(which.max(sizeStab.s1$kuncheva[30:length(sizeStab.s1$kuncheva)])),6) )
sig.size.s1.d <- as.numeric(substring( names(which.max(sizeStab.s1$davis[30:length(sizeStab.s1$davis)])),6) )
pdf(sprintf("%s_sig_size_s1_k.pdf", dataSets ), width=10, height=10)
plot(y=sizeStab.s1$kuncheva, x=as.numeric(substring(names(sizeStab.s1$kuncheva), 6 )), col="blue", type = "o")
legend(x="topright", paste("Optimal signature size =",sig.size.s1.k, sep=" ") )
abline( v=sig.size.s1.k)
dev.off()
pdf(sprintf("%s_sig_size_s1_d.pdf", dataSets ), width=10, height=10)
plot(y=sizeStab.s1$davis, x=as.numeric(substring(names(sizeStab.s1$davis), 6 )), col="blue", type = "o")
legend(x="topright", paste("Optimal signature size =",sig.size.s1.d, sep=" ") )
abline( v=sig.size.s1.d)
dev.off()
sig.probes.s1 <- ranking.sel.full.s1[1:sig.size.s1, "probe"]
sig.s1 <- cbind("probe"=sig.probes.s1, "ensembl.id"=annot[sig.probes.s1, "ensembl.id"], "coefficient"=sign(as.numeric(ranking.sel.full.s1[1:sig.size.s1, "c.index"]) - 0.5))
sig.probes.s1.full <- ranking.sel.full.s1[ , "probe"]
sig.s1.full <- cbind("probe"=sig.probes.s1.full, "ensembl.id"=annot[sig.probes.s1.full, "ensembl.id"], "coefficient"=sign(as.numeric(ranking.sel.full.s1[ , "c.index"]) - 0.5))
####### Subtype 2 #############
sig.size.s2 <- as.numeric(substring( names(which.max(sizeStab.s2$kuncheva[30:length(sizeStab.s2$kuncheva)])),6) )
sig.size.s2.k <- as.numeric(substring( names(which.max(sizeStab.s2$kuncheva[30:length(sizeStab.s2$kuncheva)])),6) )
sig.size.s2.d <- as.numeric(substring( names(which.max(sizeStab.s2$davis[30:length(sizeStab.s2$davis)])),6) )
pdf(sprintf("%s_sig_size_s2_k.pdf", dataSets ), width=10, height=10)
plot(y=sizeStab.s2$kuncheva, x=as.numeric(substring(names(sizeStab.s2$kuncheva), 6 )), col="blue", type = "o")
legend(x="topright", paste("Optimal signature size =",sig.size.s2.k, sep=" ") )
abline( v=sig.size.s2.k)
dev.off()
pdf(sprintf("%s_sig_size_s2_d.pdf", dataSets ), width=10, height=10)
plot(y=sizeStab.s2$davis, x=as.numeric(substring(names(sizeStab.s2$davis), 6 )), col="blue", type = "o")
legend(x="topright", paste("Optimal signature size =",sig.size.s2.d, sep=" ") )
abline( v=sig.size.s2.d)
dev.off()
sig.probes.s2 <- ranking.sel.full.s2[1:sig.size.s2, "probe"]
sig.s2 <- cbind("probe"=sig.probes.s2, "ensembl.id"=annot[sig.probes.s2, "ensembl.id"], "coefficient"=sign(as.numeric(ranking.sel.full.s2[1:sig.size.s2, "c.index"]) - 0.5))
sig.probes.s2.full <- ranking.sel.full.s2[ , "probe"]
sig.s2.full <- cbind("probe"=sig.probes.s2.full, "ensembl.id"=annot[sig.probes.s2.full, "ensembl.id"], "coefficient"=sign(as.numeric(ranking.sel.full.s2[ , "c.index"]) - 0.5))
save(list=c("sig.s1", "sig.s2", "sig.s2.full", "sig.s1.full"), compress=TRUE, file="gene_sigs.RData")
|
# Similar to single picto but has more flexibility with text labels.
# Arguments are designed to match with PrettyNumber
#' @importFrom verbs Sum
iconsWithText <- function (x,
total.icons = NA,
image = "star",
base.image = "",
is.custom.url = FALSE,
number.rows = NA,
number.cols = NA,
width.height.ratio = 1,
layout = NA,
scale = 1,
maximum.value = NA,
hide.base.image = FALSE,
fill.direction = "fromleft",
fill.icon.color = "black",
base.icon.color = "",
background.color = "transparent", # background.fill.opacity not supported
auto.size = TRUE,
icon.width = 50,
pad.row = 0,
pad.col = 0,
margin = 0,
margin.top = margin, # not used
margin.right = margin,
margin.bottom = margin,
margin.left = margin,
global.font.family = "Arial",
global.font.color = rgb(44, 44, 44, maxColorValue = 255),
text.overlay = "",
text.overlay.halign = "center",
text.overlay.valign = "middle",
text.overlay.pad = 0.0,
                           text.overlay.xpad = 0.0,
text.overlay.font.family = global.font.family,
text.overlay.font.color = global.font.color,
text.overlay.font.size = 10,
text.overlay.font.weight = "normal",
text.below = "",
text.below.halign = "center",
text.below.pad = 0.0,
text.below.xpad = 0.0,
text.below.font.family = global.font.family,
text.below.font.color = global.font.color,
text.below.font.size = 10,
text.below.font.weight = "normal",
text.above = "",
text.above.halign = "center",
text.above.pad = 0.0,
text.above.xpad = 0.0,
text.above.font.family = global.font.family,
text.above.font.color = global.font.color,
text.above.font.size = 10,
text.above.font.weight = "normal",
print.config = FALSE,
x.limit = 1000,
...)
{
if (!(length(x) == 1 && x >= 0))
stop("Input data must be a single positive number\n")
if (scale <= 0 && is.na(maximum.value))
stop("Scale must be greater than zero\n")
if (isTRUE(grepl("%", attr(scale, "statistic"))))
scale <- scale/100
if (!is.na(maximum.value) && scale != 1)
warning("Parameter scale overridden by maximum value\n")
if (!is.na(total.icons) && total.icons <= 0)
stop("Total icons must be greater than zero\n")
if (!is.na(maximum.value))
{
if (maximum.value <= 0)
stop("Maximum value must be greater than zero\n")
if (maximum.value < x)
stop("Input data cannot be greater than 'Maximum value'. ",
"Change 'Display' to 'Pictograph (repeated icons)' to show more than 1 icon.\n")
if (is.na(total.icons))
total.icons <- maximum.value
scale <- maximum.value/total.icons
}
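    # Worked example (illustrative only): with x = 30, maximum.value = 120 and
    # total.icons left as NA, total.icons becomes 120 and scale becomes
    # 120/120 = 1, so further down x/scale = 30 and the filled proportion is
    # 30/120 = 0.25 of the icon grid.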
# Some parameter substitutions for R GUI Controls
if (is.custom.url)
{
fill.icon.color <- ""
base.icon.color <- ""
hide.base.image <- nchar(base.image) == 0
} else
{
image <- gsub(" ", "", tolower(image))
}
fill.direction <- gsub(" ", "", tolower(fill.direction))
if (auto.size)
icon.width <- 50
if (!is.na(total.icons) && total.icons == 1)
{
# Parameters not supplied in Pictographs - Single
layout <- "Width-to-height ratio"
pad.row <- 0
pad.col <- 0
}
if (!is.na(layout))
{
if (layout != "Width-to-height ratio")
width.height.ratio = 1
if (layout != "Number of rows")
number.rows = NA
if (layout != "Number of columns")
number.cols = NA
}
# Determine plot values
if (!is.na(x.limit) && x/scale > x.limit)
{
scale <- scale * 10^{floor(log10(x/scale)) - 1}
warning("The input value is too large to plot, and the Scale has been set to ", scale, ". Consider entering a larger Scale value in the inputs.\n")
}
x <- x/scale
if (is.na(total.icons))
total.icons <- ceiling(x)
    if (length(total.icons) != 1 || total.icons <= 0)
stop("The total icons must be a single numeric value and greater than zero\n")
if (!is.na(number.rows) && (number.rows <= 0 || number.rows != ceiling(number.rows)))
stop("The number of rows must be a positive integer\n")
if (!is.na(number.rows))
number.rows <- min(number.rows, total.icons)
if (!is.na(number.cols) && (number.cols <= 0 || number.cols != ceiling(number.cols)))
stop("The number of columns must be a positive integer\n")
if (!is.na(number.cols))
number.cols <- min(number.cols, total.icons)
if (width.height.ratio <= 0)
stop("The width-height ratio must be greater than zero\n")
if (icon.width <= 0)
stop("icon width must be greater than zero\n")
prop <- x/total.icons
if (prop < 0 | prop > 1)
stop("Input data must be between 0 and total icons\n")
if (round(total.icons) != total.icons)
stop("The number of total icons must be an integer\n")
# Determine layout based on which parameters are supplied
layout.str <- ""
icon.WHratio <- if (is.custom.url) getWidthHeightRatio(image) * (1 + pad.col) / (1 + pad.row)
else imageWHRatio[image] * (1 + pad.col) / (1 + pad.row)
if (!is.na(number.rows) && is.na(number.cols))
{
layout.str <- paste(",\"numRows\":", number.rows, sep="")
number.cols <- ceiling(total.icons/number.rows)
} else if (!is.na(number.cols))
{
layout.str <- paste(",\"numCols\":", number.cols, sep="")
number.rows <- ceiling(total.icons/number.cols)
} else
{
number.rows <- max(1, round(sqrt(icon.WHratio/width.height.ratio * total.icons)))
if (number.rows > total.icons)
number.rows <- total.icons
number.cols <- ceiling(total.icons/number.rows)
layout.str <- paste(",\"numRows\":", number.rows, sep="")
}
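    # Worked example (illustrative only): for total.icons = 20 with
    # width.height.ratio = 1 and an icon aspect ratio near 1, number.rows =
    # round(sqrt(1 * 20)) = 4 and number.cols = ceiling(20/4) = 5, i.e. a 4 x 5
    # grid; supplying number.rows or number.cols explicitly bypasses this branch.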
image.type <- "url"
if (image %in% c("circle", "square"))
image.type <- image
base.image.str <- ""
if (!hide.base.image)
{
if (nchar(base.icon.color) > 0)
base.icon.color <- paste(base.icon.color, ":", sep="")
base.image.url <- if (is.custom.url)
{
checkImageUrl(base.image)
base.image
}
else
imageURL[image]
base.image.str <- if (nchar(base.image.url) == 0 && is.custom.url) ""
else paste(",\"baseImage\":\"", image.type, ":", base.icon.color, base.image.url, "\"", sep="")
}
image.url <- if (is.custom.url) image else imageURL[image]
variable.image <- if (is.custom.url)
paste(image.type, ":", fill.direction, ":", image.url, sep="")
else
paste(image.type, ":", fill.direction, ":", fill.icon.color, ":", image.url, sep="")
# size of pictograph output
dim.str <- ""
icon.size.str <- ""
if (auto.size)
dim.str <- "\"rowHeights\":[\"proportion:1\"], \"colWidths\":[\"flexible:graphic\"]"
else
{
dim.str <- "\"rowHeights\":[\"fixedsize:graphic\"], \"colWidths\":[\"fixedsize:graphic\"]"
icon.size.str <- paste0(",\"imageWidth\":", icon.width)
}
# Text labels
label.overlay.str <- ""
label.above.str <- ""
label.below.str <- ""
# Adjust margins to fit text labels
# padding format: top right bottom left
pad.above.left <- pad.above.right <- pad.above.top <- pad.above.bottom <- 0
pad.below.left <- pad.below.right <- pad.below.top <- pad.below.bottom <- 0
if (any(nzchar(text.above)))
{
if (text.above.halign == "left")
pad.above.left <- text.above.xpad
if (text.above.halign == "right")
pad.above.right <- text.above.xpad
}
if (any(nzchar(text.below)))
{
if (text.below.halign == "left")
pad.below.left <- text.below.xpad
if (text.below.halign == "right")
pad.below.right <- text.below.xpad
}
margin.left <- margin.left + max(0, -pad.above.left, -pad.below.left)
margin.right <- margin.right + max(0, pad.above.right, pad.below.right)
if (any(nzchar(text.above)))
label.above.str <- sprintf(paste0(", \"table-header\":{\"padding\": \"%f %f %f %f\", ",
"\"text\":\"%s\", \"font-size\":\"%fpx\", \"font-family\":\"%s\", ",
"\"font-color\":\"%s\", \"font-weight\":\"%s\", ",
"\"horizontal-align\":\"%s\", \"vertical-align\":\"top\"}"),
margin.top + 1, margin.right - pad.above.right + 1,
max(0, text.above.pad) + 1, margin.left + pad.above.left + 1,
cleanPictographLabels(text.above), text.above.font.size, text.above.font.family,
text.above.font.color, text.above.font.weight, text.above.halign)
if (any(nzchar(text.below)))
label.below.str <- sprintf(paste0(", \"table-footer\":{\"padding\": \"%f %f %f %f\", ",
"\"text\":\"%s\", \"font-size\":\"%fpx\", \"font-family\":\"%s\", ",
"\"font-color\":\"%s\", \"font-weight\":\"%s\", ",
"\"horizontal-align\":\"%s\", \"vertical-align\":\"bottom\"}"),
max(text.below.pad, 0) + 1, margin.right - pad.below.right + 1,
margin.bottom + 1, margin.left + pad.below.left + 1,
cleanPictographLabels(text.below), text.below.font.size, text.below.font.family,
text.below.font.color, text.below.font.weight, text.below.halign)
if (any(nzchar(text.overlay)))
{
xpos <- if (text.overlay.halign == "left") 0
else if (text.overlay.halign == "right") number.cols
else number.cols/2
ypos <- if (text.overlay.valign == "top") 0
else if (text.overlay.valign == "bottom") number.rows
else number.rows/2
label.overlay.str <- sprintf(paste0(",\"floatingLabels\":[{\"position\":\"%f:%f\", ",
"\"text\":\"%s\", \"font-size\":\"%fpx\", \"font-family\":\"%s\", ",
"\"font-color\":\"%s\", \"font-weight\":\"%s\", \"horizontal-align\":\"%s\"}]"),
ypos, xpos, cleanPictographLabels(text.overlay), text.overlay.font.size, text.overlay.font.family,
text.overlay.font.color, text.overlay.font.weight, text.overlay.halign)
}
pad.around.icons <- sprintf(",\"padding\":\"%f %f %f %f\"",
margin.top, margin.right, margin.bottom, margin.left)
json.string <- paste0("{\"table\": {", dim.str,
",\"rows\":[[{\"type\":\"graphic\", \"value\":{",
"\"proportion\":", prop, pad.around.icons,
",\"numImages\":", total.icons,
label.overlay.str,
icon.size.str,
layout.str,
",\"rowGutter\":", pad.row,
",\"columnGutter\":", pad.col,
",\"variableImage\":\"", variable.image, "\"", base.image.str, "}}]]}",
label.above.str, label.below.str,
",\"background-color\":\"", background.color, "\"}")
if (print.config)
cat(json.string)
res <- graphic(json.string)
class(res) <- c(class(res), "visualization-selector")
return(res)
}
cleanPictographLabels <- function(x)
{
# New line characters were causing errors in the JSON
# Note these can be coded as \n or \r
x <- gsub("\\s", " ", x)
# Escape backslashes in labels
x <- gsub("\\", "\\\\", x, fixed = TRUE)
# These characters used to be shown as text but that is
# probably not what the user wants to see
    x <- gsub("&nbsp;", " ", x)
x <- gsub('"', '\\"', x, fixed = TRUE)
return(x)
}
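# Hedged usage sketch (kept inside `if (FALSE)` so nothing runs when the package
# is loaded; the argument values are illustrative, not taken from any real GUI):
if (FALSE) {
    # Fill 3.5 of 5 star icons, with a small caption underneath.
    iconsWithText(x = 3.5,
                  maximum.value = 5,
                  image = "star",
                  fill.icon.color = "orange",
                  text.below = "3.5 out of 5")
}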
|
/R/iconswithtext.R
|
no_license
|
Displayr/flipPictographs
|
R
| false | false | 12,690 |
r
|
# Similar to single picto but has more flexibility with text labels.
# Arguments are designed to match with PrettyNumber
#' @importFrom verbs Sum
iconsWithText <- function (x,
total.icons = NA,
image = "star",
base.image = "",
is.custom.url = FALSE,
number.rows = NA,
number.cols = NA,
width.height.ratio = 1,
layout = NA,
scale = 1,
maximum.value = NA,
hide.base.image = FALSE,
fill.direction = "fromleft",
fill.icon.color = "black",
base.icon.color = "",
background.color = "transparent", # background.fill.opacity not supported
auto.size = TRUE,
icon.width = 50,
pad.row = 0,
pad.col = 0,
margin = 0,
margin.top = margin, # not used
margin.right = margin,
margin.bottom = margin,
margin.left = margin,
global.font.family = "Arial",
global.font.color = rgb(44, 44, 44, maxColorValue = 255),
text.overlay = "",
text.overlay.halign = "center",
text.overlay.valign = "middle",
text.overlay.pad = 0.0,
                           text.overlay.xpad = 0.0,
text.overlay.font.family = global.font.family,
text.overlay.font.color = global.font.color,
text.overlay.font.size = 10,
text.overlay.font.weight = "normal",
text.below = "",
text.below.halign = "center",
text.below.pad = 0.0,
text.below.xpad = 0.0,
text.below.font.family = global.font.family,
text.below.font.color = global.font.color,
text.below.font.size = 10,
text.below.font.weight = "normal",
text.above = "",
text.above.halign = "center",
text.above.pad = 0.0,
text.above.xpad = 0.0,
text.above.font.family = global.font.family,
text.above.font.color = global.font.color,
text.above.font.size = 10,
text.above.font.weight = "normal",
print.config = FALSE,
x.limit = 1000,
...)
{
if (!(length(x) == 1 && x >= 0))
stop("Input data must be a single positive number\n")
if (scale <= 0 && is.na(maximum.value))
stop("Scale must be greater than zero\n")
if (isTRUE(grepl("%", attr(scale, "statistic"))))
scale <- scale/100
if (!is.na(maximum.value) && scale != 1)
warning("Parameter scale overridden by maximum value\n")
if (!is.na(total.icons) && total.icons <= 0)
stop("Total icons must be greater than zero\n")
if (!is.na(maximum.value))
{
if (maximum.value <= 0)
stop("Maximum value must be greater than zero\n")
if (maximum.value < x)
stop("Input data cannot be greater than 'Maximum value'. ",
"Change 'Display' to 'Pictograph (repeated icons)' to show more than 1 icon.\n")
if (is.na(total.icons))
total.icons <- maximum.value
scale <- maximum.value/total.icons
}
# Some parameter substitutions for R GUI Controls
if (is.custom.url)
{
fill.icon.color <- ""
base.icon.color <- ""
hide.base.image <- nchar(base.image) == 0
} else
{
image <- gsub(" ", "", tolower(image))
}
fill.direction <- gsub(" ", "", tolower(fill.direction))
if (auto.size)
icon.width <- 50
if (!is.na(total.icons) && total.icons == 1)
{
# Parameters not supplied in Pictographs - Single
layout <- "Width-to-height ratio"
pad.row <- 0
pad.col <- 0
}
if (!is.na(layout))
{
if (layout != "Width-to-height ratio")
width.height.ratio = 1
if (layout != "Number of rows")
number.rows = NA
if (layout != "Number of columns")
number.cols = NA
}
# Determine plot values
if (!is.na(x.limit) && x/scale > x.limit)
{
scale <- scale * 10^{floor(log10(x/scale)) - 1}
warning("The input value is too large to plot, and the Scale has been set to ", scale, ". Consider entering a larger Scale value in the inputs.\n")
}
x <- x/scale
if (is.na(total.icons))
total.icons <- ceiling(x)
    if (length(total.icons) != 1 || total.icons <= 0)
stop("The total icons must be a single numeric value and greater than zero\n")
if (!is.na(number.rows) && (number.rows <= 0 || number.rows != ceiling(number.rows)))
stop("The number of rows must be a positive integer\n")
if (!is.na(number.rows))
number.rows <- min(number.rows, total.icons)
if (!is.na(number.cols) && (number.cols <= 0 || number.cols != ceiling(number.cols)))
stop("The number of columns must be a positive integer\n")
if (!is.na(number.cols))
number.cols <- min(number.cols, total.icons)
if (width.height.ratio <= 0)
stop("The width-height ratio must be greater than zero\n")
if (icon.width <= 0)
stop("icon width must be greater than zero\n")
prop <- x/total.icons
if (prop < 0 | prop > 1)
stop("Input data must be between 0 and total icons\n")
if (round(total.icons) != total.icons)
stop("The number of total icons must be an integer\n")
# Determine layout based on which parameters are supplied
layout.str <- ""
icon.WHratio <- if (is.custom.url) getWidthHeightRatio(image) * (1 + pad.col) / (1 + pad.row)
else imageWHRatio[image] * (1 + pad.col) / (1 + pad.row)
if (!is.na(number.rows) && is.na(number.cols))
{
layout.str <- paste(",\"numRows\":", number.rows, sep="")
number.cols <- ceiling(total.icons/number.rows)
} else if (!is.na(number.cols))
{
layout.str <- paste(",\"numCols\":", number.cols, sep="")
number.rows <- ceiling(total.icons/number.cols)
} else
{
number.rows <- max(1, round(sqrt(icon.WHratio/width.height.ratio * total.icons)))
if (number.rows > total.icons)
number.rows <- total.icons
number.cols <- ceiling(total.icons/number.rows)
layout.str <- paste(",\"numRows\":", number.rows, sep="")
}
image.type <- "url"
if (image %in% c("circle", "square"))
image.type <- image
base.image.str <- ""
if (!hide.base.image)
{
if (nchar(base.icon.color) > 0)
base.icon.color <- paste(base.icon.color, ":", sep="")
base.image.url <- if (is.custom.url)
{
checkImageUrl(base.image)
base.image
}
else
imageURL[image]
base.image.str <- if (nchar(base.image.url) == 0 && is.custom.url) ""
else paste(",\"baseImage\":\"", image.type, ":", base.icon.color, base.image.url, "\"", sep="")
}
image.url <- if (is.custom.url) image else imageURL[image]
variable.image <- if (is.custom.url)
paste(image.type, ":", fill.direction, ":", image.url, sep="")
else
paste(image.type, ":", fill.direction, ":", fill.icon.color, ":", image.url, sep="")
# size of pictograph output
dim.str <- ""
icon.size.str <- ""
if (auto.size)
dim.str <- "\"rowHeights\":[\"proportion:1\"], \"colWidths\":[\"flexible:graphic\"]"
else
{
dim.str <- "\"rowHeights\":[\"fixedsize:graphic\"], \"colWidths\":[\"fixedsize:graphic\"]"
icon.size.str <- paste0(",\"imageWidth\":", icon.width)
}
# Text labels
label.overlay.str <- ""
label.above.str <- ""
label.below.str <- ""
# Adjust margins to fit text labels
# padding format: top right bottom left
pad.above.left <- pad.above.right <- pad.above.top <- pad.above.bottom <- 0
pad.below.left <- pad.below.right <- pad.below.top <- pad.below.bottom <- 0
if (any(nzchar(text.above)))
{
if (text.above.halign == "left")
pad.above.left <- text.above.xpad
if (text.above.halign == "right")
pad.above.right <- text.above.xpad
}
if (any(nzchar(text.below)))
{
if (text.below.halign == "left")
pad.below.left <- text.below.xpad
if (text.below.halign == "right")
pad.below.right <- text.below.xpad
}
margin.left <- margin.left + max(0, -pad.above.left, -pad.below.left)
margin.right <- margin.right + max(0, pad.above.right, pad.below.right)
if (any(nzchar(text.above)))
label.above.str <- sprintf(paste0(", \"table-header\":{\"padding\": \"%f %f %f %f\", ",
"\"text\":\"%s\", \"font-size\":\"%fpx\", \"font-family\":\"%s\", ",
"\"font-color\":\"%s\", \"font-weight\":\"%s\", ",
"\"horizontal-align\":\"%s\", \"vertical-align\":\"top\"}"),
margin.top + 1, margin.right - pad.above.right + 1,
max(0, text.above.pad) + 1, margin.left + pad.above.left + 1,
cleanPictographLabels(text.above), text.above.font.size, text.above.font.family,
text.above.font.color, text.above.font.weight, text.above.halign)
if (any(nzchar(text.below)))
label.below.str <- sprintf(paste0(", \"table-footer\":{\"padding\": \"%f %f %f %f\", ",
"\"text\":\"%s\", \"font-size\":\"%fpx\", \"font-family\":\"%s\", ",
"\"font-color\":\"%s\", \"font-weight\":\"%s\", ",
"\"horizontal-align\":\"%s\", \"vertical-align\":\"bottom\"}"),
max(text.below.pad, 0) + 1, margin.right - pad.below.right + 1,
margin.bottom + 1, margin.left + pad.below.left + 1,
cleanPictographLabels(text.below), text.below.font.size, text.below.font.family,
text.below.font.color, text.below.font.weight, text.below.halign)
if (any(nzchar(text.overlay)))
{
xpos <- if (text.overlay.halign == "left") 0
else if (text.overlay.halign == "right") number.cols
else number.cols/2
ypos <- if (text.overlay.valign == "top") 0
else if (text.overlay.valign == "bottom") number.rows
else number.rows/2
label.overlay.str <- sprintf(paste0(",\"floatingLabels\":[{\"position\":\"%f:%f\", ",
"\"text\":\"%s\", \"font-size\":\"%fpx\", \"font-family\":\"%s\", ",
"\"font-color\":\"%s\", \"font-weight\":\"%s\", \"horizontal-align\":\"%s\"}]"),
ypos, xpos, cleanPictographLabels(text.overlay), text.overlay.font.size, text.overlay.font.family,
text.overlay.font.color, text.overlay.font.weight, text.overlay.halign)
}
pad.around.icons <- sprintf(",\"padding\":\"%f %f %f %f\"",
margin.top, margin.right, margin.bottom, margin.left)
json.string <- paste0("{\"table\": {", dim.str,
",\"rows\":[[{\"type\":\"graphic\", \"value\":{",
"\"proportion\":", prop, pad.around.icons,
",\"numImages\":", total.icons,
label.overlay.str,
icon.size.str,
layout.str,
",\"rowGutter\":", pad.row,
",\"columnGutter\":", pad.col,
",\"variableImage\":\"", variable.image, "\"", base.image.str, "}}]]}",
label.above.str, label.below.str,
",\"background-color\":\"", background.color, "\"}")
if (print.config)
cat(json.string)
res <- graphic(json.string)
class(res) <- c(class(res), "visualization-selector")
return(res)
}
cleanPictographLabels <- function(x)
{
# New line characters were causing errors in the JSON
# Note these can be coded as \n or \r
x <- gsub("\\s", " ", x)
# Escape backslashes in labels
x <- gsub("\\", "\\\\", x, fixed = TRUE)
# These characters used to be shown as text but that is
# probably not what the user wants to see
    x <- gsub("&nbsp;", " ", x)
x <- gsub('"', '\\"', x, fixed = TRUE)
return(x)
}
|
library(tidyverse)
murders <- read_csv("data/murders.csv")
murders <- murders %>% mutate(region = factor(region), rate=total/population*10^5)
save(murders,file = "rda/murders.rda")
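# Quick sanity check of the saved object (illustrative only; runs interactively):
if (interactive()) {
  load("rda/murders.rda")
  murders %>% group_by(region) %>%
    summarize(region_rate = sum(total) / sum(population) * 10^5)
}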
|
/wrangle-data.R
|
no_license
|
dlwlals0101/murders
|
R
| false | false | 180 |
r
|
library(tidyverse)
murders <- read_csv("data/murders.csv")
murders <- murders %>% mutate(region = factor(region), rate=total/population*10^5)
save(murders,file = "rda/murders.rda")
|
Relocation section '\.rela\.plt' at offset .* contains 2 entries:
Offset Info Type Sym\.Value Sym\. Name \+ Addend
0008140c .*a4 R_SH_JMP_SLOT 00080c4c _sglobal \+ 0
00081410 .*a4 R_SH_JMP_SLOT 00000000 _sexternal \+ 0
Relocation section '\.rela\.dyn' at offset .* contains 4 entries:
Offset Info Type Sym\.Value Sym\. Name \+ Addend
00081800 000000a5 R_SH_RELATIVE +80c48
00080c30 .*01 R_SH_DIR32 00000000 ___GOTT_BASE__ \+ 0
00080c34 .*01 R_SH_DIR32 00000000 ___GOTT_INDEX__ \+ 0
00081414 .*a3 R_SH_GLOB_DAT 00081c00 x \+ 0
|
/external/binutils-2.38/ld/testsuite/ld-sh/vxworks1-lib.rd
|
permissive
|
zhmu/ananas
|
R
| false | false | 610 |
rd
|
Relocation section '\.rela\.plt' at offset .* contains 2 entries:
Offset Info Type Sym\.Value Sym\. Name \+ Addend
0008140c .*a4 R_SH_JMP_SLOT 00080c4c _sglobal \+ 0
00081410 .*a4 R_SH_JMP_SLOT 00000000 _sexternal \+ 0
Relocation section '\.rela\.dyn' at offset .* contains 4 entries:
Offset Info Type Sym\.Value Sym\. Name \+ Addend
00081800 000000a5 R_SH_RELATIVE +80c48
00080c30 .*01 R_SH_DIR32 00000000 ___GOTT_BASE__ \+ 0
00080c34 .*01 R_SH_DIR32 00000000 ___GOTT_INDEX__ \+ 0
00081414 .*a3 R_SH_GLOB_DAT 00081c00 x \+ 0
|
context("canvasXpress LegendPosition")
default_legend_position <- "right"
all_legend_positions <- c("topRight", "right", "bottomRight", "bottom", "bottomLeft", "left", "topLeft", "top")
inside_legend_only_positions <- c("topRight", "bottomRight", "bottomLeft", "topLeft")
segregated_legend_positions <- c("right", "bottom", "left", "top")
test_legend_positions <- setdiff(all_legend_positions, default_legend_position)
barplot_y <- read.table(system.file("extdata", "cX-basic-dat.txt.gz", package = "canvasXpress"), header = TRUE, sep = "\t", quote = "", row.names = 1, fill = TRUE, check.names = FALSE, stringsAsFactors = FALSE)
boxplot_y <- read.table(system.file("extdata", "cX-toothgrowth-dat.txt.gz", package = "canvasXpress"), header = TRUE, sep = "\t", quote = "", row.names = 1, fill = TRUE, check.names = FALSE, stringsAsFactors = FALSE)
boxplot_x <- read.table(system.file("extdata", "cX-toothgrowth-smp.txt.gz", package = "canvasXpress"), header = TRUE, sep = "\t", quote = "", row.names = 1, fill = TRUE, check.names = FALSE, stringsAsFactors = FALSE)
print_legend_pos_warning <- function() {
warning(paste("Legend position not set correctly for :", paste(c("topLeft", "bottomLeft", "bottomRight", "topRight"), collapse = ",")))
}
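# check_ui_test() comes from this package's testthat helpers and is not defined in
# this file. As a hedged sketch only, it presumably asserts that the rendered
# result is a canvasXpress htmlwidget, roughly along these lines (the sketch name
# is hypothetical and is not called by the tests below):
check_ui_test_sketch <- function(result) {
    testthat::expect_true(inherits(result, "canvasXpress"))
    testthat::expect_true(inherits(result, "htmlwidget"))
}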
test_that("scatterplot legendposition", {
y <- read.table(system.file("extdata", "cX-mtcars-dat.txt.gz", package = "canvasXpress"), header = TRUE, sep = "\t", quote = "", row.names = 1, fill = TRUE, check.names = FALSE, stringsAsFactors = FALSE)
legend_inside <- FALSE
for (legend_pos in test_legend_positions) {
result <- canvasXpress(
data = y,
asSampleFactors = list("cyl"),
colorBy = "cyl",
graphType = "Scatter2D",
stringVariableFactors = list("cyl"),
title = paste("Scatterplot - LegendPosition", legend_pos),
legendPosition = legend_pos,
legendInside = ifelse(legend_pos %in% inside_legend_only_positions, TRUE, legend_inside)
)
check_ui_test(result)
}
})
test_that("barplot legendposition", {
legend_inside <- FALSE
for (legend_pos in test_legend_positions) {
result <- canvasXpress(
data = barplot_y,
graphOrientation = "vertical",
graphType = "Bar",
title = paste("Barplot - LegendPosition", legend_pos),
legendPosition = legend_pos,
legendInside = ifelse(legend_pos %in% inside_legend_only_positions, TRUE, legend_inside)
)
check_ui_test(result)
}
})
test_that("barplot (segregated) legendposition", {
z <- data.frame(Plot = "Bar1", stringsAsFactors = F)
rownames(z) <- rownames(barplot_y)
legend_inside <- FALSE
for (legend_pos in segregated_legend_positions) {
result <- canvasXpress(
data = barplot_y,
varAnnot = z,
graphOrientation = "vertical",
graphType = "Bar",
segregateVariablesBy = list("Plot"),
title = paste("Barplot (segregated) - LegendPosition", legend_pos),
legendPosition = legend_pos,
legendInside = ifelse(legend_pos %in% inside_legend_only_positions, TRUE, legend_inside)
)
check_ui_test(result)
}
})
test_that("boxplot legendposition", {
legend_inside <- FALSE
for (legend_pos in test_legend_positions) {
result <- canvasXpress(
data = boxplot_y,
smpAnnot = boxplot_x,
colorBy = "dose",
graphOrientation = "vertical",
graphType = "Boxplot",
groupingFactors = list("dose"),
showLegend = TRUE,
smpTitle = "dose",
stringSampleFactors = list("dose"),
title = paste("Boxplot - LegendPosition", legend_pos),
legendPosition = legend_pos,
legendInside = ifelse(legend_pos %in% inside_legend_only_positions, TRUE, legend_inside)
)
check_ui_test(result)
}
})
test_that("boxplot (segregated) legendposition", {
z <- data.frame(Plot = "Box1", stringsAsFactors = F)
rownames(z) <- rownames(boxplot_y)
legend_inside <- FALSE
for (legend_pos in segregated_legend_positions) {
result <- canvasXpress(
data = boxplot_y,
smpAnnot = boxplot_x,
varAnnot = z,
colorBy = "dose",
graphOrientation = "vertical",
graphType = "Boxplot",
segregateVariablesBy = list("Plot"),
groupingFactors = list("dose"),
showLegend = TRUE,
smpTitle = "dose",
stringSampleFactors = list("dose"),
title = paste("Boxplot (segregated) - LegendPosition", legend_pos),
legendPosition = legend_pos,
legendInside = ifelse(legend_pos %in% inside_legend_only_positions, TRUE, legend_inside)
)
check_ui_test(result)
}
})
test_that("Scatterplot matrix legendposition", {
y <- read.table(system.file("extdata", "cX-irist-dat.txt.gz", package = "canvasXpress"), header = TRUE, sep = "\t", quote = "", row.names = 1, fill = TRUE, check.names = FALSE, stringsAsFactors = FALSE)
z <- read.table(system.file("extdata", "cX-irist-var.txt.gz", package = "canvasXpress"), header = TRUE, sep = "\t", quote = "", row.names = 1, fill = TRUE, check.names = FALSE, stringsAsFactors = FALSE)
legend_inside <- FALSE
for (legend_pos in segregated_legend_positions) {
result <- canvasXpress(
data = y,
varAnnot = z,
colorBy = "Species",
graphType = "Scatter2D",
scatterPlotMatrix = TRUE,
title = paste("Scatterplot matrix - LegendPosition", legend_pos),
legendPosition = legend_pos,
legendInside = ifelse(legend_pos %in% inside_legend_only_positions, TRUE, legend_inside)
)
check_ui_test(result)
}
})
test_that("dotplot legendposition", {
y <- read.table(system.file("extdata", "cX-iris-dat.txt.gz", package = "canvasXpress"), header = TRUE, sep = "\t", quote = "", row.names = 1, fill = TRUE, check.names = FALSE, stringsAsFactors = FALSE)
x <- read.table(system.file("extdata", "cX-iris-smp.txt.gz", package = "canvasXpress"), header = TRUE, sep = "\t", quote = "", row.names = 1, fill = TRUE, check.names = FALSE, stringsAsFactors = FALSE)
legend_inside <- TRUE
for (legend_pos in test_legend_positions) {
result <- canvasXpress(
data = y,
smpAnnot = x,
graphOrientation = "vertical",
graphType = "Dotplot",
afterRender = list(list("groupSamples", list("Species"))),
title = paste("Dotplot - LegendPosition", legend_pos),
legendPosition = legend_pos,
legendInside = ifelse(legend_pos %in% inside_legend_only_positions, TRUE, legend_inside)
)
check_ui_test(result)
}
})
test_that("heatmap legendposition", {
y <- read.table(system.file("extdata", "cX-multidimensionalheatmap-dat.txt.gz", package = "canvasXpress"), header = TRUE, sep = "\t", quote = "", row.names = 1, fill = TRUE, check.names = FALSE, stringsAsFactors = FALSE)
y2 <- read.table(system.file("extdata", "cX-multidimensionalheatmap-dat3.txt.gz", package = "canvasXpress"), header = TRUE, sep = "\t", quote = "", row.names = 1, fill = TRUE, check.names = FALSE, stringsAsFactors = FALSE)
legend_inside <- FALSE
for (legend_pos in test_legend_positions) {
result <- canvasXpress(
data = list(y = y, data2 = y2),
shapeBy = "Shape",
shapeByData = "data2",
graphType = "Heatmap",
title = paste("Heatmap - LegendPosition", legend_pos),
legendPosition = legend_pos,
legendInside = ifelse(legend_pos %in% inside_legend_only_positions, TRUE, legend_inside)
)
check_ui_test(result)
}
})
|
/tests/testthat/test-other-legend-position.R
|
no_license
|
kar-agg-gen/canvasXpress
|
R
| false | false | 8,930 |
r
|
context("canvasXpress LegendPosition")
default_legend_position <- "right"
all_legend_positions <- c("topRight", "right", "bottomRight", "bottom", "bottomLeft", "left", "topLeft", "top")
inside_legend_only_positions <- c("topRight", "bottomRight", "bottomLeft", "topLeft")
segregated_legend_positions <- c("right", "bottom", "left", "top")
test_legend_positions <- setdiff(all_legend_positions, default_legend_position)
barplot_y <- read.table(system.file("extdata", "cX-basic-dat.txt.gz", package = "canvasXpress"), header = TRUE, sep = "\t", quote = "", row.names = 1, fill = TRUE, check.names = FALSE, stringsAsFactors = FALSE)
boxplot_y <- read.table(system.file("extdata", "cX-toothgrowth-dat.txt.gz", package = "canvasXpress"), header = TRUE, sep = "\t", quote = "", row.names = 1, fill = TRUE, check.names = FALSE, stringsAsFactors = FALSE)
boxplot_x <- read.table(system.file("extdata", "cX-toothgrowth-smp.txt.gz", package = "canvasXpress"), header = TRUE, sep = "\t", quote = "", row.names = 1, fill = TRUE, check.names = FALSE, stringsAsFactors = FALSE)
print_legend_pos_warning <- function() {
warning(paste("Legend position not set correctly for :", paste(c("topLeft", "bottomLeft", "bottomRight", "topRight"), collapse = ",")))
}
test_that("scatterplot legendposition", {
y <- read.table(system.file("extdata", "cX-mtcars-dat.txt.gz", package = "canvasXpress"), header = TRUE, sep = "\t", quote = "", row.names = 1, fill = TRUE, check.names = FALSE, stringsAsFactors = FALSE)
legend_inside <- FALSE
for (legend_pos in test_legend_positions) {
result <- canvasXpress(
data = y,
asSampleFactors = list("cyl"),
colorBy = "cyl",
graphType = "Scatter2D",
stringVariableFactors = list("cyl"),
title = paste("Scatterplot - LegendPosition", legend_pos),
legendPosition = legend_pos,
legendInside = ifelse(legend_pos %in% inside_legend_only_positions, TRUE, legend_inside)
)
check_ui_test(result)
}
})
test_that("barplot legendposition", {
legend_inside <- FALSE
for (legend_pos in test_legend_positions) {
result <- canvasXpress(
data = barplot_y,
graphOrientation = "vertical",
graphType = "Bar",
title = paste("Barplot - LegendPosition", legend_pos),
legendPosition = legend_pos,
legendInside = ifelse(legend_pos %in% inside_legend_only_positions, TRUE, legend_inside)
)
check_ui_test(result)
}
})
test_that("barplot (segregated) legendposition", {
z <- data.frame(Plot = "Bar1", stringsAsFactors = F)
rownames(z) <- rownames(barplot_y)
legend_inside <- FALSE
for (legend_pos in segregated_legend_positions) {
result <- canvasXpress(
data = barplot_y,
varAnnot = z,
graphOrientation = "vertical",
graphType = "Bar",
segregateVariablesBy = list("Plot"),
title = paste("Barplot (segregated) - LegendPosition", legend_pos),
legendPosition = legend_pos,
legendInside = ifelse(legend_pos %in% inside_legend_only_positions, TRUE, legend_inside)
)
check_ui_test(result)
}
})
test_that("boxplot legendposition", {
legend_inside <- FALSE
for (legend_pos in test_legend_positions) {
result <- canvasXpress(
data = boxplot_y,
smpAnnot = boxplot_x,
colorBy = "dose",
graphOrientation = "vertical",
graphType = "Boxplot",
groupingFactors = list("dose"),
showLegend = TRUE,
smpTitle = "dose",
stringSampleFactors = list("dose"),
title = paste("Boxplot - LegendPosition", legend_pos),
legendPosition = legend_pos,
legendInside = ifelse(legend_pos %in% inside_legend_only_positions, TRUE, legend_inside)
)
check_ui_test(result)
}
})
test_that("boxplot (segregated) legendposition", {
z <- data.frame(Plot = "Box1", stringsAsFactors = F)
rownames(z) <- rownames(boxplot_y)
legend_inside <- FALSE
for (legend_pos in segregated_legend_positions) {
result <- canvasXpress(
data = boxplot_y,
smpAnnot = boxplot_x,
varAnnot = z,
colorBy = "dose",
graphOrientation = "vertical",
graphType = "Boxplot",
segregateVariablesBy = list("Plot"),
groupingFactors = list("dose"),
showLegend = TRUE,
smpTitle = "dose",
stringSampleFactors = list("dose"),
title = paste("Boxplot (segregated) - LegendPosition", legend_pos),
legendPosition = legend_pos,
legendInside = ifelse(legend_pos %in% inside_legend_only_positions, TRUE, legend_inside)
)
check_ui_test(result)
}
})
test_that("Scatterplot matrix legendposition", {
y <- read.table(system.file("extdata", "cX-irist-dat.txt.gz", package = "canvasXpress"), header = TRUE, sep = "\t", quote = "", row.names = 1, fill = TRUE, check.names = FALSE, stringsAsFactors = FALSE)
z <- read.table(system.file("extdata", "cX-irist-var.txt.gz", package = "canvasXpress"), header = TRUE, sep = "\t", quote = "", row.names = 1, fill = TRUE, check.names = FALSE, stringsAsFactors = FALSE)
legend_inside <- FALSE
for (legend_pos in segregated_legend_positions) {
result <- canvasXpress(
data = y,
varAnnot = z,
colorBy = "Species",
graphType = "Scatter2D",
scatterPlotMatrix = TRUE,
title = paste("Scatterplot matrix - LegendPosition", legend_pos),
legendPosition = legend_pos,
legendInside = ifelse(legend_pos %in% inside_legend_only_positions, TRUE, legend_inside)
)
check_ui_test(result)
}
})
test_that("dotplot legendposition", {
y <- read.table(system.file("extdata", "cX-iris-dat.txt.gz", package = "canvasXpress"), header = TRUE, sep = "\t", quote = "", row.names = 1, fill = TRUE, check.names = FALSE, stringsAsFactors = FALSE)
x <- read.table(system.file("extdata", "cX-iris-smp.txt.gz", package = "canvasXpress"), header = TRUE, sep = "\t", quote = "", row.names = 1, fill = TRUE, check.names = FALSE, stringsAsFactors = FALSE)
legend_inside <- TRUE
for (legend_pos in test_legend_positions) {
result <- canvasXpress(
data = y,
smpAnnot = x,
graphOrientation = "vertical",
graphType = "Dotplot",
afterRender = list(list("groupSamples", list("Species"))),
title = paste("Dotplot - LegendPosition", legend_pos),
legendPosition = legend_pos,
legendInside = ifelse(legend_pos %in% inside_legend_only_positions, TRUE, legend_inside)
)
check_ui_test(result)
}
})
test_that("heatmap legendposition", {
y <- read.table(system.file("extdata", "cX-multidimensionalheatmap-dat.txt.gz", package = "canvasXpress"), header = TRUE, sep = "\t", quote = "", row.names = 1, fill = TRUE, check.names = FALSE, stringsAsFactors = FALSE)
y2 <- read.table(system.file("extdata", "cX-multidimensionalheatmap-dat3.txt.gz", package = "canvasXpress"), header = TRUE, sep = "\t", quote = "", row.names = 1, fill = TRUE, check.names = FALSE, stringsAsFactors = FALSE)
legend_inside <- FALSE
for (legend_pos in test_legend_positions) {
result <- canvasXpress(
data = list(y = y, data2 = y2),
shapeBy = "Shape",
shapeByData = "data2",
graphType = "Heatmap",
title = paste("Heatmap - LegendPosition", legend_pos),
legendPosition = legend_pos,
legendInside = ifelse(legend_pos %in% inside_legend_only_positions, TRUE, legend_inside)
)
check_ui_test(result)
}
})
|
#' query UI Function
#'
#' @description A shiny Module.
#'
#' @param id,input,output,session Internal parameters for {shiny}.
#'
#' @noRd
#'
#' @import RSQLite
#' @importFrom shiny NS
#' @importFrom shinyjqui jqui_resizable
#' @importFrom shinyAce aceEditor
#' @importFrom DT DTOutput
#' @importFrom DT renderDT
#' @importFrom DT datatable
mod_query_ui <- function(id) {
ns <- NS(id)
tabPanel(
title = "Query",
column(
width = 12,
fluidRow(column(width = 12, h2(
"Query the Database"
))),
# shinyjqui to make it resizable
fluidRow(column(
width = 12,
shinyjqui::jqui_resizable(
shinyAce::aceEditor(
outputId = ns("ace"),
placeholder = "Enter query here.",
mode = "sql",
height = "200px"
)
)
)),
fluidRow(
column(
width = 12,
actionButton(inputId = ns("execute"),
label = "Execute Query"),
actionButton(inputId = ns("recent_queries"),
label = "Recent Queries"),
actionButton(inputId = ns("save_query"),
label = "Save Query"),
actionButton(inputId = ns("saved_queries"),
label = "Show Saved Queries")
)
),
br(),
fluidRow(column(width = 12, verbatimTextOutput(
ns("display_error")
))),
br(),
fluidRow(uiOutput(ns(
"query_results_ui"
))),
br()
)
)
}
#' query Server Function
#'
#' @noRd
mod_query_server <- function(input, output, session, conn) {
ns <- session$ns
# info$data - stores data fetched from query
# info$error - stores error fetched from query
# info$saved_data - stores data of saved queries
info <- reactiveValues(
data = NULL,
error = NULL,
saved_data = NULL,
recent_data = NULL
)
#action_query$data_updated - updates when a query is executed
# to notify other modules
action_query <- reactiveValues(
data_updated = NULL,
data_updated_save = NULL,
data_updated_recent = NULL
)
conn_save_db <- RSQLite::dbConnect(
RSQLite::SQLite(),
system.file(
"extdata",
"saved_queries.db",
package = "rsqliteadmin",
mustWork = TRUE
)
)
conn_recent_db <- RSQLite::dbConnect(
RSQLite::SQLite(),
system.file(
"extdata",
"recent_queries.db",
package = "rsqliteadmin",
mustWork = TRUE
)
)
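  # The SQL-builder helpers used below (recent_query(), recent_data_fetch_query(),
  # data_fetch_query(), insert_query(), delete_query()) are defined elsewhere in
  # rsqliteadmin and are not shown in this file. As a hedged illustration only,
  # recent_query() presumably returns an INSERT statement along the lines of
  #   sprintf("INSERT INTO table (Query, Database) VALUES ('%s', '%s')",
  #           gsub("'", "''", query), db)
  # (the table and column names here are assumptions, not the package's schema).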
output$query_results_ui <- renderUI({
column(
width = 12,
id = "query_results",
conditionalPanel(condition = !is.null(info$data),
fluidRow(column(
width = 12,
DT::DTOutput(ns("query_results"))
)))
)
})
output$query_results <-
DT::renderDT(expr = {
DT::datatable(data = info$data)
})
output$display_error <- renderText({
info$error
})
observeEvent(input$execute, {
if (!is.null(conn$active_db)) {
query <- input$ace
query <- gsub("\n", " ", query)
# Queries with "SELECT" string are executed with dbGetQuery and
      # others with dbExecute
tryCatch({
if (isTRUE(grepl("select", query, ignore.case = TRUE))) {
info$data <- RSQLite::dbGetQuery(conn$active_db, query)
showNotification(ui = "Query Completed.",
duration = 5,
type = "message")
info$error <- NULL
}
else{
RSQLite::dbExecute(conn$active_db, query)
action_query$data_updated <- input$execute
showNotification(ui = "Query Completed.",
duration = 3,
type = "message")
info$error <- NULL
}
active_db_path <- RSQLite::dbGetInfo(conn$active_db)$dbname
active_db_name <- basename(active_db_path)
RSQLite::dbExecute(conn_recent_db, recent_query(query, active_db_name))
},
error = function(err) {
info$error <- toString(err)
})
}
else{
showNotification(ui = "No database selected.",
duration = 3,
type = "error")
}
})
output$display_saved_queries <- DT::renderDT(expr = {
DT::datatable(
data = info$saved_data[, c(-1,-2)],
rownames = FALSE,
selection = "single",
plugins = "ellipsis",
options = list(columnDefs = list(
list(
targets = "_all",
render = DT::JS("$.fn.dataTable.render.ellipsis(75)")
)
))
)
})
output$display_recent_queries <- DT::renderDT(expr = {
DT::datatable(
data = info$recent_data[, c(-1,-2)],
rownames = FALSE,
selection = "single",
plugins = "ellipsis",
options = list(columnDefs = list(
list(
targets = "_all",
render = DT::JS("$.fn.dataTable.render.ellipsis(75)")
)
))
)
})
observeEvent(input$recent_queries, {
info$recent_data <- RSQLite::dbGetQuery(conn_recent_db,
recent_data_fetch_query())
showModal(
modalDialog(
size = "l",
title = "Recent Queries",
DT::DTOutput(ns("display_recent_queries")),
shinyAce::aceEditor(
outputId = ns("ace_recent"),
placeholder = "",
mode = "sql",
height = "200px"
),
actionButton(inputId = ns("execute_recent"),
label = "Execute Query")
)
)
})
observeEvent(input$display_recent_queries_rows_selected, {
shinyAce::updateAceEditor(
session = session,
editorId = "ace_recent",
value = info$recent_data$Query[input$display_recent_queries_rows_selected]
)
})
observeEvent(input$execute_recent, {
if (!is.null(conn$active_db)) {
active_db_path <- RSQLite::dbGetInfo(conn$active_db)$dbname
active_db_name <- basename(active_db_path)
print(info$recent_data$Database[input$display_recent_queries_rows_selected])
print(active_db_name)
if (!identical(info$recent_data$Database[input$display_recent_queries_rows_selected], active_db_name))
showNotification(ui = "Warning: Currently active database not same as originally saved database.",
duration = 3,
type = "warning")
query <- input$ace_recent
query <- gsub("\n", " ", query)
# Queries with "SELECT" string are executed with dbGetQuery and
      # others with dbExecute
tryCatch({
if (isTRUE(grepl("select", query, ignore.case = TRUE))) {
info$data <- RSQLite::dbGetQuery(conn$active_db, query)
showNotification(ui = "Query Completed.",
duration = 5,
type = "message")
info$error <- NULL
}
else{
RSQLite::dbExecute(conn$active_db, query)
action_query$data_updated_recent <- input$execute_recent
showNotification(ui = "Query Completed.",
duration = 3,
type = "message")
info$error <- NULL
}
},
error = function(err) {
info$error <- toString(err)
})
removeModal()
}
else{
showNotification(ui = "No database selected.",
duration = 3,
type = "error")
}
})
observeEvent(input$saved_queries, {
info$saved_data <- RSQLite::dbGetQuery(conn_save_db,
data_fetch_query("table",
100000,
0))
showModal(
modalDialog(
size = "l",
title = "Saved Queries",
DT::DTOutput(ns("display_saved_queries")),
shinyAce::aceEditor(
outputId = ns("ace_save"),
placeholder = "",
mode = "sql",
height = "200px"
),
actionButton(inputId = ns("execute_saved"),
label = "Execute Query"),
actionButton(inputId = ns("delete_saved"),
label = "Delete Saved Query"),
)
)
})
observeEvent(input$delete_saved, {
if (is.null(input$display_saved_queries_rows_selected)) {
showNotification(ui = "No query selected.",
duration = 3,
type = "error")
}
else{
RSQLite::dbExecute(conn_save_db,
delete_query("table",
info$saved_data$row_id
[info$saved_data$row_number ==
input$display_saved_queries_rows_selected]))
info$saved_data <- RSQLite::dbGetQuery(conn_save_db,
data_fetch_query("table",
100000,
0))
}
})
observeEvent(input$save_query, {
showModal(modalDialog(
easyClose = TRUE,
title = "Save Query",
textInput(inputId = ns("save_query_name"),
label = "Enter Query Name(optional)"),
actionButton(inputId = ns("confirm_save"),
label = "Confirm")
))
})
observeEvent(input$confirm_save, {
tryCatch({
if (!is.null(conn$active_db)) {
active_db_path <- RSQLite::dbGetInfo(conn$active_db)$dbname
active_db_name <- basename(active_db_path)
RSQLite::dbExecute(conn_save_db,
insert_query(
"table",
c(input$save_query_name, input$ace, active_db_name)
))
showNotification(ui = "Query Saved Successfully.",
duration = 5,
type = "message")
removeModal()
}
},
error = function(err) {
showNotification(
ui = paste0(err, ". Query not saved"),
duration = 3,
type = "error"
)
})
})
observeEvent(input$display_saved_queries_rows_selected, {
shinyAce::updateAceEditor(
session = session,
editorId = "ace_save",
value = info$saved_data$Query[input$display_saved_queries_rows_selected]
)
})
observeEvent(input$execute_saved, {
if (!is.null(conn$active_db)) {
active_db_path <- RSQLite::dbGetInfo(conn$active_db)$dbname
active_db_name <- basename(active_db_path)
print(info$saved_data$Database[input$display_saved_queries_rows_selected])
print(active_db_name)
if (!identical(info$saved_data$Database[input$display_saved_queries_rows_selected], active_db_name))
showNotification(ui = "Warning: Currently active database not same as originally saved database.",
duration = 3,
type = "warning")
query <- input$ace_save
query <- gsub("\n", " ", query)
# Queries with "SELECT" string are executed with dbGetQuery and
# others with dbExecuteQuery
tryCatch({
if (isTRUE(grepl("select", query, ignore.case = TRUE))) {
info$data <- RSQLite::dbGetQuery(conn$active_db, query)
showNotification(ui = "Query Completed.",
duration = 5,
type = "message")
info$error <- NULL
}
else{
RSQLite::dbExecute(conn$active_db, query)
action_query$data_updated_save <- input$execute_saved
showNotification(ui = "Query Completed.",
duration = 3,
type = "message")
info$error <- NULL
}
},
error = function(err) {
info$error <- toString(err)
})
removeModal()
}
else{
showNotification(ui = "No database selected.",
duration = 3,
type = "error")
}
})
return(action_query)
}
## To be copied in the UI
# mod_query_ui("query_ui_1")
## To be copied in the server
# callModule(mod_query_server, "query_ui_1")
|
/R/mod_query.R
|
no_license
|
cran/rsqliteadmin
|
R
| false | false | 12,822 |
r
|
#' query UI Function
#'
#' @description A shiny Module.
#'
#' @param id,input,output,session Internal parameters for {shiny}.
#'
#' @noRd
#'
#' @import RSQLite
#' @importFrom shiny NS
#' @importFrom shinyjqui jqui_resizable
#' @importFrom shinyAce aceEditor
#' @importFrom DT DTOutput
#' @importFrom DT renderDT
#' @importFrom DT datatable
mod_query_ui <- function(id) {
ns <- NS(id)
tabPanel(
title = "Query",
column(
width = 12,
fluidRow(column(width = 12, h2(
"Query the Database"
))),
# shinyjqui to make it resizable
fluidRow(column(
width = 12,
shinyjqui::jqui_resizable(
shinyAce::aceEditor(
outputId = ns("ace"),
placeholder = "Enter query here.",
mode = "sql",
height = "200px"
)
)
)),
fluidRow(
column(
width = 12,
actionButton(inputId = ns("execute"),
label = "Execute Query"),
actionButton(inputId = ns("recent_queries"),
label = "Recent Queries"),
actionButton(inputId = ns("save_query"),
label = "Save Query"),
actionButton(inputId = ns("saved_queries"),
label = "Show Saved Queries")
)
),
br(),
fluidRow(column(width = 12, verbatimTextOutput(
ns("display_error")
))),
br(),
fluidRow(uiOutput(ns(
"query_results_ui"
))),
br()
)
)
}
#' query Server Function
#'
#' @noRd
mod_query_server <- function(input, output, session, conn) {
ns <- session$ns
# info$data - stores data fetched from query
# info$error - stores error fetched from query
# info$saved_data - stores data of saved queries
info <- reactiveValues(
data = NULL,
error = NULL,
saved_data = NULL,
recent_data = NULL
)
#action_query$data_updated - updates when a query is executed
# to notify other modules
action_query <- reactiveValues(
data_updated = NULL,
data_updated_save = NULL,
data_updated_recent = NULL
)
conn_save_db <- RSQLite::dbConnect(
RSQLite::SQLite(),
system.file(
"extdata",
"saved_queries.db",
package = "rsqliteadmin",
mustWork = TRUE
)
)
conn_recent_db <- RSQLite::dbConnect(
RSQLite::SQLite(),
system.file(
"extdata",
"recent_queries.db",
package = "rsqliteadmin",
mustWork = TRUE
)
)
output$query_results_ui <- renderUI({
column(
width = 12,
id = "query_results",
conditionalPanel(condition = !is.null(info$data),
fluidRow(column(
width = 12,
DT::DTOutput(ns("query_results"))
)))
)
})
output$query_results <-
DT::renderDT(expr = {
DT::datatable(data = info$data)
})
output$display_error <- renderText({
info$error
})
observeEvent(input$execute, {
if (!is.null(conn$active_db)) {
query <- input$ace
query <- gsub("\n", " ", query)
# Queries with "SELECT" string are executed with dbGetQuery and
# others with dbExecuteQuery
tryCatch({
if (isTRUE(grepl("select", query, ignore.case = TRUE))) {
info$data <- RSQLite::dbGetQuery(conn$active_db, query)
showNotification(ui = "Query Completed.",
duration = 5,
type = "message")
info$error <- NULL
}
else{
RSQLite::dbExecute(conn$active_db, query)
action_query$data_updated <- input$execute
showNotification(ui = "Query Completed.",
duration = 3,
type = "message")
info$error <- NULL
}
active_db_path <- RSQLite::dbGetInfo(conn$active_db)$dbname
active_db_name <- basename(active_db_path)
RSQLite::dbExecute(conn_recent_db, recent_query(query, active_db_name))
},
error = function(err) {
info$error <- toString(err)
})
}
else{
showNotification(ui = "No database selected.",
duration = 3,
type = "error")
}
})
output$display_saved_queries <- DT::renderDT(expr = {
DT::datatable(
data = info$saved_data[, c(-1,-2)],
rownames = FALSE,
selection = "single",
plugins = "ellipsis",
options = list(columnDefs = list(
list(
targets = "_all",
render = DT::JS("$.fn.dataTable.render.ellipsis(75)")
)
))
)
})
output$display_recent_queries <- DT::renderDT(expr = {
DT::datatable(
data = info$recent_data[, c(-1,-2)],
rownames = FALSE,
selection = "single",
plugins = "ellipsis",
options = list(columnDefs = list(
list(
targets = "_all",
render = DT::JS("$.fn.dataTable.render.ellipsis(75)")
)
))
)
})
observeEvent(input$recent_queries, {
info$recent_data <- RSQLite::dbGetQuery(conn_recent_db,
recent_data_fetch_query())
showModal(
modalDialog(
size = "l",
title = "Recent Queries",
DT::DTOutput(ns("display_recent_queries")),
shinyAce::aceEditor(
outputId = ns("ace_recent"),
placeholder = "",
mode = "sql",
height = "200px"
),
actionButton(inputId = ns("execute_recent"),
label = "Execute Query")
)
)
})
observeEvent(input$display_recent_queries_rows_selected, {
shinyAce::updateAceEditor(
session = session,
editorId = "ace_recent",
value = info$recent_data$Query[input$display_recent_queries_rows_selected]
)
})
observeEvent(input$execute_recent, {
if (!is.null(conn$active_db)) {
active_db_path <- RSQLite::dbGetInfo(conn$active_db)$dbname
active_db_name <- basename(active_db_path)
print(info$recent_data$Database[input$display_recent_queries_rows_selected])
print(active_db_name)
if (!identical(info$recent_data$Database[input$display_recent_queries_rows_selected], active_db_name))
showNotification(ui = "Warning: Currently active database not same as originally saved database.",
duration = 3,
type = "warning")
query <- input$ace_recent
query <- gsub("\n", " ", query)
# Queries with "SELECT" string are executed with dbGetQuery and
# others with dbExecuteQuery
tryCatch({
if (isTRUE(grepl("select", query, ignore.case = TRUE))) {
info$data <- RSQLite::dbGetQuery(conn$active_db, query)
showNotification(ui = "Query Completed.",
duration = 5,
type = "message")
info$error <- NULL
}
else{
RSQLite::dbExecute(conn$active_db, query)
action_query$data_updated_recent <- input$execute_recent
showNotification(ui = "Query Completed.",
duration = 3,
type = "message")
info$error <- NULL
}
},
error = function(err) {
info$error <- toString(err)
})
removeModal()
}
else{
showNotification(ui = "No database selected.",
duration = 3,
type = "error")
}
})
observeEvent(input$saved_queries, {
info$saved_data <- RSQLite::dbGetQuery(conn_save_db,
data_fetch_query("table",
100000,
0))
showModal(
modalDialog(
size = "l",
title = "Saved Queries",
DT::DTOutput(ns("display_saved_queries")),
shinyAce::aceEditor(
outputId = ns("ace_save"),
placeholder = "",
mode = "sql",
height = "200px"
),
actionButton(inputId = ns("execute_saved"),
label = "Execute Query"),
actionButton(inputId = ns("delete_saved"),
label = "Delete Saved Query"),
)
)
})
observeEvent(input$delete_saved, {
if (is.null(input$display_saved_queries_rows_selected)) {
showNotification(ui = "No query selected.",
duration = 3,
type = "error")
}
else{
RSQLite::dbExecute(conn_save_db,
delete_query("table",
info$saved_data$row_id
[info$saved_data$row_number ==
input$display_saved_queries_rows_selected]))
info$saved_data <- RSQLite::dbGetQuery(conn_save_db,
data_fetch_query("table",
100000,
0))
}
})
observeEvent(input$save_query, {
showModal(modalDialog(
easyClose = TRUE,
title = "Save Query",
textInput(inputId = ns("save_query_name"),
label = "Enter Query Name(optional)"),
actionButton(inputId = ns("confirm_save"),
label = "Confirm")
))
})
observeEvent(input$confirm_save, {
tryCatch({
if (!is.null(conn$active_db)) {
active_db_path <- RSQLite::dbGetInfo(conn$active_db)$dbname
active_db_name <- basename(active_db_path)
RSQLite::dbExecute(conn_save_db,
insert_query(
"table",
c(input$save_query_name, input$ace, active_db_name)
))
showNotification(ui = "Query Saved Successfully.",
duration = 5,
type = "message")
removeModal()
}
},
error = function(err) {
showNotification(
ui = paste0(err, ". Query not saved"),
duration = 3,
type = "error"
)
})
})
observeEvent(input$display_saved_queries_rows_selected, {
shinyAce::updateAceEditor(
session = session,
editorId = "ace_save",
value = info$saved_data$Query[input$display_saved_queries_rows_selected]
)
})
observeEvent(input$execute_saved, {
if (!is.null(conn$active_db)) {
active_db_path <- RSQLite::dbGetInfo(conn$active_db)$dbname
active_db_name <- basename(active_db_path)
print(info$saved_data$Database[input$display_saved_queries_rows_selected])
print(active_db_name)
if (!identical(info$saved_data$Database[input$display_saved_queries_rows_selected], active_db_name))
showNotification(ui = "Warning: Currently active database not same as originally saved database.",
duration = 3,
type = "warning")
query <- input$ace_save
query <- gsub("\n", " ", query)
# Queries with "SELECT" string are executed with dbGetQuery and
# others with dbExecuteQuery
tryCatch({
if (isTRUE(grepl("select", query, ignore.case = TRUE))) {
info$data <- RSQLite::dbGetQuery(conn$active_db, query)
showNotification(ui = "Query Completed.",
duration = 5,
type = "message")
info$error <- NULL
}
else{
RSQLite::dbExecute(conn$active_db, query)
action_query$data_updated_save <- input$execute_saved
showNotification(ui = "Query Completed.",
duration = 3,
type = "message")
info$error <- NULL
}
},
error = function(err) {
info$error <- toString(err)
})
removeModal()
}
else{
showNotification(ui = "No database selected.",
duration = 3,
type = "error")
}
})
return(action_query)
}
## To be copied in the UI
# mod_query_ui("query_ui_1")
## To be copied in the server
# callModule(mod_query_server, "query_ui_1")
|
# Data visualisations for Alpha, Beta, and Gamma diversity in Sphagnum peat bogs
# Population & Community Ecology - Year 3
# Emma Gemal, s1758915@sms.ed.ac.uk
# 19/11/2019
# Library ----
library(tidyverse)
# Creating the diversity data frame ----
mesocosm <- c(rep("one", 3), rep("two", 3), rep("pooled", 3)) %>%
as.factor()
type <- rep(c("alpha", "gamma", "beta"), 3) %>%
as.factor()
diversity <- c(12.23, 13.86, 1.13, 11.01, 16.58, 1.51, 13.75, 17.00, 1.24)
str(type) # in the wrong order
type <- factor(type, levels = unique(type))
str(type)
str(mesocosm) # wrong order
mesocosm <- factor(mesocosm, levels = unique(mesocosm))
str(mesocosm)
diversity_data <- data.frame(mesocosm, type, diversity)
str(diversity_data)
View(diversity_data)
# Creating data frame for rank-abundance curve ----
abundance <- c(207, 198, 147, 123, 112, 66, 62, 59, 46, 46, 43, 31, 29, 20,
17, 12, 11, 11, 11, 7, 6, 5, 5, 4, 3, 3, 2, 2, 1, 1)
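# The abundance vector above is prepared for a rank-abundance curve but no plot
# follows in this script; the sketch below is a hypothetical illustration only
# (object names and axis labels are assumptions, not part of the original work).
rank_abun <- data.frame(rank = seq_along(abundance),
                        abundance = sort(abundance, decreasing = TRUE))
(rank_plot <- ggplot(rank_abun, aes(x = rank, y = abundance)) +
    geom_line() +
    geom_point() +
    scale_y_log10() +   # log scale is conventional for rank-abundance (Whittaker) plots
    labs(x = "Species rank", y = "Abundance") +
    theme_classic())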
# Plot for diversities ----
(plot <- ggplot(diversity_data,
aes(x = mesocosm, y = diversity,
fill = type, order = type)) +
geom_bar(position = "dodge", stat = "identity", width = 0.7, color = "black") +
labs(x = "Mesocosm", y = "True Diversity") +
scale_y_continuous(expand = c(0,0), limits = c(0, 18)) +
scale_x_discrete(labels = c("1", "2", "1 + 2")) +
scale_fill_manual(name = "Diversity\nType",
breaks = c("alpha", "gamma", "beta"),
labels = c("Alpha", "Gamma", "Beta"),
values = c("#cccccc",
"#969696",
"#636363")) +
theme_classic() + # make it bw with borders, and skinny
    theme(plot.margin = unit(c(1,1,1,1), "cm"),
axis.text.x = element_text(size = 13,
margin = margin(t = 7, unit = "pt")),
axis.text.y = element_text(size = 13),
axis.title.y = element_text(size = 15),
axis.title.x = element_text(size = 15),
legend.title = element_text(size = 14),
legend.text = element_text(size = 12)))
ggsave(plot = plot, filename = "true_div_plot.png", path = "Other_work/PCE",
width = 15, height = 15, units = "cm")
|
/Script/sphagnum_plots.R
|
permissive
|
emmagemal/PCE
|
R
| false | false | 2,491 |
r
|
# Data visualisations for Alpha, Beta, and Gamma diversity in Sphagnum peat bogs
# Population & Community Ecology - Year 3
# Emma Gemal, s1758915@sms.ed.ac.uk
# 19/11/2019
# Library ----
library(tidyverse)
# Creating the diversity data frame ----
mesocosm <- c(rep("one", 3), rep("two", 3), rep("pooled", 3)) %>%
as.factor()
type <- rep(c("alpha", "gamma", "beta"), 3) %>%
as.factor()
diversity <- c(12.23, 13.86, 1.13, 11.01, 16.58, 1.51, 13.75, 17.00, 1.24)
str(type) # in the wrong order
type <- factor(type, levels = unique(type))
str(type)
str(mesocosm) # wrong order
mesocosm <- factor(mesocosm, levels = unique(mesocosm))
str(mesocosm)
diversity_data <- data.frame(mesocosm, type, diversity)
str(diversity_data)
View(diversity_data)
# Creating data frame for rank-abundance curve ----
abundance <- c(207, 198, 147, 123, 112, 66, 62, 59, 46, 46, 43, 31, 29, 20,
17, 12, 11, 11, 11, 7, 6, 5, 5, 4, 3, 3, 2, 2, 1, 1)
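# The abundance vector above is prepared for a rank-abundance curve but no plot
# follows in this script; the sketch below is a hypothetical illustration only
# (object names and axis labels are assumptions, not part of the original work).
rank_abun <- data.frame(rank = seq_along(abundance),
                        abundance = sort(abundance, decreasing = TRUE))
(rank_plot <- ggplot(rank_abun, aes(x = rank, y = abundance)) +
    geom_line() +
    geom_point() +
    scale_y_log10() +   # log scale is conventional for rank-abundance (Whittaker) plots
    labs(x = "Species rank", y = "Abundance") +
    theme_classic())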
# Plot for diversities ----
(plot <- ggplot(diversity_data,
aes(x = mesocosm, y = diversity,
fill = type, order = type)) +
geom_bar(position = "dodge", stat = "identity", width = 0.7, color = "black") +
labs(x = "Mesocosm", y = "True Diversity") +
scale_y_continuous(expand = c(0,0), limits = c(0, 18)) +
scale_x_discrete(labels = c("1", "2", "1 + 2")) +
scale_fill_manual(name = "Diversity\nType",
breaks = c("alpha", "gamma", "beta"),
labels = c("Alpha", "Gamma", "Beta"),
values = c("#cccccc",
"#969696",
"#636363")) +
theme_classic() + # make it bw with borders, and skinny
    theme(plot.margin = unit(c(1,1,1,1), "cm"),
axis.text.x = element_text(size = 13,
margin = margin(t = 7, unit = "pt")),
axis.text.y = element_text(size = 13),
axis.title.y = element_text(size = 15),
axis.title.x = element_text(size = 15),
legend.title = element_text(size = 14),
legend.text = element_text(size = 12)))
ggsave(plot = plot, filename = "true_div_plot.png", path = "Other_work/PCE",
width = 15, height = 15, units = "cm")
|
# TODO: Add comment
#
# Author: ajinkya
###############################################################################
# dailyReturn() is provided by the quantmod package
library(quantmod)
getPeriodReturnSignals <- function(ticker,timeFrequency,percent)
{
#percent.absolute <- (percent/100)
percent.absolute <- percent
print(percent.absolute)
print(" calculating daily return signals ")
ticker.return <- dailyReturn(ticker[,"Close"], subset=NULL, type='arithmetic', leading=TRUE)
	# convert the fractional returns to percentages (vectorised)
	ticker.return <- ticker.return * 100
print(" calculated percent daily returns ")
#rounding the returns
#ticker.return <- round(ticker.return)
# ticker.return.percent <- Delt(ticker.return,type="arithmetic")
	ticker.return.signals <- character(nrow(ticker))  # pre-allocate one signal per row
print(" calculating daily return signals ")
	print(length(ticker.return.signals))  # signals is a vector, so length(), not nrow()
print(nrow(ticker))
n<-nrow(ticker)
for(i in 2:n)
{
#ticker.return.signals[i] <- ifelse(percent.absolute < ticker.return[i],"Buy","Sell")
if(ticker.return[i] > percent.absolute)
{
#print("buy")
ticker.return.signals[i-1] <- "buy"
}
else if(ticker.return[i] < -percent.absolute)
{
#print("sell")
ticker.return.signals[i-1] <- "sell"
}
else
{
#print("hold")
ticker.return.signals[i-1] <- "hold"
}
ticker.return.signals[i] <- "hold"
}
t<-ticker[,"Close"]
#edited:7dec13 removed "ticker.return.factors"
#result <-data.frame(ticker.return.signals,ticker.return,ticker.return.factors,ticker$Open,ticker$Close,ticker$High,ticker$Low,ticker$Volume)
result <-data.frame(ticker.return.signals,ticker.return,ticker$Open,ticker$Close,ticker$High,ticker$Low,ticker$Volume)
return(result)
}
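# --- Hypothetical usage sketch (not part of the original file) ---------------
# Exercises the function on a small synthetic OHLCV series so no data download
# is required; the column names, dates and the 2% threshold are all assumptions.
if (FALSE) {
	library(xts)
	set.seed(1)
	dates  <- seq(as.Date("2013-01-01"), by = "day", length.out = 20)
	close  <- cumprod(1 + rnorm(20, sd = 0.03)) * 100
	ticker <- xts(cbind(Open = close, High = close, Low = close,
	                    Close = close, Volume = 1e5), order.by = dates)
	signals <- getPeriodReturnSignals(ticker, timeFrequency = "daily", percent = 2)
	head(signals)
}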
|
/srs-cran/src/technicalindicators/TickerDailyReturns.R
|
no_license
|
ajinkya-github/stocksimulation
|
R
| false | false | 1,655 |
r
|
# TODO: Add comment
#
# Author: ajinkya
###############################################################################
# dailyReturn() is provided by the quantmod package
library(quantmod)
getPeriodReturnSignals <- function(ticker,timeFrequency,percent)
{
#percent.absolute <- (percent/100)
percent.absolute <- percent
print(percent.absolute)
print(" calculating daily return signals ")
ticker.return <- dailyReturn(ticker[,"Close"], subset=NULL, type='arithmetic', leading=TRUE)
	# convert the fractional returns to percentages (vectorised)
	ticker.return <- ticker.return * 100
print(" calculated percent daily returns ")
#rounding the returns
#ticker.return <- round(ticker.return)
# ticker.return.percent <- Delt(ticker.return,type="arithmetic")
	ticker.return.signals <- character(nrow(ticker))  # pre-allocate one signal per row
print(" calculating daily return signals ")
	print(length(ticker.return.signals))  # signals is a vector, so length(), not nrow()
print(nrow(ticker))
n<-nrow(ticker)
for(i in 2:n)
{
#ticker.return.signals[i] <- ifelse(percent.absolute < ticker.return[i],"Buy","Sell")
if(ticker.return[i] > percent.absolute)
{
#print("buy")
ticker.return.signals[i-1] <- "buy"
}
else if(ticker.return[i] < -percent.absolute)
{
#print("sell")
ticker.return.signals[i-1] <- "sell"
}
else
{
#print("hold")
ticker.return.signals[i-1] <- "hold"
}
ticker.return.signals[i] <- "hold"
}
t<-ticker[,"Close"]
#edited:7dec13 removed "ticker.return.factors"
#result <-data.frame(ticker.return.signals,ticker.return,ticker.return.factors,ticker$Open,ticker$Close,ticker$High,ticker$Low,ticker$Volume)
result <-data.frame(ticker.return.signals,ticker.return,ticker$Open,ticker$Close,ticker$High,ticker$Low,ticker$Volume)
return(result)
}
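# --- Hypothetical usage sketch (not part of the original file) ---------------
# Exercises the function on a small synthetic OHLCV series so no data download
# is required; the column names, dates and the 2% threshold are all assumptions.
if (FALSE) {
	library(xts)
	set.seed(1)
	dates  <- seq(as.Date("2013-01-01"), by = "day", length.out = 20)
	close  <- cumprod(1 + rnorm(20, sd = 0.03)) * 100
	ticker <- xts(cbind(Open = close, High = close, Low = close,
	                    Close = close, Volume = 1e5), order.by = dates)
	signals <- getPeriodReturnSignals(ticker, timeFrequency = "daily", percent = 2)
	head(signals)
}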
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/biglm_mapper.R
\name{cluster_se}
\alias{cluster_se}
\title{Map-reduce clustered standard errors with \code{biglm}}
\usage{
cluster_se(file_list, fitted_model, ...)
}
\arguments{
\item{file_list}{Character vector of data file names. Must be in \code{.rds} format.}
\item{fitted_model}{Fitted \code{biglm} object.}
\item{...}{Additional arguments to pass to \code{chunk_clusterse}
\itemize{
\item \code{clustervar}: character name of variable to cluster on
\item \code{levels_union}: named list with factor variables to standardize
}}
}
\value{
A list with elements
\itemize{
\item \code{xxinv}: (X'X)^-1
\item \code{cluster_vcov}: cluster-robust variance-covariance matrix estimate
\item \code{g_cov}: inner matrix in cluster-robust vcov matrix
\item \code{e_hat}: mean of squared residuals
}
}
\description{
Map-reduce clustered standard errors with \code{biglm}
}
\details{
Computation DOES NOT do small sample DOF adjustment (I assume if you're using \code{biglm}, the data is large).
Also importantly, computation assumes no group overlap across files (all observations from each group are in one file).
}
\examples{
cluster_se(data_files, m, clustervar="group")
}
|
/man/cluster_se.Rd
|
no_license
|
gregobad/biglm-mapper
|
R
| false | true | 1,248 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/biglm_mapper.R
\name{cluster_se}
\alias{cluster_se}
\title{Map-reduce clustered standard errors with \code{biglm}}
\usage{
cluster_se(file_list, fitted_model, ...)
}
\arguments{
\item{file_list}{Character vector of data file names. Must be in \code{.rds} format.}
\item{fitted_model}{Fitted \code{biglm} object.}
\item{...}{Additional arguments to pass to \code{chunk_clusterse}
\itemize{
\item \code{clustervar}: character name of variable to cluster on
\item \code{levels_union}: named list with factor variables to standardize
}}
}
\value{
A list with elements
\itemize{
\item \code{xxinv}: (X'X)^-1
\item \code{cluster_vcov}: cluster-robust variance-covariance matrix estimate
\item \code{g_cov}: inner matrix in cluster-robust vcov matrix
\item \code{e_hat}: mean of squared residuals
}
}
\description{
Map-reduce clustered standard errors with \code{biglm}
}
\details{
Computation DOES NOT do small sample DOF adjustment (I assume if you're using \code{biglm}, the data is large).
Also importantly, computation assumes no group overlap across files (all observations from each group are in one file).
}
\examples{
cluster_se(data_files, m, clustervar="group")
}
|
library(tm)
library(wordcloud)
library(memoise)
# We load several books.
books <<- list("A Mid Summer Night's Dream" = "summer",
"The Merchant of Venice" = "merchant",
"Romeo and Juliet" = "romeo")
# Using memoise function to automatically cache the results
getTermMatrix <- memoise(function(book) {
if (!(book %in% books))
stop("Unknown book")
text <- readLines(sprintf("./%s.txt.gz", book),encoding="UTF-8")
myCorpus = Corpus(VectorSource(text))
myCorpus = tm_map(myCorpus, content_transformer(tolower))
myCorpus = tm_map(myCorpus, removePunctuation)
myCorpus = tm_map(myCorpus, removeNumbers)
myCorpus = tm_map(myCorpus, removeWords,
c(stopwords("SMART"), "thy", "thou", "thee", "the", "and", "but"))
myDTM = TermDocumentMatrix(myCorpus, control = list(minWordLength = 1))
m = as.matrix(myDTM)
sort(rowSums(m), decreasing = TRUE)
})
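# --- Hypothetical usage sketch (not part of the original app) ----------------
# Elsewhere in the app (e.g. server.R) one would typically call getTermMatrix()
# and pass the result to wordcloud(); the argument values here are assumptions.
# terms <- getTermMatrix(books[["Romeo and Juliet"]])
# wordcloud(names(terms), terms, max.words = 100,
#           colors = RColorBrewer::brewer.pal(8, "Dark2"))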
|
/Developing_data_products/Project_course/global.R
|
no_license
|
pvmontes/datasciencecoursera
|
R
| false | false | 955 |
r
|
library(tm)
library(wordcloud)
library(memoise)
# We load several books.
books <<- list("A Mid Summer Night's Dream" = "summer",
"The Merchant of Venice" = "merchant",
"Romeo and Juliet" = "romeo")
# Using memoise function to automatically cache the results
getTermMatrix <- memoise(function(book) {
if (!(book %in% books))
stop("Unknown book")
text <- readLines(sprintf("./%s.txt.gz", book),encoding="UTF-8")
myCorpus = Corpus(VectorSource(text))
myCorpus = tm_map(myCorpus, content_transformer(tolower))
myCorpus = tm_map(myCorpus, removePunctuation)
myCorpus = tm_map(myCorpus, removeNumbers)
myCorpus = tm_map(myCorpus, removeWords,
c(stopwords("SMART"), "thy", "thou", "thee", "the", "and", "but"))
myDTM = TermDocumentMatrix(myCorpus, control = list(minWordLength = 1))
m = as.matrix(myDTM)
sort(rowSums(m), decreasing = TRUE)
})
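# --- Hypothetical usage sketch (not part of the original app) ----------------
# Elsewhere in the app (e.g. server.R) one would typically call getTermMatrix()
# and pass the result to wordcloud(); the argument values here are assumptions.
# terms <- getTermMatrix(books[["Romeo and Juliet"]])
# wordcloud(names(terms), terms, max.words = 100,
#           colors = RColorBrewer::brewer.pal(8, "Dark2"))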
|
getwd()
setwd("C:\\Users\\minahm\\Documents\\School\\Fall 2014\\CMDA 3654")
load('phsample.RData')
#The data is an anonymized dataset of a person/household containing the information
#Age, Employment class, Education Level, Sex of Worker
#Selects a subset that is self-described full-time,
#working at least 40 hours a week,
#20 to 50 years of age,
#with income between $1000 and $250000
psub = subset(dpus,with(dpus,(PINCP>1000)&(ESR==1)&
(PINCP<=250000)&(PERNP>1000)&(PERNP<=250000)&
(WKHP>=40)&(AGEP>=20)&(AGEP<=50)&
(PWGTP1>0)&(COW %in% (1:7))&(SCHL %in% (1:24))))
#Reencodes sex from 1/2 to M/F
psub$SEX = as.factor(ifelse(psub$SEX==1,'M','F'))
#Sets reference of sex to M, so when graphing M and F are differentiated
psub$SEX = relevel(psub$SEX,'M')
cowmap <- c("Employee of a private for-profit",
"Private not-for-profit employee",
"Local government employee",
"State government employee",
"Federal government employee",
"Self-employed not incorporated",
"Self-employed incorporated")
#Refactors class of worker so it can be better accessed
psub$COW = as.factor(cowmap[psub$COW])
psub$COW = relevel(psub$COW,cowmap[1])
#education is reencoded to be more readable
schlmap = c(
rep("no high school diploma",15),
"Regular high school diploma",
"GED or alternative credential",
"some college credit, no degree",
"some college credit, no degree",
"Associate's degree",
"Bachelor's degree",
"Master's degree",
"Professional degree",
"Doctorate degree")
psub$SCHL = as.factor(schlmap[psub$SCHL])
psub$SCHL = relevel(psub$SCHL,schlmap[1])
#gets subset of data for training purposes
dtrain = subset(psub,ORIGRANDGROUP >= 500)
#Data subset used for testing
dtest = subset(psub,ORIGRANDGROUP < 500)
summary(dtrain$COW)
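# --- Hypothetical illustration (not part of the original homework) -----------
# One plausible use of the train/test split above: model log10 income from the
# recoded covariates on dtrain and check the fit on dtest. The formula and the
# choice of a linear model are assumptions, not taken from the assignment.
model <- lm(log(PINCP, base = 10) ~ AGEP + SEX + COW + SCHL, data = dtrain)
dtest$predLogPINCP <- predict(model, newdata = dtest)
cor(log(dtest$PINCP, base = 10), dtest$predLogPINCP)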
project = read.csv("Cars93.csv")
project
|
/minahm92_hw2.r
|
no_license
|
kim-minahm/CMDA-3654
|
R
| false | false | 1,921 |
r
|
getwd()
setwd("C:\\Users\\minahm\\Documents\\School\\Fall 2014\\CMDA 3654")
load('phsample.RData')
#The data is an anonymized dataset of a person/household containing the information
#Age, Employment class, Education Level, Sex of Worker
#Selects a subset that is self-described full-time,
#working at least 40 hours a week,
#20 to 50 years of age,
#with income between $1000 and $250000
psub = subset(dpus,with(dpus,(PINCP>1000)&(ESR==1)&
(PINCP<=250000)&(PERNP>1000)&(PERNP<=250000)&
(WKHP>=40)&(AGEP>=20)&(AGEP<=50)&
(PWGTP1>0)&(COW %in% (1:7))&(SCHL %in% (1:24))))
#Reencodes sex from 1/2 to M/F
psub$SEX = as.factor(ifelse(psub$SEX==1,'M','F'))
#Sets reference of sex to M, so when graphing M and F are differentiated
psub$SEX = relevel(psub$SEX,'M')
cowmap <- c("Employee of a private for-profit",
"Private not-for-profit employee",
"Local government employee",
"State government employee",
"Federal government employee",
"Self-employed not incorporated",
"Self-employed incorporated")
#Refactors class of worker so it can be better accessed
psub$COW = as.factor(cowmap[psub$COW])
psub$COW = relevel(psub$COW,cowmap[1])
#education is reencoded to be more readable
schlmap = c(
rep("no high school diploma",15),
"Regular high school diploma",
"GED or alternative credential",
"some college credit, no degree",
"some college credit, no degree",
"Associate's degree",
"Bachelor's degree",
"Master's degree",
"Professional degree",
"Doctorate degree")
psub$SCHL = as.factor(schlmap[psub$SCHL])
psub$SCHL = relevel(psub$SCHL,schlmap[1])
#gets subset of data for training purposes
dtrain = subset(psub,ORIGRANDGROUP >= 500)
#Data subset used for testing
dtest = subset(psub,ORIGRANDGROUP < 500)
summary(dtrain$COW)
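# --- Hypothetical illustration (not part of the original homework) -----------
# One plausible use of the train/test split above: model log10 income from the
# recoded covariates on dtrain and check the fit on dtest. The formula and the
# choice of a linear model are assumptions, not taken from the assignment.
model <- lm(log(PINCP, base = 10) ~ AGEP + SEX + COW + SCHL, data = dtrain)
dtest$predLogPINCP <- predict(model, newdata = dtest)
cor(log(dtest$PINCP, base = 10), dtest$predLogPINCP)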
project = read.csv("Cars93.csv")
project
|
#' Preprocessing of TNS class objects.
#'
#' Creates TNS class objects for regulons and survival data.
#'
#' @param tni A \linkS4class{TNI} class, already processed with the same samples
#' listed in the survival data.frame.
#' @param survivalData A named data.frame with samples in rows and survival data
#' in the columns.
#' @param keycovar A character vector of the 'keycovars' listed in the
#' data.frame columns.
#' @param time A numeric or character value corresponding to the column of the
#' data.frame where the time of last observation is given.
#' @param event A numeric or character value, corresponding to the column of
#' the data.frame where the 'event' information is given.
#' @param samples An optional character vector listing samples to be analyzed.
#' @return A preprocessed \linkS4class{TNS} class
#' @examples
#' data(dt4rtn, package="RTN")
#' data(survival.data)
#'
#' # compute regulons for 3 TFs using the RTN package
#' rtni <- new("TNI", gexp=dt4rtn$gexp,
#' transcriptionFactors=dt4rtn$tfs[c("FOXM1","E2F2","PTTG1")])
#' rtni <- tni.preprocess(rtni,gexpIDs=dt4rtn$gexpIDs, verbose=FALSE)
#' rtni<-tni.permutation(rtni, nPermutations=100, verbose=FALSE) #sets 'nPermutations'>=1000
#' rtni<-tni.dpi.filter(rtni, verbose=FALSE)
#'
#' # create a new TNS object
#' rtns <- tnsPreprocess(rtni, survival.data, keycovar = c("Grade","Age"),
#' time = 1, event = 2)
#'
#' @seealso \code{\link[RTN:tni.preprocess]{tni.preprocess}} for similar
#' preprocessing.
#' @import methods
#' @docType methods
#' @rdname tnsPreprocess-methods
#' @aliases tnsPreprocess
#' @export
#'
setMethod("tnsPreprocess",
"TNI",
function (tni, survivalData, keycovar, time = 1, event = 2, samples = NULL)
{
#-- tni checks
if(tni@status["Preprocess"]!="[x]")
stop("NOTE: TNI object requires preprocessing in the
RTN package!")
if(tni@status["Permutation"]!="[x]")
stop("NOTE: TNI object requires permutation/bootstrap and
DPI filter in the RTN package!")
if(tni@status["DPI.filter"]!="[x]")
stop("NOTE: TNI object requires DPI filter in the RTN
package!")
#-- missing
if (missing(survivalData))
stop("Must provide a 'survivalData' object.")
if (missing(keycovar))
stop("Must provide a 'keycovar' object.")
#-- par checks
.tns.checks(survivalData, type = "survivalData")
time = .tns.checks(time, survivalData, type = "Time")
event = .tns.checks(event, survivalData, type = "Event")
.tns.checks(keycovar, survivalData, "Keycovars")
samples = .tns.checks(samples, survivalData, type = "Samples")
#-- other checks
if(!all(samples %in% colnames(tni@gexp))){
stop("all samples listed in 'survivalData' rownames must be
available in the 'tni' object!")
}
#-- reorganize survivalData
idx <- c(time, event)
te.data <- survivalData[,idx]
survivalData <- survivalData[,-idx]
survivalData <- cbind(te.data, survivalData)
names(survivalData)[1:2] <- c("time", "event")
survivalData <- survivalData[samples,]
#-- making TNS object
object <- new("TNS", tni=tni, survivalData = survivalData,
keycovar = keycovar)
#-- status update
object <- tns.set(object, what="status-1")
object
}
)
#' 2-tailed Gene Set Enrichment Analysis on Transcriptional Networks.
#'
#' Works as a wrapper for \code{\link[RTN:tni.gsea2]{tni.gsea2}}, performing a
#' 2-tailed GSEA analysis on a \linkS4class{TNI} class object and integrating
#' the results into the \linkS4class{TNS} class object.
#'
#' @param tns A \linkS4class{TNS} class, which has been preprocessed
#' @param ... Parameters passed to the \code{\link[RTN:tni.gsea2]{tni.gsea2}}
#' function.
#' @return A \linkS4class{TNS} class, with added Enrichment Scores.
#' @examples
#' data(dt4rtn, package="RTN")
#' data(survival.data)
#'
#' # compute regulons for 3 TFs using the RTN package
#' rtni <- new("TNI", gexp=dt4rtn$gexp,
#' transcriptionFactors=dt4rtn$tfs[c("FOXM1","E2F2","PTTG1")])
#' rtni <- tni.preprocess(rtni,gexpIDs=dt4rtn$gexpIDs, verbose=FALSE)
#' rtni<-tni.permutation(rtni, nPermutations=100, verbose=FALSE) #sets 'nPermutations'>=1000
#' rtni<-tni.dpi.filter(rtni, verbose=FALSE)
#'
#' rtns <- tnsPreprocess(rtni, survival.data, keycovar = c("Grade","Age"), time = 1, event = 2)
#' rtns <- tnsGSEA2(rtns, verbose=FALSE)
#'
#' @seealso \code{\link[RTN:tni.gsea2]{tni.gsea2}} for information on all
#' parameters.
#' @importClassesFrom RTN TNI
#' @docType methods
#' @rdname tnsGSEA2-methods
#' @aliases tnsGSEA2
#' @export
#'
setMethod("tnsGSEA2",
"TNS",
function(tns, ...) {
#-- checks
if(tns@status["Preprocess"]!="[x]")
stop("NOTE: TNS object requires preprocessing!")
#-- run gsea2 and update TNS
tni <- tnsGet(tns, what = "TNI")
EScores <- tni.gsea2(tni, ... = ...)
tns <- tns.set(tns, EScores, "EScores")
tns <- tns.set(tns, what = "status-2")
return(tns)
}
)
#' Kaplan-Meier analysis for TNS class objects.
#'
#' Makes a 2 or 3 panel plot for survival analysis. The first panel shows the
#' differential Enrichment score (dES) for all samples, ranked by expression
#' in their sections. The second (optional) panel shows the status of other
#' attributes which may be present in the survival data.frame for all samples.
#' The third panel shows a Kaplan-Meier plot computed for the given survival
#' data, with a curve for each section.
#'
#' @param tns a \linkS4class{TNS} object, which must have passed GSEA2 analysis.
#' @param regs a string vector. Contains all the regulons which are going to be
#' plotted.
#' @param attribs a numeric vector. Contains the columns of the survival
#' data.frame which will be plotted for the second panel.
#' @param nSections A numeric value for the stratification of the sample. The
#' larger the number, the more subdivisions will be created for the Kaplan-Meier
#' analysis.
#' @param endpoint a numeric value. It represents the cut-off point for the
#' "time", if any.
#' @param fname a string. The name of the file in which the plot will be saved
#' @param fpath a string. The path to the directory where the plot will be saved
#' @param ylab a string. The label for the y axis on the third panel
#' @param xlab a string. The label for the x axis on the third panel. This should
#' be the measure of time shown in the survival data.frame after the last
#' check-up.
#' @param pal a string, which can be "red", "blue" or "redblue". Represents the
#' colors used in the first and third panels. Alternatively, it can
#' contain the hex values.
#' @param excludeMid a logical value. If TRUE, inconclusive dES values will not
#' be considered in the survival analysis.
#' @param flipcols a logical value. If TRUE, flips the order of the samples to
#' lowest expression on top, highest on the bottom.
#' @param plotpdf a logical value. If TRUE, the plot is saved as a pdf file.
#' If false, it is plotted in the plotting area.
#' @param plotbatch a logical value. If TRUE, plots for all regs are saved in
#' the same file.
#' If FALSE, each plot for each reg is saved in a different file.
#' @param width a numeric value. Represents the width of the plot.
#' @param height a numeric value. Represents the height of the plot.
#' @param panelWidths a numeric vector of length=3 specifying the relative
#' width of the internal panels.
#' @return A plot, showing the graphical analysis of provided survival data.
#' @examples
#' data(dt4rtn, package="RTN")
#' data(survival.data)
#'
#' # compute regulons for 3 TFs using the RTN package
#' rtni <- new("TNI", gexp=dt4rtn$gexp,
#' transcriptionFactors=dt4rtn$tfs[c("FOXM1","E2F2","PTTG1")])
#' rtni <- tni.preprocess(rtni,gexpIDs=dt4rtn$gexpIDs, verbose=FALSE)
#' rtni<-tni.permutation(rtni, nPermutations=100, verbose=FALSE) #sets 'nPermutations'>=1000
#' rtni<-tni.dpi.filter(rtni, verbose=FALSE)
#'
#' rtns <- tnsPreprocess(rtni, survival.data, keycovar = c("Grade","Age"),
#' time = 1, event = 2)
#' rtns <- tnsGSEA2(rtns, verbose=FALSE)
#' tnsKM(rtns, regs="FOXM1", attribs = list(c("ER+","ER-"),c("G1","G2","G3")),
#' plotpdf = FALSE)
#'
#' @importFrom RColorBrewer brewer.pal
#' @importFrom survival survdiff survfit coxph Surv
#' @docType methods
#' @rdname tnsKM-methods
#' @aliases tnsKM
#' @export
#'
setMethod("tnsKM",
"TNS",
function (tns, regs = NULL, attribs=NULL, nSections=2, endpoint = 60,
fname="survplot", fpath=".", ylab="Survival probability",
xlab="Months", pal="redblue", excludeMid=FALSE, flipcols=FALSE,
plotpdf=TRUE, plotbatch=FALSE, width = 6.3, height = 3.6,
panelWidths=c(3,2,4))
{
#-- checks
.tns.checks(tns, type = "status")
.tns.checks(nSections, type = "nSec")
.tns.checks(fname, type = "Fname")
.tns.checks(fpath, type = "Path")
.tns.checks (ylab, type = "Ylab")
.tns.checks (xlab, type = "Xlab")
.tns.checks (regs, type = "Regs")
.tns.checks (attribs, tns@survivalData, type = "Attribs")
.tns.checks(pal, tns@para$strat, type = "Pal")
.tns.checks(excludeMid, type = "ExcludeMid")
.tns.checks(flipcols, type = "FlipCols")
.tns.checks (plotpdf, type = "PlotPDF")
.tns.checks (plotbatch, type = "PlotBatch")
.tns.checks (width, height, type = "WidthHeight")
.tns.checks (endpoint, type = "EndPoint")
.tns.checks (panelWidths, type = "panelWidths")
#-- stratification
tns <- .tns.stratification(tns, nSections = nSections)
#-- organize data
survData <- tnsGet(tns, what = "survivalData")
EScores <- tnsGet(tns, what = "EScores")
#--
survData$event[survData$time>endpoint]<-0
survData$time[survData$time>endpoint]<-endpoint
#--
sp1<-rownames(survData)
sp2<-rownames(EScores$regstatus)
survData <- survData[sp1%in%sp2,,drop=FALSE]
EScores$pos <- EScores$pos[sp2%in%sp1,,drop=FALSE]
EScores$neg <- EScores$neg[sp2%in%sp1,,drop=FALSE]
EScores$dif <- EScores$dif[sp2%in%sp1,,drop=FALSE]
#--
tns <- tns.set(tns, EScores, what = "EScores")
tns <- tns.set(tns, survData, what = "survivalData")
#-- get attribs
if(!is.null(attribs)){
if(is.list(attribs)){
groups<-unlist(lapply(attribs, length))
idx<-unlist(attribs)
attribs<-as.matrix(survData[,idx])
} else {
groups<-NULL
attribs<-as.matrix(survData[,attribs])
}
if( !all( attribs %in% c(0,1,NA) ) )
stop("NOTE: 'attribs' variables should only include binary values!")
}
#-- making reglist
reglist<-colnames(tns@EScores$regstatus)
if(!is.null(regs)){
if(!all(regs%in%reglist)){
stop("NOTE: all names in 'regs' should be listed
in the slot 'EScores' of the 'tns' object!")
}
reglist<-regs
}
idx<-apply(EScores$regstatus,2,function(es){any(is.na(es))})
validregs<-colnames(EScores$regstatus)[!idx]
reglist<-reglist[reglist%in%validregs]
#---plot
if(plotbatch & plotpdf){
pdf(file=paste(fpath,"/",fname,".pdf",sep=""), width=width,
height=height)
for(reg in reglist){
.survplot(EScores, survData, reg,fname,fpath,ylab,xlab,pal,
panelWidths,plotpdf,
excludeMid,flipcols,attribs,groups, endpoint)
}
dev.off()
} else {
for(reg in reglist){
if(plotpdf)
{
pdf(file=paste(fpath,"/",reg,fname,".pdf",sep=""),
width=width, height=height)
}
.survplot(EScores,survData,reg,fname,fpath,ylab,xlab,
pal,panelWidths,
plotpdf,excludeMid,flipcols,attribs,groups,
endpoint)
if(plotpdf)
{
message("NOTE: 'PDF' file was generated")
dev.off()
}
}
}
invisible(list(EScores = EScores, survivalData = survData))
}
)
#' Cox regression analysis for TNS class objects.
#'
#' Run Cox multivariate regression for regulons and key covariables.
#'
#' @param tns a \linkS4class{TNS} object, which must have passed GSEA2 analysis.
#' @param regs a string vector. Contains the regulons which will be used to
#' compute the Cox multivariate model. If left NULL, all regulons will be used.
#' @param endpoint a numeric value. The final point in time for the samples. All
#' time values larger than endpoint will be set at endpoint.
#' @param fname a string. The name of the PDF file which will contain the plot.
#' @param fpath a string. The directory where the file will be saved.
#' @param ylab a string. The label of the y-axis, describing what is represented.
#' @param xlab a string. The label of the x-axis.
#' @param qqkeycovar a logical value. If TRUE, only the samples in the 2nd and
#' 3rd quarters of dES are used to compute. If FALSE, all samples are used.
#' @param excludeMid a logical value. If TRUE, inconclusive dES values will not be
#' considered in the survival analysis.
#' @param width a numeric value. The width of the plot.
#' @param height a numeric value. The height of the plot.
#' @param xlim a vector with 2 values. The first value represents the lowest
#' value in the x-axis, the second value is the highest.
#' @param sortregs a logical value. If TRUE, regulons are sorted from most
#' negatively associated with hazard to most positively associated with hazard.
#' @param plotpdf a logical value.
#' @return A Cox hazard model plot. If TRUE, generates a pdf plot.
#' @examples
#' data(dt4rtn, package="RTN")
#' data(survival.data)
#'
#' # compute regulons for 3 TFs using the RTN package
#' rtni <- new("TNI", gexp=dt4rtn$gexp,
#' transcriptionFactors=dt4rtn$tfs[c("FOXM1","E2F2","PTTG1")])
#' rtni <- tni.preprocess(rtni,gexpIDs=dt4rtn$gexpIDs, verbose=FALSE)
#' rtni<-tni.permutation(rtni, nPermutations=100, verbose=FALSE) #sets 'nPermutations'>=1000
#' rtni<-tni.dpi.filter(rtni, verbose=FALSE)
#'
#' rtns <- tnsPreprocess(rtni, survival.data, keycovar = c("Grade","Age"),
#' time = 1, event = 2)
#' rtns <- tnsGSEA2(rtns, verbose=FALSE)
#' tnsCox(rtns, regs = c("PTTG1","E2F2","FOXM1"), sortregs = TRUE,
#' plotpdf = FALSE)
#'
#' @docType methods
#' @rdname tnsCox-methods
#' @aliases tnsCox
#' @export
#'
setMethod("tnsCox",
"TNS",
function (tns, regs = NULL, endpoint=60, fname="coxplot", fpath=".",
ylab="Regulons and key covariates",
xlab="Hazard Ratio (95% CI)",
qqkeycovar=FALSE, excludeMid=FALSE, width=5, height=5,
xlim=c(0.2,10), sortregs=TRUE, plotpdf = TRUE)
{
#-- checks
.tns.checks(tns, type = "status")
.tns.checks (regs, type = "Regs")
.tns.checks(fname, type = "Fname")
.tns.checks(fpath, type = "Path")
.tns.checks(ylab, type = "Ylab")
.tns.checks(xlab, type = "Xlab")
.tns.checks(qqkeycovar, type = "QQCovar")
.tns.checks(endpoint, type = "EndPoint")
.tns.checks(excludeMid, type = "ExcludeMid")
.tns.checks(width, height, type = "WidthHeight")
.tns.checks(xlim, type = "Xlim")
.tns.checks(sortregs, type = "SortRegs")
.tns.checks(plotpdf, type = "PlotPDF")
.tns.checks(tns@survivalData, type = "survival_cox")
#-- gets
EScores <- tnsGet(tns, what = "EScores")
survData <- tnsGet(tns, what = "survivalData")
keycovar <- tnsGet(tns, what = "keycovar")
#-- checks
dif <- EScores$dif
if(excludeMid){
dif[EScores$regstatus==EScores$mid]<-NA
}
if(!is.null(regs)){
if(!all(regs%in%colnames(dif))){
stop("Not all 'regs' have EScores!")
}
idx <- colnames(dif)%in%regs
dif <- dif[,idx]
dif <- dif[,regs]
}
#---set names to a valid format
regs<-colnames(dif)
xregs<-gsub("-|\\+|\\.","_",regs)
xregs<-gsub("\\s","",xregs)
colnames(dif)<-xregs
names(regs)<-xregs
#---combine dif and survivalData
summary<-cbind(survData[rownames(dif),],dif)
#--- set keycovar by quantile
kvarlist<-list()
if(qqkeycovar){
for(kvar in tns@keycovar){
tp<-summary[[kvar]]
ql<-quantile(tp,c(0.25,0.75), na.rm=TRUE)
tp[tp<ql[1]]<-NA
tp[tp>ql[2]]<-NA
summary[[kvar]]<-tp
kvarlist[[kvar]]<-ql
}
}
#--- filter data
summary<-summary[,c("time", "event",keycovar, xregs)]
#--- get cox formula
if (is.null(tns@keycovar))
{
fm1<-"Surv(time, event)"
} else {
fm1<-paste("Surv(time, event) ~ ", paste(keycovar,
collapse = "+"), sep="")
}
#--- fit cox regression model
resall<-NULL
for(rg in xregs){
nas<-is.na(summary[,rg])
if( sum(nas) > nrow(summary)/2 ){
rs<-c(1,1,0.99,1.01)
} else {
fm2<-formula( paste(fm1,rg, sep="+") )
rs<-summary(coxph(fm2, data=summary[!nas,]))$conf.int[rg,,drop=FALSE]
}
resall<-rbind(resall,rg=rs)
}
if(sortregs){
resall<-resall[sort.list(resall[,1]),]
}
#--- fit cox model for keycovars and adding to resall
idx<-which.max(resall[,"exp(coef)"])
fm2<-formula( paste(fm1,rownames(resall)[idx], sep="+") )
resref<-summary(coxph(fm2, data=summary))$conf.int
resall<-rbind(resref[-nrow(resref),],resall)
resall<-resall[nrow(resall):1,]
#--- add symbols to rownames
idx<-match(names(regs),rownames(resall))
rownames(resall)[idx]<-regs
#--- plot
filen = paste(fpath,"/",fname,".pdf",sep="")
.plotCox(resall, regs = regs, keycovar = keycovar, filen = filen,
width = width, height = height, xlim = xlim, xlab = xlab,
ylab = ylab, plotpdf = plotpdf)
#--- return
invisible(list(resall=resall,kvarlist=kvarlist))
}
)
setMethod( "show",
"TNS",
function(object)
{
cat("a TNS (Transcriptional Network - Survival) object:\n")
message("--status:")
print(object@status, quote=FALSE)
}
)
|
/R/AllMethods.R
|
no_license
|
xtsvm/RTNsurvival
|
R
| false | false | 21,136 |
r
|
#' Preprocessing of TNS class objects.
#'
#' Creates TNS class objects for regulons and survival data.
#'
#' @param tni A \linkS4class{TNI} class, already processed with the same samples
#' listed in the survival data.frame.
#' @param survivalData A named data.frame with samples in rows and survival data
#' in the columns.
#' @param keycovar A character vector of the 'keycovars' listed in the
#' data.frame columns.
#' @param time A numeric or character value corresponding to the column of the
#' data.frame where the time of last observation is given.
#' @param event A numeric or character value, corresponding to the column of
#' the data.frame where the 'event' information is given.
#' @param samples An optional character vector listing samples to be analyzed.
#' @return A preprocessed \linkS4class{TNS} class
#' @examples
#' data(dt4rtn, package="RTN")
#' data(survival.data)
#'
#' # compute regulons for 3 TFs using the RTN package
#' rtni <- new("TNI", gexp=dt4rtn$gexp,
#' transcriptionFactors=dt4rtn$tfs[c("FOXM1","E2F2","PTTG1")])
#' rtni <- tni.preprocess(rtni,gexpIDs=dt4rtn$gexpIDs, verbose=FALSE)
#' rtni<-tni.permutation(rtni, nPermutations=100, verbose=FALSE) #sets 'nPermutations'>=1000
#' rtni<-tni.dpi.filter(rtni, verbose=FALSE)
#'
#' # create a new TNS object
#' rtns <- tnsPreprocess(rtni, survival.data, keycovar = c("Grade","Age"),
#' time = 1, event = 2)
#'
#' @seealso \code{\link[RTN:tni.preprocess]{tni.preprocess}} for similar
#' preprocessing.
#' @import methods
#' @docType methods
#' @rdname tnsPreprocess-methods
#' @aliases tnsPreprocess
#' @export
#'
setMethod("tnsPreprocess",
"TNI",
function (tni, survivalData, keycovar, time = 1, event = 2, samples = NULL)
{
#-- tni checks
if(tni@status["Preprocess"]!="[x]")
stop("NOTE: TNI object requires preprocessing in the
RTN package!")
if(tni@status["Permutation"]!="[x]")
stop("NOTE: TNI object requires permutation/bootstrap and
DPI filter in the RTN package!")
if(tni@status["DPI.filter"]!="[x]")
stop("NOTE: TNI object requires DPI filter in the RTN
package!")
#-- missing
if (missing(survivalData))
stop("Must provide a 'survivalData' object.")
if (missing(keycovar))
stop("Must provide a 'keycovar' object.")
#-- par checks
.tns.checks(survivalData, type = "survivalData")
time = .tns.checks(time, survivalData, type = "Time")
event = .tns.checks(event, survivalData, type = "Event")
.tns.checks(keycovar, survivalData, "Keycovars")
samples = .tns.checks(samples, survivalData, type = "Samples")
#-- other checks
if(!all(samples %in% colnames(tni@gexp))){
stop("all samples listed in 'survivalData' rownames must be
available in the 'tni' object!")
}
#-- reorganize survivalData
idx <- c(time, event)
te.data <- survivalData[,idx]
survivalData <- survivalData[,-idx]
survivalData <- cbind(te.data, survivalData)
names(survivalData)[1:2] <- c("time", "event")
survivalData <- survivalData[samples,]
#-- making TNS object
object <- new("TNS", tni=tni, survivalData = survivalData,
keycovar = keycovar)
#-- status update
object <- tns.set(object, what="status-1")
object
}
)
#' 2-tailed Gene Set Enrichment Analysis on Transcriptional Networks.
#'
#' Works as a wrapper for \code{\link[RTN:tni.gsea2]{tni.gsea2}}, performing a
#' 2-tailed GSEA analysis on a \linkS4class{TNI} class object and integrating
#' the results into the \linkS4class{TNS} class object.
#'
#' @param tns A \linkS4class{TNS} class, which has been preprocessed
#' @param ... Parameters passed to the \code{\link[RTN:tni.gsea2]{tni.gsea2}}
#' function.
#' @return A \linkS4class{TNS} class, with added Enrichment Scores.
#' @examples
#' data(dt4rtn, package="RTN")
#' data(survival.data)
#'
#' # compute regulons for 3 TFs using the RTN package
#' rtni <- new("TNI", gexp=dt4rtn$gexp,
#' transcriptionFactors=dt4rtn$tfs[c("FOXM1","E2F2","PTTG1")])
#' rtni <- tni.preprocess(rtni,gexpIDs=dt4rtn$gexpIDs, verbose=FALSE)
#' rtni<-tni.permutation(rtni, nPermutations=100, verbose=FALSE) #sets 'nPermutations'>=1000
#' rtni<-tni.dpi.filter(rtni, verbose=FALSE)
#'
#' rtns <- tnsPreprocess(rtni, survival.data, keycovar = c("Grade","Age"), time = 1, event = 2)
#' rtns <- tnsGSEA2(rtns, verbose=FALSE)
#'
#' @seealso \code{\link[RTN:tni.gsea2]{tni.gsea2}} for information on all
#' parameters.
#' @importClassesFrom RTN TNI
#' @docType methods
#' @rdname tnsGSEA2-methods
#' @aliases tnsGSEA2
#' @export
#'
setMethod("tnsGSEA2",
"TNS",
function(tns, ...) {
#-- checks
if(tns@status["Preprocess"]!="[x]")
stop("NOTE: TNS object requires preprocessing!")
#-- run gsea2 and update TNS
tni <- tnsGet(tns, what = "TNI")
EScores <- tni.gsea2(tni, ... = ...)
tns <- tns.set(tns, EScores, "EScores")
tns <- tns.set(tns, what = "status-2")
return(tns)
}
)
#' Kaplan-Meier analysis for TNS class objects.
#'
#' Makes a 2 or 3 panel plot for survival analysis. The first panel shows the
#' differential Enrichment score (dES) for all samples, ranked by expression
#' in their sections. The second (optional) panel shows the status of other
#' attributes which may be present in the survival data.frame for all samples.
#' The third panel shows a Kaplan-Meier plot computed for the given survival
#' data, with a curve for each section.
#'
#' @param tns a \linkS4class{TNS} object, which must have passed GSEA2 analysis.
#' @param regs a string vector. Contains all the regulons which are going to be
#' plotted.
#' @param attribs a numeric vector. Contains the columns of the survival
#' data.frame which will be plotted for the second panel.
#' @param nSections A numeric value for the stratification of the sample. The
#' larger the number, the more subdivisions will be created for the Kaplan-Meier
#' analysis.
#' @param endpoint a numeric value. It represents the cut-off point for the
#' "time", if any.
#' @param fname a string. The name of the file in which the plot will be saved
#' @param fpath a string. The path to the directory where the plot will be saved
#' @param ylab a string. The label for the y axis on the third panel
#' @param xlab a string. The label for the x axis on the third panel. This should
#' be the measure of time shown in the survival data.frame after the last
#' check-up.
#' @param pal a string, which can be "red", "blue" or "redblue". Represents the
#' colors used in the first and third panels. Alternatively, it can
#' contain the hex values.
#' @param excludeMid a logical value. If TRUE, inconclusive dES values will not
#' be considered in the survival analysis.
#' @param flipcols a logical value. If TRUE, flips the order of the samples to
#' lowest expression on top, highest on the bottom.
#' @param plotpdf a logical value. If TRUE, the plot is saved as a pdf file.
#' If false, it is plotted in the plotting area.
#' @param plotbatch a logical value. If TRUE, plots for all regs are saved in
#' the same file.
#' If FALSE, each plot for each reg is saved in a different file.
#' @param width a numeric value. Represents the width of the plot.
#' @param height a numeric value. Represents the height of the plot.
#' @param panelWidths a numeric vector of length=3 specifying the relative
#' width of the internal panels.
#' @return A plot, showing the graphical analysis of provided survival data.
#' @examples
#' data(dt4rtn, package="RTN")
#' data(survival.data)
#'
#' # compute regulons for 3 TFs using the RTN package
#' rtni <- new("TNI", gexp=dt4rtn$gexp,
#' transcriptionFactors=dt4rtn$tfs[c("FOXM1","E2F2","PTTG1")])
#' rtni <- tni.preprocess(rtni,gexpIDs=dt4rtn$gexpIDs, verbose=FALSE)
#' rtni<-tni.permutation(rtni, nPermutations=100, verbose=FALSE) #sets 'nPermutations'>=1000
#' rtni<-tni.dpi.filter(rtni, verbose=FALSE)
#'
#' rtns <- tnsPreprocess(rtni, survival.data, keycovar = c("Grade","Age"),
#' time = 1, event = 2)
#' rtns <- tnsGSEA2(rtns, verbose=FALSE)
#' tnsKM(rtns, regs="FOXM1", attribs = list(c("ER+","ER-"),c("G1","G2","G3")),
#' plotpdf = FALSE)
#'
#' @importFrom RColorBrewer brewer.pal
#' @importFrom survival survdiff survfit coxph Surv
#' @docType methods
#' @rdname tnsKM-methods
#' @aliases tnsKM
#' @export
#'
setMethod("tnsKM",
"TNS",
function (tns, regs = NULL, attribs=NULL, nSections=2, endpoint = 60,
fname="survplot", fpath=".", ylab="Survival probability",
xlab="Months", pal="redblue", excludeMid=FALSE, flipcols=FALSE,
plotpdf=TRUE, plotbatch=FALSE, width = 6.3, height = 3.6,
panelWidths=c(3,2,4))
{
#-- checks
.tns.checks(tns, type = "status")
.tns.checks(nSections, type = "nSec")
.tns.checks(fname, type = "Fname")
.tns.checks(fpath, type = "Path")
.tns.checks (ylab, type = "Ylab")
.tns.checks (xlab, type = "Xlab")
.tns.checks (regs, type = "Regs")
.tns.checks (attribs, tns@survivalData, type = "Attribs")
.tns.checks(pal, tns@para$strat, type = "Pal")
.tns.checks(excludeMid, type = "ExcludeMid")
.tns.checks(flipcols, type = "FlipCols")
.tns.checks (plotpdf, type = "PlotPDF")
.tns.checks (plotbatch, type = "PlotBatch")
.tns.checks (width, height, type = "WidthHeight")
.tns.checks (endpoint, type = "EndPoint")
.tns.checks (panelWidths, type = "panelWidths")
#-- stratification
tns <- .tns.stratification(tns, nSections = nSections)
#-- organize data
survData <- tnsGet(tns, what = "survivalData")
EScores <- tnsGet(tns, what = "EScores")
#--
survData$event[survData$time>endpoint]<-0
survData$time[survData$time>endpoint]<-endpoint
#--
sp1<-rownames(survData)
sp2<-rownames(EScores$regstatus)
survData <- survData[sp1%in%sp2,,drop=FALSE]
EScores$pos <- EScores$pos[sp2%in%sp1,,drop=FALSE]
EScores$neg <- EScores$neg[sp2%in%sp1,,drop=FALSE]
EScores$dif <- EScores$dif[sp2%in%sp1,,drop=FALSE]
#--
tns <- tns.set(tns, EScores, what = "EScores")
tns <- tns.set(tns, survData, what = "survivalData")
#-- get attribs
if(!is.null(attribs)){
if(is.list(attribs)){
groups<-unlist(lapply(attribs, length))
idx<-unlist(attribs)
attribs<-as.matrix(survData[,idx])
} else {
groups<-NULL
attribs<-as.matrix(survData[,attribs])
}
if( !all( attribs %in% c(0,1,NA) ) )
stop("NOTE: 'attribs' variables should only include binary values!")
}
#-- making reglist
reglist<-colnames(tns@EScores$regstatus)
if(!is.null(regs)){
if(!all(regs%in%reglist)){
stop("NOTE: all names in 'regs' should be listed
in the slot 'EScores' of the 'tns' object!")
}
reglist<-regs
}
idx<-apply(EScores$regstatus,2,function(es){any(is.na(es))})
validregs<-colnames(EScores$regstatus)[!idx]
reglist<-reglist[reglist%in%validregs]
#---plot
if(plotbatch & plotpdf){
pdf(file=paste(fpath,"/",fname,".pdf",sep=""), width=width,
height=height)
for(reg in reglist){
.survplot(EScores, survData, reg,fname,fpath,ylab,xlab,pal,
panelWidths,plotpdf,
excludeMid,flipcols,attribs,groups, endpoint)
}
dev.off()
} else {
for(reg in reglist){
if(plotpdf)
{
pdf(file=paste(fpath,"/",reg,fname,".pdf",sep=""),
width=width, height=height)
}
.survplot(EScores,survData,reg,fname,fpath,ylab,xlab,
pal,panelWidths,
plotpdf,excludeMid,flipcols,attribs,groups,
endpoint)
if(plotpdf)
{
message("NOTE: 'PDF' file was generated")
dev.off()
}
}
}
invisible(list(EScores = EScores, survivalData = survData))
}
)
#' Cox regression analysis for TNS class objects.
#'
#' Run Cox multivariate regression for regulons and key covariables.
#'
#' @param tns a \linkS4class{TNS} object, which must have passed GSEA2 analysis.
#' @param regs a string vector. Contains the regulons which will be used to
#' compute the Cox multivariate model. If left NULL, all regulons will be used.
#' @param endpoint a numeric value. The final point in time for the samples. All
#' time values larger than endpoint will be set at endpoint.
#' @param fname a string. The name of the PDF file which will contain the plot.
#' @param fpath a string. The directory where the file will be saved.
#' @param ylab a string. The label of the y-axis, describing what is represented.
#' @param xlab a string. The label of the x-axis.
#' @param qqkeycovar a logical value. If TRUE, only the samples in the 2nd and
#' 3rd quarters of dES are used to compute. If FALSE, all samples are used.
#' @param excludeMid a logical value. If TRUE, inconclusive dES values will not be
#' considered in the survival analysis.
#' @param width a numeric value. The width of the plot.
#' @param height a numeric value. The height of the plot.
#' @param xlim a vector with 2 values. The first value represents the lowest
#' value in the x-axis, the second value is the highest.
#' @param sortregs a logical value. If TRUE, regulons are sorted from most
#' negatively associated with hazard to most positively associated with hazard.
#' @param plotpdf a logical value.
#' @return A Cox hazard model plot. If TRUE, generates a pdf plot.
#' @examples
#' data(dt4rtn, package="RTN")
#' data(survival.data)
#'
#' # compute regulons for 3 TFs using the RTN package
#' rtni <- new("TNI", gexp=dt4rtn$gexp,
#' transcriptionFactors=dt4rtn$tfs[c("FOXM1","E2F2","PTTG1")])
#' rtni <- tni.preprocess(rtni,gexpIDs=dt4rtn$gexpIDs, verbose=FALSE)
#' rtni<-tni.permutation(rtni, nPermutations=100, verbose=FALSE) #sets 'nPermutations'>=1000
#' rtni<-tni.dpi.filter(rtni, verbose=FALSE)
#'
#' rtns <- tnsPreprocess(rtni, survival.data, keycovar = c("Grade","Age"),
#' time = 1, event = 2)
#' rtns <- tnsGSEA2(rtns, verbose=FALSE)
#' tnsCox(rtns, regs = c("PTTG1","E2F2","FOXM1"), sortregs = TRUE,
#' plotpdf = FALSE)
#'
#' @docType methods
#' @rdname tnsCox-methods
#' @aliases tnsCox
#' @export
#'
setMethod("tnsCox",
"TNS",
function (tns, regs = NULL, endpoint=60, fname="coxplot", fpath=".",
ylab="Regulons and key covariates",
xlab="Hazard Ratio (95% CI)",
qqkeycovar=FALSE, excludeMid=FALSE, width=5, height=5,
xlim=c(0.2,10), sortregs=TRUE, plotpdf = TRUE)
{
#-- checks
.tns.checks(tns, type = "status")
.tns.checks (regs, type = "Regs")
.tns.checks(fname, type = "Fname")
.tns.checks(fpath, type = "Path")
.tns.checks(ylab, type = "Ylab")
.tns.checks(xlab, type = "Xlab")
.tns.checks(qqkeycovar, type = "QQCovar")
.tns.checks(endpoint, type = "EndPoint")
.tns.checks(excludeMid, type = "ExcludeMid")
.tns.checks(width, height, type = "WidthHeight")
.tns.checks(xlim, type = "Xlim")
.tns.checks(sortregs, type = "SortRegs")
.tns.checks(plotpdf, type = "PlotPDF")
.tns.checks(tns@survivalData, type = "survival_cox")
#-- gets
EScores <- tnsGet(tns, what = "EScores")
survData <- tnsGet(tns, what = "survivalData")
keycovar <- tnsGet(tns, what = "keycovar")
#-- checks
dif <- EScores$dif
if(excludeMid){
dif[EScores$regstatus==EScores$mid]<-NA
}
if(!is.null(regs)){
if(!all(regs%in%colnames(dif))){
stop("Not all 'regs' have EScores!")
}
idx <- colnames(dif)%in%regs
dif <- dif[,idx]
dif <- dif[,regs]
}
#---set names to a valid format
regs<-colnames(dif)
xregs<-gsub("-|\\+|\\.","_",regs)
xregs<-gsub("\\s","",xregs)
colnames(dif)<-xregs
names(regs)<-xregs
#---combine dif and survivalData
summary<-cbind(survData[rownames(dif),],dif)
#--- set keycovar by quantile
kvarlist<-list()
if(qqkeycovar){
for(kvar in tns@keycovar){
tp<-summary[[kvar]]
ql<-quantile(tp,c(0.25,0.75), na.rm=TRUE)
tp[tp<ql[1]]<-NA
tp[tp>ql[2]]<-NA
summary[[kvar]]<-tp
kvarlist[[kvar]]<-ql
}
}
#--- filter data
summary<-summary[,c("time", "event",keycovar, xregs)]
#--- get cox formula
if (is.null(tns@keycovar))
{
fm1<-"Surv(time, event)"
} else {
fm1<-paste("Surv(time, event) ~ ", paste(keycovar,
collapse = "+"), sep="")
}
#--- fit cox regression model
resall<-NULL
for(rg in xregs){
nas<-is.na(summary[,rg])
if( sum(nas) > nrow(summary)/2 ){
rs<-c(1,1,0.99,1.01)
} else {
fm2<-formula( paste(fm1,rg, sep="+") )
rs<-summary(coxph(fm2, data=summary[!nas,]))$conf.int[rg,,drop=FALSE]
}
resall<-rbind(resall,rg=rs)
}
if(sortregs){
resall<-resall[sort.list(resall[,1]),]
}
#--- fit cox model for keycovars and adding to resall
idx<-which.max(resall[,"exp(coef)"])
fm2<-formula( paste(fm1,rownames(resall)[idx], sep="+") )
resref<-summary(coxph(fm2, data=summary))$conf.int
resall<-rbind(resref[-nrow(resref),],resall)
resall<-resall[nrow(resall):1,]
#--- add symbols to rownames
idx<-match(names(regs),rownames(resall))
rownames(resall)[idx]<-regs
#--- plot
filen = paste(fpath,"/",fname,".pdf",sep="")
.plotCox(resall, regs = regs, keycovar = keycovar, filen = filen,
width = width, height = height, xlim = xlim, xlab = xlab,
ylab = ylab, plotpdf = plotpdf)
#--- return
invisible(list(resall=resall,kvarlist=kvarlist))
}
)
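# The method returns its results invisibly; a minimal sketch of capturing them
# (continuing the roxygen example above, not part of the original examples):
# cox <- tnsCox(rtns, plotpdf = FALSE)
# cox$resall    # hazard ratios with 95% CI for regulons and key covariates
# cox$kvarlist  # quantile limits applied when qqkeycovar = TRUE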
setMethod( "show",
"TNS",
function(object)
{
cat("a TNS (Transcriptional Network - Survival) object:\n")
message("--status:")
print(object@status, quote=FALSE)
}
)
# clumsy plot code to be revised
## plots: PlotFaces (Chernoff-Faces) ====
# from TeachingDemos, Author: H. P. Wolf
# updated with a newer version, edited and simplified in 0.99.24
# Source aplpack, Author: H. P. Wolf
#' Chernoff Faces
#'
#' Plot Chernoff faces. The rows of a data matrix represent cases and the
#' columns the variables.
#'
#' The feature parameters of this implementation are: \itemize{ \item1 height
#' of face \item2 width of face \item3 shape of face \item4 height of mouth
#' \item5 width of mouth \item6 curve of smile \item7 height of eyes \item8
#' width of eyes \item9 height of hair \item10 width of hair \item11 styling of
#' hair \item12 height of nose \item13 width of nose \item14 width of ears
#' \item15 height of ears }
#'
#' \figure{faces.png}{Some faces}
#'
#' For details look at the literate program of \code{faces}
#'
#' @param xy \code{xy} data matrix, rows represent individuals and columns
#' attributes.
#' @param which.row defines a permutation of the rows of the input matrix.
#' @param fill logical. If set to \code{TRUE}, only the first \code{nc}
#' attributes of the faces are transformed, where \code{nc} is the number of
#' columns of \code{xy}.
#' @param nr number of columns of faces on graphics device
#' @param nc number of rows of faces
#' @param scale logical. If set to \code{TRUE}, attributes will be normalized.
#' @param byrow logical. If set to \code{TRUE}, \code{xy} will be transposed.
#' @param main title.
#' @param labels character strings to use as names for the faces.
#' @param col a vector of colors used for the parts of the faces. Colors are
#' recycled in the order: "nose", "eyes", "hair", "face", "lips", "ears".
#' The default here is \code{"white"}; passing NA will omit the fill colors.
#' @return information about usage of variables for face elements is returned
#' invisibly
#' @note based on version 12/2009
#' @author H. P. Wolf, some changes Andri Signorell <andri@@signorell.net>
#' @references Chernoff, H. (1973) The use of faces to represent statistical
#' association, \emph{JASA}, 68, pp 361--368.\cr
#'
#' The smooth curves are computed by an algorithm found in:\cr Ralston, A. and
#' Rabinowitz, P. (1985) \emph{A first course in numerical analysis},
#' McGraw-Hill, pp 76ff.\cr \url{http://www.wiwi.uni-bielefeld.de/~wolf/}: S/R
#' - functions : faces
#' @keywords hplot
#' @examples
#'
#' PlotFaces(rbind(1:3,5:3,3:5,5:7))
#'
#' data(longley)
#' PlotFaces(longley[1:9,])
#'
#' set.seed(17)
#' PlotFaces(matrix(sample(1:1000,128,), 16, 8), main="random faces")
#'
#'
#' means <- lapply(iris[,-5], tapply, iris$Species, mean)
#' m <- t(do.call(rbind, means))
#' m <- cbind(m, matrix(rep(1, 11*3), nrow=3))
#'
#' # define the colors, first for all faces the same
#' col <- replicate(3, c("orchid1", "olivedrab", "goldenrod4",
#' "peachpuff", "darksalmon", "peachpuff3"))
#' rownames(col) <- c("nose","eyes","hair","face","lips","ears")
#' # change haircolor individually for each face
#' col[3, ] <- c("lightgoldenrod", "coral3", "sienna4")
#'
#' z <- PlotFaces(m, nr=1, nc=3, col=col)
#'
#' # print the used coding
#' print(z$info, right=FALSE)
#'
PlotFaces <- function(xy = rbind(1:3,5:3,3:5,5:7), which.row, fill = FALSE, nr, nc,
scale = TRUE, byrow = FALSE, main, labels,
col = "white") {
ncolors <- nrow(xy)
col <- matrix(rep(col, length.out=nrow(xy) * 6), ncol=nrow(xy))
col.nose <- col[1, ]
col.eyes <- col[2, ]
col.hair <- col[3, ]
col.face <- col[4, ]
col.lips <- col[5, ]
col.ears <- col[6, ]
n <- nrow(xy)
if(missing(nr)) nr <- n^0.5
if(missing(nc)) nc <- n^0.5
opar <- par(mfrow=c(ceiling(c(nr, nc))),
oma=rep(6, 4),
mar=rep(.7, 4))
on.exit(par(opar))
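  # internal helper: natural cubic spline through the knots (a, y); returns an
  # m x 2 matrix of interpolated points (x, S.x), used to smooth the face outlines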
spline <- function(a, y, m=200, plot=FALSE) {
n <- length(a)
h <- diff(a)
dy <- diff(y)
sigma <- dy/h
lambda <- h[-1] / (hh <- h[-1] + h[-length(h)])
mu <- 1-lambda
d <- 6 * diff(sigma)/hh
tri.mat <- 2 * diag(n-2)
tri.mat[2 + (0:(n-4))*(n-1)] <- mu[-1]
tri.mat[(1:(n-3)) * (n-1)] <- lambda[-(n-2)]
M <- c(0,solve(tri.mat) %*% d, 0)
x <- seq(from=a[1], to=a[n], length=m)
anz.kl <- hist(x, breaks=a, plot=FALSE)$counts
adj <- function(i) i-1
i <- rep(1:(n-1), anz.kl) + 1
S.x <- M[i-1]*(a[i]-x)^3 / (6*h[adj(i)]) +
M[i] * (x-a[i-1])^3 / (6*h[adj(i)]) +
(y[i-1] - M[i-1] * h[adj(i)]^2 /6) * (a[i]-x)/ h[adj(i)] +
(y[i] - M[i] * h[adj(i)]^2 /6) * (x-a[i-1]) / h[adj(i)]
if(plot){
plot(x, S.x, type="l")
points(a, y)
}
return(cbind(x, S.x))
}
n.char <- 15
xy <- rbind(xy)
if(byrow) xy <- t(xy)
# if(any(is.na(xy))){
# if(na.rm){
# xy <- xy[!apply(is.na(xy),1,any),,drop=FALSE]
# if(nrow(xy)<3) {print("not enough data points"); return()}
# print("Warning: NA elements have been removed!!")
# }else{
# xy.means <- colMeans(xy,na.rm=TRUE)
# for(j in 1:length(xy[1,])) xy[is.na(xy[,j]),j] <- xy.means[j]
# print("Warning: NA elements have been exchanged by mean values!!")
# }
# }
if(!missing(which.row) && all(!is.na(match(which.row,1:dim(xy)[2])) ))
xy <- xy[, which.row, drop=FALSE]
mm <- dim(xy)[2]
n <- dim(xy)[1]
xnames <- dimnames(xy)[[1]]
if(is.null(xnames)) xnames <- as.character(1:n)
if(!missing(labels)) xnames <- labels
if(scale){
xy <- apply(xy,2,function(x){
x <- x-min(x); x <- if(max(x)>0) 2*x/max(x)-1 else x })
} else xy[] <- pmin(pmax(-1,xy),1)
xy <- rbind(xy)
n.c <- dim(xy)[2]
# expand input matrix xy by replication of cols
xy <- xy[,(rows.orig <- h <- rep(1:mm,ceiling(n.char/mm))),drop=FALSE]
if(fill) xy[,-(1:n.c)] <- 0
face.orig <- list(
eye = rbind(c(12,0),c(19,8),c(30,8),c(37,0),c(30,-8),c(19,-8),c(12,0)),
iris = rbind(c(20,0),c(24,4),c(29,0),c(24,-5),c(20,0)),
lipso = rbind(c(0,-47),c( 7,-49), lipsiend=c(16,-53), c( 7,-60),c(0,-62)),
lipsi = rbind(c(7,-54),c(0,-54)),
nose = rbind(c(0,-6),c(3,-16),c(6,-30),c(0,-31)),
shape = rbind(c(0,44),c(29,40),c(51,22),hairend=c(54,11),earsta=c(52,-4),
earend=c(46,-36),c(38,-61),c(25,-83),c(0,-89)),
ear = rbind(c(60,-11), c(57,-30)), # add earsta,earend
hair = rbind(hair1=c(72,12), hair2=c(64,50), c(36,74), c(0,79)) # add hairend
)
lipso.refl.ind <- 4:1
lipsi.refl.ind <- 1
nose.refl.ind <- 3:1
hair.refl.ind <- 3:1
shape.refl.ind <- 8:1
shape.xnotnull <- 2:8
nose.xnotnull <- 2:3
face.list <- list()
for(ind in 1:n){
factors <- xy[ind,]
face <- face.orig
m <- mean(face$lipso[,2])
face$lipso[,2] <- m+(face$lipso[,2]-m)*(1+0.7*factors[4])
face$lipsi[,2] <- m+(face$lipsi[,2]-m)*(1+0.7*factors[4])
face$lipso[,1] <- face$lipso[,1]*(1+0.7*factors[5])
face$lipsi[,1] <- face$lipsi[,1]*(1+0.7*factors[5])
face$lipso["lipsiend",2] <- face$lipso["lipsiend",2]+20*factors[6]
m <- mean(face$eye[,2])
face$eye[,2] <- m+(face$eye[,2] -m)*(1+0.7*factors[7])
face$iris[,2] <- m+(face$iris[,2]-m)*(1+0.7*factors[7])
m <- mean(face$eye[,1])
face$eye[,1] <- m+(face$eye[,1] -m)*(1+0.7*factors[8])
face$iris[,1] <- m+(face$iris[,1]-m)*(1+0.7*factors[8])
m <- min(face$hair[,2])
face$hair[,2] <- m+(face$hair[,2]-m)*(1+0.2*factors[9])
m <- 0
face$hair[,1] <- m+(face$hair[,1]-m)*(1+0.2*factors[10])
m <- 0
face$hair[c("hair1","hair2"),2] <- face$hair[c("hair1","hair2"),2]+50*factors[11]
m <- mean(face$nose[,2])
face$nose[,2] <- m+(face$nose[,2]-m)*(1+0.7*factors[12])
face$nose[nose.xnotnull,1] <- face$nose[nose.xnotnull,1]*(1+factors[13])
m <- mean(face$shape[c("earsta","earend"),1])
face$ear[,1] <- m+(face$ear[,1]-m)* (1+0.7*factors[14])
m <- min(face$ear[,2])
face$ear[,2] <- m+(face$ear[,2]-m)* (1+0.7*factors[15])
face <- lapply(face,function(x){ x[,2] <- x[,2]*(1+0.2*factors[1]);x})
face <- lapply(face,function(x){ x[,1] <- x[,1]*(1+0.2*factors[2]);x})
face <- lapply(face,function(x){ x[,1] <- ifelse(x[,1]>0,
ifelse(x[,2] > -30, x[,1],
pmax(0,x[,1]+(x[,2]+50)*0.2*sin(1.5*(-factors[3])))),0);x})
invert <- function(x) cbind(-x[,1], x[,2])
face.obj <- list(
eyer = face$eye,
eyel = invert(face$eye),
irisr = face$iris,
irisl = invert(face$iris),
lipso = rbind(face$lipso,invert(face$lipso[lipso.refl.ind,])),
lipsi = rbind(face$lipso["lipsiend",], face$lipsi,
invert(face$lipsi[lipsi.refl.ind,, drop=FALSE]),
invert(face$lipso["lipsiend",, drop=FALSE])),
earr = rbind(face$shape["earsta",], face$ear, face$shape["earend",]),
earl = invert(rbind(face$shape["earsta",], face$ear, face$shape["earend",])),
nose = rbind(face$nose,invert(face$nose[nose.refl.ind,])),
hair = rbind(face$shape["hairend",],face$hair,invert(face$hair[hair.refl.ind,]),
invert(face$shape["hairend",,drop=FALSE])),
shape = rbind(face$shape,invert(face$shape[shape.refl.ind,]))
)
face.obj$lipsi <- rbind(face.obj$lipsi, Rev(face.obj$lipsi, margin = 1))
face.list <- c(face.list, list(face.obj))
plot(1, type="n", xlim=c(-105, 105) * 1.1, axes=FALSE,
ylab="", ylim=c(-105, 105) * 1.3, xlab="")
title(xnames[ind])
f <- 1+(ncolors-1) * (factors+1)/2 # translate factors into color numbers
xtrans <- function(x){x}
ytrans <- function(y){y}
for(obj.ind in seq(face.obj)[c(10:11, 1:9)]) {
x <- face.obj[[obj.ind]][, 1]
y <- face.obj[[obj.ind]][, 2]
xx <- spline(1:length(x), x, 40, FALSE)[, 2]
yy <- spline(1:length(y), y, 40, FALSE)[, 2]
lines(xx, yy)
if(obj.ind == 10)
polygon(xtrans(xx), ytrans(yy), col=col.hair[ind], xpd=NA) # hair
if(obj.ind==11)
polygon(xtrans(xx), ytrans(yy), col=col.face[ind], xpd=NA) # face
xx <- xtrans(xx)
yy <- ytrans(yy)
if(obj.ind %in% 1:2) polygon(xx,yy,col="#eeeeee") # eyes without iris
if(obj.ind %in% 3:4) polygon(xx,yy,col=col.eyes[ind], xpd=NA) # eyes:iris
if(obj.ind %in% 9) polygon(xx,yy,col=col.nose[ind], xpd=NA)# nose
if(obj.ind %in% 5:6) polygon(xx,yy,col=col.lips[ind], xpd=NA) # lips
if(obj.ind %in% 7:8) polygon(xx,yy,col=col.ears[ind], xpd=NA)# ears
}
}
if(!missing(main)){
par(opar)
par(mfrow=c(1,1))
mtext(main, 3, 3, TRUE, 0.5)
title(main)
}
info <- c(
"height of face",
"width of face",
"structure of face",
"height of mouth",
"width of mouth",
"smiling",
"height of eyes",
"width of eyes",
"height of hair",
"width of hair",
"style of hair",
"height of nose",
"width of nose",
"width of ear",
"height of ear")
var.names <- dimnames(xy)[[2]]
if(0==length(var.names))
var.names <- paste("Var",rows.orig,sep="")
info <- data.frame("modified item"=info, "variable"=var.names[1:length(info)])
names(face.list) <- xnames
out <- list(faces=face.list, info=info,xy=t(xy))
class(out) <- "faces"
invisible(out)
}
## plots: PlotBag ====
####################################"
# the source code for the function
# from Hans Peter Wolf
#
# http://www.wiwi.uni-bielefeld.de/~wolf/software/R-wtools/bagplot/bagplot.R
#
#
##start:##
PlotBagPairs <- function(dm, trim = 0.0, main, numeric.only = TRUE,
factor = 3, approx.limit = 300, pch = 16,
cex = 0.8, precision = 1, col.loophull = "#aaccff",
col.looppoints = "#3355ff", col.baghull = "#7799ff",
col.bagpoints = "#000088", ...){
if(missing(main)) main <- paste(deparse(substitute(dm)),"/ trim =",round(trim,3))
if(length(trim) == 1) trim <- rep(trim, ncol(dm))
if(numeric.only){
dm <- dm[, idx <- sapply(1:ncol(dm), function(x) is.numeric(dm[,x]))]
trim <- trim[idx]
}
for(j in 1:ncol(dm)){
x <- dm[,j]
if(!is.numeric(x)) x <- as.numeric(x)
if( trim[j] > 0) {
na.idx <- is.na(x)
xlim <- quantile(x[!na.idx], c(trim[j] , 1-trim[j]))
x[ na.idx | x < xlim[1] | xlim[2] < x ] <- NA
}
dm[,j] <- x
}
# DM0 <<- dm
h.fn <- function(x,y){
idx <- !is.na(x) & !is.na(y)
x <- x[ idx ]; y <- y[ idx ]
BP <- PlotBag(x,y,add=TRUE,factor = factor, approx.limit = approx.limit, pch = pch,
cex = cex, precision = precision, col.loophull = col.loophull,
col.looppoints = col.looppoints, col.baghull = col.baghull,
col.bagpoints = col.bagpoints, verbose=FALSE)
# BP <<- BP ### for debugging
}
par(mfrow=c(1,1))
pairs(dm, panel = h.fn, ...)
mtext(main, line=2.5)
dm
}
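# A minimal usage sketch (assuming the built-in 'swiss' data set; not part of
# the original examples):
# PlotBagPairs(swiss[, 1:4], trim = 0.05)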
#0:
##start:##
compute.bagplot <- function(x,y,
factor=3, # expanding factor for bag to get the loop
na.rm=FALSE, # should NAs removed or exchanged
approx.limit=300, # limit
dkmethod=2, # in 1:2; method 2 is recommended
precision=1, # controls precision of computation
verbose=FALSE,debug.plots="no" # tools for debugging
){
"bagplot, version 2012/12/05, peter wolf"
# define some functions
win<-function(dx,dy){ atan2(y=dy,x=dx) }
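  # out.of.polygon: returns a logical vector with TRUE for each row of 'xy' that
  # lies outside the convex polygon 'pg' (points are tested against the normals
  # of all polygon edges)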
out.of.polygon<-function(xy,pg){ # 121026
xy<-matrix(xy,ncol=2)
# check trivial case
if(nrow(pg)==1) return(xy[,1]==pg[1] & xy[,2]==pg[2])
# store number of points of xy and polygon
m<-nrow(xy); n<-nrow(pg)
# find small value relative to polygon
limit <- -abs(1E-10*diff(range(pg)))
# find vectors that are orthogonal to segments of polygon
pgn<-cbind(diff(c(pg[,2],pg[1,2])),-diff(c(pg[,1],pg[1,1])))
# find center of gravity of xy
S<-colMeans(xy)
# compute negative distances of polygon to center of gravity of xy
dxy<-cbind(S[1]-pg[,1],S[2]-pg[,2])
# unused: S.in.pg<-all(limit<apply(dxy*pgn,1,sum))
if( !all( limit < apply(dxy*pgn,1,sum) ) ){
pg<-pg[n:1,]; pgn<--pgn[n:1,]
}
# initialize result
in.pg<-rep(TRUE,m)
for(j in 1:n){
dxy<-xy-matrix(pg[j,],m,2,byrow=TRUE)
in.pg<-in.pg & limit<(dxy%*%pgn[j,])
}
return(!in.pg)
}
cut.z.pg<-function(zx,zy,p1x,p1y,p2x,p2y){
a2<-(p2y-p1y)/(p2x-p1x); a1<-zy/zx
sx<-(p1y-a2*p1x)/(a1-a2); sy<-a1*sx
sxy<-cbind(sx,sy)
h<-any(is.nan(sxy))||any(is.na(sxy))||any(Inf==abs(sxy))
if(h){ # print("NAN found"); print(cbind(a1,a2,zx,zy,sxy,p2x-p1x))
if(!exists("verbose")) verbose<-FALSE
if(verbose) cat("special")
# zx is zero ### 121030
h<-0==zx
sx<-ifelse(h,zx,sx); sy<-ifelse(h,p1y-a2*p1x,sy)
# points on line defined by line segment
a1 <- ifelse( abs(a1) == Inf, sign(a1)*123456789*1E10, a1) # 121030
a2 <- ifelse( abs(a2) == Inf, sign(a2)*123456789*1E10, a2)
# points on line defined by line segment
h<-0==(a1-a2) & sign(zx)==sign(p1x)
sx<-ifelse(h,p1x,sx); sy<-ifelse(h,p1y,sy)
h<-0==(a1-a2) & sign(zx)!=sign(p1x)
sx<-ifelse(h,p2x,sx); sy<-ifelse(h,p2y,sy)
# line segment vertical
# & center NOT ON line segment
h<-p1x==p2x & zx!=p1x & p1x!=0
sx<-ifelse(h,p1x,sx); sy<-ifelse(h,zy*p1x/zx,sy)
# & center ON line segment
h<-p1x==p2x & zx!=p1x & p1x==0
sx<-ifelse(h,p1x,sx); sy<-ifelse(h,0,sy)
# & center NOT ON line segment & point on line ### 121126
h<-p1x==p2x & zx==p1x & p1x!=0 # & sign(zy)==sign(p1y)
sx<-ifelse(h,zx,sx); sy<-ifelse(h,zy,sy)
# & center ON line segment & point on line
h<-p1x==p2x & zx==p1x & p1x==0 & sign(zy)==sign(p1y)
sx<-ifelse(h,p1x,sx); sy<-ifelse(h,p1y,sy)
h<-p1x==p2x & zx==p1x & p1x==0 & sign(zy)!=sign(p1y)
sx<-ifelse(h,p1x,sx); sy<-ifelse(h,p2y,sy)
# points identical to end points of line segment
h<-zx==p1x & zy==p1y; sx<-ifelse(h,p1x,sx); sy<-ifelse(h,p1y,sy)
h<-zx==p2x & zy==p2y; sx<-ifelse(h,p2x,sx); sy<-ifelse(h,p2y,sy)
# point of z is center
h<-zx==0 & zy==0; sx<-ifelse(h,0,sx); sy<-ifelse(h,0,sy)
sxy<-cbind(sx,sy)
} # end of special cases
#if(verbose){ print(rbind(a1,a2));print(cbind(zx,zy,p1x,p1y,p2x,p2y,sxy))}
if(!exists("debug.plots")) debug.plots<-"no"
if(debug.plots=="all"){
segments(sxy[,1],sxy[,2],zx,zy,col="red")
segments(0,0,sxy[,1],sxy[,2],col="green",lty=2) ##!!
points(sxy,col="red")
}
return(sxy)
}
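  # find.cut.z.pg: for every point in 'z', compute the intersection of the ray
  # from 'center' through that point with the polygon 'pg'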
find.cut.z.pg<-function(z,pg,center=c(0,0),debug.plots="no"){
if(!is.matrix(z)) z<-rbind(z)
if(1==nrow(pg)) return(matrix(center,nrow(z),2,TRUE))
n.pg<-nrow(pg); n.z<-nrow(z)
z<-cbind(z[,1]-center[1],z[,2]-center[2])
pgo<-pg; pg<-cbind(pg[,1]-center[1],pg[,2]-center[2])
if(!exists("debug.plots")) debug.plots<-"no"
if(debug.plots=="all"){
plot(rbind(z,pg,0),bty="n"); points(z,pch="p")
lines(c(pg[,1],pg[1,1]),c(pg[,2],pg[1,2]))}
    # find angles of pg and z
apg<-win(pg[,1],pg[,2])
apg[is.nan(apg)]<-0; a<-order(apg); apg<-apg[a]; pg<-pg[a,]
az<-win(z[,1],z[,2])
# find line segments
segm.no<-apply((outer(apg,az,"<")),2,sum)
segm.no<-ifelse(segm.no==0,n.pg,segm.no)
next.no<-1+(segm.no %% length(apg))
# compute cut points
cuts<-cut.z.pg(z[,1],z[,2],pg[segm.no,1],pg[segm.no,2],
pg[next.no,1],pg[next.no,2])
# rescale
cuts<-cbind(cuts[,1]+center[1],cuts[,2]+center[2])
return(cuts)
}
# find.cut.z.pg(EX, EX1,center=CE)
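  # hdepth.of.points: halfspace depth of each row of 'tp' with respect to 'xy',
  # computed by an angular sweep around every test point (older variant; the
  # active code paths call find.hdepths.tp below instead)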
hdepth.of.points<-function(tp){
# 121030 second parameter n has been removed
# if(!exists("precision")) precision <- 1 ### 121203
# return(find.hdepths.tp(tp, xy, 181*precision)) ### 121202
n.tp<-nrow(tp)
tphdepth<-rep(0,n.tp); dpi<-2*pi-0.000001
for(j in 1:n.tp) {
dx<-tp[j,1]-xy[,1]; dy<-tp[j,2]-xy[,2]
a<-win(dx,dy)+pi; h<-a<10; a<-a[h]; ident<-sum(!h)
init<-sum(a < pi); a.shift<-(a+pi) %% dpi
minusplus<-c(rep(-1,length(a)),rep(1,length(a))) #### 070824
h<-cumsum(minusplus[order(c(a,a.shift))])
tphdepth[j]<-init+min(h)+1 # +1 because of the point itself!!
# tphdepth[j]<-init+min(h)+ident; cat("SUMME",ident)
}
tphdepth
}
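  # find.hdepths.tp: approximate halfspace (Tukey) depth of the test points 'tp'
  # relative to 'data', obtained by projecting onto a grid of directions and
  # taking the minimum one-sided rank over all directions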
find.hdepths.tp <- function(tp, data, number.of.directions=181){ # 121130
# standardize dimensions
xy <- as.matrix(data); tp <- as.matrix(rbind(tp)); n.tp <- dim(tp)[1]
for( j in 1:2) {
xy[,j] <- xy[,j] - (h <- min(xy[,j], na.rm=TRUE))
tp[,j] <- tp[,j] - h
if( 0 < (h <- max(xy[,j], na.rm=TRUE))){
xy[,j] <- xy[,j]/h; tp[,j] <- tp[,j]/h
}
}
##loop over directions##
phi <- c(seq(0,180,length=number.of.directions)[-1]*(2*pi/360))
sinphi <- c(sin(phi),1); cosphi <- c(cos(phi),0)
RM1 <- round(digits=6,rbind(cosphi,sinphi))
hdtp <- rep(length(xy[,1]),length(tp[,1]))
for( j in seq(along=sinphi)){ #print(j)
xyt <- xy %*% RM1[,j]; tpt <- (tp %*% RM1[,j])[]
xyt <- xyt[!is.na(xyt)] #; tpt <- sort(tpt)
hdtp <- pmin(hdtp,(rank( c(tpt,xyt), ties.method="min"))[1:n.tp]
-rank( tpt,ties.method="min")
,rank(-c(tpt,xyt), ties.method="min")[1:n.tp]
-rank(-tpt,ties.method="min")
)
}
hdtp
}
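  # expand.hull: pushes the vertices of polygon 'pg' outwards along rays towards
  # the hull of the depth-1 points as long as the halfspace depth stays >= k;
  # used to construct the hulls when dkmethod == 1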
expand.hull<-function(pg,k){
if( 1 >= nrow(pg) ) return(pg) ## 121026 ## 121123 <= statt ==
resolution<-floor(20*precision)
pg0<-xy[hdepth==1,]
pg0<-pg0[chull(pg0[,1],pg0[,2]),]
end.points<-find.cut.z.pg(pg,pg0,center=center,debug.plots=debug.plots)
lam<-((0:resolution)^1)/resolution^1
pg.new<-pg
for(i in 1L:nrow(pg)){
tp<-cbind(pg[i,1]+lam*(end.points[i,1]-pg[i,1]),
pg[i,2]+lam*(end.points[i,2]-pg[i,2]))
# hd.tp<-hdepth.of.points(tp)
hd.tp<-find.hdepths.tp(tp,xy)
ind<-max(sum(hd.tp>=k),1)
if(ind<length(hd.tp)){ # hd.tp[ind]>k &&
tp<-cbind(tp[ind,1]+lam*(tp[ind+1,1]-tp[ind,1]),
tp[ind,2]+lam*(tp[ind+1,2]-tp[ind,2]))
# hd.tp<-hdepth.of.points(tp)
        hd.tp<-find.hdepths.tp(tp,xy)
ind<-max(sum(hd.tp>=k),1)
}
pg.new[i,]<-tp[ind,]
}
pg.new<-pg.new[chull(pg.new[,1],pg.new[,2]),]
# cat("depth pg.new", hdepth.of.points(pg.new))
# cat("depth pg.new", find.hdepths.tp(pg.new,xy))
pg.add<-0.5*(pg.new+rbind(pg.new[-1,],pg.new[1,]))
# end.points<-find.cut.z.pg(pg,pg0,center=center)
end.points<-find.cut.z.pg(pg.add,pg0,center=center) #### 070824
for(i in 1L:nrow(pg.add)){
tp<-cbind(pg.add[i,1]+lam*(end.points[i,1]-pg.add[i,1]),
pg.add[i,2]+lam*(end.points[i,2]-pg.add[i,2]))
# hd.tp<-hdepth.of.points(tp)
hd.tp<-find.hdepths.tp(tp,xy)
ind<-max(sum(hd.tp>=k),1)
if(ind<length(hd.tp)){ # hd.tp[ind]>k &&
tp<-cbind(tp[ind,1]+lam*(tp[ind+1,1]-tp[ind,1]),
tp[ind,2]+lam*(tp[ind+1,2]-tp[ind,2]))
# hd.tp<-hdepth.of.points(tp)
hd.tp<-find.hdepths.tp(tp,xy)
ind<-max(sum(hd.tp>=k),1)
}
pg.add[i,]<-tp[ind,]
}
# cat("depth pg.add", hdepth.of.points(pg.add))
pg.new<-rbind(pg.new,pg.add)
pg.new<-pg.new[chull(pg.new[,1],pg.new[,2]),]
}
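  # cut.p.sl.p.sl: intersection point of two straight lines, each given by a
  # point (xy1, xy2) and a slope (m1, m2); vertical lines are handled via the
  # Inf-slope cases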
cut.p.sl.p.sl<-function(xy1,m1,xy2,m2){
sx<-(xy2[2]-m2*xy2[1]-xy1[2]+m1*xy1[1])/(m1-m2)
sy<-xy1[2]-m1*xy1[1]+m1*sx
if(!is.nan(sy)) return( c(sx,sy) )
if(abs(m1)==Inf) return( c(xy1[1],xy2[2]+m2*(xy1[1]-xy2[1])) )
if(abs(m2)==Inf) return( c(xy2[1],xy1[2]+m1*(xy2[1]-xy1[1])) )
}
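  # pos.to.pg: classifies each point of 'z' as "lower" or "higher" relative to
  # the piecewise linear chain 'pg' (columns: x, y and the slope of the
  # following segment)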
pos.to.pg<-function(z,pg,reverse=FALSE){
if(reverse){
int.no<-apply(outer(pg[,1],z[,1],">="),2,sum)
zy.on.pg<-pg[int.no,2]+pg[int.no,3]*(z[,1]-pg[int.no,1])
}else{
int.no<-apply(outer(pg[,1],z[,1],"<="),2,sum)
zy.on.pg<-pg[int.no,2]+pg[int.no,3]*(z[,1]-pg[int.no,1])
}
#### ifelse(z[,2]<zy.on.pg, "lower","higher") ##### 121004
result <- ifelse(z[,2]<zy.on.pg, "lower","higher") ####
return(result)
if( all(result=="lower") ){
result <- ifelse(((z[,2] - zy.on.pg)/max(z[,2] - zy.on.pg)+1e-10) < 0,
"lower","higher")
}
if( all(result=="higher") ){
result <- ifelse(((z[,2] - zy.on.pg)/max(z[,2] - zy.on.pg)-1e-10) < 0,
"lower","higher")
}
print(result)
return(result)
}
find.polygon.center<-function(xy){
#### if(missing(xy)){n<-50;x<-rnorm(n);y<-rnorm(n); xy<-cbind(x,y)}
#### xy<-xy[chull(xy),]
if(length(xy)==2) return(xy[1:2])
if(nrow(xy)==2) return(colMeans(xy)) #### 121009
#### partition polygon into triangles
n<-length(xy[,1]); mxy<-colMeans(xy)
xy2<-rbind(xy[-1,],xy[1,]); xy3<-cbind(rep(mxy[1],n),mxy[2])
#### determine areas and centers of gravity of triangles
S<-(xy+xy2+xy3)/3
F2<-abs((xy[,1]-xy3[,1])*(xy2[,2]-xy3[,2])-
(xy[,2]-xy3[,2])*(xy2[,1]-xy3[,1]))
#### compute center of gravity of polygon
lambda<-F2/sum(F2)
SP<-colSums(cbind(S[,1]*lambda,S[,2]*lambda))
return(SP)
}
# check input
xydata<-if(missing(y)) x else cbind(x,y)
if(is.data.frame(xydata)) xydata<-as.matrix(xydata)
if(any(is.na(xydata))){
if(na.rm){ xydata<-xydata[!apply(is.na(xydata),1,any),,drop=FALSE]
print("Warning: NA elements have been removed!!")
}else{ #121129
xy.medians<-apply(xydata,2,function(x) median(x, na.rm=TRUE))
# colMeans(xydata,na.rm=TRUE)
for(j in 1:ncol(xydata)) xydata[is.na(xydata[,j]),j]<-xy.medians[j]
print("Warning: NA elements have been exchanged by median values!!")
}
}
# if(nrow(xydata)<3) {print("not enough data points"); return()} ### 121008
if(length(xydata)<4) {print("not enough data points"); return()}
if((length(xydata)%%2)==1) {print("number of values isn't even"); return()}
if(!is.matrix(xydata)) xydata<-matrix(xydata,ncol=2,byrow=TRUE)
# select sample in case of a very large data set
very.large.data.set<-nrow(xydata) > approx.limit
# use of random number generator may disturb simulation
# therefore we now use a systematical part of the data 20120930
#### OLD: set.seed(random.seed<-13) #### SEED
if(very.large.data.set){
#### OLD: ind<-sample(seq(nrow(xydata)),size=approx.limit)
step<-(n<-nrow(xydata))/approx.limit; ind <- round(seq(1,n,by=step))
xy<-xydata[ind,]
} else xy<-xydata
n<-nrow(xy)
points.in.bag<-floor(n/2)
# if jittering is needed
# the following two lines can be activated
#xy<-xy+cbind(rnorm(n,0,.0001*sd(xy[,1])),
# rnorm(n,0,.0001*sd(xy[,2])))
if(verbose) cat("end of initialization")
prdata<-prcomp(xydata)
is.one.dim<-(0 == max(prdata[[1]])) || (min(prdata[[1]])/max(prdata[[1]]))<0.00001 # 121129
if(is.one.dim){
if(verbose) cat("data set one dimensional")
center<-colMeans(xydata)
res<-list(xy=xy,xydata=xydata,prdata=prdata,
is.one.dim=is.one.dim,center=center)
class(res)<-"bagplot"
return(res)
}
if(verbose) cat("data not linear")
if(nrow(xydata)<=4) {
if(verbose) cat("only three or four data points")
center<-colMeans(xydata)
res<-list(xy=xy,xydata=xydata,prdata=prdata,hdepths=rep(1,n),hdepth=rep(1,n),
is.one.dim=is.one.dim,center=center,hull.center=NULL,
hull.bag=NULL,hull.loop=NULL,pxy.bag=NULL,pxy.outer=xydata,
pxy.outlier=NULL,exp.dk=xydata)
class(res)<-"bagplot"
return(res)
}
xym<-apply(xy,2,mean); xysd<-apply(xy,2,sd)
xyxy<-cbind((xy[,1]-xym[1])/xysd[1],(xy[,2]-xym[2])/xysd[2])
dx<-(outer(xy[,1],xy[,1],"-"))
dy<-(outer(xy[,2],xy[,2],"-"))
alpha<-atan2(y=dy,x=dx); diag(alpha)<-1000
for(j in 1:n) alpha[,j]<-sort(alpha[,j])
alpha<-alpha[-n,] ; m<-n-1
#### quick look inside, just for check
if(debug.plots=="all"){
plot(xy,bty="n"); xdelta<-abs(diff(range(xy[,1]))); dx<-xdelta*.3
for(j in 1:n) {
p<-xy[j,]; dy<-dx*tan(alpha[,j])
segments(p[1]-dx,p[2]-dy,p[1]+dx,p[2]+dy,col=j)
text(p[1]-xdelta*.02,p[2],j,col=j)
}
}
if(verbose) print("end of computation of angles")
hdepth<-rep(0,n); dpi<-2*pi-0.000001; mypi<-pi-0.000001
minusplus<-c(rep(-1,m),rep(1,m))
if(FALSE){
for(j in 1:n) {
a<-alpha[,j]+pi; h<-a<10; a<-a[h]; init<-sum(a < mypi) # hallo
a.shift<-(a+pi) %% dpi
minusplus<-c(rep(-1,length(a)),rep(1,length(a))) #### 070824
h<-cumsum(minusplus[order(c(a,a.shift))])
hdepth[j]<-init+min(h)+1 # or do we have to count identical points?
# hdepth[j]<-init+min(h)+sum(xy[j,1]==xy[,1] & xy[j,2]==xy[,2])
}
}
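  # find.hdepths: halfspace depth of every observation in 'xy', computed with
  # the same projection/rank idea as find.hdepths.tp above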
find.hdepths <- function(xy, number.of.directions=181){ # 121126
xy <- as.matrix(xy)
for( j in 1:2) {
xy[,j] <- xy[,j] - min(xy[,j])
if( 0 < (h <- max(xy[,j]))) xy[,j] <- xy[,j] / max(xy[,j])
}
phi <- c(seq(0,180,length=number.of.directions)[-1]*(2*pi/360))
sinphi <- c(sin(phi),1); cosphi <- c(cos(phi),0)
RM1 <- round(digits=6,rbind(cosphi,sinphi))
hd <- rep(h<-length(xy[,1]),h)
for( j in seq(along=sinphi)){
xyt <- xy %*% RM1[,j]
hd <- pmin(hd,rank(xyt,ties.method="min"), rank(-xyt,ties.method="min"))
}
# xyt <- xy %*% RM1
# hd2 <- cbind(apply(xyt, 2, rank, ties.method="min"),
# apply(-xyt,2, rank, ties.method="min"))
# hd2 <- apply(hd2, 1, min)
hd
}
hdepth <- find.hdepths(xy,181*precision)
if(verbose){print("end of computation of hdepth:"); print(hdepth)}
#### quick look inside, just for a check
if(debug.plots=="all"){
plot(xy,bty="n")
xdelta<-abs(diff(range(xy[,1]))); dx<-xdelta*.1
for(j in 1:n) {
a<-alpha[,j]+pi; a<-a[a<10]; init<-sum(a < pi)
a.shift<-(a+pi) %% dpi
minusplus<-c(rep(-1,length(a)),rep(1,length(a))) #### 070824
h<-cumsum(minusplus[ao<-(order(c(a,a.shift)))])
no<-which((init+min(h)) == (init+h))[1]
p<-xy[j,]; dy<-dx*tan(alpha[,j])
segments(p[1]-dx,p[2]-dy,p[1]+dx,p[2]+dy,col=j,lty=3)
dy<-dx*tan(c(sort(a),sort(a))[no])
segments(p[1]-5*dx,p[2]-5*dy,p[1]+5*dx,p[2]+5*dy,col="black")
text(p[1]-xdelta*.02,p[2],hdepth[j],col=1) # cex=2.5 assumes suitable fonts
}
}
hd.table<-table(sort(hdepth))
d.k<-cbind(dk=rev(cumsum(rev(hd.table))),
k =as.numeric(names(hd.table)))
k.1<-sum( points.in.bag < d.k[,1] )
# if(nrow(d.k)>1){ ### version 09/2005, error in data set 1 of Meuleman
# instead of >2 now >k.1 ### 070827
# if(nrow(d.k)>k.1){ k<-d.k[k.1+1,2] } else { k<-d.k[k.1,2] }
# this statement will not have an effect because of the next one:
k<-d.k[k.1,2]+1 # 121004 increment depth by one not by looking for next depth
if(verbose){cat("numbers of members of dk:"); print(hd.table); print(d.k)}
if(verbose){cat("end of computation of k, k=",k,"k.1:",k.1)}
# D.K<<-d.k; K.1<<-k.1; EX<<-exp.dk; EX.1<<-exp.dk.1; PDK<<-pdk; HDEPTH<<-hdepth
center<-apply(xy[which(hdepth==max(hdepth)),,drop=FALSE],2,mean)
hull.center<-NULL
if(3<nrow(xy)&&length(hd.table)>0){
n.p<-floor(1.5*c(32,16,8)[1+(n>50)+(n>200)]*precision)
# limit.hdepth.to.check <- sort(hdepth, decreasing = TRUE)[min(nrow(xy),6)]
# 121126
h <- unique(sort(hdepth, decreasing = TRUE))
limit.hdepth.to.check <- sort(h)[min(length(h),3)]
h<-cands<-xy[limit.hdepth.to.check <= hdepth,,drop=FALSE]
# h<-cands<-xy[rev(order(hdepth))[1:(min(nrow(xy),6))],]
cands<-cands[chull(cands[,1],cands[,2]),]; n.c<-nrow(cands)
if(is.null(n.c))cands<-h
xyextr<-rbind(apply(cands,2,min),apply(cands,2,max))
## xydel<-2*(xyextr[2,]-xyextr[1,])/n.p ## unused
if( (xyextr[2,1]-xyextr[1,1]) < 0.2*(h <- diff(range(xy[,1])))){
xyextr[1:2,1] <- mean(xyextr[,1]) + c(-.1,.1) * h } #### 121203
if( (xyextr[2,2]-xyextr[1,2]) < 0.2*(h <- diff(range(xy[,2])))){
xyextr[1:2,2] <- mean(xyextr[,2]) + c(-.1,.1) * h } #### 121203
if(verbose){cat("xyextr: looking for maximal depth"); print(xyextr) }
h1<-seq(xyextr[1,1],xyextr[2,1],length=n.p)
h2<-seq(xyextr[1,2],xyextr[2,2],length=n.p)
tp<-cbind(as.vector(matrix(h1,n.p,n.p)), # [1:n.p^2],
as.vector(matrix(h2,n.p,n.p,TRUE))) # [1:n.p^2])
# tphdepth<-max(hdepth.of.points(tp))-1
tphdepth<-max(find.hdepths.tp(tp,xy))
# if(verbose) { TP<<-tp; TPD<<-find.hdepths.tp(tp,xy) }
if(verbose) cat("points(TP,pch=c(letters,LETTERS)[TPD+1])")
# if max of testpoint is smaller than max depth of points take that max!
if(verbose){ cat("depth of testpoints"); print(summary(tphdepth)) } # 121126
tphdepth<-max(tphdepth,d.k[,2]) # 121004
# define direction for hdepth search
num<-floor(2*c(417,351,171,85,67,43)[sum(n>c(1,50,100,150,200,250))]*precision)
num.h<-floor(num/2); angles<-seq(0,pi,length=num.h)
ang<-tan(pi/2-angles)
kkk<-tphdepth
if(verbose){cat("max-hdepth found:"); print(kkk)}
if(verbose) cat("find polygon with max depth")
ia<-1; a<-angles[ia]; xyt<-xyxy%*%c(cos(a),-sin(a)); xyto<-order(xyt)
# initial for upper part
ind.k<-xyto[kkk]; cutp<-c(xyxy[ind.k,1],-10)
dxy<-diff(range(xyxy))
pg<-rbind(c(cutp[1],-dxy,Inf),c(cutp[1],dxy,NA))
# initial for lower part
ind.kk<-xyto[n+1-kkk]; cutpl<-c(xyxy[ind.kk,1],10)
# pgl<-rbind(c(cutpl[1],dxy,Inf),c(cutpl[1],-dxy,NA))
pgl<-rbind(c(cutpl[1],dxy,-Inf),c(cutpl[1],-dxy,NA))
# the sign of inf doesn't matter
if(debug.plots=="all"){ plot(xyxy,type="p",bty="n")
text(xy,,1:n,col="blue")
hx<-xy[ind.k,c(1,1)]; hy<-xy[ind.k,c(2,2)]
segments(hx,hy,c(10,-10),hy+ang[ia]*(c(10,-10)-hx),lty=2)
text(hx+rnorm(1,,.1),hy+rnorm(1,,.1),ia)
}
if(verbose) cat("start of computation of the directions: ","kkk=",kkk) # 121030
for(ia in seq(angles)[-1]){
# determine critical points pnew and pnewl of direction a
# if(verbose) cat("ia",ia,angles[ia])
# 121030
a<-angles[ia]; angtan<-ang[ia]; xyt<-xyxy%*%c(cos(a),-sin(a)); xyto<-order(xyt)
ind.k <-xyto[kkk]; ind.kk<-xyto[n+1-kkk]; pnew<-xyxy[ind.k,]; pnewl<-xyxy[ind.kk,]
# if(verbose) if( 1 < sum(xyt == xyt[ind.k]) )print("WARNING: some points identical")
if(debug.plots=="all") points(pnew[1],pnew[2],col="red")
# new limiting lines are defined by pnew / pnewl and slope a
# find segment of polygon that is cut by new limiting line and cut
# if(ia>200) { #<show pg pgl>#; points(pnew[1],pnew[2],col="magenta",cex=6) }
if( abs(angtan)>1e10){ if(verbose) cat("kkk",kkk,"x=c case")
# case of vertical slope #print(pg);print(pnew);print(xyt);lines(pg,col="red",lwd=3)
# number of points left of point pnew that limit the polygon
pg.no<-sum(pg[,1]<pnew[1])
if( 0 < pg.no ){
# the polygon (segment pg.no) has to be cut at x==pnew[1]
cutp <- c(pnew[1], pg [pg.no, 2]+pg [pg.no, 3]*(pnew [1]-pg [pg.no ,1]))
pg<- rbind(pg[1:pg.no,], c(cutp,angtan), c(cutp[1]+dxy, cutp[2] +angtan*dxy,NA))
} else {
if(verbose) cat("!!! case degenerated UPPER polygon: pg.no==0")
# the limiting point pnew is above the beginning of the polygon
# therefore, the polygon reduces to line
pg <- rbind(pg[1,], c(pg[2,1:2],NA))
}
pg.nol<-sum(pgl[,1]>=pnewl[1])
if( 0 < pg.nol ){ #??2 ### 121204
cutpl<-c(pnewl[1],pgl[pg.nol,2]+pgl[pg.nol,3]*(pnewl[1]-pgl[pg.nol,1]))
pgl<-rbind(pgl[1:pg.nol,],c(cutpl,angtan),c(cutpl[1]-dxy, cutpl[2]-angtan*dxy,NA))
} else {
if(verbose) cat("!!! case degenerated LOWER polygon: pgl.no==0")
pgl <- rbind(pgl[1,], c(pgl[2,1:2],NA))
}
}else{ # if(verbose) cat("kkk",kkk,"normal case")
# normal case upper polygon
pg.inter<-pg[,2]-angtan*pg[,1]; pnew.inter<-pnew[2]-angtan*pnew[1]
pg.no<-sum(pg.inter<pnew.inter)
if(is.na(pg[pg.no,3])) pg[pg.no,3] <- -Inf # 121129 NaN/Na error
cutp<-cut.p.sl.p.sl(pnew,ang[ia],pg[pg.no,1:2],pg[pg.no,3])
pg<- rbind(pg[1:pg.no,], c(cutp,angtan), c(cutp[1]+dxy, cutp[2] +angtan*dxy,NA))
# normal case lower polygon
pg.interl<-pgl[,2]-angtan*pgl[,1]; pnew.interl<-pnewl[2]-angtan*pnewl[1]
pg.nol<-sum(pg.interl>pnew.interl)
if(is.na(pgl[pg.nol,3])) pgl[pg.nol,3] <- Inf # 121129 NaN/Na error
cutpl<-cut.p.sl.p.sl(pnewl,angtan,pgl[pg.nol,1:2],pgl[pg.nol,3])
pgl<-rbind(pgl[1:pg.nol,],c(cutpl,angtan),c(cutpl[1]-dxy, cutpl[2]-angtan*dxy,NA))
}
# if(kkk==KKK && ia == 51) { cat("ENDE: pgl"); print(pgl) }
# update pg, pgl completed
# PG<<-pg;PG.NO<<-pg.no;CUTP<<-cutp;DXY<<-dxy;PNEW<<-pnew;PGL<<-pgl;PG.NOL<<-pg.nol
#
# cat("angtan",angtan,"pg.no",pg.no,"pkt:",pnew)
# if(ia==stopp) lines(pg,type="b",col="green")
if(debug.plots=="all"){
points(pnew[1],pnew[2],col="red")
hx<-xyxy[ind.k,c(1,1)]; hy<-xyxy[ind.k,c(2,2)]
segments(hx,hy,c(10,-10),hy+ang[ia]*(c(10,-10)-hx),lty=2)
# text(hx+rnorm(1,,.1),hy+rnorm(1,,.1),ia)
# print(pg)
# if(ia==stopp) lines(pgl,type="b",col="green")
points(cutpl[1],cutpl[2],col="red")
hx<-xyxy[ind.kk,c(1,1)]; hy<-xyxy[ind.kk,c(2,2)]
segments(hx,hy,c(10,-10),hy+ang[ia]*(c(10,-10)-hx),lty=2)
# text(hx+rnorm(1,,.1),hy+rnorm(1,,.1),ia)
# print(pgl)
}
##show pg pgl##
}
# if(verbose) PG <<- pg; PGL <<- pgl
if(2<nrow(pg) && 2<nrow(pgl)){
# plot(xyxy[,1:2],xlim=c(-.5,+.5),ylim=c(-.5,.50))
# lines(pg,type="b",col="red"); lines(pgl,type="b",col="blue")
# remove first and last points and multiple points #<show pg pgl>#
limit<-1e-10
# pg <-pg [c(TRUE,(abs(diff(pg [,1]))>limit)|(abs(diff(pg [,2]))>limit)),] old#
idx <- c(TRUE,(abs(diff(pg [,1]))>limit)|(abs(diff(pg [,2]))>limit)) # 121008
if(any(idx==FALSE)){
pg <-pg[idx,]; pg[,3] <- c(diff(pg[,2])/diff(pg[,1]), NA)
}
# old reduction which caused some errors:
# pgl<-pgl[c(TRUE,(abs(diff(pgl[,1]))>limit)|(abs(diff(pgl[,2]))>limit)),] error##
# pgl<-pgl[c( (abs(diff(pgl[,1]))>limit)|(abs(diff(pgl[,2]))>limit),TRUE),] old#
idx <- c( (abs(diff(pgl[,1]))>limit)|(abs(diff(pgl[,2]))>limit),TRUE)#121008
if(any(idx==FALSE)){
pgl<-pgl[idx,]; pgl[,3] <- c(diff(pgl[,2])/diff(pgl[,1]), NA)
}
# add some tolerance in course of numerical problems
pgl[,2]<-pgl[,2] - .00001 # 121004
# show pg pgl>>
pg<- pg [-nrow(pg ),][-1,,drop=FALSE]
pgl<-pgl[-nrow(pgl),][-1,,drop=FALSE]
# determine position according to the other polygon
# cat("relative position: lower polygon")
indl<-pos.to.pg(round(pgl,digits=10),round(pg,digits=10)) # 121126
# cat("relative position: upper polygon")
indu<-pos.to.pg(round(pg,digits=10),round(pgl,digits=10),TRUE)
sr<-sl<-NULL # ; ##show pg pgl>>
# right region
if(indu[(npg<-nrow(pg))]=="lower" & indl[1]=="higher"){
# cat("in if of right region: the upper polynom is somewhere lower")
# checking from the right: last point of lower polygon that is NOT ok
rnuml<-which(indl=="lower")[1]-1
# checking from the left: last point of upper polygon that is ok
rnumu<-npg+1-which(rev(indu=="higher"))[1]
# special case all points of lower polygon are upper
if(is.na(rnuml)) rnuml<-sum(pg[rnumu,1]<pgl[,1])
# special case all points of upper polygon are lower
if(is.na(rnumu)) rnumu<-sum(pg[,1]<pgl[rnuml,1])
xyl<-pgl[rnuml,]; xyu<-pg[rnumu,]
# cat("right"); print(rnuml); print(xyl)
# cat("right"); print(rnumu); print(xyu)
sr<-cut.p.sl.p.sl(xyl[1:2],xyl[3],xyu[1:2],xyu[3])
}
# left region
if(indl[(npgl<-nrow(pgl))]=="higher"&indu[1]=="lower"){
# cat("in if of left region: the upper polynom is somewhere lower")
# checking from the right: last point of lower polygon that is ok
lnuml<-npgl+1-which(rev(indl=="lower"))[1]
# checking from the left: last point of upper polygon that is NOT ok
lnumu<-which(indu=="higher")[1]-1
# special case all points of lower polygon are upper
if(is.na(lnuml)) lnuml<-sum(pg[lnumu,1]<pgl[,1])
# special case all points of upper polygon are lower
if(is.na(lnumu)) lnumu<-sum(pg[,1]<pgl[lnuml,1])
xyl<-pgl[lnuml,]; xyu<-pg[lnumu,]
# cat("left"); print(lnuml); print(xyl)
# cat("left"); print(lnumu); print(xyu)
sl<-cut.p.sl.p.sl(xyl[1:2],xyl[3],xyu[1:2],xyu[3])
}
# if(kkk==2){ ##show pg pgl##; INDU<<-indu; INDL<<-indl; PGL<<-pgl; PGU<<-pg}
pg<-rbind(pg [indu=="higher",1:2,drop=FALSE],sr,
pgl[indl=="lower", 1:2,drop=FALSE],sl)
if(debug.plots=="all") lines(rbind(pg,pg[1,]),col="red")
if(!any(is.na(pg))) pg<-pg[chull(pg[,1],pg[,2]),]
# if(kkk==7){ PG <<- pg }
} else {
if(2<nrow(pgl)){ #121204
pg <- rbind(pg[2,1:2],pgl[-c(1,length(pgl[,1])),1:2])
} else {
pg <- rbind(pg [-c(1,length(pg [,1])),1:2],pgl[2,1:2])
# rbind(pgl[2,1:2],pg[2,1:2])
}
}
if(verbose) cat("END of computation of the directions")
hull.center<-cbind(pg[,1]*xysd[1]+xym[1],pg[,2]*xysd[2]+xym[2])
if(!any(is.na(hull.center))) center<-find.polygon.center(hull.center) else
hull.center <- rbind(center) # 121126
if(verbose){ cat("CENTER"); print(center) }
if(verbose){cat("hull.center",hull.center); print(table(tphdepth)) }
}
# if(verbose) cat("center depth:",hdepth.of.points(rbind(center))-1)
if(verbose) cat("center depth:",find.hdepths.tp(rbind(center),xy)-1)
if(verbose){print("end of computation of center"); print(center)}
if(dkmethod==1){
# inner hull of bag
xyi<-xy[hdepth>=k,,drop=FALSE] # cat("dim XYI", dim(xyi))
# 121028 some corrections for strange k situations
if(0 < length(xyi)) pdk<-xyi[chull(xyi[,1],xyi[,2]),,drop=FALSE]
# outer hull of bag
if( k > 1 ){
xyo<-xy[hdepth>=(k-1),,drop=FALSE]
pdk.1<-xyo[chull(xyo[,1],xyo[,2]),,drop=FALSE]
} else pdk.1 <- pdk
if(0 == length(xyi)) pdk <- pdk.1
if(verbose)cat("hull computed: pdk, pdk.1:")
if(verbose){print(pdk); print(pdk.1) }
if(debug.plots=="all"){
plot(xy,bty="n")
h<-rbind(pdk,pdk[1,]); lines(h,col="red",lty=2)
h<-rbind(pdk.1,pdk.1[1,]);lines(h,col="blue",lty=3)
points(center[1],center[2],pch=8,col="red")
}
exp.dk<-expand.hull(pdk,k)
exp.dk.1<-expand.hull(exp.dk,k-1) # pdk.1,k-1,20)
}else{
# define direction for hdepth search
num<-floor(2*c(417,351,171,85,67,43)[sum(n>c(1,50,100,150,200,250))]*precision)
num.h<-floor(num/2); angles<-seq(0,pi,length=num.h)
ang<-tan(pi/2-angles)
# standardization of data set xyxy is used
kkk<-k
if(verbose) print("find polygon with depth something higher than that of the bag")
if( kkk <= max(d.k[,2]) ){ # inner one ### 121030
ia<-1; a<-angles[ia]; xyt<-xyxy%*%c(cos(a),-sin(a)); xyto<-order(xyt)
# initial for upper part
ind.k<-xyto[kkk]; cutp<-c(xyxy[ind.k,1],-10)
dxy<-diff(range(xyxy))
pg<-rbind(c(cutp[1],-dxy,Inf),c(cutp[1],dxy,NA))
# initial for lower part
ind.kk<-xyto[n+1-kkk]; cutpl<-c(xyxy[ind.kk,1],10)
# pgl<-rbind(c(cutpl[1],dxy,Inf),c(cutpl[1],-dxy,NA))
pgl<-rbind(c(cutpl[1],dxy,-Inf),c(cutpl[1],-dxy,NA))
# the sign of inf doesn't matter
if(debug.plots=="all"){ plot(xyxy,type="p",bty="n")
text(xy,,1:n,col="blue")
hx<-xy[ind.k,c(1,1)]; hy<-xy[ind.k,c(2,2)]
segments(hx,hy,c(10,-10),hy+ang[ia]*(c(10,-10)-hx),lty=2)
text(hx+rnorm(1,,.1),hy+rnorm(1,,.1),ia)
}
if(verbose) cat("start of computation of the directions: ","kkk=",kkk) # 121030
for(ia in seq(angles)[-1]){
# determine critical points pnew and pnewl of direction a
# if(verbose) cat("ia",ia,angles[ia])
# 121030
a<-angles[ia]; angtan<-ang[ia]; xyt<-xyxy%*%c(cos(a),-sin(a)); xyto<-order(xyt)
ind.k <-xyto[kkk]; ind.kk<-xyto[n+1-kkk]; pnew<-xyxy[ind.k,]; pnewl<-xyxy[ind.kk,]
# if(verbose) if( 1 < sum(xyt == xyt[ind.k]) )print("WARNING: some points identical")
if(debug.plots=="all") points(pnew[1],pnew[2],col="red")
# new limiting lines are defined by pnew / pnewl and slope a
# find segment of polygon that is cut by new limiting line and cut
# if(ia>200) { #<show pg pgl>#; points(pnew[1],pnew[2],col="magenta",cex=6) }
if( abs(angtan)>1e10){ if(verbose) cat("kkk",kkk,"x=c case")
# case of vertical slope #print(pg);print(pnew);print(xyt);lines(pg,col="red",lwd=3)
# number of points left of point pnew that limit the polygon
pg.no<-sum(pg[,1]<pnew[1])
if( 0 < pg.no ){
# the polygon (segment pg.no) has to be cut at x==pnew[1]
cutp <- c(pnew[1], pg [pg.no, 2]+pg [pg.no, 3]*(pnew [1]-pg [pg.no ,1]))
pg<- rbind(pg[1:pg.no,], c(cutp,angtan), c(cutp[1]+dxy, cutp[2] +angtan*dxy,NA))
} else {
if(verbose) cat("!!! case degenerated UPPER polygon: pg.no==0")
# the limiting point pnew is above the beginning of the polygon
# therefore, the polygon reduces to line
pg <- rbind(pg[1,], c(pg[2,1:2],NA))
}
pg.nol<-sum(pgl[,1]>=pnewl[1])
if( 0 < pg.nol ){ #??2 ### 121204
cutpl<-c(pnewl[1],pgl[pg.nol,2]+pgl[pg.nol,3]*(pnewl[1]-pgl[pg.nol,1]))
pgl<-rbind(pgl[1:pg.nol,],c(cutpl,angtan),c(cutpl[1]-dxy, cutpl[2]-angtan*dxy,NA))
} else {
if(verbose) cat("!!! case degenerated LOWER polygon: pgl.no==0")
pgl <- rbind(pgl[1,], c(pgl[2,1:2],NA))
}
}else{ # if(verbose) cat("kkk",kkk,"normal case")
# normal case upper polygon
pg.inter<-pg[,2]-angtan*pg[,1]; pnew.inter<-pnew[2]-angtan*pnew[1]
pg.no<-sum(pg.inter<pnew.inter)
if(is.na(pg[pg.no,3])) pg[pg.no,3] <- -Inf # 121129 NaN/Na error
cutp<-cut.p.sl.p.sl(pnew,ang[ia],pg[pg.no,1:2],pg[pg.no,3])
pg<- rbind(pg[1:pg.no,], c(cutp,angtan), c(cutp[1]+dxy, cutp[2] +angtan*dxy,NA))
# normal case lower polygon
pg.interl<-pgl[,2]-angtan*pgl[,1]; pnew.interl<-pnewl[2]-angtan*pnewl[1]
pg.nol<-sum(pg.interl>pnew.interl)
if(is.na(pgl[pg.nol,3])) pgl[pg.nol,3] <- Inf # 121129 NaN/Na error
cutpl<-cut.p.sl.p.sl(pnewl,angtan,pgl[pg.nol,1:2],pgl[pg.nol,3])
pgl<-rbind(pgl[1:pg.nol,],c(cutpl,angtan),c(cutpl[1]-dxy, cutpl[2]-angtan*dxy,NA))
}
# if(kkk==KKK && ia == 51) { cat("ENDE: pgl"); print(pgl) }
# update pg, pgl completed
# PG<<-pg;PG.NO<<-pg.no;CUTP<<-cutp;DXY<<-dxy;PNEW<<-pnew;PGL<<-pgl;PG.NOL<<-pg.nol
#### ***********************
#### cat("angtan",angtan,"pg.no",pg.no,"pkt:",pnew)
# if(ia==stopp) lines(pg,type="b",col="green")
if(debug.plots=="all"){
points(pnew[1],pnew[2],col="red")
hx<-xyxy[ind.k,c(1,1)]; hy<-xyxy[ind.k,c(2,2)]
segments(hx,hy,c(10,-10),hy+ang[ia]*(c(10,-10)-hx),lty=2)
# text(hx+rnorm(1,,.1),hy+rnorm(1,,.1),ia)
# print(pg)
# if(ia==stopp) lines(pgl,type="b",col="green")
points(cutpl[1],cutpl[2],col="red")
hx<-xyxy[ind.kk,c(1,1)]; hy<-xyxy[ind.kk,c(2,2)]
segments(hx,hy,c(10,-10),hy+ang[ia]*(c(10,-10)-hx),lty=2)
# text(hx+rnorm(1,,.1),hy+rnorm(1,,.1),ia)
# print(pgl)
}
##show pg pgl##
}
# if(verbose) PG <<- pg; PGL <<- pgl
if(2<nrow(pg) && 2<nrow(pgl)){
# plot(xyxy[,1:2],xlim=c(-.5,+.5),ylim=c(-.5,.50))
# lines(pg,type="b",col="red"); lines(pgl,type="b",col="blue")
# remove first and last points and multiple points #<show pg pgl>#
limit<-1e-10
# pg <-pg [c(TRUE,(abs(diff(pg [,1]))>limit)|(abs(diff(pg [,2]))>limit)),] old#
idx <- c(TRUE,(abs(diff(pg [,1]))>limit)|(abs(diff(pg [,2]))>limit)) # 121008
if(any(idx==FALSE)){
pg <-pg[idx,]; pg[,3] <- c(diff(pg[,2])/diff(pg[,1]), NA)
}
# old reduction which caused some errors:
# pgl<-pgl[c(TRUE,(abs(diff(pgl[,1]))>limit)|(abs(diff(pgl[,2]))>limit)),] error##
# pgl<-pgl[c( (abs(diff(pgl[,1]))>limit)|(abs(diff(pgl[,2]))>limit),TRUE),] old#
idx <- c( (abs(diff(pgl[,1]))>limit)|(abs(diff(pgl[,2]))>limit),TRUE)#121008
if(any(idx==FALSE)){
pgl<-pgl[idx,]; pgl[,3] <- c(diff(pgl[,2])/diff(pgl[,1]), NA)
}
# add some tolerance in course of numerical problems
pgl[,2]<-pgl[,2] - .00001 #### 121004
# show pg pgl>>
pg<- pg [-nrow(pg ),][-1,,drop=FALSE]
pgl<-pgl[-nrow(pgl),][-1,,drop=FALSE]
# determine position according to the other polygon
# cat("relative position: lower polygon")
indl<-pos.to.pg(round(pgl,digits=10),round(pg,digits=10)) # 121126
# cat("relative position: upper polygon")
indu<-pos.to.pg(round(pg,digits=10),round(pgl,digits=10),TRUE)
sr<-sl<-NULL # ; ##show pg pgl>>
# right region
if(indu[(npg<-nrow(pg))]=="lower" & indl[1]=="higher"){
# cat("in if of right region: the upper polynom is somewhere lower")
# checking from the right: last point of lower polygon that is NOT ok
rnuml<-which(indl=="lower")[1]-1
# checking from the left: last point of upper polygon that is ok
rnumu<-npg+1-which(rev(indu=="higher"))[1]
# special case all points of lower polygon are upper
if(is.na(rnuml)) rnuml<-sum(pg[rnumu,1]<pgl[,1])
# special case all points of upper polygon are lower
if(is.na(rnumu)) rnumu<-sum(pg[,1]<pgl[rnuml,1])
xyl<-pgl[rnuml,]; xyu<-pg[rnumu,]
# cat("right"); print(rnuml); print(xyl)
# cat("right"); print(rnumu); print(xyu)
sr<-cut.p.sl.p.sl(xyl[1:2],xyl[3],xyu[1:2],xyu[3])
}
# left region
if(indl[(npgl<-nrow(pgl))]=="higher"&indu[1]=="lower"){
# cat("in if of left region: the upper polynom is somewhere lower")
# checking from the right: last point of lower polygon that is ok
lnuml<-npgl+1-which(rev(indl=="lower"))[1]
# checking from the left: last point of upper polygon that is NOT ok
lnumu<-which(indu=="higher")[1]-1
# special case all points of lower polygon are upper
if(is.na(lnuml)) lnuml<-sum(pg[lnumu,1]<pgl[,1])
# special case all points of upper polygon are lower
if(is.na(lnumu)) lnumu<-sum(pg[,1]<pgl[lnuml,1])
xyl<-pgl[lnuml,]; xyu<-pg[lnumu,]
# cat("left"); print(lnuml); print(xyl)
# cat("left"); print(lnumu); print(xyu)
sl<-cut.p.sl.p.sl(xyl[1:2],xyl[3],xyu[1:2],xyu[3])
}
# if(kkk==2){ ##show pg pgl##; INDU<<-indu; INDL<<-indl; PGL<<-pgl; PGU<<-pg}
pg<-rbind(pg [indu=="higher",1:2,drop=FALSE],sr,
pgl[indl=="lower", 1:2,drop=FALSE],sl)
if(debug.plots=="all") lines(rbind(pg,pg[1,]),col="red")
if(!any(is.na(pg))) pg<-pg[chull(pg[,1],pg[,2]),]
# if(kkk==7){ PG <<- pg }
} else {
if(2<nrow(pgl)){ #121204
pg <- rbind(pg[2,1:2],pgl[-c(1,length(pgl[,1])),1:2])
} else {
pg <- rbind(pg [-c(1,length(pg [,1])),1:2],pgl[2,1:2])
# rbind(pgl[2,1:2],pg[2,1:2])
}
}
if(verbose) cat("END of computation of the directions")
exp.dk<-cbind(pg[,1]*xysd[1]+xym[1],pg[,2]*xysd[2]+xym[2])
} else {
exp.dk <- NULL
}
if( 1 < kkk ) kkk<-kkk-1 # outer one
if(verbose) print("find polygon with depth a little bit lower than that of the bag")
ia<-1; a<-angles[ia]; xyt<-xyxy%*%c(cos(a),-sin(a)); xyto<-order(xyt)
# initial for upper part
ind.k<-xyto[kkk]; cutp<-c(xyxy[ind.k,1],-10)
dxy<-diff(range(xyxy))
pg<-rbind(c(cutp[1],-dxy,Inf),c(cutp[1],dxy,NA))
# initial for lower part
ind.kk<-xyto[n+1-kkk]; cutpl<-c(xyxy[ind.kk,1],10)
# pgl<-rbind(c(cutpl[1],dxy,Inf),c(cutpl[1],-dxy,NA))
pgl<-rbind(c(cutpl[1],dxy,-Inf),c(cutpl[1],-dxy,NA))
# the sign of inf doesn't matter
if(debug.plots=="all"){ plot(xyxy,type="p",bty="n")
text(xy,,1:n,col="blue")
hx<-xy[ind.k,c(1,1)]; hy<-xy[ind.k,c(2,2)]
segments(hx,hy,c(10,-10),hy+ang[ia]*(c(10,-10)-hx),lty=2)
text(hx+rnorm(1,,.1),hy+rnorm(1,,.1),ia)
}
if(verbose) cat("start of computation of the directions: ","kkk=",kkk) # 121030
for(ia in seq(angles)[-1]){
# determine critical points pnew and pnewl of direction a
# if(verbose) cat("ia",ia,angles[ia])
# 121030
a<-angles[ia]; angtan<-ang[ia]; xyt<-xyxy%*%c(cos(a),-sin(a)); xyto<-order(xyt)
ind.k <-xyto[kkk]; ind.kk<-xyto[n+1-kkk]; pnew<-xyxy[ind.k,]; pnewl<-xyxy[ind.kk,]
# if(verbose) if( 1 < sum(xyt == xyt[ind.k]) )print("WARNING: some points identical")
if(debug.plots=="all") points(pnew[1],pnew[2],col="red")
# new limiting lines are defined by pnew / pnewl and slope a
# find segment of polygon that is cut by new limiting line and cut
# if(ia>200) { #<show pg pgl>#; points(pnew[1],pnew[2],col="magenta",cex=6) }
if( abs(angtan)>1e10){ if(verbose) cat("kkk",kkk,"x=c case")
# case of vertical slope #print(pg);print(pnew);print(xyt);lines(pg,col="red",lwd=3)
# number of points left of point pnew that limit the polygon
pg.no<-sum(pg[,1]<pnew[1])
if( 0 < pg.no ){
# the polygon (segment pg.no) has to be cut at x==pnew[1]
cutp <- c(pnew[1], pg [pg.no, 2]+pg [pg.no, 3]*(pnew [1]-pg [pg.no ,1]))
pg<- rbind(pg[1:pg.no,], c(cutp,angtan), c(cutp[1]+dxy, cutp[2] +angtan*dxy,NA))
} else {
if(verbose) cat("!!! case degenerated UPPER polygon: pg.no==0")
# the limiting point pnew is above the beginning of the polygon
# therefore, the polygon reduces to line
pg <- rbind(pg[1,], c(pg[2,1:2],NA))
}
pg.nol<-sum(pgl[,1]>=pnewl[1])
if( 0 < pg.nol ){ ##??2 ### 121204
cutpl<-c(pnewl[1],pgl[pg.nol,2]+pgl[pg.nol,3]*(pnewl[1]-pgl[pg.nol,1]))
pgl<-rbind(pgl[1:pg.nol,],c(cutpl,angtan),c(cutpl[1]-dxy, cutpl[2]-angtan*dxy,NA))
} else {
if(verbose) cat("!!! case degenerated LOWER polygon: pgl.no==0")
pgl <- rbind(pgl[1,], c(pgl[2,1:2],NA))
}
}else{ # if(verbose) cat("kkk",kkk,"normal case")
# normal case upper polygon
pg.inter<-pg[,2]-angtan*pg[,1]; pnew.inter<-pnew[2]-angtan*pnew[1]
pg.no<-sum(pg.inter<pnew.inter)
if(is.na(pg[pg.no,3])) pg[pg.no,3] <- -Inf # 121129 NaN/Na error
cutp<-cut.p.sl.p.sl(pnew,ang[ia],pg[pg.no,1:2],pg[pg.no,3])
pg<- rbind(pg[1:pg.no,], c(cutp,angtan), c(cutp[1]+dxy, cutp[2] +angtan*dxy,NA))
# normal case lower polygon
pg.interl<-pgl[,2]-angtan*pgl[,1]; pnew.interl<-pnewl[2]-angtan*pnewl[1]
pg.nol<-sum(pg.interl>pnew.interl)
if(is.na(pgl[pg.nol,3])) pgl[pg.nol,3] <- Inf # 121129 NaN/Na error
cutpl<-cut.p.sl.p.sl(pnewl,angtan,pgl[pg.nol,1:2],pgl[pg.nol,3])
pgl<-rbind(pgl[1:pg.nol,],c(cutpl,angtan),c(cutpl[1]-dxy, cutpl[2]-angtan*dxy,NA))
}
# if(kkk==KKK && ia == 51) { cat("ENDE: pgl"); print(pgl) }
# update pg, pgl completed
# PG<<-pg;PG.NO<<-pg.no;CUTP<<-cutp;DXY<<-dxy;PNEW<<-pnew;PGL<<-pgl;PG.NOL<<-pg.nol
#### ---**************************
# cat("angtan",angtan,"pg.no",pg.no,"pkt:",pnew)
# if(ia==stopp) lines(pg,type="b",col="green")
if(debug.plots=="all"){
points(pnew[1],pnew[2],col="red")
hx<-xyxy[ind.k,c(1,1)]; hy<-xyxy[ind.k,c(2,2)]
segments(hx,hy,c(10,-10),hy+ang[ia]*(c(10,-10)-hx),lty=2)
# text(hx+rnorm(1,,.1),hy+rnorm(1,,.1),ia)
# print(pg)
# if(ia==stopp) lines(pgl,type="b",col="green")
points(cutpl[1],cutpl[2],col="red")
hx<-xyxy[ind.kk,c(1,1)]; hy<-xyxy[ind.kk,c(2,2)]
segments(hx,hy,c(10,-10),hy+ang[ia]*(c(10,-10)-hx),lty=2)
# text(hx+rnorm(1,,.1),hy+rnorm(1,,.1),ia)
# print(pgl)
}
##show pg pgl##
}
# if(verbose) PG <<- pg; PGL <<- pgl
if(2<nrow(pg) && 2<nrow(pgl)){
# plot(xyxy[,1:2],xlim=c(-.5,+.5),ylim=c(-.5,.50))
# lines(pg,type="b",col="red"); lines(pgl,type="b",col="blue")
# remove first and last points and multiple points #<show pg pgl>#
limit<-1e-10
# pg <-pg [c(TRUE,(abs(diff(pg [,1]))>limit)|(abs(diff(pg [,2]))>limit)),] old#
idx <- c(TRUE,(abs(diff(pg [,1]))>limit)|(abs(diff(pg [,2]))>limit)) # 121008
if(any(idx==FALSE)){
pg <-pg[idx,]; pg[,3] <- c(diff(pg[,2])/diff(pg[,1]), NA)
}
# old reduction which caused some errors:
# pgl<-pgl[c(TRUE,(abs(diff(pgl[,1]))>limit)|(abs(diff(pgl[,2]))>limit)),] error##
# pgl<-pgl[c( (abs(diff(pgl[,1]))>limit)|(abs(diff(pgl[,2]))>limit),TRUE),] old#
idx <- c( (abs(diff(pgl[,1]))>limit)|(abs(diff(pgl[,2]))>limit),TRUE)#121008
if(any(idx==FALSE)){
pgl<-pgl[idx,]; pgl[,3] <- c(diff(pgl[,2])/diff(pgl[,1]), NA)
}
# add some tolerance in course of numerical problems
pgl[,2]<-pgl[,2] - .00001 #### 121004
# show pg pgl>>
pg<- pg [-nrow(pg ),][-1,,drop=FALSE]
pgl<-pgl[-nrow(pgl),][-1,,drop=FALSE]
# determine position according to the other polygon
# cat("relative position: lower polygon")
indl<-pos.to.pg(round(pgl,digits=10),round(pg,digits=10)) # 121126
# cat("relative position: upper polygon")
indu<-pos.to.pg(round(pg,digits=10),round(pgl,digits=10),TRUE)
sr<-sl<-NULL # ; ##show pg pgl>>
# right region
if(indu[(npg<-nrow(pg))]=="lower" & indl[1]=="higher"){
# cat("in if of right region: the upper polynom is somewhere lower")
# checking from the right: last point of lower polygon that is NOT ok
rnuml<-which(indl=="lower")[1]-1
# checking from the left: last point of upper polygon that is ok
rnumu<-npg+1-which(rev(indu=="higher"))[1]
# special case all points of lower polygon are upper
if(is.na(rnuml)) rnuml<-sum(pg[rnumu,1]<pgl[,1])
# special case all points of upper polygon are lower
if(is.na(rnumu)) rnumu<-sum(pg[,1]<pgl[rnuml,1])
xyl<-pgl[rnuml,]; xyu<-pg[rnumu,]
# cat("right"); print(rnuml); print(xyl)
# cat("right"); print(rnumu); print(xyu)
sr<-cut.p.sl.p.sl(xyl[1:2],xyl[3],xyu[1:2],xyu[3])
}
# left region
if(indl[(npgl<-nrow(pgl))]=="higher"&indu[1]=="lower"){
# cat("in if of left region: the upper polynom is somewhere lower")
# checking from the right: last point of lower polygon that is ok
lnuml<-npgl+1-which(rev(indl=="lower"))[1]
# checking from the left: last point of upper polygon that is NOT ok
lnumu<-which(indu=="higher")[1]-1
# special case all points of lower polygon are upper
if(is.na(lnuml)) lnuml<-sum(pg[lnumu,1]<pgl[,1])
# special case all points of upper polygon are lower
if(is.na(lnumu)) lnumu<-sum(pg[,1]<pgl[lnuml,1])
xyl<-pgl[lnuml,]; xyu<-pg[lnumu,]
# cat("left"); print(lnuml); print(xyl)
# cat("left"); print(lnumu); print(xyu)
sl<-cut.p.sl.p.sl(xyl[1:2],xyl[3],xyu[1:2],xyu[3])
}
# if(kkk==2){ ##show pg pgl##; INDU<<-indu; INDL<<-indl; PGL<<-pgl; PGU<<-pg}
pg<-rbind(pg [indu=="higher",1:2,drop=FALSE],sr,
pgl[indl=="lower", 1:2,drop=FALSE],sl)
if(debug.plots=="all") lines(rbind(pg,pg[1,]),col="red")
if(!any(is.na(pg))) pg<-pg[chull(pg[,1],pg[,2]),]
# if(kkk==7){ PG <<- pg }
} else {
if(2<nrow(pgl)){ #121204
pg <- rbind(pg[2,1:2],pgl[-c(1,length(pgl[,1])),1:2])
} else {
pg <- rbind(pg [-c(1,length(pg [,1])),1:2],pgl[2,1:2])
# rbind(pgl[2,1:2],pg[2,1:2])
}
}
if(verbose) cat("END of computation of the directions")
exp.dk.1<-cbind(pg[,1]*xysd[1]+xym[1],pg[,2]*xysd[2]+xym[2])
if(is.null(exp.dk)) exp.dk <- exp.dk.1
# EX.1 <<- exp.dk.1; EX <<- exp.dk
if(verbose) print("End of find hulls, method two")
}
# if(max(d.k[,2])==k.1||nrow(d.k)==1) lambda<-0 else { ### 121027
if(nrow(d.k)==k.1 || nrow(d.k)==1) lambda<-0 else { # 121126
ind <- sum(d.k[,2] <= k.1) # complicated, may be wrong in case of missing depths
ind <- k.1 # 121123
ndk.1 <- d.k[ ind, 1]
ndk <- d.k[ ind+1, 1] # number inner
    # (half - number inner)/(number outer - number inner)
lambda <-(n/2-ndk) /(ndk.1 - ndk)
# lambda<-(n/2-d.k[k.1+1,1]) /(d.k[k.1,1]-d.k[k.1+1,1]) ### old
# cat(n/2, ndk,ndk.1, "k.1",k.1,"ind",ind)
}
if(verbose) cat("lambda",lambda)
cut.on.pdk.1<-find.cut.z.pg(exp.dk, exp.dk.1,center=center)
# print("HALLO"); print(cut.on.pdk.1)
cut.on.pdk <-find.cut.z.pg(exp.dk.1,exp.dk, center=center)
# expand inner polgon exp.dk
h1<-(1-lambda)*exp.dk+lambda*cut.on.pdk.1
# shrink outer polygon exp.dk.1
h2<-(1-lambda)*cut.on.pdk+lambda*exp.dk.1
h<-rbind(h1,h2);
h<-h[!is.nan(h[,1])&!is.nan(h[,2]),]
hull.bag<-h[chull(h[,1],h[,2]),]
# if(verbose){
# plot(xy); lines(exp.dk,col="red"); lines(exp.dk.1,col="blue");
# segments(cut.on.pdk[,1],cut.on.pdk[,2],exp.dk.1[,1],exp.dk.1[,2],col="red")
# segments(cut.on.pdk.1[,1],cut.on.pdk.1[,2],exp.dk[,1],exp.dk[,2],col="blue",lwd=3)
# points(cut.on.pdk.1,col="blue"); cat("cut.on.pdk.1"); print(cut.on.pdk.1)
# points(cut.on.pdk,col="red"); cat("cut.on.pdk"); print(cut.on.pdk)
# lines(hull.bag,col="green")
# }
if(verbose)cat("bag completed:")
#if(verbose) print(hull.bag)
if(debug.plots=="all"){ lines(hull.bag,col="red") }
hull.loop<-cbind(hull.bag[,1]-center[1],hull.bag[,2]-center[2])
hull.loop<-factor*hull.loop
hull.loop<-cbind(hull.loop[,1]+center[1],hull.loop[,2]+center[2])
if(verbose) cat("loop computed")
if(!very.large.data.set){
pxy.bag <-xydata[hdepth>= k ,,drop=FALSE]
pkt.cand <-xydata[hdepth==(k-1),,drop=FALSE]
pkt.not.bag<-xydata[hdepth< (k-1),,drop=FALSE]
if( 0 < length(pkt.cand) && 0 < length(hull.bag) ){
outside<-out.of.polygon(pkt.cand,hull.bag)
if(sum(!outside)>0)
pxy.bag <-rbind(pxy.bag, pkt.cand[!outside,])
if(sum( outside)>0)
pkt.not.bag<-rbind(pkt.not.bag, pkt.cand[ outside,])
}
}else {
extr<-out.of.polygon(xydata,hull.bag)
pxy.bag <-xydata[!extr,]
pkt.not.bag<-xydata[extr,,drop=FALSE]
}
if(length(pkt.not.bag)>0){
extr<-out.of.polygon(pkt.not.bag,hull.loop)
pxy.outlier<-pkt.not.bag[extr,,drop=FALSE]
if(0==length(pxy.outlier)) pxy.outlier<-NULL
pxy.outer<-pkt.not.bag[!extr,,drop=FALSE]
}else{
pxy.outer<-pxy.outlier<-NULL
}
if(verbose) cat("points of bag, outer points and outlier identified")
hull.loop<-rbind(pxy.outer,hull.bag)
hull.loop<-hull.loop[chull(hull.loop[,1],hull.loop[,2]),]
if(verbose) cat("end of computation of loop")
res<-list(
center=center,
hull.center=hull.center,
hull.bag=hull.bag,
hull.loop=hull.loop,
pxy.bag=pxy.bag,
pxy.outer=if(length(pxy.outer)>0) pxy.outer else NULL,
pxy.outlier=if(length(pxy.outlier)>0) pxy.outlier else NULL,
hdepths=hdepth,
is.one.dim=is.one.dim,
prdata=prdata,
# random.seed=random.seed, ###SEED
xy=xy,xydata=xydata
)
if(verbose) res<-c(res,list(exp.dk=exp.dk,exp.dk.1=exp.dk.1,hdepth=hdepth))
class(res)<-"bagplot"
return(res)
}
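# Illustrative sketch (not run): inspecting the main components of the "bagplot"
# object returned by compute.bagplot(); the input may be any two-column numeric matrix.
# bp <- compute.bagplot(cbind(rnorm(100), rnorm(100)))
# bp$center      # approximate bivariate (depth) median
# bp$hull.bag    # vertices of the bag polygon (contains about half of the points)
# bp$hull.loop   # vertices of the loop (convex hull of all non-outliers)
# bp$pxy.outlier # points outside the fence, NULL if there are none
# plot(bp)       # dispatches to plot.bagplot()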
plot.bagplot <- function(x,
                         show.outlier=TRUE, # if TRUE outliers are shown
show.whiskers=TRUE, # if TRUE whiskers are shown
show.looppoints=TRUE, # if TRUE points in loop are shown
show.bagpoints=TRUE, # if TRUE points in bag are shown
show.loophull=TRUE, # if TRUE loop is shown
show.baghull=TRUE, # if TRUE bag is shown
add=FALSE, # if TRUE graphical elements are added to actual plot
pch=16,cex=.4, # to define further parameters of plot
verbose=FALSE, # tools for debugging
col.loophull="#aaccff", # Alternatives: #ccffaa, #ffaacc
col.looppoints="#3355ff", # Alternatives: #55ff33, #ff3355
col.baghull="#7799ff", # Alternatives: #99ff77, #ff7799
col.bagpoints="#000088", # Alternatives: #008800, #880000
transparency=FALSE,...
){
if(missing(x)) return(
"bagplot, version 2012/12/05, peter wolf"
)
# transparency flag and color flags have been proposed by wouter
if (transparency==TRUE) {
col.loophull = paste(col.loophull, "99", sep="")
col.baghull = paste(col.baghull, "99", sep="")
}
win<-function(dx,dy){ atan2(y=dy,x=dx) }
cut.z.pg<-function(zx,zy,p1x,p1y,p2x,p2y){
a2<-(p2y-p1y)/(p2x-p1x); a1<-zy/zx
sx<-(p1y-a2*p1x)/(a1-a2); sy<-a1*sx
sxy<-cbind(sx,sy)
h<-any(is.nan(sxy))||any(is.na(sxy))||any(Inf==abs(sxy))
if(h){ # print("NAN found"); print(cbind(a1,a2,zx,zy,sxy,p2x-p1x))
if(!exists("verbose")) verbose<-FALSE
if(verbose) cat("special")
# zx is zero ### 121030
h<-0==zx
sx<-ifelse(h,zx,sx); sy<-ifelse(h,p1y-a2*p1x,sy)
# points on line defined by line segment
a1 <- ifelse( abs(a1) == Inf, sign(a1)*123456789*1E10, a1) # 121030
a2 <- ifelse( abs(a2) == Inf, sign(a2)*123456789*1E10, a2)
# points on line defined by line segment
h<-0==(a1-a2) & sign(zx)==sign(p1x)
sx<-ifelse(h,p1x,sx); sy<-ifelse(h,p1y,sy)
h<-0==(a1-a2) & sign(zx)!=sign(p1x)
sx<-ifelse(h,p2x,sx); sy<-ifelse(h,p2y,sy)
# line segment vertical
# & center NOT ON line segment
h<-p1x==p2x & zx!=p1x & p1x!=0
sx<-ifelse(h,p1x,sx); sy<-ifelse(h,zy*p1x/zx,sy)
# & center ON line segment
h<-p1x==p2x & zx!=p1x & p1x==0
sx<-ifelse(h,p1x,sx); sy<-ifelse(h,0,sy)
# & center NOT ON line segment & point on line ### 121126
h<-p1x==p2x & zx==p1x & p1x!=0 # & sign(zy)==sign(p1y)
sx<-ifelse(h,zx,sx); sy<-ifelse(h,zy,sy)
# & center ON line segment & point on line
h<-p1x==p2x & zx==p1x & p1x==0 & sign(zy)==sign(p1y)
sx<-ifelse(h,p1x,sx); sy<-ifelse(h,p1y,sy)
h<-p1x==p2x & zx==p1x & p1x==0 & sign(zy)!=sign(p1y)
sx<-ifelse(h,p1x,sx); sy<-ifelse(h,p2y,sy)
# points identical to end points of line segment
h<-zx==p1x & zy==p1y; sx<-ifelse(h,p1x,sx); sy<-ifelse(h,p1y,sy)
h<-zx==p2x & zy==p2y; sx<-ifelse(h,p2x,sx); sy<-ifelse(h,p2y,sy)
# point of z is center
h<-zx==0 & zy==0; sx<-ifelse(h,0,sx); sy<-ifelse(h,0,sy)
sxy<-cbind(sx,sy)
} # end of special cases
#if(verbose){ print(rbind(a1,a2));print(cbind(zx,zy,p1x,p1y,p2x,p2y,sxy))}
if(!exists("debug.plots")) debug.plots<-"no"
if(debug.plots=="all"){
segments(sxy[,1],sxy[,2],zx,zy,col="red")
segments(0,0,sxy[,1],sxy[,2],col="green",lty=2) ##!!
points(sxy,col="red")
}
return(sxy)
}
find.cut.z.pg<-function(z,pg,center=c(0,0),debug.plots="no"){
if(!is.matrix(z)) z<-rbind(z)
if(1==nrow(pg)) return(matrix(center,nrow(z),2,TRUE))
n.pg<-nrow(pg); n.z<-nrow(z)
z<-cbind(z[,1]-center[1],z[,2]-center[2])
pgo<-pg; pg<-cbind(pg[,1]-center[1],pg[,2]-center[2])
if(!exists("debug.plots")) debug.plots<-"no"
if(debug.plots=="all"){
plot(rbind(z,pg,0),bty="n"); points(z,pch="p")
lines(c(pg[,1],pg[1,1]),c(pg[,2],pg[1,2]))}
  # find angles of pg and z
apg<-win(pg[,1],pg[,2])
apg[is.nan(apg)]<-0; a<-order(apg); apg<-apg[a]; pg<-pg[a,]
az<-win(z[,1],z[,2])
# find line segments
segm.no<-apply((outer(apg,az,"<")),2,sum)
segm.no<-ifelse(segm.no==0,n.pg,segm.no)
next.no<-1+(segm.no %% length(apg))
# compute cut points
cuts<-cut.z.pg(z[,1],z[,2],pg[segm.no,1],pg[segm.no,2],
pg[next.no,1],pg[next.no,2])
# rescale
cuts<-cbind(cuts[,1]+center[1],cuts[,2]+center[2])
return(cuts)
}
# find.cut.z.pg(EX, EX1,center=CE)
center<-hull.center<-hull.bag<-hull.loop<-pxy.bag<-pxy.outer<-pxy.outlier<-NULL
# random.seed <-
hdepths<-is.one.dim<-prdata<-xy<-xydata<-exp.dk<-exp.dk.1<-hdepth<-NULL
tphdepth<-tp<-NULL
#090216
bagplotobj<-x
for(i in seq(along=bagplotobj))
eval(parse(text=paste(names(bagplotobj)[i],"<-bagplotobj[[",i,"]]")))
if(is.one.dim){
if(!verbose) cat("data set one dimensional") # 121202
ROT<-round(prdata[[2]],digits=5); IROT<-round(solve(ROT),digits=5)
if(!add){ ## 121008 ## 121130
plot(xydata,type="n",bty="n",pch=16,cex=1, ...) # xlim=xlim, ylim=ylim, ...)
}
# find five points for box and whiskers
usr <- par()$usr; xlim <- usr[1:2]; ylim <- usr[3:4]
mins <- usr[c(1,3)]; ranges <- usr[c(2,4)] - mins
    if(ROT[1,1]==0){ # cat("vertical case")
xydata <- cbind( mean(usr[1:2]) ,xydata[,2])
boxplotres<-boxplot(xydata[,2],plot=FALSE)
five<-cbind(mean(usr[1:2]),boxplotres$stat)
dx <- 0.1*(xlim[2]-xlim[1]); dy <- 0
idx.out <- if(0<length(boxplotres$out)) match(boxplotres$out, xydata[,2] ) else NULL
}
    if(ROT[1,2]==0){ # cat("horizontal case")
xydata <- cbind( xydata[,1], mean(usr[3:4]))
boxplotres<-boxplot(xydata[,1],plot=FALSE)
five<-cbind(boxplotres$stat,mean(usr[3:4]))
dx <- 0; dy <- 0.1*(ylim[2]-ylim[1]) # 1/5 of del.y
idx.out <- if(0<length(boxplotres$out)) match(boxplotres$out, xydata[,1] ) else NULL
}
if(ROT[1,2]!=0 && ROT[1,1]!=0){
xytr<-xydata%*%ROT
boxplotres<-boxplot(xytr[,1],plot=FALSE)
five<-cbind(boxplotres$stat,xytr[1,2])%*%IROT
# find small vector for box height
vec <- five[5,] - five[1,]
vec.ortho <- c(vec[2],-vec[1]) * ranges / par()$pin
xy.delta <- vec.ortho * par()$pin[2:1] * ranges # plot region inches
xy.delta <- xy.delta / sqrt( sum(xy.delta * xy.delta) )
xy.delta <- xy.delta * .15 / ( sqrt(sum(abs(par()$pin*xy.delta/ranges)^2) ))
dx <- xy.delta[1]; dy <- xy.delta[2]
idx.out <- if(0<length(boxplotres$out)) match(boxplotres$out, xytr ) else NULL
}
# construct segments
# whiskers
segments(five[h<-c(1,5),1],five[h,2],five[h<-c(2,4),1],five[h,2], # col=col.looppoints,
lwd=2)
points(five[c(1,5),], cex=1, col=col.looppoints,pch=16)
# box
#segments(five[h<-2:4,1] + dx, five[h,2] + dy, five[h,1] - dx, five[h,2] - dy,
# col=col.bagpoints,lwd=2)
#segments(five[2,1] + (h<-c(-1,1))*dx, five[2,2] + h*dy,
# five[4,1] + h*dx, five[4,2] + h*dy,
# col=col.bagpoints,lwd=2)
polygon(five[c(2,4,4,2,2),1] + c(dx,dx,-dx,-dx,dx),
five[c(2,4,4,2,2),2] + c(dy,dy,-dy,-dy,dy),
col=col.baghull,lwd=1)
# median
segments(five[h<-3 ,1] + dx, five[h,2] + dy,
five[h,1] - dx, five[h,2] - dy,col="red",lwd=3)
# Outlier
if(0 < length(idx.out) && !is.na(idx.out[1])){
points(xydata[idx.out,,drop=FALSE], cex=1, pch=16,col="red")
}
# segments(five[3,1],five[3,2],five[3,1]+1*vec.ortho[1],
# five[3,2]+100*vec.ortho[2],col="green",lwd=5)
# segments(five[3,1],five[3,2],five[3,1]+1*vec1[1],
# five[3,2]+1*vec1[2],col="red",lwd=5)
# points(five,cex=2,col="green")
return("one dimensional boxplot plottet")
} else {
if(!add) plot(xydata,type="n",pch=pch,cex=cex,bty="n",...)
if(verbose) text(xy[,1],xy[,2],paste(as.character(hdepth))) # cex=2 needs fonts
# loop: --************
if(show.loophull){ # fill loop
h<-rbind(hull.loop,hull.loop[1,]); lines(h[,1],h[,2],lty=1)
polygon(hull.loop[,1],hull.loop[,2],col=col.loophull)
}
if(show.looppoints && 0 < length(pxy.outer)){ # points in loop
points(pxy.outer[,1],pxy.outer[,2],col=col.looppoints,pch=pch,cex=cex)
}
# bag: --*****************
if(show.baghull && 0 < length(hull.bag)){ # fill bag
h<-rbind(hull.bag,hull.bag[1,]); lines(h[,1],h[,2],lty=1)
polygon(hull.bag[,1],hull.bag[,2],col=col.baghull)
}
if(show.bagpoints && 0 < length(pxy.bag)){ # points in bag
points(pxy.bag[,1],pxy.bag[,2],col=col.bagpoints,pch=pch,cex=cex)
}
# whiskers
if(show.whiskers && 0 < length(pxy.outer)){
debug.plots<-"not"
if((n<-length(xy[,1]))<15){
segments(xy[,1],xy[,2],rep(center[1],n),rep(center[2],n),
col="red")
}else{
pkt.cut<-find.cut.z.pg(pxy.outer,hull.bag,center=center)
segments(pxy.outer[,1],pxy.outer[,2],pkt.cut[,1],pkt.cut[,2],
col="red")
}
}
# outlier: ---**********************
    if(show.outlier && 0 < length(pxy.outlier)){ # outliers
points(pxy.outlier[,1],pxy.outlier[,2],col="red",pch=pch,cex=cex)
}
# center:
if(exists("hull.center") && 2 < length(hull.center)){
h<-rbind(hull.center,hull.center[1,]); lines(h[,1],h[,2],lty=1)
polygon(hull.center[,1],hull.center[,2],col="orange")
}
if(!is.one.dim) points(center[1],center[2],pch=8,col="red")
if(verbose && 0 < length(exp.dk.1) ){
h<-rbind(exp.dk,exp.dk[1,]); lines(h,col="blue",lty=2)
h<-rbind(exp.dk.1,exp.dk.1[1,]); lines(h,col="black",lty=2, lwd=3)
if(exists("tphdepth") && 0<length(tphdepth))
text(tp[,1],tp[,2],as.character(tphdepth),col="green")
text(xy[,1],xy[,2],paste(as.character(hdepth))) # cex=2 needs special fonts
points(center[1],center[2],pch=8,col="red")
}
"bagplot plottet"
}
}
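# Illustrative sketch (not run): overlaying a second bagplot on an existing plot
# via add=TRUE; xy1 and xy2 stand for arbitrary two-column numeric matrices and
# the colours (taken from the "Alternatives" noted in the defaults) are arbitrary choices.
# bp1 <- compute.bagplot(xy1); bp2 <- compute.bagplot(xy2)
# plot(bp1)
# plot(bp2, add=TRUE, col.loophull="#ccffaa", col.baghull="#99ff77",
#      col.looppoints="#55ff33", col.bagpoints="#008800")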
# find.hdepths <- function(xy, number.of.directions=181){ ### 121126
# xy <- as.matrix(xy)
# for( j in 1:2) {
# xy[,j] <- xy[,j] - min(xy[,j])
# if( 0 < (h <- max(xy[,j]))) xy[,j] <- xy[,j] / max(xy[,j])
# }
# phi <- c(seq(0,180,length=number.of.directions)[-1]*(2*pi/360))
# sinphi <- c(sin(phi),1); cosphi <- c(cos(phi),0)
# RM1 <- round(digits=6,rbind(cosphi,sinphi))
# hd <- rep(h<-length(xy[,1]),h)
# for( j in seq(along=sinphi)){
# xyt <- xy %*% RM1[,j]
# hd <- pmin(hd,rank(xyt,ties.method="min"), rank(-xyt,ties.method="min"))
# }
# ### xyt <- xy %*% RM1
# ### hd2 <- cbind(apply(xyt, 2, rank, ties.method="min"),
# ### apply(-xyt,2, rank, ties.method="min"))
# ### hd2 <- apply(hd2, 1, min)
# hd
# }
# find.hdepths.tp <- function(tp, data, number.of.directions=181){ ### 121130
# ### standardize dimensions ###
# xy <- as.matrix(data); tp <- as.matrix(rbind(tp)); n.tp <- dim(tp)[1]
# for( j in 1:2) {
# xy[,j] <- xy[,j] - (h <- min(xy[,j], na.rm=TRUE))
# tp[,j] <- tp[,j] - h
# if( 0 < (h <- max(xy[,j], na.rm=TRUE))){
# xy[,j] <- xy[,j]/h; tp[,j] <- tp[,j]/h
# }
# }
# ##loop over directions##
# phi <- c(seq(0,180,length=number.of.directions)[-1]*(2*pi/360))
# sinphi <- c(sin(phi),1); cosphi <- c(cos(phi),0)
# RM1 <- round(digits=6,rbind(cosphi,sinphi))
# hdtp <- rep(length(xy[,1]),length(tp[,1]))
# for( j in seq(along=sinphi)){ #print(j)
# xyt <- xy %*% RM1[,j]; tpt <- (tp %*% RM1[,j])[]
# xyt <- xyt[!is.na(xyt)] #; tpt <- sort(tpt)
# hdtp <- pmin(hdtp,(rank( c(tpt,xyt), ties.method="min"))[1:n.tp]
# -rank( tpt,ties.method="min")
# ,rank(-c(tpt,xyt), ties.method="min")[1:n.tp]
# -rank(-tpt,ties.method="min")
# )
# }
# hdtp
# }
# hdepth<-function(xy,data){
# ###function to compute the h-depths of points
# win<-function(dx,dy){ atan2(y=dy,x=dx) }
# if(missing(data)) data <- xy
# tp <- xy; xy <- data
# n.tp<-nrow(tp); n <- length(xy[,1])
# tphdepth<-rep(0,n.tp); dpi<-2*pi-0.000001
# for(j in 1:n.tp) {
# ### compute difference of coordinates of tp j and data
# dx<-tp[j,1]-xy[,1]; dy<-tp[j,2]-xy[,2]
# ### remove data points that are identical to tp j
# h <- tp[j,1] != xy[,1] & tp[j,2] != xy[,2]
# dx <- dx[h]; dy <- dy[h]; n <- length(dx)
# minusplus<-c(rep(-1,n),rep(1,n)) #### 070824
# ### compute angles of slopes of lines through tp j and data
# a<-win(dx,dy)+pi; h<-a<10; a<-a[h]; ident<-sum(!h)
# ### count number of angles that are lower than pi == points above tp j
# init<-sum(a < pi); a.shift<-(a+pi) %% dpi
# ### count points relative to the tp j in halve planes
# h<-cumsum(minusplus[order(c(a,a.shift))])
# ### find minimum number of points in a halve plane
# tphdepth[j]<-init+min(h)+1 ### +1 because of the point itself!!
# ### tphdepth[j]<-init+min(h)+ident; cat("SUMME",ident)
# }
# tphdepth
# }
# hdepth <- find.hdepths.tp #121202
#' Bivariate Boxplot
#'
#' \code{PlotBag()} creates a two-dimensional boxplot called "bagplot" based on
#' two numerical variables x and y. \code{plot.bagplot()} is the plotting
#' routine for a bagplot object and \code{compute.bagplot()} contains the
#' computation logic for the object.
#'
#' A bagplot is a bivariate generalization of the well-known boxplot. It has
#' been proposed by Rousseeuw, Ruts, and Tukey. In the bivariate case the box
#' of the boxplot changes to a convex polygon, the bag of the bagplot. The bag
#' contains 50 percent of all points. The fence separates points within the
#' fence from points outside. It is computed by enlarging the bag (by
#' \code{factor}). The loop is defined as the convex hull containing all points
#' inside the fence. If all points are on a straight line you get a classical
#' boxplot. \code{PlotBag()} plots bagplots that are very similar to the one
#' described in Rousseeuw et al. Remarks: The two-dimensional median is
#' approximated. For large data sets the error will be very small. On the other
#' hand it is not very wise to make a (graphical) summary of e.g. 10 bivariate
#' data points.
#'
#' In case you want to plot multiple (overlapping) bagplots, you may want plots
#' that are semi-transparent. For this you can use the \code{transparency}
#' flag. If \code{transparency==TRUE} the alpha layer is set to '99' (hex).
#' This causes the bagplots to appear semi-transparent, but ONLY if the output
#' device is PDF and opened using: \code{pdf(file="filename.pdf",
#' version="1.4")}. For this reason, the default is \code{transparency==FALSE}.
#' This feature as well as the arguments to specify different colors has been
#' proposed by Wouter Meuleman.
#'
#' @aliases PlotBag PlotBagPairs compute.bagplot plot.bagplot
#' @param x x values of a data set; in \code{PlotBag}: an object of class
#' \code{PlotBag} computed by \code{compute.PlotBag}
#' @param y y values of the data set
#' @param factor factor defining the loop
#' @param na.rm if \code{TRUE} 'NA' values are removed otherwise exchanged by
#' median
#' @param approx.limit if the number of data points exceeds \code{approx.limit}
#' a sample is used to compute some of the quantities; default: 300
#' @param show.outlier if \code{TRUE} outliers are shown
#' @param show.whiskers if \code{TRUE} whiskers are shown
#' @param show.looppoints if \code{TRUE} loop points are plotted
#' @param show.bagpoints if \code{TRUE} bag points are plotted
#' @param show.loophull if \code{TRUE} the loop is plotted
#' @param show.baghull if \code{TRUE} the bag is plotted
#' @param create.plot if \code{FALSE} no plot is created
#' @param add if \code{TRUE} the bagplot is added to an existing plot
#' @param pch sets the plotting character
#' @param cex sets characters size
#' @param dkmethod 1 or 2, there are two methods of approximating the bag;
#' method 1 is very rough (only based on observations)
#' @param precision precision of approximation, default: 1
#' @param verbose automatic commenting of calculations
#' @param debug.plots if \code{TRUE} additional plots describing intermediate
#' results are constructed
#' @param col.loophull color of loop hull
#' @param col.looppoints color of the points of the loop
#' @param col.baghull color of bag hull
#' @param col.bagpoints color of the points of the bag
#' @param transparency see section details
#' @param dm a data matrix or data frame whose columns are plotted against each
#' other by \code{PlotBagPairs}
#' @param trim the fraction of extreme values to be trimmed from each column
#' before the pairwise bagplots are computed (default 0, i.e. no trimming)
#' @param main a main title for the plot
#' @param numeric.only if \code{TRUE} only numeric columns of \code{dm} are used
#' @param \dots additional graphical parameters
#' @return \code{compute.bagplot} returns an object of class \code{bagplot}
#' that could be plotted by \code{plot.bagplot()}. An object of the bagplot
#' class is a list with the following elements: \code{center} is a two
#' dimensional vector with the coordinates of the center. \code{hull.center} is
#' a two column matrix, the rows are the coordinates of the corners of the
#' center region. \code{hull.bag} and \code{hull.loop} contain the coordinates
#' of the hull of the bag and the hull of the loop. \code{pxy.bag} shows you
#' the coordinates of the points of the bag. \code{pxy.outer} is the two column
#' matrix of the points that are within the fence. \code{pxy.outlier} represent
#' the outliers. The vector \code{hdepths} shows the depths of data points.
#' \code{is.one.dim} is \code{TRUE} if the data set is (nearly) one
#' dimensional. The dimensionality is decided by analysing the result of
#' \code{prcomp} which is stored in the element \code{prdata}. \code{xy} shows
#' you the data that are used for the bagplot. In the case of very large data
#' sets subsets of the data are used for constructing the bagplot. A data set
#' is very large if there are more data points than \code{approx.limit}.
#' \code{xydata} are the input data structured in a two column matrix.
#' @note Version of bagplot: 10/2012
#' @author Hans Peter Wolf <pwolf@@wiwi.uni-bielefeld.de>
#' @seealso \code{\link[graphics]{boxplot}}
#' @references P. J. Rousseeuw, I. Ruts, J. W. Tukey (1999): The bagplot: a
#' bivariate boxplot, \emph{The American Statistician}, vol. 53, no. 4,
#' 382--387
#' @keywords misc hplot
#' @examples
#'
#' # example: 100 random points and one outlier
#' dat <- cbind(rnorm(100) + 100, rnorm(100) + 300)
#' dat <- rbind(dat, c(105,295))
#'
#' PlotBag(dat,factor=2.5,create.plot=TRUE,approx.limit=300,
#' show.outlier=TRUE,show.looppoints=TRUE,
#' show.bagpoints=TRUE,dkmethod=2,
#' show.whiskers=TRUE,show.loophull=TRUE,
#' show.baghull=TRUE,verbose=FALSE)
#'
#' # example of Rousseeuw et al., see R-package rpart
#' cardata <- structure(as.integer( c(2560,2345,1845,2260,2440,
#' 2285, 2275, 2350, 2295, 1900, 2390, 2075, 2330, 3320, 2885,
#' 3310, 2695, 2170, 2710, 2775, 2840, 2485, 2670, 2640, 2655,
#' 3065, 2750, 2920, 2780, 2745, 3110, 2920, 2645, 2575, 2935,
#' 2920, 2985, 3265, 2880, 2975, 3450, 3145, 3190, 3610, 2885,
#' 3480, 3200, 2765, 3220, 3480, 3325, 3855, 3850, 3195, 3735,
#' 3665, 3735, 3415, 3185, 3690, 97, 114, 81, 91, 113, 97, 97,
#' 98, 109, 73, 97, 89, 109, 305, 153, 302, 133, 97, 125, 146,
#' 107, 109, 121, 151, 133, 181, 141, 132, 133, 122, 181, 146,
#' 151, 116, 135, 122, 141, 163, 151, 153, 202, 180, 182, 232,
#' 143, 180, 180, 151, 189, 180, 231, 305, 302, 151, 202, 182,
#' 181, 143, 146, 146)), .Dim = as.integer(c(60, 2)),
#' .Dimnames = list(NULL, c("Weight", "Disp.")))
#'
#' PlotBag(cardata,factor=3,show.baghull=TRUE,
#' show.loophull=TRUE,precision=1, dkmethod=2)
#'
#' title("car data Chambers/Hastie 1992")
#'
#' # points of y=x*x
#' PlotBag(x=1:30,y=(1:30)^2,verbose=FALSE,dkmethod=2)
#'
#' # one dimensional subspace
#' PlotBag(x=1:50,y=1:50)
#'
#' # pairwise bagplots
#' par(las=1)
#' PlotBagPairs(swiss[, 1:2],
#' main="Swiss Fertility and Socioeconomic Indicators (1888) Data")
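#'
#' # sketch: two semi-transparent, overlapping bagplots; the alpha channel only
#' # shows up on a PDF device opened with version="1.4" (see Details)
#' \dontrun{
#' pdf(file="bagplots.pdf", version="1.4")
#' PlotBag(cbind(rnorm(100), rnorm(100)), transparency=TRUE)
#' PlotBag(cbind(rnorm(100) + 1.5, rnorm(100) + 1.5), transparency=TRUE, add=TRUE,
#'         col.baghull="#ff7799", col.loophull="#ffaacc")
#' dev.off()
#' }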
#'
PlotBag <- function(x, y,
factor=3, # expanding factor for bag to get the loop
                    na.rm=FALSE, # should NA values be removed or exchanged by column medians
                    approx.limit=300, # if there are more data points, a subsample of this size is used
                    show.outlier=TRUE, # if TRUE outliers are shown
show.whiskers=TRUE, # if TRUE whiskers are shown
show.looppoints=TRUE, # if TRUE points in loop are shown
show.bagpoints=TRUE, # if TRUE points in bag are shown
show.loophull=TRUE, # if TRUE loop is shown
show.baghull=TRUE, # if TRUE bag is shown
create.plot=TRUE, # if TRUE a plot is created
add=FALSE, # if TRUE graphical elements are added to actual plot
pch=16,cex=.4, # some graphical parameters
dkmethod=2, # in 1:2; there are two methods for approximating the bag
precision=1, # controls precision of computation
verbose=FALSE,debug.plots="no", # tools for debugging
col.loophull="#aaccff", # Alternatives: #ccffaa, #ffaacc
col.looppoints="#3355ff", # Alternatives: #55ff33, #ff3355
col.baghull="#7799ff", # Alternatives: #99ff77, #ff7799
col.bagpoints="#000088", # Alternatives: #008800, #880000
transparency=FALSE, ... # to define further parameters of plot
){
if(missing(x)) return(
"bagplot, version 2012/12/05, peter wolf"
)
bo<-compute.bagplot(x=x,y=y,factor=factor,na.rm=na.rm,
approx.limit=approx.limit,dkmethod=dkmethod,
precision=precision,verbose=verbose,debug.plots=debug.plots)
if(create.plot){
plot(bo,
show.outlier=show.outlier,
show.whiskers=show.whiskers,
show.looppoints=show.looppoints,
show.bagpoints=show.bagpoints,
show.loophull=show.loophull,
show.baghull=show.baghull,
add=add,pch=pch,cex=cex,
verbose=verbose,
col.loophull=col.loophull,
col.looppoints=col.looppoints,
col.baghull=col.baghull,
col.bagpoints=col.bagpoints,
transparency=transparency, ...
)
}
invisible(bo)
}
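# Illustrative sketch (not run): separating computation from plotting with
# create.plot=FALSE, e.g. to reuse one computed object for several displays.
# bo <- PlotBag(x = rnorm(200), y = rnorm(200), create.plot=FALSE)
# plot(bo, show.whiskers=FALSE)
# plot(bo, show.outlier=FALSE, col.baghull="grey70", col.loophull="grey90")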
# New interface
# PlotBag <- function(x, y,
# outl = list(pch, cex, col, bg),
# looppoints = list(pch, cex, col, bg),
# bagpoints = list(pch, cex, col, bg),
# loophull = list(lwd, col, border),
# baghull = list(lwd, col, border),
# factor=3, # expanding factor for bag to get the loop
# na.rm=FALSE, # should 'NAs' values be removed or exchanged
# approx.limit=300, # limit
# add=FALSE, # if TRUE graphical elements are added to actual plot
# dkmethod=2, # in 1:2; there are two methods for approximating the bag
# precision=1, # controls precision of computation
# verbose=FALSE,debug.plots="no", # tools for debugging
# transparency=FALSE,
# ... # to define further parameters of plot
# )
# #
# outlty, outlwd, outpch, outcex, outcol, outbg
#
#
# faces<-function(xy,which.row,fill=FALSE,face.type=1,
# nrow.plot,ncol.plot,scale=TRUE,byrow=FALSE,main,
# labels,print.info = TRUE,na.rm = FALSE,
# ncolors=20,
# col.nose=rainbow(ncolors), # nose
# col.eyes=rainbow(ncolors,start=0.6,end=0.85),# eyes
# col.hair=terrain.colors(ncolors), # hair
# col.face=heat.colors(ncolors), # face
# col.lips=rainbow(ncolors,start=0.0,end=0.2), # lips
# col.ears=rainbow(ncolors,start=0.0,end=0.2), # ears
#
# plot.faces=TRUE){ # 070831 pwolf
# if((demo<-missing(xy))){
# xy<-rbind(
# c(1,3,5),c(3,5,7),
# c(1,5,3),c(3,7,5),
# c(3,1,5),c(5,3,7),
# c(3,5,1),c(5,7,3),
# c(5,1,3),c(7,3,5),
# c(5,3,1),c(7,5,3),
# c(1,1,1),c(4,4,4),c(7,7,7)
# )
# labels<-apply(xy,1,function(x) paste(x,collapse="-"))
# }
# clumsy plot code to be revised
## plots: PlotFaces (Chernoff-Faces) ====
# aus TeachingDemos, Author: H. P. Wolf
# updated with newer version, edited and simplified by 0.99.24
# Source aplpack, Author: H. P. Wolf
#' Chernoff Faces
#'
#' Plot Chernoff faces. The rows of a data matrix represent cases and the
#' columns the variables.
#'
#' The features paramters of this implementation are: \itemize{ \item1 height
#' of face \item2 width of face \item3 shape of face \item4 height of mouth
#' \item5 width of mouth \item6 curve of smile \item7 height of eyes \item8
#' width of eyes \item9 height of hair \item10 width of hair \item11 styling of
#' hair \item12 height of nose \item13 width of nose \item14 width of ears
#' \item15 height of ears }
#'
#' \figure{faces.pngSome faces}
#'
#' For details look at the literate program of \code{faces}
#'
#' @param xy \code{xy} data matrix, rows represent individuals and columns
#' attributes.
#' @param which.row defines a permutation of the rows of the input matrix.
#' @param fill logic. If set to \code{TRUE}, only the first \code{nc}
#' attributes of the faces are transformed, \code{nc} is the number of columns
#' of \code{x}.
#' @param nr number of columns of faces on graphics device
#' @param nc number of rows of faces
#' @param scale logic. If set to \code{TRUE}, attributes will be normalized.
#' @param byrow \code{if(byrow==TRUE)}, \code{x} will be transposed.
#' @param main title.
#' @param labels character strings to use as names for the faces.
#' @param col a vector of colors used for the parts of the faces. Colors are
#' recycled in the order: "nose", "eyes", "hair", "face", "lips", "ears".
#' Default is NA, which will omit colors.
#' @return information about usage of variables for face elements is returned
#' invisibly
#' @note based on version 12/2009
#' @author H. P. Wolf, some changes Andri Signorell <andri@@signorell.net>
#' @references Chernoff, H. (1973) The use of faces to represent statistiscal
#' assoziation, \emph{JASA}, 68, pp 361--368.\cr
#'
#' The smooth curves are computed by an algorithm found in:\cr Ralston, A. and
#' Rabinowitz, P. (1985) \emph{A first course in numerical analysis},
#' McGraw-Hill, pp 76ff.\cr \url{http://www.wiwi.uni-bielefeld.de/~wolf/}: S/R
#' - functions : faces
#' @keywords hplot
#' @examples
#'
#' PlotFaces(rbind(1:3,5:3,3:5,5:7))
#'
#' data(longley)
#' PlotFaces(longley[1:9,])
#'
#' set.seed(17)
#' PlotFaces(matrix(sample(1:1000,128,), 16, 8), main="random faces")
#'
#'
#' means <- lapply(iris[,-5], tapply, iris$Species, mean)
#' m <- t(do.call(rbind, means))
#' m <- cbind(m, matrix(rep(1, 11*3), nrow=3))
#'
#' # define the colors, first for all faces the same
#' col <- replicate(3, c("orchid1", "olivedrab", "goldenrod4",
#' "peachpuff", "darksalmon", "peachpuff3"))
#' rownames(col) <- c("nose","eyes","hair","face","lips","ears")
#' # change haircolor individually for each face
#' col[3, ] <- c("lightgoldenrod", "coral3", "sienna4")
#'
#' z <- PlotFaces(m, nr=1, nc=3, col=col)
#'
#' # print the used coding
#' print(z$info, right=FALSE)
#'
PlotFaces <- function(xy = rbind(1:3,5:3,3:5,5:7), which.row, fill = FALSE, nr, nc,
scale = TRUE, byrow = FALSE, main, labels,
col = "white") {
ncolors <- nrow(xy)
col <- matrix(rep(col, length.out=nrow(xy) * 6), ncol=nrow(xy))
col.nose <- col[1, ]
col.eyes <- col[2, ]
col.hair <- col[3, ]
col.face <- col[4, ]
col.lips <- col[5, ]
col.ears <- col[6, ]
n <- nrow(xy)
if(missing(nr)) nr <- n^0.5
if(missing(nc)) nc <- n^0.5
opar <- par(mfrow=c(ceiling(c(nr, nc))),
oma=rep(6, 4),
mar=rep(.7, 4))
on.exit(par(opar))
spline <- function(a, y, m=200, plot=FALSE) {
n <- length(a)
h <- diff(a)
dy <- diff(y)
sigma <- dy/h
lambda <- h[-1] / (hh <- h[-1] + h[-length(h)])
mu <- 1-lambda
d <- 6 * diff(sigma)/hh
tri.mat <- 2 * diag(n-2)
tri.mat[2 + (0:(n-4))*(n-1)] <- mu[-1]
tri.mat[(1:(n-3)) * (n-1)] <- lambda[-(n-2)]
M <- c(0,solve(tri.mat) %*% d, 0)
x <- seq(from=a[1], to=a[n], length=m)
anz.kl <- hist(x, breaks=a, plot=FALSE)$counts
adj <- function(i) i-1
i <- rep(1:(n-1), anz.kl) + 1
S.x <- M[i-1]*(a[i]-x)^3 / (6*h[adj(i)]) +
M[i] * (x-a[i-1])^3 / (6*h[adj(i)]) +
(y[i-1] - M[i-1] * h[adj(i)]^2 /6) * (a[i]-x)/ h[adj(i)] +
(y[i] - M[i] * h[adj(i)]^2 /6) * (x-a[i-1]) / h[adj(i)]
if(plot){
plot(x, S.x, type="l")
points(a, y)
}
return(cbind(x, S.x))
}
n.char <- 15
xy <- rbind(xy)
if(byrow) xy <- t(xy)
# if(any(is.na(xy))){
# if(na.rm){
# xy <- xy[!apply(is.na(xy),1,any),,drop=FALSE]
# if(nrow(xy)<3) {print("not enough data points"); return()}
# print("Warning: NA elements have been removed!!")
# }else{
# xy.means <- colMeans(xy,na.rm=TRUE)
# for(j in 1:length(xy[1,])) xy[is.na(xy[,j]),j] <- xy.means[j]
# print("Warning: NA elements have been exchanged by mean values!!")
# }
# }
if(!missing(which.row) && all(!is.na(match(which.row,1:dim(xy)[2])) ))
xy <- xy[, which.row, drop=FALSE]
mm <- dim(xy)[2]
n <- dim(xy)[1]
xnames <- dimnames(xy)[[1]]
if(is.null(xnames)) xnames <- as.character(1:n)
if(!missing(labels)) xnames <- labels
if(scale){
xy <- apply(xy,2,function(x){
x <- x-min(x); x <- if(max(x)>0) 2*x/max(x)-1 else x })
} else xy[] <- pmin(pmax(-1,xy),1)
xy <- rbind(xy)
n.c <- dim(xy)[2]
# expand input matrix xy by replication of cols
xy <- xy[,(rows.orig <- h <- rep(1:mm,ceiling(n.char/mm))),drop=FALSE]
if(fill) xy[,-(1:n.c)] <- 0
face.orig <- list(
eye = rbind(c(12,0),c(19,8),c(30,8),c(37,0),c(30,-8),c(19,-8),c(12,0)),
iris = rbind(c(20,0),c(24,4),c(29,0),c(24,-5),c(20,0)),
lipso = rbind(c(0,-47),c( 7,-49), lipsiend=c(16,-53), c( 7,-60),c(0,-62)),
lipsi = rbind(c(7,-54),c(0,-54)),
nose = rbind(c(0,-6),c(3,-16),c(6,-30),c(0,-31)),
shape = rbind(c(0,44),c(29,40),c(51,22),hairend=c(54,11),earsta=c(52,-4),
earend=c(46,-36),c(38,-61),c(25,-83),c(0,-89)),
ear = rbind(c(60,-11), c(57,-30)), # add earsta,earend
hair = rbind(hair1=c(72,12), hair2=c(64,50), c(36,74), c(0,79)) # add hairend
)
lipso.refl.ind <- 4:1
lipsi.refl.ind <- 1
nose.refl.ind <- 3:1
hair.refl.ind <- 3:1
shape.refl.ind <- 8:1
shape.xnotnull <- 2:8
nose.xnotnull <- 2:3
face.list <- list()
for(ind in 1:n){
factors <- xy[ind,]
face <- face.orig
m <- mean(face$lipso[,2])
face$lipso[,2] <- m+(face$lipso[,2]-m)*(1+0.7*factors[4])
face$lipsi[,2] <- m+(face$lipsi[,2]-m)*(1+0.7*factors[4])
face$lipso[,1] <- face$lipso[,1]*(1+0.7*factors[5])
face$lipsi[,1] <- face$lipsi[,1]*(1+0.7*factors[5])
face$lipso["lipsiend",2] <- face$lipso["lipsiend",2]+20*factors[6]
m <- mean(face$eye[,2])
face$eye[,2] <- m+(face$eye[,2] -m)*(1+0.7*factors[7])
face$iris[,2] <- m+(face$iris[,2]-m)*(1+0.7*factors[7])
m <- mean(face$eye[,1])
face$eye[,1] <- m+(face$eye[,1] -m)*(1+0.7*factors[8])
face$iris[,1] <- m+(face$iris[,1]-m)*(1+0.7*factors[8])
m <- min(face$hair[,2])
face$hair[,2] <- m+(face$hair[,2]-m)*(1+0.2*factors[9])
m <- 0
face$hair[,1] <- m+(face$hair[,1]-m)*(1+0.2*factors[10])
m <- 0
face$hair[c("hair1","hair2"),2] <- face$hair[c("hair1","hair2"),2]+50*factors[11]
m <- mean(face$nose[,2])
face$nose[,2] <- m+(face$nose[,2]-m)*(1+0.7*factors[12])
face$nose[nose.xnotnull,1] <- face$nose[nose.xnotnull,1]*(1+factors[13])
m <- mean(face$shape[c("earsta","earend"),1])
face$ear[,1] <- m+(face$ear[,1]-m)* (1+0.7*factors[14])
m <- min(face$ear[,2])
face$ear[,2] <- m+(face$ear[,2]-m)* (1+0.7*factors[15])
face <- lapply(face,function(x){ x[,2] <- x[,2]*(1+0.2*factors[1]);x})
face <- lapply(face,function(x){ x[,1] <- x[,1]*(1+0.2*factors[2]);x})
face <- lapply(face,function(x){ x[,1] <- ifelse(x[,1]>0,
ifelse(x[,2] > -30, x[,1],
pmax(0,x[,1]+(x[,2]+50)*0.2*sin(1.5*(-factors[3])))),0);x})
invert <- function(x) cbind(-x[,1], x[,2])
face.obj <- list(
eyer = face$eye,
eyel = invert(face$eye),
irisr = face$iris,
irisl = invert(face$iris),
lipso = rbind(face$lipso,invert(face$lipso[lipso.refl.ind,])),
lipsi = rbind(face$lipso["lipsiend",], face$lipsi,
invert(face$lipsi[lipsi.refl.ind,, drop=FALSE]),
invert(face$lipso["lipsiend",, drop=FALSE])),
earr = rbind(face$shape["earsta",], face$ear, face$shape["earend",]),
earl = invert(rbind(face$shape["earsta",], face$ear, face$shape["earend",])),
nose = rbind(face$nose,invert(face$nose[nose.refl.ind,])),
hair = rbind(face$shape["hairend",],face$hair,invert(face$hair[hair.refl.ind,]),
invert(face$shape["hairend",,drop=FALSE])),
shape = rbind(face$shape,invert(face$shape[shape.refl.ind,]))
)
face.obj$lipsi <- rbind(face.obj$lipsi, Rev(face.obj$lipsi, margin = 1))
face.list <- c(face.list, list(face.obj))
plot(1, type="n", xlim=c(-105, 105) * 1.1, axes=FALSE,
ylab="", ylim=c(-105, 105) * 1.3, xlab="")
title(xnames[ind])
f <- 1+(ncolors-1) * (factors+1)/2 # translate factors into color numbers
xtrans <- function(x){x}
ytrans <- function(y){y}
for(obj.ind in seq(face.obj)[c(10:11, 1:9)]) {
x <- face.obj[[obj.ind]][, 1]
y <- face.obj[[obj.ind]][, 2]
xx <- spline(1:length(x), x, 40, FALSE)[, 2]
yy <- spline(1:length(y), y, 40, FALSE)[, 2]
lines(xx, yy)
if(obj.ind == 10)
polygon(xtrans(xx), ytrans(yy), col=col.hair[ind], xpd=NA) # hair
if(obj.ind==11)
polygon(xtrans(xx), ytrans(yy), col=col.face[ind], xpd=NA) # face
xx <- xtrans(xx)
yy <- ytrans(yy)
if(obj.ind %in% 1:2) polygon(xx,yy,col="#eeeeee") # eyes without iris
if(obj.ind %in% 3:4) polygon(xx,yy,col=col.eyes[ind], xpd=NA) # eyes:iris
if(obj.ind %in% 9) polygon(xx,yy,col=col.nose[ind], xpd=NA)# nose
if(obj.ind %in% 5:6) polygon(xx,yy,col=col.lips[ind], xpd=NA) # lips
if(obj.ind %in% 7:8) polygon(xx,yy,col=col.ears[ind], xpd=NA)# ears
}
}
if(!missing(main)){
par(opar)
par(mfrow=c(1,1))
mtext(main, 3, 3, TRUE, 0.5)
title(main)
}
info <- c(
"height of face",
"width of face",
"structure of face",
"height of mouth",
"width of mouth",
"smiling",
"height of eyes",
"width of eyes",
"height of hair",
"width of hair",
"style of hair",
"height of nose",
"width of nose",
"width of ear",
"height of ear")
var.names <- dimnames(xy)[[2]]
if(0==length(var.names))
var.names <- paste("Var",rows.orig,sep="")
info <- data.frame("modified item"=info, "variable"=var.names[1:length(info)])
names(face.list) <- xnames
out <- list(faces=face.list, info=info,xy=t(xy))
class(out) <- "faces"
invisible(out)
}
## plots: PlotBag ====
####################################"
# the source code for the function
# from Hans Peter Wolf
#
# http://www.wiwi.uni-bielefeld.de/~wolf/software/R-wtools/bagplot/bagplot.R
#
#
##start:##
PlotBagPairs <- function(dm, trim = 0.0, main, numeric.only = TRUE,
factor = 3, approx.limit = 300, pch = 16,
cex = 0.8, precision = 1, col.loophull = "#aaccff",
col.looppoints = "#3355ff", col.baghull = "#7799ff",
col.bagpoints = "#000088", ...){
if(missing(main)) main <- paste(deparse(substitute(dm)),"/ trim =",round(trim,3))
if(length(trim) == 1) trim <- rep(trim, ncol(dm))
if(numeric.only){
dm <- dm[, idx <- sapply(1:ncol(dm), function(x) is.numeric(dm[,x]))]
trim <- trim[idx]
}
for(j in 1:ncol(dm)){
x <- dm[,j]
if(!is.numeric(x)) x <- as.numeric(x)
if( trim[j] > 0) {
na.idx <- is.na(x)
xlim <- quantile(x[!na.idx], c(trim[j] , 1-trim[j]))
x[ na.idx | x < xlim[1] | xlim[2] < x ] <- NA
}
dm[,j] <- x
}
# DM0 <<- dm
h.fn <- function(x,y){
idx <- !is.na(x) & !is.na(y)
x <- x[ idx ]; y <- y[ idx ]
BP <- PlotBag(x,y,add=TRUE,factor = factor, approx.limit = approx.limit, pch = pch,
cex = cex, precision = precision, col.loophull = col.loophull,
col.looppoints = col.looppoints, col.baghull = col.baghull,
col.bagpoints = col.bagpoints, verbose=FALSE)
# BP <<- BP ### for debugging
}
par(mfrow=c(1,1))
pairs(dm, panel = h.fn, ...)
mtext(main, line=2.5)
dm
}
#0:
##start:##
compute.bagplot <- function(x,y,
factor=3, # expanding factor for bag to get the loop
na.rm=FALSE, # should NAs removed or exchanged
approx.limit=300, # limit
dkmethod=2, # in 1:2; method 2 is recommended
precision=1, # controls precision of computation
verbose=FALSE,debug.plots="no" # tools for debugging
){
"bagplot, version 2012/12/05, peter wolf"
# define some functions
win<-function(dx,dy){ atan2(y=dy,x=dx) }
out.of.polygon<-function(xy,pg){ # 121026
xy<-matrix(xy,ncol=2)
# check trivial case
if(nrow(pg)==1) return(xy[,1]==pg[1] & xy[,2]==pg[2])
# store number of points of xy and polygon
m<-nrow(xy); n<-nrow(pg)
# find small value relative to polygon
limit <- -abs(1E-10*diff(range(pg)))
# find vectors that are orthogonal to segments of polygon
pgn<-cbind(diff(c(pg[,2],pg[1,2])),-diff(c(pg[,1],pg[1,1])))
# find center of gravity of xy
S<-colMeans(xy)
# compute negative distances of polygon to center of gravity of xy
dxy<-cbind(S[1]-pg[,1],S[2]-pg[,2])
# unused: S.in.pg<-all(limit<apply(dxy*pgn,1,sum))
if( !all( limit < apply(dxy*pgn,1,sum) ) ){
pg<-pg[n:1,]; pgn<--pgn[n:1,]
}
# initialize result
in.pg<-rep(TRUE,m)
for(j in 1:n){
dxy<-xy-matrix(pg[j,],m,2,byrow=TRUE)
in.pg<-in.pg & limit<(dxy%*%pgn[j,])
}
return(!in.pg)
}
cut.z.pg<-function(zx,zy,p1x,p1y,p2x,p2y){
a2<-(p2y-p1y)/(p2x-p1x); a1<-zy/zx
sx<-(p1y-a2*p1x)/(a1-a2); sy<-a1*sx
sxy<-cbind(sx,sy)
h<-any(is.nan(sxy))||any(is.na(sxy))||any(Inf==abs(sxy))
if(h){ # print("NAN found"); print(cbind(a1,a2,zx,zy,sxy,p2x-p1x))
if(!exists("verbose")) verbose<-FALSE
if(verbose) cat("special")
# zx is zero ### 121030
h<-0==zx
sx<-ifelse(h,zx,sx); sy<-ifelse(h,p1y-a2*p1x,sy)
# points on line defined by line segment
a1 <- ifelse( abs(a1) == Inf, sign(a1)*123456789*1E10, a1) # 121030
a2 <- ifelse( abs(a2) == Inf, sign(a2)*123456789*1E10, a2)
# points on line defined by line segment
h<-0==(a1-a2) & sign(zx)==sign(p1x)
sx<-ifelse(h,p1x,sx); sy<-ifelse(h,p1y,sy)
h<-0==(a1-a2) & sign(zx)!=sign(p1x)
sx<-ifelse(h,p2x,sx); sy<-ifelse(h,p2y,sy)
# line segment vertical
# & center NOT ON line segment
h<-p1x==p2x & zx!=p1x & p1x!=0
sx<-ifelse(h,p1x,sx); sy<-ifelse(h,zy*p1x/zx,sy)
# & center ON line segment
h<-p1x==p2x & zx!=p1x & p1x==0
sx<-ifelse(h,p1x,sx); sy<-ifelse(h,0,sy)
# & center NOT ON line segment & point on line ### 121126
h<-p1x==p2x & zx==p1x & p1x!=0 # & sign(zy)==sign(p1y)
sx<-ifelse(h,zx,sx); sy<-ifelse(h,zy,sy)
# & center ON line segment & point on line
h<-p1x==p2x & zx==p1x & p1x==0 & sign(zy)==sign(p1y)
sx<-ifelse(h,p1x,sx); sy<-ifelse(h,p1y,sy)
h<-p1x==p2x & zx==p1x & p1x==0 & sign(zy)!=sign(p1y)
sx<-ifelse(h,p1x,sx); sy<-ifelse(h,p2y,sy)
# points identical to end points of line segment
h<-zx==p1x & zy==p1y; sx<-ifelse(h,p1x,sx); sy<-ifelse(h,p1y,sy)
h<-zx==p2x & zy==p2y; sx<-ifelse(h,p2x,sx); sy<-ifelse(h,p2y,sy)
# point of z is center
h<-zx==0 & zy==0; sx<-ifelse(h,0,sx); sy<-ifelse(h,0,sy)
sxy<-cbind(sx,sy)
} # end of special cases
#if(verbose){ print(rbind(a1,a2));print(cbind(zx,zy,p1x,p1y,p2x,p2y,sxy))}
if(!exists("debug.plots")) debug.plots<-"no"
if(debug.plots=="all"){
segments(sxy[,1],sxy[,2],zx,zy,col="red")
segments(0,0,sxy[,1],sxy[,2],col="green",lty=2) ##!!
points(sxy,col="red")
}
return(sxy)
}
find.cut.z.pg<-function(z,pg,center=c(0,0),debug.plots="no"){
if(!is.matrix(z)) z<-rbind(z)
if(1==nrow(pg)) return(matrix(center,nrow(z),2,TRUE))
n.pg<-nrow(pg); n.z<-nrow(z)
z<-cbind(z[,1]-center[1],z[,2]-center[2])
pgo<-pg; pg<-cbind(pg[,1]-center[1],pg[,2]-center[2])
if(!exists("debug.plots")) debug.plots<-"no"
if(debug.plots=="all"){
plot(rbind(z,pg,0),bty="n"); points(z,pch="p")
lines(c(pg[,1],pg[1,1]),c(pg[,2],pg[1,2]))}
# find angles of pg und z
apg<-win(pg[,1],pg[,2])
apg[is.nan(apg)]<-0; a<-order(apg); apg<-apg[a]; pg<-pg[a,]
az<-win(z[,1],z[,2])
# find line segments
segm.no<-apply((outer(apg,az,"<")),2,sum)
segm.no<-ifelse(segm.no==0,n.pg,segm.no)
next.no<-1+(segm.no %% length(apg))
# compute cut points
cuts<-cut.z.pg(z[,1],z[,2],pg[segm.no,1],pg[segm.no,2],
pg[next.no,1],pg[next.no,2])
# rescale
cuts<-cbind(cuts[,1]+center[1],cuts[,2]+center[2])
return(cuts)
}
# find.cut.z.pg(EX, EX1,center=CE)
hdepth.of.points<-function(tp){
# 121030 second parameter n has been removed
# if(!exists("precision")) precision <- 1 ### 121203
# return(find.hdepths.tp(tp, xy, 181*precision)) ### 121202
n.tp<-nrow(tp)
tphdepth<-rep(0,n.tp); dpi<-2*pi-0.000001
for(j in 1:n.tp) {
dx<-tp[j,1]-xy[,1]; dy<-tp[j,2]-xy[,2]
a<-win(dx,dy)+pi; h<-a<10; a<-a[h]; ident<-sum(!h)
init<-sum(a < pi); a.shift<-(a+pi) %% dpi
minusplus<-c(rep(-1,length(a)),rep(1,length(a))) #### 070824
h<-cumsum(minusplus[order(c(a,a.shift))])
tphdepth[j]<-init+min(h)+1 # +1 because of the point itself!!
# tphdepth[j]<-init+min(h)+ident; cat("SUMME",ident)
}
tphdepth
}
find.hdepths.tp <- function(tp, data, number.of.directions=181){ # 121130
# standardize dimensions
xy <- as.matrix(data); tp <- as.matrix(rbind(tp)); n.tp <- dim(tp)[1]
for( j in 1:2) {
xy[,j] <- xy[,j] - (h <- min(xy[,j], na.rm=TRUE))
tp[,j] <- tp[,j] - h
if( 0 < (h <- max(xy[,j], na.rm=TRUE))){
xy[,j] <- xy[,j]/h; tp[,j] <- tp[,j]/h
}
}
##loop over directions##
phi <- c(seq(0,180,length=number.of.directions)[-1]*(2*pi/360))
sinphi <- c(sin(phi),1); cosphi <- c(cos(phi),0)
RM1 <- round(digits=6,rbind(cosphi,sinphi))
hdtp <- rep(length(xy[,1]),length(tp[,1]))
for( j in seq(along=sinphi)){ #print(j)
xyt <- xy %*% RM1[,j]; tpt <- (tp %*% RM1[,j])[]
xyt <- xyt[!is.na(xyt)] #; tpt <- sort(tpt)
hdtp <- pmin(hdtp,(rank( c(tpt,xyt), ties.method="min"))[1:n.tp]
-rank( tpt,ties.method="min")
,rank(-c(tpt,xyt), ties.method="min")[1:n.tp]
-rank(-tpt,ties.method="min")
)
}
hdtp
}
expand.hull<-function(pg,k){
if( 1 >= nrow(pg) ) return(pg) ## 121026 ## 121123 <= statt ==
resolution<-floor(20*precision)
pg0<-xy[hdepth==1,]
pg0<-pg0[chull(pg0[,1],pg0[,2]),]
end.points<-find.cut.z.pg(pg,pg0,center=center,debug.plots=debug.plots)
lam<-((0:resolution)^1)/resolution^1
pg.new<-pg
for(i in 1L:nrow(pg)){
tp<-cbind(pg[i,1]+lam*(end.points[i,1]-pg[i,1]),
pg[i,2]+lam*(end.points[i,2]-pg[i,2]))
# hd.tp<-hdepth.of.points(tp)
hd.tp<-find.hdepths.tp(tp,xy)
ind<-max(sum(hd.tp>=k),1)
if(ind<length(hd.tp)){ # hd.tp[ind]>k &&
tp<-cbind(tp[ind,1]+lam*(tp[ind+1,1]-tp[ind,1]),
tp[ind,2]+lam*(tp[ind+1,2]-tp[ind,2]))
# hd.tp<-hdepth.of.points(tp)
hp.tp<-find.hdepths.tp(tp,xy)
ind<-max(sum(hd.tp>=k),1)
}
pg.new[i,]<-tp[ind,]
}
pg.new<-pg.new[chull(pg.new[,1],pg.new[,2]),]
# cat("depth pg.new", hdepth.of.points(pg.new))
# cat("depth pg.new", find.hdepths.tp(pg.new,xy))
pg.add<-0.5*(pg.new+rbind(pg.new[-1,],pg.new[1,]))
# end.points<-find.cut.z.pg(pg,pg0,center=center)
end.points<-find.cut.z.pg(pg.add,pg0,center=center) #### 070824
for(i in 1L:nrow(pg.add)){
tp<-cbind(pg.add[i,1]+lam*(end.points[i,1]-pg.add[i,1]),
pg.add[i,2]+lam*(end.points[i,2]-pg.add[i,2]))
# hd.tp<-hdepth.of.points(tp)
hd.tp<-find.hdepths.tp(tp,xy)
ind<-max(sum(hd.tp>=k),1)
if(ind<length(hd.tp)){ # hd.tp[ind]>k &&
tp<-cbind(tp[ind,1]+lam*(tp[ind+1,1]-tp[ind,1]),
tp[ind,2]+lam*(tp[ind+1,2]-tp[ind,2]))
# hd.tp<-hdepth.of.points(tp)
hd.tp<-find.hdepths.tp(tp,xy)
ind<-max(sum(hd.tp>=k),1)
}
pg.add[i,]<-tp[ind,]
}
# cat("depth pg.add", hdepth.of.points(pg.add))
pg.new<-rbind(pg.new,pg.add)
pg.new<-pg.new[chull(pg.new[,1],pg.new[,2]),]
}
cut.p.sl.p.sl<-function(xy1,m1,xy2,m2){
sx<-(xy2[2]-m2*xy2[1]-xy1[2]+m1*xy1[1])/(m1-m2)
sy<-xy1[2]-m1*xy1[1]+m1*sx
if(!is.nan(sy)) return( c(sx,sy) )
if(abs(m1)==Inf) return( c(xy1[1],xy2[2]+m2*(xy1[1]-xy2[1])) )
if(abs(m2)==Inf) return( c(xy2[1],xy1[2]+m1*(xy2[1]-xy1[1])) )
}
pos.to.pg<-function(z,pg,reverse=FALSE){
if(reverse){
int.no<-apply(outer(pg[,1],z[,1],">="),2,sum)
zy.on.pg<-pg[int.no,2]+pg[int.no,3]*(z[,1]-pg[int.no,1])
}else{
int.no<-apply(outer(pg[,1],z[,1],"<="),2,sum)
zy.on.pg<-pg[int.no,2]+pg[int.no,3]*(z[,1]-pg[int.no,1])
}
#### ifelse(z[,2]<zy.on.pg, "lower","higher") ##### 121004
result <- ifelse(z[,2]<zy.on.pg, "lower","higher") ####
return(result)
if( all(result=="lower") ){
result <- ifelse(((z[,2] - zy.on.pg)/max(z[,2] - zy.on.pg)+1e-10) < 0,
"lower","higher")
}
if( all(result=="higher") ){
result <- ifelse(((z[,2] - zy.on.pg)/max(z[,2] - zy.on.pg)-1e-10) < 0,
"lower","higher")
}
print(result)
return(result)
}
find.polygon.center<-function(xy){
#### if(missing(xy)){n<-50;x<-rnorm(n);y<-rnorm(n); xy<-cbind(x,y)}
#### xy<-xy[chull(xy),]
if(length(xy)==2) return(xy[1:2])
if(nrow(xy)==2) return(colMeans(xy)) #### 121009
#### partition polygon into triangles
n<-length(xy[,1]); mxy<-colMeans(xy)
xy2<-rbind(xy[-1,],xy[1,]); xy3<-cbind(rep(mxy[1],n),mxy[2])
#### determine areas and centers of gravity of triangles
S<-(xy+xy2+xy3)/3
F2<-abs((xy[,1]-xy3[,1])*(xy2[,2]-xy3[,2])-
(xy[,2]-xy3[,2])*(xy2[,1]-xy3[,1]))
#### compute center of gravity of polygon
lambda<-F2/sum(F2)
SP<-colSums(cbind(S[,1]*lambda,S[,2]*lambda))
return(SP)
}
# check input
xydata<-if(missing(y)) x else cbind(x,y)
if(is.data.frame(xydata)) xydata<-as.matrix(xydata)
if(any(is.na(xydata))){
if(na.rm){ xydata<-xydata[!apply(is.na(xydata),1,any),,drop=FALSE]
print("Warning: NA elements have been removed!!")
}else{ #121129
xy.medians<-apply(xydata,2,function(x) median(x, na.rm=TRUE))
# colMeans(xydata,na.rm=TRUE)
for(j in 1:ncol(xydata)) xydata[is.na(xydata[,j]),j]<-xy.medians[j]
print("Warning: NA elements have been exchanged by median values!!")
}
}
# if(nrow(xydata)<3) {print("not enough data points"); return()} ### 121008
if(length(xydata)<4) {print("not enough data points"); return()}
if((length(xydata)%%2)==1) {print("number of values isn't even"); return()}
if(!is.matrix(xydata)) xydata<-matrix(xydata,ncol=2,byrow=TRUE)
# select sample in case of a very large data set
very.large.data.set<-nrow(xydata) > approx.limit
# use of random number generator may disturb simulation
# therefore we now use a systematical part of the data 20120930
#### OLD: set.seed(random.seed<-13) #### SEED
if(very.large.data.set){
#### OLD: ind<-sample(seq(nrow(xydata)),size=approx.limit)
step<-(n<-nrow(xydata))/approx.limit; ind <- round(seq(1,n,by=step))
xy<-xydata[ind,]
} else xy<-xydata
n<-nrow(xy)
points.in.bag<-floor(n/2)
# if jittering is needed
# the following two lines can be activated
#xy<-xy+cbind(rnorm(n,0,.0001*sd(xy[,1])),
# rnorm(n,0,.0001*sd(xy[,2])))
if(verbose) cat("end of initialization")
prdata<-prcomp(xydata)
is.one.dim<-(0 == max(prdata[[1]])) || (min(prdata[[1]])/max(prdata[[1]]))<0.00001 # 121129
if(is.one.dim){
if(verbose) cat("data set one dimensional")
center<-colMeans(xydata)
res<-list(xy=xy,xydata=xydata,prdata=prdata,
is.one.dim=is.one.dim,center=center)
class(res)<-"bagplot"
return(res)
}
if(verbose) cat("data not linear")
if(nrow(xydata)<=4) {
if(verbose) cat("only three or four data points")
center<-colMeans(xydata)
res<-list(xy=xy,xydata=xydata,prdata=prdata,hdepths=rep(1,n),hdepth=rep(1,n),
is.one.dim=is.one.dim,center=center,hull.center=NULL,
hull.bag=NULL,hull.loop=NULL,pxy.bag=NULL,pxy.outer=xydata,
pxy.outlier=NULL,exp.dk=xydata)
class(res)<-"bagplot"
return(res)
}
xym<-apply(xy,2,mean); xysd<-apply(xy,2,sd)
xyxy<-cbind((xy[,1]-xym[1])/xysd[1],(xy[,2]-xym[2])/xysd[2])
dx<-(outer(xy[,1],xy[,1],"-"))
dy<-(outer(xy[,2],xy[,2],"-"))
alpha<-atan2(y=dy,x=dx); diag(alpha)<-1000
for(j in 1:n) alpha[,j]<-sort(alpha[,j])
alpha<-alpha[-n,] ; m<-n-1
#### quick look inside, just for check
if(debug.plots=="all"){
plot(xy,bty="n"); xdelta<-abs(diff(range(xy[,1]))); dx<-xdelta*.3
for(j in 1:n) {
p<-xy[j,]; dy<-dx*tan(alpha[,j])
segments(p[1]-dx,p[2]-dy,p[1]+dx,p[2]+dy,col=j)
text(p[1]-xdelta*.02,p[2],j,col=j)
}
}
if(verbose) print("end of computation of angles")
hdepth<-rep(0,n); dpi<-2*pi-0.000001; mypi<-pi-0.000001
minusplus<-c(rep(-1,m),rep(1,m))
if(FALSE){
for(j in 1:n) {
a<-alpha[,j]+pi; h<-a<10; a<-a[h]; init<-sum(a < mypi) # hallo
a.shift<-(a+pi) %% dpi
minusplus<-c(rep(-1,length(a)),rep(1,length(a))) #### 070824
h<-cumsum(minusplus[order(c(a,a.shift))])
hdepth[j]<-init+min(h)+1 # or do we have to count identical points?
# hdepth[j]<-init+min(h)+sum(xy[j,1]==xy[,1] & xy[j,2]==xy[,2])
}
}
find.hdepths <- function(xy, number.of.directions=181){ # 121126
xy <- as.matrix(xy)
for( j in 1:2) {
xy[,j] <- xy[,j] - min(xy[,j])
if( 0 < (h <- max(xy[,j]))) xy[,j] <- xy[,j] / max(xy[,j])
}
phi <- c(seq(0,180,length=number.of.directions)[-1]*(2*pi/360))
sinphi <- c(sin(phi),1); cosphi <- c(cos(phi),0)
RM1 <- round(digits=6,rbind(cosphi,sinphi))
hd <- rep(h<-length(xy[,1]),h)
for( j in seq(along=sinphi)){
xyt <- xy %*% RM1[,j]
hd <- pmin(hd,rank(xyt,ties.method="min"), rank(-xyt,ties.method="min"))
}
# xyt <- xy %*% RM1
# hd2 <- cbind(apply(xyt, 2, rank, ties.method="min"),
# apply(-xyt,2, rank, ties.method="min"))
# hd2 <- apply(hd2, 1, min)
hd
}
hdepth <- find.hdepths(xy,181*precision)
if(verbose){print("end of computation of hdepth:"); print(hdepth)}
#### quick look inside, just for a check
if(debug.plots=="all"){
plot(xy,bty="n")
xdelta<-abs(diff(range(xy[,1]))); dx<-xdelta*.1
for(j in 1:n) {
a<-alpha[,j]+pi; a<-a[a<10]; init<-sum(a < pi)
a.shift<-(a+pi) %% dpi
minusplus<-c(rep(-1,length(a)),rep(1,length(a))) #### 070824
h<-cumsum(minusplus[ao<-(order(c(a,a.shift)))])
no<-which((init+min(h)) == (init+h))[1]
p<-xy[j,]; dy<-dx*tan(alpha[,j])
segments(p[1]-dx,p[2]-dy,p[1]+dx,p[2]+dy,col=j,lty=3)
dy<-dx*tan(c(sort(a),sort(a))[no])
segments(p[1]-5*dx,p[2]-5*dy,p[1]+5*dx,p[2]+5*dy,col="black")
text(p[1]-xdelta*.02,p[2],hdepth[j],col=1) # cex=2.5 assumes suitable fonts
}
}
hd.table<-table(sort(hdepth))
d.k<-cbind(dk=rev(cumsum(rev(hd.table))),
k =as.numeric(names(hd.table)))
k.1<-sum( points.in.bag < d.k[,1] )
# if(nrow(d.k)>1){ ### version 09/2005, error in data set 1 of Meuleman
# instead of >2 now >k.1 ### 070827
# if(nrow(d.k)>k.1){ k<-d.k[k.1+1,2] } else { k<-d.k[k.1,2] }
# this statement will not have an effect because of the next one:
k<-d.k[k.1,2]+1 # 121004 increment depth by one not by looking for next depth
if(verbose){cat("numbers of members of dk:"); print(hd.table); print(d.k)}
if(verbose){cat("end of computation of k, k=",k,"k.1:",k.1)}
# D.K<<-d.k; K.1<<-k.1; EX<<-exp.dk; EX.1<<-exp.dk.1; PDK<<-pdk; HDEPTH<<-hdepth
center<-apply(xy[which(hdepth==max(hdepth)),,drop=FALSE],2,mean)
hull.center<-NULL
if(3<nrow(xy)&&length(hd.table)>0){
n.p<-floor(1.5*c(32,16,8)[1+(n>50)+(n>200)]*precision)
# limit.hdepth.to.check <- sort(hdepth, decreasing = TRUE)[min(nrow(xy),6)]
# 121126
h <- unique(sort(hdepth, decreasing = TRUE))
limit.hdepth.to.check <- sort(h)[min(length(h),3)]
h<-cands<-xy[limit.hdepth.to.check <= hdepth,,drop=FALSE]
# h<-cands<-xy[rev(order(hdepth))[1:(min(nrow(xy),6))],]
cands<-cands[chull(cands[,1],cands[,2]),]; n.c<-nrow(cands)
if(is.null(n.c))cands<-h
xyextr<-rbind(apply(cands,2,min),apply(cands,2,max))
## xydel<-2*(xyextr[2,]-xyextr[1,])/n.p ## unused
if( (xyextr[2,1]-xyextr[1,1]) < 0.2*(h <- diff(range(xy[,1])))){
xyextr[1:2,1] <- mean(xyextr[,1]) + c(-.1,.1) * h } #### 121203
if( (xyextr[2,2]-xyextr[1,2]) < 0.2*(h <- diff(range(xy[,2])))){
xyextr[1:2,2] <- mean(xyextr[,2]) + c(-.1,.1) * h } #### 121203
if(verbose){cat("xyextr: looking for maximal depth"); print(xyextr) }
h1<-seq(xyextr[1,1],xyextr[2,1],length=n.p)
h2<-seq(xyextr[1,2],xyextr[2,2],length=n.p)
tp<-cbind(as.vector(matrix(h1,n.p,n.p)), # [1:n.p^2],
as.vector(matrix(h2,n.p,n.p,TRUE))) # [1:n.p^2])
# tphdepth<-max(hdepth.of.points(tp))-1
tphdepth<-max(find.hdepths.tp(tp,xy))
# if(verbose) { TP<<-tp; TPD<<-find.hdepths.tp(tp,xy) }
if(verbose) cat("points(TP,pch=c(letters,LETTERS)[TPD+1])")
# if max of testpoint is smaller than max depth of points take that max!
if(verbose){ cat("depth of testpoints"); print(summary(tphdepth)) } # 121126
tphdepth<-max(tphdepth,d.k[,2]) # 121004
# define direction for hdepth search
num<-floor(2*c(417,351,171,85,67,43)[sum(n>c(1,50,100,150,200,250))]*precision)
num.h<-floor(num/2); angles<-seq(0,pi,length=num.h)
ang<-tan(pi/2-angles)
kkk<-tphdepth
if(verbose){cat("max-hdepth found:"); print(kkk)}
if(verbose) cat("find polygon with max depth")
ia<-1; a<-angles[ia]; xyt<-xyxy%*%c(cos(a),-sin(a)); xyto<-order(xyt)
# initial for upper part
ind.k<-xyto[kkk]; cutp<-c(xyxy[ind.k,1],-10)
dxy<-diff(range(xyxy))
pg<-rbind(c(cutp[1],-dxy,Inf),c(cutp[1],dxy,NA))
# initial for lower part
ind.kk<-xyto[n+1-kkk]; cutpl<-c(xyxy[ind.kk,1],10)
# pgl<-rbind(c(cutpl[1],dxy,Inf),c(cutpl[1],-dxy,NA))
pgl<-rbind(c(cutpl[1],dxy,-Inf),c(cutpl[1],-dxy,NA))
# the sign of inf doesn't matter
if(debug.plots=="all"){ plot(xyxy,type="p",bty="n")
text(xy,,1:n,col="blue")
hx<-xy[ind.k,c(1,1)]; hy<-xy[ind.k,c(2,2)]
segments(hx,hy,c(10,-10),hy+ang[ia]*(c(10,-10)-hx),lty=2)
text(hx+rnorm(1,,.1),hy+rnorm(1,,.1),ia)
}
if(verbose) cat("start of computation of the directions: ","kkk=",kkk) # 121030
for(ia in seq(angles)[-1]){
# determine critical points pnew and pnewl of direction a
# if(verbose) cat("ia",ia,angles[ia])
# 121030
a<-angles[ia]; angtan<-ang[ia]; xyt<-xyxy%*%c(cos(a),-sin(a)); xyto<-order(xyt)
ind.k <-xyto[kkk]; ind.kk<-xyto[n+1-kkk]; pnew<-xyxy[ind.k,]; pnewl<-xyxy[ind.kk,]
# if(verbose) if( 1 < sum(xyt == xyt[ind.k]) )print("WARNING: some points identical")
if(debug.plots=="all") points(pnew[1],pnew[2],col="red")
# new limiting lines are defined by pnew / pnewl and slope a
# find segment of polygon that is cut by new limiting line and cut
# if(ia>200) { #<show pg pgl>#; points(pnew[1],pnew[2],col="magenta",cex=6) }
if( abs(angtan)>1e10){ if(verbose) cat("kkk",kkk,"x=c case")
# case of vertical slope #print(pg);print(pnew);print(xyt);lines(pg,col="red",lwd=3)
# number of points left of point pnew that limit the polygon
pg.no<-sum(pg[,1]<pnew[1])
if( 0 < pg.no ){
# the polygon (segment pg.no) has to be cut at x==pnew[1]
cutp <- c(pnew[1], pg [pg.no, 2]+pg [pg.no, 3]*(pnew [1]-pg [pg.no ,1]))
pg<- rbind(pg[1:pg.no,], c(cutp,angtan), c(cutp[1]+dxy, cutp[2] +angtan*dxy,NA))
} else {
if(verbose) cat("!!! case degenerated UPPER polygon: pg.no==0")
# the limiting point pnew is above the beginning of the polygon
# therefore, the polygon reduces to line
pg <- rbind(pg[1,], c(pg[2,1:2],NA))
}
pg.nol<-sum(pgl[,1]>=pnewl[1])
if( 0 < pg.nol ){ #??2 ### 121204
cutpl<-c(pnewl[1],pgl[pg.nol,2]+pgl[pg.nol,3]*(pnewl[1]-pgl[pg.nol,1]))
pgl<-rbind(pgl[1:pg.nol,],c(cutpl,angtan),c(cutpl[1]-dxy, cutpl[2]-angtan*dxy,NA))
} else {
if(verbose) cat("!!! case degenerated LOWER polygon: pgl.no==0")
pgl <- rbind(pgl[1,], c(pgl[2,1:2],NA))
}
}else{ # if(verbose) cat("kkk",kkk,"normal case")
# normal case upper polygon
pg.inter<-pg[,2]-angtan*pg[,1]; pnew.inter<-pnew[2]-angtan*pnew[1]
pg.no<-sum(pg.inter<pnew.inter)
if(is.na(pg[pg.no,3])) pg[pg.no,3] <- -Inf # 121129 NaN/Na error
cutp<-cut.p.sl.p.sl(pnew,ang[ia],pg[pg.no,1:2],pg[pg.no,3])
pg<- rbind(pg[1:pg.no,], c(cutp,angtan), c(cutp[1]+dxy, cutp[2] +angtan*dxy,NA))
# normal case lower polygon
pg.interl<-pgl[,2]-angtan*pgl[,1]; pnew.interl<-pnewl[2]-angtan*pnewl[1]
pg.nol<-sum(pg.interl>pnew.interl)
if(is.na(pgl[pg.nol,3])) pgl[pg.nol,3] <- Inf # 121129 NaN/Na error
cutpl<-cut.p.sl.p.sl(pnewl,angtan,pgl[pg.nol,1:2],pgl[pg.nol,3])
pgl<-rbind(pgl[1:pg.nol,],c(cutpl,angtan),c(cutpl[1]-dxy, cutpl[2]-angtan*dxy,NA))
}
# if(kkk==KKK && ia == 51) { cat("ENDE: pgl"); print(pgl) }
# update pg, pgl completed
# PG<<-pg;PG.NO<<-pg.no;CUTP<<-cutp;DXY<<-dxy;PNEW<<-pnew;PGL<<-pgl;PG.NOL<<-pg.nol
#
# cat("angtan",angtan,"pg.no",pg.no,"pkt:",pnew)
# if(ia==stopp) lines(pg,type="b",col="green")
if(debug.plots=="all"){
points(pnew[1],pnew[2],col="red")
hx<-xyxy[ind.k,c(1,1)]; hy<-xyxy[ind.k,c(2,2)]
segments(hx,hy,c(10,-10),hy+ang[ia]*(c(10,-10)-hx),lty=2)
# text(hx+rnorm(1,,.1),hy+rnorm(1,,.1),ia)
# print(pg)
# if(ia==stopp) lines(pgl,type="b",col="green")
points(cutpl[1],cutpl[2],col="red")
hx<-xyxy[ind.kk,c(1,1)]; hy<-xyxy[ind.kk,c(2,2)]
segments(hx,hy,c(10,-10),hy+ang[ia]*(c(10,-10)-hx),lty=2)
# text(hx+rnorm(1,,.1),hy+rnorm(1,,.1),ia)
# print(pgl)
}
##show pg pgl##
}
# if(verbose) PG <<- pg; PGL <<- pgl
if(2<nrow(pg) && 2<nrow(pgl)){
# plot(xyxy[,1:2],xlim=c(-.5,+.5),ylim=c(-.5,.50))
# lines(pg,type="b",col="red"); lines(pgl,type="b",col="blue")
# remove first and last points and multiple points #<show pg pgl>#
limit<-1e-10
# pg <-pg [c(TRUE,(abs(diff(pg [,1]))>limit)|(abs(diff(pg [,2]))>limit)),] old#
idx <- c(TRUE,(abs(diff(pg [,1]))>limit)|(abs(diff(pg [,2]))>limit)) # 121008
if(any(idx==FALSE)){
pg <-pg[idx,]; pg[,3] <- c(diff(pg[,2])/diff(pg[,1]), NA)
}
# old reduction which caused some errors:
# pgl<-pgl[c(TRUE,(abs(diff(pgl[,1]))>limit)|(abs(diff(pgl[,2]))>limit)),] error##
# pgl<-pgl[c( (abs(diff(pgl[,1]))>limit)|(abs(diff(pgl[,2]))>limit),TRUE),] old#
idx <- c( (abs(diff(pgl[,1]))>limit)|(abs(diff(pgl[,2]))>limit),TRUE)#121008
if(any(idx==FALSE)){
pgl<-pgl[idx,]; pgl[,3] <- c(diff(pgl[,2])/diff(pgl[,1]), NA)
}
# add some tolerance in course of numerical problems
pgl[,2]<-pgl[,2] - .00001 # 121004
# show pg pgl>>
pg<- pg [-nrow(pg ),][-1,,drop=FALSE]
pgl<-pgl[-nrow(pgl),][-1,,drop=FALSE]
# determine position according to the other polygon
# cat("relative position: lower polygon")
indl<-pos.to.pg(round(pgl,digits=10),round(pg,digits=10)) # 121126
# cat("relative position: upper polygon")
indu<-pos.to.pg(round(pg,digits=10),round(pgl,digits=10),TRUE)
sr<-sl<-NULL # ; ##show pg pgl>>
# right region
if(indu[(npg<-nrow(pg))]=="lower" & indl[1]=="higher"){
# cat("in if of right region: the upper polynom is somewhere lower")
# checking from the right: last point of lower polygon that is NOT ok
rnuml<-which(indl=="lower")[1]-1
# checking from the left: last point of upper polygon that is ok
rnumu<-npg+1-which(rev(indu=="higher"))[1]
# special case all points of lower polygon are upper
if(is.na(rnuml)) rnuml<-sum(pg[rnumu,1]<pgl[,1])
# special case all points of upper polygon are lower
if(is.na(rnumu)) rnumu<-sum(pg[,1]<pgl[rnuml,1])
xyl<-pgl[rnuml,]; xyu<-pg[rnumu,]
# cat("right"); print(rnuml); print(xyl)
# cat("right"); print(rnumu); print(xyu)
sr<-cut.p.sl.p.sl(xyl[1:2],xyl[3],xyu[1:2],xyu[3])
}
# left region
if(indl[(npgl<-nrow(pgl))]=="higher"&indu[1]=="lower"){
# cat("in if of left region: the upper polynom is somewhere lower")
# checking from the right: last point of lower polygon that is ok
lnuml<-npgl+1-which(rev(indl=="lower"))[1]
# checking from the left: last point of upper polygon that is NOT ok
lnumu<-which(indu=="higher")[1]-1
# special case all points of lower polygon are upper
if(is.na(lnuml)) lnuml<-sum(pg[lnumu,1]<pgl[,1])
# special case all points of upper polygon are lower
if(is.na(lnumu)) lnumu<-sum(pg[,1]<pgl[lnuml,1])
xyl<-pgl[lnuml,]; xyu<-pg[lnumu,]
# cat("left"); print(lnuml); print(xyl)
# cat("left"); print(lnumu); print(xyu)
sl<-cut.p.sl.p.sl(xyl[1:2],xyl[3],xyu[1:2],xyu[3])
}
# if(kkk==2){ ##show pg pgl##; INDU<<-indu; INDL<<-indl; PGL<<-pgl; PGU<<-pg}
pg<-rbind(pg [indu=="higher",1:2,drop=FALSE],sr,
pgl[indl=="lower", 1:2,drop=FALSE],sl)
if(debug.plots=="all") lines(rbind(pg,pg[1,]),col="red")
if(!any(is.na(pg))) pg<-pg[chull(pg[,1],pg[,2]),]
# if(kkk==7){ PG <<- pg }
} else {
if(2<nrow(pgl)){ #121204
pg <- rbind(pg[2,1:2],pgl[-c(1,length(pgl[,1])),1:2])
} else {
pg <- rbind(pg [-c(1,length(pg [,1])),1:2],pgl[2,1:2])
# rbind(pgl[2,1:2],pg[2,1:2])
}
}
if(verbose) cat("END of computation of the directions")
hull.center<-cbind(pg[,1]*xysd[1]+xym[1],pg[,2]*xysd[2]+xym[2])
if(!any(is.na(hull.center))) center<-find.polygon.center(hull.center) else
hull.center <- rbind(center) # 121126
if(verbose){ cat("CENTER"); print(center) }
if(verbose){cat("hull.center",hull.center); print(table(tphdepth)) }
}
# if(verbose) cat("center depth:",hdepth.of.points(rbind(center))-1)
if(verbose) cat("center depth:",find.hdepths.tp(rbind(center),xy)-1)
if(verbose){print("end of computation of center"); print(center)}
if(dkmethod==1){
# inner hull of bag
xyi<-xy[hdepth>=k,,drop=FALSE] # cat("dim XYI", dim(xyi))
# 121028 some corrections for strange k situations
if(0 < length(xyi)) pdk<-xyi[chull(xyi[,1],xyi[,2]),,drop=FALSE]
# outer hull of bag
if( k > 1 ){
xyo<-xy[hdepth>=(k-1),,drop=FALSE]
pdk.1<-xyo[chull(xyo[,1],xyo[,2]),,drop=FALSE]
} else pdk.1 <- pdk
if(0 == length(xyi)) pdk <- pdk.1
if(verbose)cat("hull computed: pdk, pdk.1:")
if(verbose){print(pdk); print(pdk.1) }
if(debug.plots=="all"){
plot(xy,bty="n")
h<-rbind(pdk,pdk[1,]); lines(h,col="red",lty=2)
h<-rbind(pdk.1,pdk.1[1,]);lines(h,col="blue",lty=3)
points(center[1],center[2],pch=8,col="red")
}
exp.dk<-expand.hull(pdk,k)
exp.dk.1<-expand.hull(exp.dk,k-1) # pdk.1,k-1,20)
}else{
# define direction for hdepth search
num<-floor(2*c(417,351,171,85,67,43)[sum(n>c(1,50,100,150,200,250))]*precision)
num.h<-floor(num/2); angles<-seq(0,pi,length=num.h)
ang<-tan(pi/2-angles)
# standardization of data set xyxy is used
kkk<-k
if(verbose) print("find polygon with depth something higher than that of the bag")
if( kkk <= max(d.k[,2]) ){ # inner one ### 121030
ia<-1; a<-angles[ia]; xyt<-xyxy%*%c(cos(a),-sin(a)); xyto<-order(xyt)
# initial for upper part
ind.k<-xyto[kkk]; cutp<-c(xyxy[ind.k,1],-10)
dxy<-diff(range(xyxy))
pg<-rbind(c(cutp[1],-dxy,Inf),c(cutp[1],dxy,NA))
# initial for lower part
ind.kk<-xyto[n+1-kkk]; cutpl<-c(xyxy[ind.kk,1],10)
# pgl<-rbind(c(cutpl[1],dxy,Inf),c(cutpl[1],-dxy,NA))
pgl<-rbind(c(cutpl[1],dxy,-Inf),c(cutpl[1],-dxy,NA))
# the sign of inf doesn't matter
if(debug.plots=="all"){ plot(xyxy,type="p",bty="n")
text(xy,,1:n,col="blue")
hx<-xy[ind.k,c(1,1)]; hy<-xy[ind.k,c(2,2)]
segments(hx,hy,c(10,-10),hy+ang[ia]*(c(10,-10)-hx),lty=2)
text(hx+rnorm(1,,.1),hy+rnorm(1,,.1),ia)
}
if(verbose) cat("start of computation of the directions: ","kkk=",kkk) # 121030
for(ia in seq(angles)[-1]){
# determine critical points pnew and pnewl of direction a
# if(verbose) cat("ia",ia,angles[ia])
# 121030
a<-angles[ia]; angtan<-ang[ia]; xyt<-xyxy%*%c(cos(a),-sin(a)); xyto<-order(xyt)
ind.k <-xyto[kkk]; ind.kk<-xyto[n+1-kkk]; pnew<-xyxy[ind.k,]; pnewl<-xyxy[ind.kk,]
# if(verbose) if( 1 < sum(xyt == xyt[ind.k]) )print("WARNING: some points identical")
if(debug.plots=="all") points(pnew[1],pnew[2],col="red")
# new limiting lines are defined by pnew / pnewl and slope a
# find segment of polygon that is cut by new limiting line and cut
# if(ia>200) { #<show pg pgl>#; points(pnew[1],pnew[2],col="magenta",cex=6) }
if( abs(angtan)>1e10){ if(verbose) cat("kkk",kkk,"x=c case")
# case of vertical slope #print(pg);print(pnew);print(xyt);lines(pg,col="red",lwd=3)
# number of points left of point pnew that limit the polygon
pg.no<-sum(pg[,1]<pnew[1])
if( 0 < pg.no ){
# the polygon (segment pg.no) has to be cut at x==pnew[1]
cutp <- c(pnew[1], pg [pg.no, 2]+pg [pg.no, 3]*(pnew [1]-pg [pg.no ,1]))
pg<- rbind(pg[1:pg.no,], c(cutp,angtan), c(cutp[1]+dxy, cutp[2] +angtan*dxy,NA))
} else {
if(verbose) cat("!!! case degenerated UPPER polygon: pg.no==0")
# the limiting point pnew is above the beginning of the polygon
# therefore, the polygon reduces to line
pg <- rbind(pg[1,], c(pg[2,1:2],NA))
}
pg.nol<-sum(pgl[,1]>=pnewl[1])
if( 0 < pg.nol ){ #??2 ### 121204
cutpl<-c(pnewl[1],pgl[pg.nol,2]+pgl[pg.nol,3]*(pnewl[1]-pgl[pg.nol,1]))
pgl<-rbind(pgl[1:pg.nol,],c(cutpl,angtan),c(cutpl[1]-dxy, cutpl[2]-angtan*dxy,NA))
} else {
if(verbose) cat("!!! case degenerated LOWER polygon: pgl.no==0")
pgl <- rbind(pgl[1,], c(pgl[2,1:2],NA))
}
}else{ # if(verbose) cat("kkk",kkk,"normal case")
# normal case upper polygon
pg.inter<-pg[,2]-angtan*pg[,1]; pnew.inter<-pnew[2]-angtan*pnew[1]
pg.no<-sum(pg.inter<pnew.inter)
if(is.na(pg[pg.no,3])) pg[pg.no,3] <- -Inf # 121129 NaN/Na error
cutp<-cut.p.sl.p.sl(pnew,ang[ia],pg[pg.no,1:2],pg[pg.no,3])
pg<- rbind(pg[1:pg.no,], c(cutp,angtan), c(cutp[1]+dxy, cutp[2] +angtan*dxy,NA))
# normal case lower polygon
pg.interl<-pgl[,2]-angtan*pgl[,1]; pnew.interl<-pnewl[2]-angtan*pnewl[1]
pg.nol<-sum(pg.interl>pnew.interl)
if(is.na(pgl[pg.nol,3])) pgl[pg.nol,3] <- Inf # 121129 NaN/Na error
cutpl<-cut.p.sl.p.sl(pnewl,angtan,pgl[pg.nol,1:2],pgl[pg.nol,3])
pgl<-rbind(pgl[1:pg.nol,],c(cutpl,angtan),c(cutpl[1]-dxy, cutpl[2]-angtan*dxy,NA))
}
# if(kkk==KKK && ia == 51) { cat("ENDE: pgl"); print(pgl) }
# update pg, pgl completed
# PG<<-pg;PG.NO<<-pg.no;CUTP<<-cutp;DXY<<-dxy;PNEW<<-pnew;PGL<<-pgl;PG.NOL<<-pg.nol
#### ***********************
#### cat("angtan",angtan,"pg.no",pg.no,"pkt:",pnew)
# if(ia==stopp) lines(pg,type="b",col="green")
if(debug.plots=="all"){
points(pnew[1],pnew[2],col="red")
hx<-xyxy[ind.k,c(1,1)]; hy<-xyxy[ind.k,c(2,2)]
segments(hx,hy,c(10,-10),hy+ang[ia]*(c(10,-10)-hx),lty=2)
# text(hx+rnorm(1,,.1),hy+rnorm(1,,.1),ia)
# print(pg)
# if(ia==stopp) lines(pgl,type="b",col="green")
points(cutpl[1],cutpl[2],col="red")
hx<-xyxy[ind.kk,c(1,1)]; hy<-xyxy[ind.kk,c(2,2)]
segments(hx,hy,c(10,-10),hy+ang[ia]*(c(10,-10)-hx),lty=2)
# text(hx+rnorm(1,,.1),hy+rnorm(1,,.1),ia)
# print(pgl)
}
##show pg pgl##
}
# if(verbose) PG <<- pg; PGL <<- pgl
if(2<nrow(pg) && 2<nrow(pgl)){
# plot(xyxy[,1:2],xlim=c(-.5,+.5),ylim=c(-.5,.50))
# lines(pg,type="b",col="red"); lines(pgl,type="b",col="blue")
# remove first and last points and multiple points #<show pg pgl>#
limit<-1e-10
# pg <-pg [c(TRUE,(abs(diff(pg [,1]))>limit)|(abs(diff(pg [,2]))>limit)),] old#
idx <- c(TRUE,(abs(diff(pg [,1]))>limit)|(abs(diff(pg [,2]))>limit)) # 121008
if(any(idx==FALSE)){
pg <-pg[idx,]; pg[,3] <- c(diff(pg[,2])/diff(pg[,1]), NA)
}
# old reduction which caused some errors:
# pgl<-pgl[c(TRUE,(abs(diff(pgl[,1]))>limit)|(abs(diff(pgl[,2]))>limit)),] error##
# pgl<-pgl[c( (abs(diff(pgl[,1]))>limit)|(abs(diff(pgl[,2]))>limit),TRUE),] old#
idx <- c( (abs(diff(pgl[,1]))>limit)|(abs(diff(pgl[,2]))>limit),TRUE)#121008
if(any(idx==FALSE)){
pgl<-pgl[idx,]; pgl[,3] <- c(diff(pgl[,2])/diff(pgl[,1]), NA)
}
# add some tolerance in course of numerical problems
pgl[,2]<-pgl[,2] - .00001 #### 121004
# show pg pgl>>
pg<- pg [-nrow(pg ),][-1,,drop=FALSE]
pgl<-pgl[-nrow(pgl),][-1,,drop=FALSE]
# determine position according to the other polygon
# cat("relative position: lower polygon")
indl<-pos.to.pg(round(pgl,digits=10),round(pg,digits=10)) # 121126
# cat("relative position: upper polygon")
indu<-pos.to.pg(round(pg,digits=10),round(pgl,digits=10),TRUE)
sr<-sl<-NULL # ; ##show pg pgl>>
# right region
if(indu[(npg<-nrow(pg))]=="lower" & indl[1]=="higher"){
# cat("in if of right region: the upper polynom is somewhere lower")
# checking from the right: last point of lower polygon that is NOT ok
rnuml<-which(indl=="lower")[1]-1
# checking from the left: last point of upper polygon that is ok
rnumu<-npg+1-which(rev(indu=="higher"))[1]
# special case all points of lower polygon are upper
if(is.na(rnuml)) rnuml<-sum(pg[rnumu,1]<pgl[,1])
# special case all points of upper polygon are lower
if(is.na(rnumu)) rnumu<-sum(pg[,1]<pgl[rnuml,1])
xyl<-pgl[rnuml,]; xyu<-pg[rnumu,]
# cat("right"); print(rnuml); print(xyl)
# cat("right"); print(rnumu); print(xyu)
sr<-cut.p.sl.p.sl(xyl[1:2],xyl[3],xyu[1:2],xyu[3])
}
# left region
if(indl[(npgl<-nrow(pgl))]=="higher"&indu[1]=="lower"){
# cat("in if of left region: the upper polynom is somewhere lower")
# checking from the right: last point of lower polygon that is ok
lnuml<-npgl+1-which(rev(indl=="lower"))[1]
# checking from the left: last point of upper polygon that is NOT ok
lnumu<-which(indu=="higher")[1]-1
# special case all points of lower polygon are upper
if(is.na(lnuml)) lnuml<-sum(pg[lnumu,1]<pgl[,1])
# special case all points of upper polygon are lower
if(is.na(lnumu)) lnumu<-sum(pg[,1]<pgl[lnuml,1])
xyl<-pgl[lnuml,]; xyu<-pg[lnumu,]
# cat("left"); print(lnuml); print(xyl)
# cat("left"); print(lnumu); print(xyu)
sl<-cut.p.sl.p.sl(xyl[1:2],xyl[3],xyu[1:2],xyu[3])
}
# if(kkk==2){ ##show pg pgl##; INDU<<-indu; INDL<<-indl; PGL<<-pgl; PGU<<-pg}
pg<-rbind(pg [indu=="higher",1:2,drop=FALSE],sr,
pgl[indl=="lower", 1:2,drop=FALSE],sl)
if(debug.plots=="all") lines(rbind(pg,pg[1,]),col="red")
if(!any(is.na(pg))) pg<-pg[chull(pg[,1],pg[,2]),]
# if(kkk==7){ PG <<- pg }
} else {
if(2<nrow(pgl)){ #121204
pg <- rbind(pg[2,1:2],pgl[-c(1,length(pgl[,1])),1:2])
} else {
pg <- rbind(pg [-c(1,length(pg [,1])),1:2],pgl[2,1:2])
# rbind(pgl[2,1:2],pg[2,1:2])
}
}
if(verbose) cat("END of computation of the directions")
exp.dk<-cbind(pg[,1]*xysd[1]+xym[1],pg[,2]*xysd[2]+xym[2])
} else {
exp.dk <- NULL
}
if( 1 < kkk ) kkk<-kkk-1 # outer one
if(verbose) print("find polygon with depth a little bit lower than that of the bag")
ia<-1; a<-angles[ia]; xyt<-xyxy%*%c(cos(a),-sin(a)); xyto<-order(xyt)
# initial for upper part
ind.k<-xyto[kkk]; cutp<-c(xyxy[ind.k,1],-10)
dxy<-diff(range(xyxy))
pg<-rbind(c(cutp[1],-dxy,Inf),c(cutp[1],dxy,NA))
# initial for lower part
ind.kk<-xyto[n+1-kkk]; cutpl<-c(xyxy[ind.kk,1],10)
# pgl<-rbind(c(cutpl[1],dxy,Inf),c(cutpl[1],-dxy,NA))
pgl<-rbind(c(cutpl[1],dxy,-Inf),c(cutpl[1],-dxy,NA))
# the sign of inf doesn't matter
if(debug.plots=="all"){ plot(xyxy,type="p",bty="n")
text(xy,,1:n,col="blue")
hx<-xy[ind.k,c(1,1)]; hy<-xy[ind.k,c(2,2)]
segments(hx,hy,c(10,-10),hy+ang[ia]*(c(10,-10)-hx),lty=2)
text(hx+rnorm(1,,.1),hy+rnorm(1,,.1),ia)
}
if(verbose) cat("start of computation of the directions: ","kkk=",kkk) # 121030
for(ia in seq(angles)[-1]){
# determine critical points pnew and pnewl of direction a
# if(verbose) cat("ia",ia,angles[ia])
# 121030
a<-angles[ia]; angtan<-ang[ia]; xyt<-xyxy%*%c(cos(a),-sin(a)); xyto<-order(xyt)
ind.k <-xyto[kkk]; ind.kk<-xyto[n+1-kkk]; pnew<-xyxy[ind.k,]; pnewl<-xyxy[ind.kk,]
# if(verbose) if( 1 < sum(xyt == xyt[ind.k]) )print("WARNING: some points identical")
if(debug.plots=="all") points(pnew[1],pnew[2],col="red")
# new limiting lines are defined by pnew / pnewl and slope a
# find segment of polygon that is cut by new limiting line and cut
# if(ia>200) { #<show pg pgl>#; points(pnew[1],pnew[2],col="magenta",cex=6) }
if( abs(angtan)>1e10){ if(verbose) cat("kkk",kkk,"x=c case")
# case of vertical slope #print(pg);print(pnew);print(xyt);lines(pg,col="red",lwd=3)
# number of points left of point pnew that limit the polygon
pg.no<-sum(pg[,1]<pnew[1])
if( 0 < pg.no ){
# the polygon (segment pg.no) has to be cut at x==pnew[1]
cutp <- c(pnew[1], pg [pg.no, 2]+pg [pg.no, 3]*(pnew [1]-pg [pg.no ,1]))
pg<- rbind(pg[1:pg.no,], c(cutp,angtan), c(cutp[1]+dxy, cutp[2] +angtan*dxy,NA))
} else {
if(verbose) cat("!!! case degenerated UPPER polygon: pg.no==0")
# the limiting point pnew is above the beginning of the polygon
# therefore, the polygon reduces to line
pg <- rbind(pg[1,], c(pg[2,1:2],NA))
}
pg.nol<-sum(pgl[,1]>=pnewl[1])
if( 0 < pg.nol ){ ##??2 ### 121204
cutpl<-c(pnewl[1],pgl[pg.nol,2]+pgl[pg.nol,3]*(pnewl[1]-pgl[pg.nol,1]))
pgl<-rbind(pgl[1:pg.nol,],c(cutpl,angtan),c(cutpl[1]-dxy, cutpl[2]-angtan*dxy,NA))
} else {
if(verbose) cat("!!! case degenerated LOWER polygon: pgl.no==0")
pgl <- rbind(pgl[1,], c(pgl[2,1:2],NA))
}
}else{ # if(verbose) cat("kkk",kkk,"normal case")
# normal case upper polygon
pg.inter<-pg[,2]-angtan*pg[,1]; pnew.inter<-pnew[2]-angtan*pnew[1]
pg.no<-sum(pg.inter<pnew.inter)
if(is.na(pg[pg.no,3])) pg[pg.no,3] <- -Inf # 121129 NaN/Na error
cutp<-cut.p.sl.p.sl(pnew,ang[ia],pg[pg.no,1:2],pg[pg.no,3])
pg<- rbind(pg[1:pg.no,], c(cutp,angtan), c(cutp[1]+dxy, cutp[2] +angtan*dxy,NA))
# normal case lower polygon
pg.interl<-pgl[,2]-angtan*pgl[,1]; pnew.interl<-pnewl[2]-angtan*pnewl[1]
pg.nol<-sum(pg.interl>pnew.interl)
if(is.na(pgl[pg.nol,3])) pgl[pg.nol,3] <- Inf # 121129 NaN/Na error
cutpl<-cut.p.sl.p.sl(pnewl,angtan,pgl[pg.nol,1:2],pgl[pg.nol,3])
pgl<-rbind(pgl[1:pg.nol,],c(cutpl,angtan),c(cutpl[1]-dxy, cutpl[2]-angtan*dxy,NA))
}
# if(kkk==KKK && ia == 51) { cat("ENDE: pgl"); print(pgl) }
# update pg, pgl completed
# PG<<-pg;PG.NO<<-pg.no;CUTP<<-cutp;DXY<<-dxy;PNEW<<-pnew;PGL<<-pgl;PG.NOL<<-pg.nol
#### ---**************************
# cat("angtan",angtan,"pg.no",pg.no,"pkt:",pnew)
# if(ia==stopp) lines(pg,type="b",col="green")
if(debug.plots=="all"){
points(pnew[1],pnew[2],col="red")
hx<-xyxy[ind.k,c(1,1)]; hy<-xyxy[ind.k,c(2,2)]
segments(hx,hy,c(10,-10),hy+ang[ia]*(c(10,-10)-hx),lty=2)
# text(hx+rnorm(1,,.1),hy+rnorm(1,,.1),ia)
# print(pg)
# if(ia==stopp) lines(pgl,type="b",col="green")
points(cutpl[1],cutpl[2],col="red")
hx<-xyxy[ind.kk,c(1,1)]; hy<-xyxy[ind.kk,c(2,2)]
segments(hx,hy,c(10,-10),hy+ang[ia]*(c(10,-10)-hx),lty=2)
# text(hx+rnorm(1,,.1),hy+rnorm(1,,.1),ia)
# print(pgl)
}
##show pg pgl##
}
# if(verbose) PG <<- pg; PGL <<- pgl
if(2<nrow(pg) && 2<nrow(pgl)){
# plot(xyxy[,1:2],xlim=c(-.5,+.5),ylim=c(-.5,.50))
# lines(pg,type="b",col="red"); lines(pgl,type="b",col="blue")
# remove first and last points and multiple points #<show pg pgl>#
limit<-1e-10
# pg <-pg [c(TRUE,(abs(diff(pg [,1]))>limit)|(abs(diff(pg [,2]))>limit)),] old#
idx <- c(TRUE,(abs(diff(pg [,1]))>limit)|(abs(diff(pg [,2]))>limit)) # 121008
if(any(idx==FALSE)){
pg <-pg[idx,]; pg[,3] <- c(diff(pg[,2])/diff(pg[,1]), NA)
}
# old reduction which caused some errors:
# pgl<-pgl[c(TRUE,(abs(diff(pgl[,1]))>limit)|(abs(diff(pgl[,2]))>limit)),] error##
# pgl<-pgl[c( (abs(diff(pgl[,1]))>limit)|(abs(diff(pgl[,2]))>limit),TRUE),] old#
idx <- c( (abs(diff(pgl[,1]))>limit)|(abs(diff(pgl[,2]))>limit),TRUE)#121008
if(any(idx==FALSE)){
pgl<-pgl[idx,]; pgl[,3] <- c(diff(pgl[,2])/diff(pgl[,1]), NA)
}
# add some tolerance in course of numerical problems
pgl[,2]<-pgl[,2] - .00001 #### 121004
# show pg pgl>>
pg<- pg [-nrow(pg ),][-1,,drop=FALSE]
pgl<-pgl[-nrow(pgl),][-1,,drop=FALSE]
# determine position according to the other polygon
# cat("relative position: lower polygon")
indl<-pos.to.pg(round(pgl,digits=10),round(pg,digits=10)) # 121126
# cat("relative position: upper polygon")
indu<-pos.to.pg(round(pg,digits=10),round(pgl,digits=10),TRUE)
sr<-sl<-NULL # ; ##show pg pgl>>
# right region
if(indu[(npg<-nrow(pg))]=="lower" & indl[1]=="higher"){
# cat("in if of right region: the upper polynom is somewhere lower")
# checking from the right: last point of lower polygon that is NOT ok
rnuml<-which(indl=="lower")[1]-1
# checking from the left: last point of upper polygon that is ok
rnumu<-npg+1-which(rev(indu=="higher"))[1]
# special case all points of lower polygon are upper
if(is.na(rnuml)) rnuml<-sum(pg[rnumu,1]<pgl[,1])
# special case all points of upper polygon are lower
if(is.na(rnumu)) rnumu<-sum(pg[,1]<pgl[rnuml,1])
xyl<-pgl[rnuml,]; xyu<-pg[rnumu,]
# cat("right"); print(rnuml); print(xyl)
# cat("right"); print(rnumu); print(xyu)
sr<-cut.p.sl.p.sl(xyl[1:2],xyl[3],xyu[1:2],xyu[3])
}
# left region
if(indl[(npgl<-nrow(pgl))]=="higher"&indu[1]=="lower"){
# cat("in if of left region: the upper polynom is somewhere lower")
# checking from the right: last point of lower polygon that is ok
lnuml<-npgl+1-which(rev(indl=="lower"))[1]
# checking from the left: last point of upper polygon that is NOT ok
lnumu<-which(indu=="higher")[1]-1
# special case all points of lower polygon are upper
if(is.na(lnuml)) lnuml<-sum(pg[lnumu,1]<pgl[,1])
# special case all points of upper polygon are lower
if(is.na(lnumu)) lnumu<-sum(pg[,1]<pgl[lnuml,1])
xyl<-pgl[lnuml,]; xyu<-pg[lnumu,]
# cat("left"); print(lnuml); print(xyl)
# cat("left"); print(lnumu); print(xyu)
sl<-cut.p.sl.p.sl(xyl[1:2],xyl[3],xyu[1:2],xyu[3])
}
# if(kkk==2){ ##show pg pgl##; INDU<<-indu; INDL<<-indl; PGL<<-pgl; PGU<<-pg}
pg<-rbind(pg [indu=="higher",1:2,drop=FALSE],sr,
pgl[indl=="lower", 1:2,drop=FALSE],sl)
if(debug.plots=="all") lines(rbind(pg,pg[1,]),col="red")
if(!any(is.na(pg))) pg<-pg[chull(pg[,1],pg[,2]),]
# if(kkk==7){ PG <<- pg }
} else {
if(2<nrow(pgl)){ #121204
pg <- rbind(pg[2,1:2],pgl[-c(1,length(pgl[,1])),1:2])
} else {
pg <- rbind(pg [-c(1,length(pg [,1])),1:2],pgl[2,1:2])
# rbind(pgl[2,1:2],pg[2,1:2])
}
}
if(verbose) cat("END of computation of the directions")
exp.dk.1<-cbind(pg[,1]*xysd[1]+xym[1],pg[,2]*xysd[2]+xym[2])
if(is.null(exp.dk)) exp.dk <- exp.dk.1
# EX.1 <<- exp.dk.1; EX <<- exp.dk
if(verbose) print("End of find hulls, method two")
}
# if(max(d.k[,2])==k.1||nrow(d.k)==1) lambda<-0 else { ### 121027
if(nrow(d.k)==k.1 || nrow(d.k)==1) lambda<-0 else { # 121126
ind <- sum(d.k[,2] <= k.1) # complicated, may be wrong in case of missing depths
ind <- k.1 # 121123
ndk.1 <- d.k[ ind, 1]
ndk <- d.k[ ind+1, 1] # number inner
# (halve - number inner)/(number outer - number inner)
lambda <-(n/2-ndk) /(ndk.1 - ndk)
# lambda<-(n/2-d.k[k.1+1,1]) /(d.k[k.1,1]-d.k[k.1+1,1]) ### old
# cat(n/2, ndk,ndk.1, "k.1",k.1,"ind",ind)
}
if(verbose) cat("lambda",lambda)
cut.on.pdk.1<-find.cut.z.pg(exp.dk, exp.dk.1,center=center)
# print("HALLO"); print(cut.on.pdk.1)
cut.on.pdk <-find.cut.z.pg(exp.dk.1,exp.dk, center=center)
# expand inner polgon exp.dk
h1<-(1-lambda)*exp.dk+lambda*cut.on.pdk.1
# shrink outer polygon exp.dk.1
h2<-(1-lambda)*cut.on.pdk+lambda*exp.dk.1
h<-rbind(h1,h2);
h<-h[!is.nan(h[,1])&!is.nan(h[,2]),]
hull.bag<-h[chull(h[,1],h[,2]),]
# if(verbose){
# plot(xy); lines(exp.dk,col="red"); lines(exp.dk.1,col="blue");
# segments(cut.on.pdk[,1],cut.on.pdk[,2],exp.dk.1[,1],exp.dk.1[,2],col="red")
# segments(cut.on.pdk.1[,1],cut.on.pdk.1[,2],exp.dk[,1],exp.dk[,2],col="blue",lwd=3)
# points(cut.on.pdk.1,col="blue"); cat("cut.on.pdk.1"); print(cut.on.pdk.1)
# points(cut.on.pdk,col="red"); cat("cut.on.pdk"); print(cut.on.pdk)
# lines(hull.bag,col="green")
# }
if(verbose)cat("bag completed:")
#if(verbose) print(hull.bag)
if(debug.plots=="all"){ lines(hull.bag,col="red") }
hull.loop<-cbind(hull.bag[,1]-center[1],hull.bag[,2]-center[2])
hull.loop<-factor*hull.loop
hull.loop<-cbind(hull.loop[,1]+center[1],hull.loop[,2]+center[2])
if(verbose) cat("loop computed")
if(!very.large.data.set){
pxy.bag <-xydata[hdepth>= k ,,drop=FALSE]
pkt.cand <-xydata[hdepth==(k-1),,drop=FALSE]
pkt.not.bag<-xydata[hdepth< (k-1),,drop=FALSE]
if( 0 < length(pkt.cand) && 0 < length(hull.bag) ){
outside<-out.of.polygon(pkt.cand,hull.bag)
if(sum(!outside)>0)
pxy.bag <-rbind(pxy.bag, pkt.cand[!outside,])
if(sum( outside)>0)
pkt.not.bag<-rbind(pkt.not.bag, pkt.cand[ outside,])
}
}else {
extr<-out.of.polygon(xydata,hull.bag)
pxy.bag <-xydata[!extr,]
pkt.not.bag<-xydata[extr,,drop=FALSE]
}
if(length(pkt.not.bag)>0){
extr<-out.of.polygon(pkt.not.bag,hull.loop)
pxy.outlier<-pkt.not.bag[extr,,drop=FALSE]
if(0==length(pxy.outlier)) pxy.outlier<-NULL
pxy.outer<-pkt.not.bag[!extr,,drop=FALSE]
}else{
pxy.outer<-pxy.outlier<-NULL
}
if(verbose) cat("points of bag, outer points and outlier identified")
hull.loop<-rbind(pxy.outer,hull.bag)
hull.loop<-hull.loop[chull(hull.loop[,1],hull.loop[,2]),]
if(verbose) cat("end of computation of loop")
res<-list(
center=center,
hull.center=hull.center,
hull.bag=hull.bag,
hull.loop=hull.loop,
pxy.bag=pxy.bag,
pxy.outer=if(length(pxy.outer)>0) pxy.outer else NULL,
pxy.outlier=if(length(pxy.outlier)>0) pxy.outlier else NULL,
hdepths=hdepth,
is.one.dim=is.one.dim,
prdata=prdata,
# random.seed=random.seed, ###SEED
xy=xy,xydata=xydata
)
if(verbose) res<-c(res,list(exp.dk=exp.dk,exp.dk.1=exp.dk.1,hdepth=hdepth))
class(res)<-"bagplot"
return(res)
}
plot.bagplot <- function(x,
show.outlier=TRUE,# if TRUE outlier are shown
show.whiskers=TRUE, # if TRUE whiskers are shown
show.looppoints=TRUE, # if TRUE points in loop are shown
show.bagpoints=TRUE, # if TRUE points in bag are shown
show.loophull=TRUE, # if TRUE loop is shown
show.baghull=TRUE, # if TRUE bag is shown
add=FALSE, # if TRUE graphical elements are added to actual plot
pch=16,cex=.4, # to define further parameters of plot
verbose=FALSE, # tools for debugging
col.loophull="#aaccff", # Alternatives: #ccffaa, #ffaacc
col.looppoints="#3355ff", # Alternatives: #55ff33, #ff3355
col.baghull="#7799ff", # Alternatives: #99ff77, #ff7799
col.bagpoints="#000088", # Alternatives: #008800, #880000
transparency=FALSE,...
){
if(missing(x)) return(
"bagplot, version 2012/12/05, peter wolf"
)
# transparency flag and color flags have been proposed by wouter
if (transparency==TRUE) {
col.loophull = paste(col.loophull, "99", sep="")
col.baghull = paste(col.baghull, "99", sep="")
}
win<-function(dx,dy){ atan2(y=dy,x=dx) }
cut.z.pg<-function(zx,zy,p1x,p1y,p2x,p2y){
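    # intersection of the line through the origin (the shifted center) and the
    # point (zx, zy) with the line given by the segment (p1x, p1y)-(p2x, p2y);
    # degenerate configurations are patched in the special-case block below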
a2<-(p2y-p1y)/(p2x-p1x); a1<-zy/zx
sx<-(p1y-a2*p1x)/(a1-a2); sy<-a1*sx
sxy<-cbind(sx,sy)
h<-any(is.nan(sxy))||any(is.na(sxy))||any(Inf==abs(sxy))
if(h){ # print("NAN found"); print(cbind(a1,a2,zx,zy,sxy,p2x-p1x))
if(!exists("verbose")) verbose<-FALSE
if(verbose) cat("special")
# zx is zero ### 121030
h<-0==zx
sx<-ifelse(h,zx,sx); sy<-ifelse(h,p1y-a2*p1x,sy)
# points on line defined by line segment
a1 <- ifelse( abs(a1) == Inf, sign(a1)*123456789*1E10, a1) # 121030
a2 <- ifelse( abs(a2) == Inf, sign(a2)*123456789*1E10, a2)
# points on line defined by line segment
h<-0==(a1-a2) & sign(zx)==sign(p1x)
sx<-ifelse(h,p1x,sx); sy<-ifelse(h,p1y,sy)
h<-0==(a1-a2) & sign(zx)!=sign(p1x)
sx<-ifelse(h,p2x,sx); sy<-ifelse(h,p2y,sy)
# line segment vertical
# & center NOT ON line segment
h<-p1x==p2x & zx!=p1x & p1x!=0
sx<-ifelse(h,p1x,sx); sy<-ifelse(h,zy*p1x/zx,sy)
# & center ON line segment
h<-p1x==p2x & zx!=p1x & p1x==0
sx<-ifelse(h,p1x,sx); sy<-ifelse(h,0,sy)
# & center NOT ON line segment & point on line ### 121126
h<-p1x==p2x & zx==p1x & p1x!=0 # & sign(zy)==sign(p1y)
sx<-ifelse(h,zx,sx); sy<-ifelse(h,zy,sy)
# & center ON line segment & point on line
h<-p1x==p2x & zx==p1x & p1x==0 & sign(zy)==sign(p1y)
sx<-ifelse(h,p1x,sx); sy<-ifelse(h,p1y,sy)
h<-p1x==p2x & zx==p1x & p1x==0 & sign(zy)!=sign(p1y)
sx<-ifelse(h,p1x,sx); sy<-ifelse(h,p2y,sy)
# points identical to end points of line segment
h<-zx==p1x & zy==p1y; sx<-ifelse(h,p1x,sx); sy<-ifelse(h,p1y,sy)
h<-zx==p2x & zy==p2y; sx<-ifelse(h,p2x,sx); sy<-ifelse(h,p2y,sy)
# point of z is center
h<-zx==0 & zy==0; sx<-ifelse(h,0,sx); sy<-ifelse(h,0,sy)
sxy<-cbind(sx,sy)
} # end of special cases
#if(verbose){ print(rbind(a1,a2));print(cbind(zx,zy,p1x,p1y,p2x,p2y,sxy))}
if(!exists("debug.plots")) debug.plots<-"no"
if(debug.plots=="all"){
segments(sxy[,1],sxy[,2],zx,zy,col="red")
segments(0,0,sxy[,1],sxy[,2],col="green",lty=2) ##!!
points(sxy,col="red")
}
return(sxy)
}
find.cut.z.pg<-function(z,pg,center=c(0,0),debug.plots="no"){
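    # for every point in z, find where the ray from `center` through that point
    # crosses the polygon pg; plot.bagplot uses this to clip whiskers at the bag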
if(!is.matrix(z)) z<-rbind(z)
if(1==nrow(pg)) return(matrix(center,nrow(z),2,TRUE))
n.pg<-nrow(pg); n.z<-nrow(z)
z<-cbind(z[,1]-center[1],z[,2]-center[2])
pgo<-pg; pg<-cbind(pg[,1]-center[1],pg[,2]-center[2])
if(!exists("debug.plots")) debug.plots<-"no"
if(debug.plots=="all"){
plot(rbind(z,pg,0),bty="n"); points(z,pch="p")
lines(c(pg[,1],pg[1,1]),c(pg[,2],pg[1,2]))}
# find angles of pg und z
apg<-win(pg[,1],pg[,2])
apg[is.nan(apg)]<-0; a<-order(apg); apg<-apg[a]; pg<-pg[a,]
az<-win(z[,1],z[,2])
# find line segments
segm.no<-apply((outer(apg,az,"<")),2,sum)
segm.no<-ifelse(segm.no==0,n.pg,segm.no)
next.no<-1+(segm.no %% length(apg))
# compute cut points
cuts<-cut.z.pg(z[,1],z[,2],pg[segm.no,1],pg[segm.no,2],
pg[next.no,1],pg[next.no,2])
# rescale
cuts<-cbind(cuts[,1]+center[1],cuts[,2]+center[2])
return(cuts)
}
# find.cut.z.pg(EX, EX1,center=CE)
center<-hull.center<-hull.bag<-hull.loop<-pxy.bag<-pxy.outer<-pxy.outlier<-NULL
# random.seed <-
hdepths<-is.one.dim<-prdata<-xy<-xydata<-exp.dk<-exp.dk.1<-hdepth<-NULL
tphdepth<-tp<-NULL
#090216
bagplotobj<-x
for(i in seq(along=bagplotobj))
eval(parse(text=paste(names(bagplotobj)[i],"<-bagplotobj[[",i,"]]")))
if(is.one.dim){
if(!verbose) cat("data set one dimensional") # 121202
ROT<-round(prdata[[2]],digits=5); IROT<-round(solve(ROT),digits=5)
if(!add){ ## 121008 ## 121130
plot(xydata,type="n",bty="n",pch=16,cex=1, ...) # xlim=xlim, ylim=ylim, ...)
}
# find five points for box and whiskers
usr <- par()$usr; xlim <- usr[1:2]; ylim <- usr[3:4]
mins <- usr[c(1,3)]; ranges <- usr[c(2,4)] - mins
if(ROT[1,1]==0){ # cat("vertical case")
xydata <- cbind( mean(usr[1:2]) ,xydata[,2])
boxplotres<-boxplot(xydata[,2],plot=FALSE)
five<-cbind(mean(usr[1:2]),boxplotres$stat)
dx <- 0.1*(xlim[2]-xlim[1]); dy <- 0
idx.out <- if(0<length(boxplotres$out)) match(boxplotres$out, xydata[,2] ) else NULL
}
if(ROT[1,2]==0){ # cat("horizontal case")
xydata <- cbind( xydata[,1], mean(usr[3:4]))
boxplotres<-boxplot(xydata[,1],plot=FALSE)
five<-cbind(boxplotres$stat,mean(usr[3:4]))
dx <- 0; dy <- 0.1*(ylim[2]-ylim[1]) # 1/5 of del.y
idx.out <- if(0<length(boxplotres$out)) match(boxplotres$out, xydata[,1] ) else NULL
}
if(ROT[1,2]!=0 && ROT[1,1]!=0){
xytr<-xydata%*%ROT
boxplotres<-boxplot(xytr[,1],plot=FALSE)
five<-cbind(boxplotres$stat,xytr[1,2])%*%IROT
# find small vector for box height
vec <- five[5,] - five[1,]
vec.ortho <- c(vec[2],-vec[1]) * ranges / par()$pin
xy.delta <- vec.ortho * par()$pin[2:1] * ranges # plot region inches
xy.delta <- xy.delta / sqrt( sum(xy.delta * xy.delta) )
xy.delta <- xy.delta * .15 / ( sqrt(sum(abs(par()$pin*xy.delta/ranges)^2) ))
dx <- xy.delta[1]; dy <- xy.delta[2]
idx.out <- if(0<length(boxplotres$out)) match(boxplotres$out, xytr ) else NULL
}
# construct segments
# whiskers
segments(five[h<-c(1,5),1],five[h,2],five[h<-c(2,4),1],five[h,2], # col=col.looppoints,
lwd=2)
points(five[c(1,5),], cex=1, col=col.looppoints,pch=16)
# box
#segments(five[h<-2:4,1] + dx, five[h,2] + dy, five[h,1] - dx, five[h,2] - dy,
# col=col.bagpoints,lwd=2)
#segments(five[2,1] + (h<-c(-1,1))*dx, five[2,2] + h*dy,
# five[4,1] + h*dx, five[4,2] + h*dy,
# col=col.bagpoints,lwd=2)
polygon(five[c(2,4,4,2,2),1] + c(dx,dx,-dx,-dx,dx),
five[c(2,4,4,2,2),2] + c(dy,dy,-dy,-dy,dy),
col=col.baghull,lwd=1)
# median
segments(five[h<-3 ,1] + dx, five[h,2] + dy,
five[h,1] - dx, five[h,2] - dy,col="red",lwd=3)
# Outlier
if(0 < length(idx.out) && !is.na(idx.out[1])){
points(xydata[idx.out,,drop=FALSE], cex=1, pch=16,col="red")
}
# segments(five[3,1],five[3,2],five[3,1]+1*vec.ortho[1],
# five[3,2]+100*vec.ortho[2],col="green",lwd=5)
# segments(five[3,1],five[3,2],five[3,1]+1*vec1[1],
# five[3,2]+1*vec1[2],col="red",lwd=5)
# points(five,cex=2,col="green")
return("one dimensional boxplot plottet")
} else {
if(!add) plot(xydata,type="n",pch=pch,cex=cex,bty="n",...)
if(verbose) text(xy[,1],xy[,2],paste(as.character(hdepth))) # cex=2 needs fonts
# loop: --************
if(show.loophull){ # fill loop
h<-rbind(hull.loop,hull.loop[1,]); lines(h[,1],h[,2],lty=1)
polygon(hull.loop[,1],hull.loop[,2],col=col.loophull)
}
if(show.looppoints && 0 < length(pxy.outer)){ # points in loop
points(pxy.outer[,1],pxy.outer[,2],col=col.looppoints,pch=pch,cex=cex)
}
# bag: --*****************
if(show.baghull && 0 < length(hull.bag)){ # fill bag
h<-rbind(hull.bag,hull.bag[1,]); lines(h[,1],h[,2],lty=1)
polygon(hull.bag[,1],hull.bag[,2],col=col.baghull)
}
if(show.bagpoints && 0 < length(pxy.bag)){ # points in bag
points(pxy.bag[,1],pxy.bag[,2],col=col.bagpoints,pch=pch,cex=cex)
}
# whiskers
if(show.whiskers && 0 < length(pxy.outer)){
debug.plots<-"not"
if((n<-length(xy[,1]))<15){
segments(xy[,1],xy[,2],rep(center[1],n),rep(center[2],n),
col="red")
}else{
pkt.cut<-find.cut.z.pg(pxy.outer,hull.bag,center=center)
segments(pxy.outer[,1],pxy.outer[,2],pkt.cut[,1],pkt.cut[,2],
col="red")
}
}
# outlier: ---**********************
if(show.outlier && 0 < length(pxy.outlier)){ # points in loop
points(pxy.outlier[,1],pxy.outlier[,2],col="red",pch=pch,cex=cex)
}
# center:
if(exists("hull.center") && 2 < length(hull.center)){
h<-rbind(hull.center,hull.center[1,]); lines(h[,1],h[,2],lty=1)
polygon(hull.center[,1],hull.center[,2],col="orange")
}
if(!is.one.dim) points(center[1],center[2],pch=8,col="red")
if(verbose && 0 < length(exp.dk.1) ){
h<-rbind(exp.dk,exp.dk[1,]); lines(h,col="blue",lty=2)
h<-rbind(exp.dk.1,exp.dk.1[1,]); lines(h,col="black",lty=2, lwd=3)
if(exists("tphdepth") && 0<length(tphdepth))
text(tp[,1],tp[,2],as.character(tphdepth),col="green")
text(xy[,1],xy[,2],paste(as.character(hdepth))) # cex=2 needs special fonts
points(center[1],center[2],pch=8,col="red")
}
"bagplot plottet"
}
}
# find.hdepths <- function(xy, number.of.directions=181){ ### 121126
# xy <- as.matrix(xy)
# for( j in 1:2) {
# xy[,j] <- xy[,j] - min(xy[,j])
# if( 0 < (h <- max(xy[,j]))) xy[,j] <- xy[,j] / max(xy[,j])
# }
# phi <- c(seq(0,180,length=number.of.directions)[-1]*(2*pi/360))
# sinphi <- c(sin(phi),1); cosphi <- c(cos(phi),0)
# RM1 <- round(digits=6,rbind(cosphi,sinphi))
# hd <- rep(h<-length(xy[,1]),h)
# for( j in seq(along=sinphi)){
# xyt <- xy %*% RM1[,j]
# hd <- pmin(hd,rank(xyt,ties.method="min"), rank(-xyt,ties.method="min"))
# }
# ### xyt <- xy %*% RM1
# ### hd2 <- cbind(apply(xyt, 2, rank, ties.method="min"),
# ### apply(-xyt,2, rank, ties.method="min"))
# ### hd2 <- apply(hd2, 1, min)
# hd
# }
# find.hdepths.tp <- function(tp, data, number.of.directions=181){ ### 121130
# ### standardize dimensions ###
# xy <- as.matrix(data); tp <- as.matrix(rbind(tp)); n.tp <- dim(tp)[1]
# for( j in 1:2) {
# xy[,j] <- xy[,j] - (h <- min(xy[,j], na.rm=TRUE))
# tp[,j] <- tp[,j] - h
# if( 0 < (h <- max(xy[,j], na.rm=TRUE))){
# xy[,j] <- xy[,j]/h; tp[,j] <- tp[,j]/h
# }
# }
# ##loop over directions##
# phi <- c(seq(0,180,length=number.of.directions)[-1]*(2*pi/360))
# sinphi <- c(sin(phi),1); cosphi <- c(cos(phi),0)
# RM1 <- round(digits=6,rbind(cosphi,sinphi))
# hdtp <- rep(length(xy[,1]),length(tp[,1]))
# for( j in seq(along=sinphi)){ #print(j)
# xyt <- xy %*% RM1[,j]; tpt <- (tp %*% RM1[,j])[]
# xyt <- xyt[!is.na(xyt)] #; tpt <- sort(tpt)
# hdtp <- pmin(hdtp,(rank( c(tpt,xyt), ties.method="min"))[1:n.tp]
# -rank( tpt,ties.method="min")
# ,rank(-c(tpt,xyt), ties.method="min")[1:n.tp]
# -rank(-tpt,ties.method="min")
# )
# }
# hdtp
# }
# hdepth<-function(xy,data){
# ###function to compute the h-depths of points
# win<-function(dx,dy){ atan2(y=dy,x=dx) }
# if(missing(data)) data <- xy
# tp <- xy; xy <- data
# n.tp<-nrow(tp); n <- length(xy[,1])
# tphdepth<-rep(0,n.tp); dpi<-2*pi-0.000001
# for(j in 1:n.tp) {
# ### compute difference of coordinates of tp j and data
# dx<-tp[j,1]-xy[,1]; dy<-tp[j,2]-xy[,2]
# ### remove data points that are identical to tp j
# h <- tp[j,1] != xy[,1] & tp[j,2] != xy[,2]
# dx <- dx[h]; dy <- dy[h]; n <- length(dx)
# minusplus<-c(rep(-1,n),rep(1,n)) #### 070824
# ### compute angles of slopes of lines through tp j and data
# a<-win(dx,dy)+pi; h<-a<10; a<-a[h]; ident<-sum(!h)
# ### count number of angles that are lower than pi == points above tp j
# init<-sum(a < pi); a.shift<-(a+pi) %% dpi
# ### count points relative to the tp j in halve planes
# h<-cumsum(minusplus[order(c(a,a.shift))])
# ### find minimum number of points in a halve plane
# tphdepth[j]<-init+min(h)+1 ### +1 because of the point itself!!
# ### tphdepth[j]<-init+min(h)+ident; cat("SUMME",ident)
# }
# tphdepth
# }
# hdepth <- find.hdepths.tp #121202
#' Bivariate Boxplot
#'
#' \code{PlotBag()} creates a two-dimensional boxplot called a "bagplot" based
#' on two numerical variables x and y. \code{plot.PlotBag()} is the plotting
#' routine for a bagplot object. \code{compute.PlotBag()} contains the
#' computation logic for the object.
#'
#' A bagplot is a bivariate generalization of the well-known boxplot. It has
#' been proposed by Rousseeuw, Ruts, and Tukey. In the bivariate case the box
#' of the boxplot becomes a convex polygon, the bag of the bagplot, which
#' contains 50 percent of all points. The fence separates points within the
#' fence from points outside; it is computed by inflating the bag. The loop is
#' defined as the convex hull containing all points inside the fence. If all
#' points lie on a straight line you get a classical boxplot. \code{PlotBag()}
#' plots bagplots that are very similar to the ones described in Rousseeuw et
#' al. Remarks: the two-dimensional median is approximated; for large data sets
#' the error will be very small. On the other hand, it is not very wise to make
#' a (graphical) summary of e.g. 10 bivariate data points.
#'
#' In case you want to plot multiple (overlapping) bagplots, you may want plots
#' that are semi-transparent. For this you can use the \code{transparency}
#' flag. If \code{transparency==TRUE} the alpha layer is set to '99' (hex).
#' This causes the bagplots to appear semi-transparent, but ONLY if the output
#' device is PDF and opened using: \code{pdf(file="filename.pdf",
#' version="1.4")}. For this reason, the default is \code{transparency==FALSE}.
#' This feature as well as the arguments to specify different colors has been
#' proposed by Wouter Meuleman.
#'
#' @aliases PlotBag PlotBagPairs compute.bagplot plot.bagplot
#' @param x x values of a data set; in \code{PlotBag}: an object of class
#' \code{PlotBag} computed by \code{compute.PlotBag}
#' @param y y values of the data set
#' @param factor factor defining the loop
#' @param na.rm if \code{TRUE}, 'NA' values are removed, otherwise they are
#' replaced by the median
#' @param approx.limit if the number of data points exceeds \code{approx.limit}
#' a sample is used to compute some of the quantities; default: 300
#' @param show.outlier if \code{TRUE} outliers are shown
#' @param show.whiskers if \code{TRUE} whiskers are shown
#' @param show.looppoints if \code{TRUE} loop points are plotted
#' @param show.bagpoints if \code{TRUE} bag points are plotted
#' @param show.loophull if \code{TRUE} the loop is plotted
#' @param show.baghull if \code{TRUE} the bag is plotted
#' @param create.plot if \code{FALSE} no plot is created
#' @param add if \code{TRUE} the bagplot is added to an existing plot
#' @param pch sets the plotting character
#' @param cex sets characters size
#' @param dkmethod 1 or 2; there are two methods of approximating the bag.
#' Method 1 is very rough (only based on observations)
#' @param precision precision of approximation, default: 1
#' @param verbose automatic commenting of calculations
#' @param debug.plots if \code{TRUE} additional plots describing intermediate
#' results are constructed
#' @param col.loophull color of loop hull
#' @param col.looppoints color of the points of the loop
#' @param col.baghull color of bag hull
#' @param col.bagpoints color of the points of the bag
#' @param transparency see section details
#' @param dm x
#' @param trim x
#' @param main x
#' @param numeric.only x
#' @param \dots additional graphical parameters
#' @return \code{compute.bagplot} returns an object of class \code{bagplot}
#' that could be plotted by \code{plot.bagplot()}. An object of the bagplot
#' class is a list with the following elements: \code{center} is a two
#' dimensional vector with the coordinates of the center. \code{hull.center} is
#' a two column matrix, the rows are the coordinates of the corners of the
#' center region. \code{hull.bag} and \code{hull.loop} contain the coordinates
#' of the hull of the bag and the hull of the loop. \code{pxy.bag} shows you
#' the coordinates of the points of the bag. \code{pxy.outer} is the two column
#' matrix of the points that are within the fence. \code{pxy.outlier} represents
#' the outliers. The vector \code{hdepths} shows the depths of data points.
#' \code{is.one.dim} is \code{TRUE} if the data set is (nearly) one
#' dimensional. The dimensionality is decided by analysing the result of
#' \code{prcomp} which is stored in the element \code{prdata}. \code{xy} shows
#' you the data that are used for the bagplot. In the case of very large data
#' sets subsets of the data are used for constructing the bagplot. A data set
#' is very large if there are more data points than \code{approx.limit}.
#' \code{xydata} are the input data structured in a two column matrix.
#' @note Version of bagplot: 10/2012
#' @author Hans Peter Wolf <pwolf@@wiwi.uni-bielefeld.de>
#' @seealso \code{\link[graphics]{boxplot}}
#' @references P. J. Rousseeuw, I. Ruts, J. W. Tukey (1999): The bagplot: a
#' bivariate boxplot, \emph{The American Statistician}, vol. 53, no. 4,
#' 382--387
#' @keywords misc hplot
#' @examples
#'
#' # example: 100 random points and one outlier
#' dat <- cbind(rnorm(100) + 100, rnorm(100) + 300)
#' dat <- rbind(dat, c(105,295))
#'
#' PlotBag(dat,factor=2.5,create.plot=TRUE,approx.limit=300,
#' show.outlier=TRUE,show.looppoints=TRUE,
#' show.bagpoints=TRUE,dkmethod=2,
#' show.whiskers=TRUE,show.loophull=TRUE,
#' show.baghull=TRUE,verbose=FALSE)
#'
#' # example of Rousseeuw et al., see R-package rpart
#' cardata <- structure(as.integer( c(2560,2345,1845,2260,2440,
#' 2285, 2275, 2350, 2295, 1900, 2390, 2075, 2330, 3320, 2885,
#' 3310, 2695, 2170, 2710, 2775, 2840, 2485, 2670, 2640, 2655,
#' 3065, 2750, 2920, 2780, 2745, 3110, 2920, 2645, 2575, 2935,
#' 2920, 2985, 3265, 2880, 2975, 3450, 3145, 3190, 3610, 2885,
#' 3480, 3200, 2765, 3220, 3480, 3325, 3855, 3850, 3195, 3735,
#' 3665, 3735, 3415, 3185, 3690, 97, 114, 81, 91, 113, 97, 97,
#' 98, 109, 73, 97, 89, 109, 305, 153, 302, 133, 97, 125, 146,
#' 107, 109, 121, 151, 133, 181, 141, 132, 133, 122, 181, 146,
#' 151, 116, 135, 122, 141, 163, 151, 153, 202, 180, 182, 232,
#' 143, 180, 180, 151, 189, 180, 231, 305, 302, 151, 202, 182,
#' 181, 143, 146, 146)), .Dim = as.integer(c(60, 2)),
#' .Dimnames = list(NULL, c("Weight", "Disp.")))
#'
#' PlotBag(cardata,factor=3,show.baghull=TRUE,
#' show.loophull=TRUE,precision=1, dkmethod=2)
#'
#' title("car data Chambers/Hastie 1992")
#'
#' # points of y=x*x
#' PlotBag(x=1:30,y=(1:30)^2,verbose=FALSE,dkmethod=2)
#'
#' # one dimensional subspace
#' PlotBag(x=1:50,y=1:50)
#'
#' # pairwise bagplots
#' par(las=1)
#' PlotBagPairs(swiss[, 1:2],
#' main="Swiss Fertility and Socioeconomic Indicators (1888) Data")
#'
PlotBag <- function(x, y,
factor=3, # expanding factor for bag to get the loop
na.rm=FALSE, # should 'NAs' values be removed or exchanged
approx.limit=300, # limit
show.outlier=TRUE,# if TRUE outlier are shown
show.whiskers=TRUE, # if TRUE whiskers are shown
show.looppoints=TRUE, # if TRUE points in loop are shown
show.bagpoints=TRUE, # if TRUE points in bag are shown
show.loophull=TRUE, # if TRUE loop is shown
show.baghull=TRUE, # if TRUE bag is shown
create.plot=TRUE, # if TRUE a plot is created
add=FALSE, # if TRUE graphical elements are added to actual plot
pch=16,cex=.4, # some graphical parameters
dkmethod=2, # in 1:2; there are two methods for approximating the bag
precision=1, # controls precision of computation
verbose=FALSE,debug.plots="no", # tools for debugging
col.loophull="#aaccff", # Alternatives: #ccffaa, #ffaacc
col.looppoints="#3355ff", # Alternatives: #55ff33, #ff3355
col.baghull="#7799ff", # Alternatives: #99ff77, #ff7799
col.bagpoints="#000088", # Alternatives: #008800, #880000
transparency=FALSE, ... # to define further parameters of plot
){
if(missing(x)) return(
"bagplot, version 2012/12/05, peter wolf"
)
bo<-compute.bagplot(x=x,y=y,factor=factor,na.rm=na.rm,
approx.limit=approx.limit,dkmethod=dkmethod,
precision=precision,verbose=verbose,debug.plots=debug.plots)
if(create.plot){
plot(bo,
show.outlier=show.outlier,
show.whiskers=show.whiskers,
show.looppoints=show.looppoints,
show.bagpoints=show.bagpoints,
show.loophull=show.loophull,
show.baghull=show.baghull,
add=add,pch=pch,cex=cex,
verbose=verbose,
col.loophull=col.loophull,
col.looppoints=col.looppoints,
col.baghull=col.baghull,
col.bagpoints=col.bagpoints,
transparency=transparency, ...
)
}
invisible(bo)
}
# New interface
# PlotBag <- function(x, y,
# outl = list(pch, cex, col, bg),
# looppoints = list(pch, cex, col, bg),
# bagpoints = list(pch, cex, col, bg),
# loophull = list(lwd, col, border),
# baghull = list(lwd, col, border),
# factor=3, # expanding factor for bag to get the loop
# na.rm=FALSE, # should 'NAs' values be removed or exchanged
# approx.limit=300, # limit
# add=FALSE, # if TRUE graphical elements are added to actual plot
# dkmethod=2, # in 1:2; there are two methods for approximating the bag
# precision=1, # controls precision of computation
# verbose=FALSE,debug.plots="no", # tools for debugging
# transparency=FALSE,
# ... # to define further parameters of plot
# )
# #
# outlty, outlwd, outpch, outcex, outcol, outbg
#
#
# faces<-function(xy,which.row,fill=FALSE,face.type=1,
# nrow.plot,ncol.plot,scale=TRUE,byrow=FALSE,main,
# labels,print.info = TRUE,na.rm = FALSE,
# ncolors=20,
# col.nose=rainbow(ncolors), # nose
# col.eyes=rainbow(ncolors,start=0.6,end=0.85),# eyes
# col.hair=terrain.colors(ncolors), # hair
# col.face=heat.colors(ncolors), # face
# col.lips=rainbow(ncolors,start=0.0,end=0.2), # lips
# col.ears=rainbow(ncolors,start=0.0,end=0.2), # ears
#
# plot.faces=TRUE){ # 070831 pwolf
# if((demo<-missing(xy))){
# xy<-rbind(
# c(1,3,5),c(3,5,7),
# c(1,5,3),c(3,7,5),
# c(3,1,5),c(5,3,7),
# c(3,5,1),c(5,7,3),
# c(5,1,3),c(7,3,5),
# c(5,3,1),c(7,5,3),
# c(1,1,1),c(4,4,4),c(7,7,7)
# )
# labels<-apply(xy,1,function(x) paste(x,collapse="-"))
# }
|
#' Log-Likelihood Function for Parametric Lifetime Distributions
#'
#' @description
#' This function computes the log-likelihood value with respect to a given set
#' of parameters. In terms of *Maximum Likelihood Estimation* this function can
#' be optimized ([optim][stats::optim]) to estimate the parameters and
#' variance-covariance matrix of the parameters.
#'
#' @inheritParams ml_estimation
#' @inheritParams predict_quantile
#'
#' @return
#' Returns the log-likelihood value for the parameters in `dist_params` given
#' the data.
#'
#' @encoding UTF-8
#'
#' @template dist-params
#'
#' @references Meeker, William Q; Escobar, Luis A., Statistical methods for
#' reliability data, New York: Wiley series in probability and statistics, 1998
#'
#' @examples
#' # Reliability data preparation:
#' data <- reliability_data(
#' alloy,
#' x = cycles,
#' status = status
#' )
#'
#' # Example 1 - Evaluating Log-Likelihood function of two-parametric weibull:
#' loglik_weib <- loglik_function(
#' x = data,
#' dist_params = c(5.29, 0.33),
#' distribution = "weibull"
#' )
#'
#' # Example 2 - Evaluating Log-Likelihood function of three-parametric weibull:
#' loglik_weib3 <- loglik_function(
#' x = data,
#' dist_params = c(4.54, 0.76, 92.99),
#' distribution = "weibull3"
#' )
#'
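#' # Example 3 (sketch, assumption): maximizing the log-likelihood with
#' # stats::optim(); this is only an illustration, not the package's
#' # ml_estimation() routine. The scale parameter is optimized on the log
#' # scale to keep it positive.
#' loglik_weib_fun <- function(par) {
#'   loglik_function(
#'     x = data,
#'     dist_params = c(par[1], exp(par[2])),
#'     distribution = "weibull"
#'   )
#' }
#' mle_weib <- optim(
#'   par = c(5.29, log(0.33)),
#'   fn = loglik_weib_fun,
#'   control = list(fnscale = -1)
#' )
#' c(mle_weib$par[1], exp(mle_weib$par[2]))
#'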
#' @md
#'
#' @export
loglik_function <- function(x, ...) {
UseMethod("loglik_function")
}
#' @rdname loglik_function
#'
#' @export
loglik_function.wt_reliability_data <- function(
x,
wts = rep(1, nrow(x)),
dist_params,
distribution = c(
"weibull", "lognormal", "loglogistic",
"sev", "normal", "logistic",
"weibull3", "lognormal3", "loglogistic3",
"exponential", "exponential2"
),
...
) {
# Call `loglik_function.default()`:
loglik_function.default(
x = x$x,
status = x$status,
wts = wts,
dist_params = dist_params,
distribution = distribution
)
}
#' Log-Likelihood Function for Parametric Lifetime Distributions
#'
#' @inherit loglik_function description return references
#'
#' @inheritParams ml_estimation.default
#' @inheritParams predict_quantile
#'
#' @encoding UTF-8
#'
#' @template dist-params
#'
#' @seealso [loglik_function]
#'
#' @examples
#' # Vectors:
#' cycles <- alloy$cycles
#' status <- alloy$status
#'
#' # Example 1 - Evaluating Log-Likelihood function of two-parametric weibull:
#' loglik_weib <- loglik_function(
#' x = cycles,
#' status = status,
#' dist_params = c(5.29, 0.33),
#' distribution = "weibull"
#' )
#'
#' # Example 2 - Evaluating Log-Likelihood function of three-parametric weibull:
#' loglik_weib3 <- loglik_function(
#' x = cycles,
#' status = status,
#' dist_params = c(4.54, 0.76, 92.99),
#' distribution = "weibull3"
#' )
#'
#' @md
#'
#' @export
loglik_function.default <- function(x,
status,
wts = rep(1, length(x)),
dist_params,
distribution = c(
"weibull", "lognormal", "loglogistic",
"sev", "normal", "logistic",
"weibull3", "lognormal3", "loglogistic3",
"exponential", "exponential2"
),
...
) {
distribution <- match.arg(distribution)
check_dist_params(dist_params, distribution)
loglik_function_(
x = x,
status = status,
wts = wts,
dist_params = dist_params,
distribution = distribution
)
}
# Helper function for log-likelihood calculation:
loglik_function_ <- function(x,
status,
wts,
dist_params,
distribution,
log_scale = FALSE
) {
d <- status
if (std_parametric(distribution) == "exponential") {
mu <- NULL
sig <- dist_params[1]
thres <- dist_params[2]
} else {
mu <- dist_params[1]
sig <- dist_params[2]
thres <- dist_params[3]
}
if (log_scale) sig <- exp(sig)
# Threshold model:
if (!is.na(thres)) {
x <- x - thres
## Restriction of threshold models, i.e. x > threshold parameter:
subs <- x > 0
x <- x[subs]
d <- d[subs]
wts <- wts[subs]
}
# Use distribution without threshold:
distribution <- std_parametric(distribution)
## Standardize x:
z <- standardize(x = x, dist_params = c(mu, sig), distribution = distribution)
## Switch between distributions:
switch(distribution,
"weibull" = ds <- dsev(z) / (sig * x),
"lognormal" = ds <- stats::dnorm(z) / (sig * x),
"loglogistic" = ds <- stats::dlogis(z) / (sig * x),
"sev" = ds <- dsev(z) / sig,
"normal" = ds <- stats::dnorm(z) / sig,
"logistic" = ds <- stats::dlogis(z) / sig,
"exponential" = ds <- stats::dexp(z) / sig
)
ps <- p_std(z, distribution)
# Compute log-likelihood:
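  # Failures (status == 1) contribute the log density, right-censored units
  # (status == 0) contribute the log survival probability 1 - F(z); the vector
  # `wts` allows weighted observations.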
logL_i <- d * log(ds) + (1 - d) * log(1 - ps)
logL <- sum(wts * logL_i)
logL
}
#' Log-Likelihood Profile Function for Parametric Lifetime Distributions with Threshold
#'
#' @description
#' This function evaluates the log-likelihood with respect to a given threshold
#' parameter of a parametric lifetime distribution. In terms of
#' *Maximum Likelihood Estimation* this function can be optimized
#' ([optim][stats::optim]) to estimate the threshold parameter.
#'
#'
#' @inheritParams ml_estimation
#' @param thres A numeric value for the threshold parameter.
#' @param distribution Supposed parametric distribution of the random variable.
#'
#' @return
#' Returns the log-likelihood value for the threshold parameter `thres` given
#' the data.
#'
#' @encoding UTF-8
#'
#' @references Meeker, William Q; Escobar, Luis A., Statistical methods for
#' reliability data, New York: Wiley series in probability and statistics, 1998
#'
#' @examples
#' # Reliability data preparation:
#' data <- reliability_data(
#' alloy,
#' x = cycles,
#' status = status
#' )
#'
#' # Determining the optimal loglikelihood value:
#' ## Range of threshold parameter must be smaller than the first failure:
#' threshold <- seq(
#' 0,
#' min(data$x[data$status == 1]) - 0.1,
#' length.out = 50
#' )
#'
#' ## loglikelihood value with respect to threshold values:
#' profile_logL <- loglik_profiling(
#' x = data,
#' thres = threshold,
#' distribution = "weibull3"
#' )
#'
#' ## Threshold value (among the candidates) that maximizes the
#' ## loglikelihood:
#' threshold[which.max(profile_logL)]
#'
#' ## plot:
#' plot(
#' threshold,
#' profile_logL,
#' type = "l"
#' )
#' abline(
#' v = threshold[which.max(profile_logL)],
#' h = max(profile_logL),
#' col = "red"
#' )
#'
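#' ## Sketch (assumption): maximizing the profile log-likelihood directly with
#' ## stats::optimize() instead of evaluating it on a grid:
#' optimize(
#'   f = function(p) {
#'     loglik_profiling(x = data, thres = p, distribution = "weibull3")
#'   },
#'   interval = c(0, min(data$x[data$status == 1]) - 0.1),
#'   maximum = TRUE
#' )
#'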
#' @md
#'
#' @export
loglik_profiling <- function(x, ...) {
UseMethod("loglik_profiling")
}
#' @rdname loglik_profiling
#'
#' @export
loglik_profiling.wt_reliability_data <- function(
x,
wts = rep(1, nrow(x)),
thres,
distribution = c(
"weibull3", "lognormal3",
"loglogistic3", "exponential2"
),
...
) {
# Call `loglik_profiling.default()`:
loglik_profiling.default(
x = x$x,
status = x$status,
wts = wts,
thres = thres,
distribution = distribution
)
}
#' Log-Likelihood Profile Function for Parametric Lifetime Distributions with Threshold
#'
#' @inherit loglik_profiling description return references
#'
#' @inheritParams ml_estimation.default
#' @param thres A numeric value for the threshold parameter.
#' @param distribution Supposed parametric distribution of the random variable.
#'
#' @encoding UTF-8
#'
#' @seealso [loglik_profiling]
#'
#' @examples
#' # Vectors:
#' cycles <- alloy$cycles
#' status <- alloy$status
#'
#' # Determining the optimal loglikelihood value:
#' ## Range of threshold parameter must be smaller than the first failure:
#' threshold <- seq(
#' 0,
#' min(cycles[status == 1]) - 0.1,
#' length.out = 50
#' )
#'
#' ## loglikelihood value with respect to threshold values:
#' profile_logL <- loglik_profiling(
#' x = cycles,
#' status = status,
#' thres = threshold,
#' distribution = "weibull3"
#' )
#'
#' ## Threshold value (among the candidates) that maximizes the
#' ## loglikelihood:
#' threshold[which.max(profile_logL)]
#'
#' ## plot:
#' plot(
#' threshold,
#' profile_logL,
#' type = "l"
#' )
#' abline(
#' v = threshold[which.max(profile_logL)],
#' h = max(profile_logL),
#' col = "red"
#' )
#'
#' @md
#'
#' @export
loglik_profiling.default <- function(x,
status,
wts = rep(1, length(x)),
thres,
distribution = c(
"weibull3", "lognormal3",
"loglogistic3", "exponential2"
),
...
) {
distribution <- match.arg(distribution)
loglik_prof_vectorized <- Vectorize(
FUN = loglik_profiling_,
vectorize.args = "thres"
)
loglik_prof_vectorized(
x = x,
status = status,
wts = wts,
thres = thres,
distribution = distribution
)
}
# Function to perform profiling with `optim()` routine:
loglik_profiling_ <- function(x,
status,
wts,
thres,
distribution
) {
d <- status
x <- x - thres
## Restriction of threshold models, i.e. x > threshold parameter:
subs <- x > 0
x <- x[subs]
d <- d[subs]
wts <- wts[subs]
## Set distribution to parametric version without threshold:
distribution <- std_parametric(distribution)
## Initial parameters of two-parametric model:
start_dist_params <- start_params(
x = x,
status = d,
distribution = distribution
)
## Use log scale:
n_par <- length(start_dist_params)
start_dist_params[n_par] <- log(start_dist_params[n_par])
## Log-Likelihood profiling:
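  ## For the fixed threshold, the remaining location and scale parameters are
  ## re-estimated; the maximized value returned below is the profile
  ## log-likelihood at `thres`.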
logL_profile <- stats::optim(
par = start_dist_params,
fn = loglik_function_,
method = "BFGS",
control = list(fnscale = -1),
x = x,
status = d,
wts = wts,
distribution = distribution,
log_scale = TRUE
)
logL_profile$value
}
|
/R/likelihood_functions.R
|
no_license
|
Tim-TU/weibulltools
|
R
| false | false | 11,059 |
r
|
|
#Stat R 502 Final Project
# Data manipulation and cleaning
library(magrittr) # provides the %<>% assignment pipe used throughout
Projectraw <- read.csv("train.csv")
Titanic.data <- read.csv("train.csv")
View(Projectraw)
dim(Projectraw)
Titanic.data %<>% na.omit
str(Titanic.data)
##making the response variable a categorical response.
Titanic.data$Survived %<>% as.factor
##making class a factor
Titanic.data$Pclass %<>% as.factor
View(Titanic.data)
###########################Scratch code doesn't work####################################
# Trying to recode sibsp and parch
#if age>18 then spouse = sibsp
#else spouse = 0
#if age <18 then siblings = sibsp
#else siblings = 0
#if age >18 then children = parch
#else children = 0
#if age <18 then parents = parch
#else parents = 0
###########################End of Scratch code########################################################################
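# For reference, the recode sketched above can be done without loops using
# vectorised ifelse() calls (shown with the 20-year cutoff actually used in
# the loops below, which recompute these same columns):
Titanic.data$spouse   <- ifelse(Titanic.data$Age > 20, Titanic.data$SibSp, 0)
Titanic.data$siblings <- ifelse(Titanic.data$Age < 20, Titanic.data$SibSp, 0)
Titanic.data$children <- ifelse(Titanic.data$Age > 20, Titanic.data$Parch, 0)
Titanic.data$parents  <- ifelse(Titanic.data$Age < 20, Titanic.data$Parch, 0)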
names(Titanic.data)
##Spouse
Titanic.data$spouse <- rep(0,length(Titanic.data$Sex))
for (i in 1:length(Titanic.data$spouse)){
if (Titanic.data$Age[i]>20){
Titanic.data$spouse[i] <- Titanic.data$SibSp[i]
}
else {
Titanic.data$spouse[i] <- 0
}
}
#getting rid of the multiple spouses - they represent people at the cutoff age that have siblings on board.
for (i in 1:length(Titanic.data$spouse)){
if (Titanic.data$spouse[i]>1){
Titanic.data$spouse[i] <- 0
}
}
max(Titanic.data$spouse)
#convert to a factor only after the numeric clean-up above (">" and max() are not defined for factors)
Titanic.data$spouse %<>% as.factor
#Spouse finished
#Start of coding the siblings variable.
Titanic.data$siblings <- rep(0,length(Titanic.data$Sex))
for (i in 1:length(Titanic.data$siblings)){
if (Titanic.data$Age[i]<20){
Titanic.data$siblings[i] <- Titanic.data$SibSp[i]
}
else {
Titanic.data$siblings[i] <- 0
}
}
for (i in 1:length(Titanic.data$siblings)){
if (Titanic.data$SibSp[i]>1){
Titanic.data$siblings[i] <- Titanic.data$SibSp[i]
}
}
##Finished coding siblings
##coding children
#if age >18 then children = parch
#else children = 0
Titanic.data$children <- rep(0,length(Titanic.data$Sex))
for (i in 1:length(Titanic.data$children)){
if (Titanic.data$Age[i]>20){
Titanic.data$children[i] <- Titanic.data$Parch[i]
}
else {
Titanic.data$children[i] <- 0
}
}
for (i in 1:length(Titanic.data$children)){
if (Titanic.data$Parch[i]>2){
Titanic.data$children[i] <- Titanic.data$Parch[i]
}
}
#finished coding children
#coding parents
#if age <18 then parents = parch
#else parents = 0
Titanic.data$parents <- rep(0,length(Titanic.data$Sex))
for (i in 1:length(Titanic.data$parents)){
if (Titanic.data$Age[i]<20){
Titanic.data$parents[i] <- Titanic.data$Parch[i]
}
else {
Titanic.data$parents[i] <- 0
}
}
for (i in 1:length(Titanic.data$parents)){
if ((Titanic.data$Age[i]<20 & Titanic.data$SibSp[i]>1)){
Titanic.data$parents[i] <- Titanic.data$Parch[i]
}
}
#Finished coding parents
View(Titanic.data)
###On to the age variable
##Age groups
Titanic.data$Age2 <- cut(Titanic.data$Age,c(0,5,12,18,30,60,80))
str(Titanic.data$Age2)
View(Titanic.data)
levels(Titanic.data$Age2) <- c("(0,5]","(5,12]","(12,18]","(18,30]","(30,60]","(60,80]")
##Binary
Titanic.data$child <- rep(0,length(Titanic.data$Sex))
for (i in 1:length(Titanic.data$child)){
if (Titanic.data$Age[i]<15){
Titanic.data$child[i] <- 1
}
else {
Titanic.data$child[i] <- 0
}
}
str(Titanic.data$child)
Titanic.data$child %<>% as.factor
##End of Age
str(Titanic.data)
View(Titanic.data)
#creating the output file
?write.csv
write.csv(Titanic.data,file = "Project.502")
###########Final Data set
Project.final <- Project3
str(Project.final)
Project.final$siblings2 <- NULL
Project.final$siblings3 <- NULL
str(Project.final)
write.csv(Project.final,file="FinalProjectData502.csv")
|
/Logisitic_Regression/502_Project_Files/Final_Code/data_manipulation.R
|
no_license
|
SherberttheScientist/R-Statistics
|
R
| false | false | 3,708 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/databases.R
\name{load_db_deprecated}
\alias{load_db_deprecated}
\title{Load Database connections into Global Environment}
\usage{
load_db_deprecated(name = NULL)
}
\arguments{
\item{name}{(OPTIONAL) name of ODBC database source}
}
\value{
Copy of loaded database
}
\description{
Load Database connections into Global Environment
}
|
/man/load_db_deprecated.Rd
|
permissive
|
dshurick/shurtools
|
R
| false | true | 410 |
rd
|
library(ape)
testtree <- read.tree("2021_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="2021_0_unrooted.txt")
|
/codeml_files/newick_trees_processed/2021_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false | false | 135 |
r
|
rm(list=ls())
require(readxl)
require(reshape2)
require(ggplot2)
require(GGally)
require(factoextra)
ADD_PLOT_CONSTRAINTS=T
INCLUDE_LOG_SCALE_TRACE=F
DATE = "2021-05-04"
# Read in Gelman-Rubin RHat results
if(file.exists(paste(DATE, '_MCMCSTATmprsf_Diagnostics.xlsx', sep='', collapse=''))){
df.prsf = data.frame(read_xlsx(paste(DATE, '_MCMCSTATmprsf_Diagnostics.xlsx', sep='', collapse=''))
, stringsAsFactors = F, row.names = 1)
}else{
df.prsf = data.frame(read.csv(paste(DATE, '_MCMCSTATmprsf_Diagnostics.csv', sep='', collapse=''))
, stringsAsFactors = F)
}
# Constraints
df.constraints = data.frame(read_xlsx('MCMCSTAT_constraints.xlsx')
, stringsAsFactors = F, row.names = 1)
# Read in chain summaries. Nested because I'm bad at regex
v.chains = grep(value = T, list.files(path = 'OUTPUT/', pattern = DATE)
, pattern='chain')
# Read in chains
ls.chains = sapply(v.chains, function(x){
REGION = strsplit(x, '_')[[1]][2]
# Read in csv
res = read.csv(paste("OUTPUT/", x, sep='', collapse = ''))
# Data-frame
res = as.data.frame(res)
# Remove last column since I still can't write matlab outputs
res = res[,-ncol(res)]
# Remove the initial condition columns for ease
v.icCols = c("S_c", "S_a", "S_rc", "S_fc", "S_e"
,"E_c", "E_a", "E_rc", "E_fc", "E_e"
,"Isym_c", "Isym_a", "Isym_rc", "Isym_fc", "Isym_e"
,"Iasym_c", "Iasym_a", "Iasym_rc", "Iasym_fc", "Iasym_e"
,"Hsub_c", "Hsub_a", "Hsub_rc", "Hsub_fc", "Hsub_e"
,"Hcri_c", "Hcri_a", "Hcri_rc", "Hcri_fc", "Hcri_e"
,"D_c", "D_a", "D_rc", "D_fc", "D_e"
,"R_c", "R_a", "R_rc", "R_fc", "R_e")
res = res[,!colnames(res) %in% v.icCols]
# Add positions
res$idx = 1:(table(res$i_chain)[1])
# add region
res$region = REGION
# add nVars
res$nVars = strsplit(strsplit(x, 'NVarsFit')[[1]][2], '_')[[1]][1]
# Return
list(res)
})
names(ls.chains) = sapply(names(ls.chains)
, function(x) paste(strsplit(x, '_')[[1]][2]
, strsplit(strsplit(x, 'NVarsFit')[[1]][2], '_')[[1]][1]
, sep=''
, collapse=''))
# combine by name
temp.names = unique(names(ls.chains))
temp.chains = sapply(temp.names, function(x){
temp.in = ls.chains[names(ls.chains)==x]
res = do.call('rbind', temp.in)
return(list(res))
})
ls.chainComb = temp.chains
v.col_headers = colnames(df.constraints)
v.variables = v.col_headers[!v.col_headers %in% c('idx', 'i_chain', 'LogLikelihood', 'region', 'nVars')]
# (1) Pairplots -----------------------------------------------------------
# Contours
if(T){
for(REGION in c('nyc5', 'sflor5', 'wash5')){
for(i in 1:10){
b.chains_in = names(ls.chains)==REGION
test = do.call('rbind', ls.chains[b.chains_in])
test = test[test$i_chain == i,v.variables[1:5]] # obtain variables
p_pairs = ggpairs(test, lower = list(continuous = "density"))
# Combinations
v.names = 1:ncol(test)
names(v.names) = colnames(test)
v.combs = t(combn(v.names, 2))
for(j in 1:nrow(v.combs)){
j_comb = v.combs[j,]
p_pairs[j_comb[2], j_comb[1]] = p_pairs[j_comb[2], j_comb[1]] +
scale_x_continuous(limits=c(df.constraints[j_comb[1]][1,1]
, df.constraints[j_comb[1]][2,1])) +
scale_y_continuous(limit=c(df.constraints[j_comb[2]][1,1]
, df.constraints[j_comb[2]][2,1]))
}
for(j in 1:ncol(test)){
p_pairs[j, j] = p_pairs[j, j] +
scale_x_continuous(limits=c(df.constraints[1,j]
, df.constraints[2,j]))
}
      ggsave(paste('OUTPUT/MCMC Figures/', DATE, '_', REGION, '_contour_', i, '.png', sep='', collapse='')
, p_pairs
, 'png'
, width = 12
, height = 12)
}
}
}
# (2) Traceplots ----------------------------------------------------------
ls.plotChains = lapply(ls.chainComb, function(x){
n_rows = nrow(x)
n_vars = unique(x$nVars)
# Add constraints for plotting
temp_df = x
temp_vars = colnames(x)[colnames(x) %in% colnames(df.constraints)]
if(ADD_PLOT_CONSTRAINTS){
temp_df[n_rows+(1:2),] = temp_df[n_rows,] # duplicate last rows instead of rbind
temp_df[n_rows+(1:2), temp_vars] = df.constraints[,temp_vars]
temp_df[n_rows+(1:2), 'idx'] = NA
}
# melt
ret = melt(temp_df, c('idx', 'i_chain', 'LogLikelihood', 'region', 'nVars'))
ret$i_chain = as.character(ret$i_chain)
return(ret)
})
for(i_chain in names(ls.plotChains)){
temp.chains = ls.plotChains[[i_chain]]
p.traces = ggplot(temp.chains, aes(x = idx, y = value, color = i_chain)) +
theme_grey(base_size=22) +
geom_line(alpha = 0.5) +
facet_wrap('variable', scales = 'free') +
xlab('iterations') +
ggtitle(i_chain)
ggsave(paste('OUTPUT/MCMC Figures/', DATE, '_', i_chain, '_TracePlots.png', sep='', collapse='')
, p.traces, height = 8, width = 14)
if(INCLUDE_LOG_SCALE_TRACE){
p.traces_log = ggplot(temp.chains, aes(x = idx, y = value, color = i_chain)) +
theme_grey(base_size=14) +
geom_line(alpha = 0.5) +
facet_wrap('variable', scales = 'free') +
xlab('iterations') +
ggtitle(i_chain) +
scale_y_log10()
ggsave(paste('OUTPUT/MCMC Figures/', DATE, '_', i_chain, '_TracePlots_log.png', sep='', collapse='')
, p.traces_log, height = 8, width = 12)
}
}
# (3) RHats ---------------------------------------------------------------
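# (the horizontal reference lines in the plot below mark the commonly used
#  Gelman-Rubin convergence cut-offs of 1.01 and 1.1)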
temp.prsf = df.prsf
temp.prsf[temp.prsf==0] = NA
melt.prsf = melt(temp.prsf, c('region', 'n_vars', 'n_chains'))
melt.prsf = na.omit(melt.prsf)
melt.prsf$value = as.numeric(as.character(melt.prsf$value))
melt.prsf$variable = factor(melt.prsf$variable, levels = rev(levels(melt.prsf$variable)))
melt.prsf$n_vars = paste("N vars fit:", melt.prsf$n_vars)
p.rhats = ggplot(melt.prsf, aes(x = variable, y = value, color = region)) +
geom_point(size = 3) +
scale_y_log10() +
geom_hline(color = 'red', yintercept = 1.01) +
geom_hline(color = 'blue', yintercept = 1.1) +
theme_grey(base_size = 12) +
coord_flip() +
ylab('RHat') +
xlab('') +
facet_wrap('n_vars')
p.rhats
ggsave(paste('OUTPUT/MCMC Figures/', DATE, '_GMBConvergenceRhats.png', sep='', collapse='')
, p.rhats, height = 4, width = 6)
|
/MCMC_CODE/Matlab/MCMC_Visualize_traces.R
|
no_license
|
lopmanlab/Serological_Shielding
|
R
| false | false | 6,574 |
r
|
#' Johnson-Lewin model
#'
#'
#' @param temp temperature in degrees centigrade
#' @param e activation energy (eV)
#' @param eh high temperature de-activation energy (eV)
#' @param topt optimum temperature (degrees centigrade; converted to K internally)
#' @param r0 scaling parameter
#' @author Daniel Padfield
#' @references Johnson, Frank H., and Isaac Lewin. "The growth rate of E. coli in relation to temperature, quinine and coenzyme." Journal of Cellular and Comparative Physiology 28.1 (1946): 47-75.
#'
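#' @examples
#' # A minimal usage sketch; the parameter values below are illustrative
#' # assumptions only, not fitted estimates.
#' temps <- seq(0, 45, by = 0.5)
#' rates <- johnsonlewin_1946(temp = temps, r0 = 1e9, e = 0.5, eh = 2.5, topt = 25)
#' plot(temps, rates, type = "l", xlab = "Temperature (C)", ylab = "Rate")
#'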
#' @export johnsonlewin_1946
johnsonlewin_1946 <- function(temp, r0, e, eh, topt){
  k <- 8.62e-05 # Boltzmann constant in eV K^-1
boltzmann.term <- r0*exp(-e/(k*(temp + 273.15)))
inactivation.term <- 1/(1 + exp((-1/(k*(temp + 273.15)))* (eh - ((eh/(topt + 273.15)) + k*log(e/(eh - e)))*(temp + 273.15))))
return(boltzmann.term * inactivation.term)
}
|
/R/johnsonlewin_1946.R
|
no_license
|
juadiegaitan/rTPC
|
R
| false | false | 793 |
r
|
library(readr)
library(data.table)
library(proxy)
library(qlcMatrix)
library(cccd)
library(igraph)
setwd("/home/branden/Documents/kaggle/walmart")
# Load data
ts1Trans <- data.table(read_csv("./data_trans/ts1Trans3_netScans_abs.csv"))
# Department distance/similarity
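# (the block below computes cosine similarity between department columns,
#  keeps each department's 4 nearest neighbours as a weighted graph, adds
#  self-loops, normalises the resulting adjacency matrix, and uses it to
#  smooth the per-visit department counts)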
ts1_dept <- as.matrix(ts1Trans[, 47:115, with=FALSE], nrow=nrow(ts1Trans))
ts1_dept_Matrix <- Matrix(ts1_dept)
ts1_cosSparse <- as.matrix(cosSparse(ts1_dept_Matrix))
ts1_dist <- pr_simil2dist(ts1_cosSparse)
ts1_dist_nng <- nng(dx=ts1_dist, k=4)
V(ts1_dist_nng)$name <- rownames(ts1_cosSparse)
E(ts1_dist_nng)$weight <- apply(get.edges(ts1_dist_nng,1:ecount(ts1_dist_nng)),1,function(x)ts1_cosSparse[x[1],x[2]])
ts1_dist_adj <- as_adjacency_matrix(ts1_dist_nng, attr="weight")
ts1_dist_adj_mat <- as.matrix(ts1_dist_adj)
dept_diag <- diag(x=1, nrow=nrow(ts1_dist_adj_mat))
ts1_dist_adj_mat <- ts1_dist_adj_mat + dept_diag
ts1_dist_adj_mat <- ts1_dist_adj_mat %*% diag(1/rowSums(ts1_dist_adj_mat))
ts1_dept_simil <- as.data.frame(ts1_dept %*% ts1_dist_adj_mat)
colnames(ts1_dept_simil) <- colnames(ts1_dept)
write_csv(ts1_dept_simil, "./data_trans/ts1_dept_simil.csv")
# Fineline distance/similarity
ts1_fine <- as.matrix(ts1Trans[, 116:5469, with=FALSE], nrow=nrow(ts1Trans))
ts1_fine_Matrix <- Matrix(ts1_fine)
ts1_cosSparse <- as.matrix(cosSparse(ts1_fine_Matrix))
ts1_dist <- pr_simil2dist(ts1_cosSparse)
ts1_dist_nng <- nng(dx=ts1_dist, k=4)
V(ts1_dist_nng)$name <- rownames(ts1_cosSparse)
E(ts1_dist_nng)$weight <- apply(get.edges(ts1_dist_nng,1:ecount(ts1_dist_nng)),1,function(x)ts1_cosSparse[x[1],x[2]])
ts1_dist_adj <- as_adjacency_matrix(ts1_dist_nng, attr="weight")
ts1_dist_adj_mat <- as.matrix(ts1_dist_adj)
fine_diag <- diag(x=1, nrow=nrow(ts1_dist_adj_mat))
ts1_dist_adj_mat <- ts1_dist_adj_mat + fine_diag
ts1_dist_adj_mat <- ts1_dist_adj_mat %*% diag(1/rowSums(ts1_dist_adj_mat))
ts1_fine_simil <- as.data.frame(ts1_fine %*% ts1_dist_adj_mat)
colnames(ts1_fine_simil) <- colnames(ts1_fine)
write_csv(ts1_fine_simil, "./data_trans/ts1_fine_simil.csv")
|
/walmart/data_trans/dist_sim_abs.R
|
no_license
|
brandenkmurray/kaggle
|
R
| false | false | 2,049 |
r
|
library(h2o)
iris.hex <- as.h2o(iris)
iris.gbm <- h2o.gbm(y="Species", training_frame=iris.hex, model_id="irisgbm")
h2o.download_mojo(model=iris.gbm, path="/Users/nkkarpov/ws", get_genmodel_jar=TRUE)
|
/tutorials/mojo-resource/train_and_export_model.R
|
no_license
|
h2oai/h2o-tutorials
|
R
| false | false | 199 |
r
|
#' Elo rating function.
#'
#' @param games Dataframe containing games (1 row each) with
#' columns for players i and j and a column for the results
#' @param PROB Function to compute the probabilities. Should
#' take in two skills and optionally other parameters and return
#' a pairwise win/loss probability.
#' @param init Initial ratings
#' @param k Learning rate
#' @param coeff Coefficient. Passed to prob.
#' @param decay k coefficient decays with time by a factor
#' of 1/(t^decay)
#' @param ... Additional parameters to be passed to PROB.
#'
#' @return Matrix, with each row containing ratings as of a
#' given game.
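#'
#' @examples
#' # A minimal usage sketch (the logistic win probability and the toy games
#' # below are illustrative assumptions, not part of the original code):
#' EloProb <- function(si, sj, coeff) 1 / (1 + 10^(-(si - sj) / coeff))
#' toyGames <- data.frame(i = c(1, 2, 1), j = c(2, 3, 3), results = c(1, 0, 1))
#' ratings <- RateElo(toyGames, PROB = EloProb, init = rep(1500, 3), k = 32, coeff = 400)
#' tail(ratings, 1) # ratings of the three players after the final game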
RateElo <- function(games,PROB,init,k,coeff,decay=0,...) {
skills <- init
skillMat <- matrix(skills,nrow=1,byrow = TRUE)
for(l in 1:length(games$i)) {
kDecayed <- k/(l^decay)
iSkill <- skills[games$i[l]]
jSkill <- skills[games$j[l]]
p <- PROB(iSkill,jSkill,coeff,...)
skills[games$i[l]] <- iSkill+kDecayed*(games$results[l]-p)
skills[games$j[l]] <- jSkill+kDecayed*(p-games$results[l])
skillMat <- rbind(skillMat,skills)
}
return(skillMat)
}
|
/Rating Functions/Rate_Elo_01.R
|
no_license
|
alexm496/ranking
|
R
| false | false | 1,139 |
r
|
# Shiny Price_production
library(data.table)
library(shiny)
library(ggplot2)
library(dplyr)
library(shiny)
library(tidyr)
library(stringr)
#### The UI ####
ui <- fluidPage(
titlePanel(title = "Worldside Crop Price vs. Production Trends",
windowTitle = "Price X Production"),
sidebarLayout(
sidebarPanel(
helpText("Select crop to start with"),
selectInput(inputId = "Item",
label = "Choose an Item",
choices = c("Pineapples",
"Apples",
"Avocados",
"Wheat"),
selected = "")
),
mainPanel(
textOutput(outputId = "chart_title"),
plotOutput(outputId = "trend")
)
)
)
server <- function(input, output, session) {
p_p_chart = ({
harvest = fread("Production_Crops_E_All_Data.csv")
price = fread("Prices_E_All_Data.csv")
######data cleaning for harvest#######
harvest = harvest %>%
select(-ends_with("F")) %>%
gather(Y1961:Y2016, key = "Year", value = "production")
harvest$Year = str_sub(harvest$Year,2)
harvest$Year = as.numeric(harvest$Year)
harvest$Area = as.character(harvest$Area)
harvest <- harvest %>%
select("Area", "Item","Element","Year","production") %>%
spread(key = Element, value = production)
names(harvest) <- gsub(" ", "_", names(harvest))
harvest <- harvest %>%
select(-Feed, -Area_harvested, -Yield) %>%
filter(!is.na(Production),
Year >= 1991)
######## Cleaning price data ##########
price = price %>%
select(-ends_with("F")) %>%
gather(Y1991:Y2016, key = "Year", value = "Price")
price$Year = str_sub(price$Year,2)
price$Year = as.numeric(price$Year)
price$Area = as.character(price$Area)
price <- price %>%
select("Area", "Item","Element","Year","Price") %>%
spread(key = Element, value = Price)
names(price) <- gsub(" ", "_", names(price))
price <- price %>%
filter(!is.na(`Producer_Price_(USD/tonne)`))
######## Yearly production aggregation #########
yearly_production <- harvest %>%
group_by(Year, Item) %>%
summarise(total_prod = sum(Production))
levels(yearly_production$Item)
######## Next Step: aggregate the price
yearly_price <- price %>%
group_by(Year, Item) %>%
summarise(avg_price = mean(`Producer_Price_(USD/tonne)`))
  ######## compare production and price with geom_line
price_prod <- inner_join(yearly_price, yearly_production,
by = c("Item","Year"))
price_prod$Item <- as.factor(price_prod$Item)
})
reactive_price_prod = reactive({
price_prod %>%
filter(Item == input$Item)
})
  output$trend = renderPlot({
    # reactive_price_prod is a reactive expression, so call it with ()
    # to obtain the filtered data frame
    ggplot(reactive_price_prod(), aes(x = Year)) +
      geom_line(aes(y = total_prod, colour = "Prod")) +
      # production and (rescaled) price share the left axis; the secondary
      # axis undoes the x100000 scaling for labelling
      geom_line(aes(y = avg_price*100000, colour = "Price")) +
      scale_y_continuous(sec.axis = sec_axis(~./100000, name = "Price [$USD/Tonne]"))
  })
}
shinyApp(ui, server)
|
/Shiny_price_prod.R
|
no_license
|
TakaakiKihara/Group_Project_Agriculture
|
R
| false | false | 3,177 |
r
|
#' Resample Di-ZTD to phase cell resolution and match raster extents.
#' @author Subhadip Datta
#' @param unw_pha Un-wrapped InSAR tile/raster.
#' @param dztd Di-ZTD.
#' @param method Raster resampling method: "ngb" for nearest neighbor or "bilinear" for bilinear interpolation
#' @import raster
#' @examples
#' library(raster)
#' library(GInSARCorW)
#' library(circular)
#' noDataAsNA<-FALSE
#' i1m<-system.file("td","20170317.ztd.rsc",package = "GInSARCorW")
#' i2m<-system.file("td","20170410.ztd.rsc",package = "GInSARCorW")
#' GACOS_ZTD_T1<-GACOS.Import(i1m,noDataAsNA)
#' GACOS_ZTD_T2<-GACOS.Import(i2m,noDataAsNA)
#' dztd<-d.ztd(GACOS_ZTD_T1,GACOS_ZTD_T2)
#' unw_pha<-raster(system.file("td","Unw_Phase_ifg_17Mar2017_10Apr2017_VV.img",package = "GInSARCorW"))
#' crs(unw_pha)<-CRS("+proj=longlat +datum=WGS84 +no_defs")
#' d.ztd.resample(unw_pha,dztd)
#' @export
d.ztd.resample<-function(unw_pha,dztd,method="bilinear"){
re_ztd<-resample(dztd,unw_pha,method)
return(re_ztd)
}
|
/R/downsample_dztd.R
|
no_license
|
cran/GInSARCorW
|
R
| false | false | 1,016 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_lags.R
\name{create_lags}
\alias{create_lags}
\title{Create Lag Variables}
\usage{
create_lags(data, lags, vars)
}
\arguments{
\item{data}{A data frame.}
\item{lags}{A numeric vector of lags.}
\item{vars}{A character vector of column names.}
}
\value{
A data frame.
}
\description{
Takes a data frame and adds new columns that are lags of existing columns.
}
\details{
For each column name, the specified lags are added as new columns to the data frame.
}
\examples{
# create lags of 2 and 4 of mpg
mtcars_mpg_lags <- create_lags(mtcars, c(2, 4), "mpg")
head(mtcars_mpg_lags)
}
|
/man/create_lags.Rd
|
no_license
|
ebrist/mlts
|
R
| false | true | 666 |
rd
|
prescribe<-function(m,m3){ifelse((m$"Harvesting.System")=="Cable Manual WT", yarderEst(m,m3),
ifelse((m$"Harvesting.System")=="Ground-Based Man WT", sawSkidEst(m,m3),
ifelse((m$"Harvesting.System")=="Ground-Based Mech WT", fbSkidEst(m,m3),
ifelse((m$"Harvesting.System")=="Ground-Based CTL", harForEst(m,m3),
ifelse((m$"Harvesting.System")=="Helicopter CTL", heliHarEst(m,m3),
ifelse((m$"Harvesting.System")=="Cable Manual WT/Log", yarderSawEst(m,m3),
ifelse((m$"Harvesting.System")=="Cable Manual Log", yarderSawLogEst(m,m3),
ifelse((m$"Harvesting.System")=="Ground-Based Manual Log", sawSkidLogEst(m,m3),
ifelse((m$"Harvesting.System")=="Helicopter Manual WT", heliSawEst(m,m3),
ifelse((m$"Harvesting.System")=="Cable CTL", yarderHarEst(m,m3), 1))))))))))
}
m3<-data.frame(row.names=c("fellerBuncher","forwarder","harvester","grappleSkidderLarge",
"cableSkidderLarge","wheelFellerBuncher","crawlerFellerBuncher","yarder","slideboom","chainsaw","chipper","helicopter"),values=c(210.10,170.00,180.00,110.00,149.80,205.10,185.60,299.00,200.00,90.00,100.00,600))
## The above hourly machine costs assume a 40-hour week and include standard operating and ownership costs
dbh<-function(m){
sqrt((twitchVol(m)+8.4166)/.2679)
}
dbh.cm<-function(m){
dbh(m)*2.54
}
treesRemoved<-function(m){
(m$"Small.log.trees.per.acre"+
m$"Large.log.trees.per.acre"+
m$"Chip.tree.per.acre")
}
bcTpa<-function(m){
m$"BrushCutTPA"
}
bcVol<-function(m){
m$"BrushCutAvgVol"
}
chipTrees<-function(m){
m$"Chip.tree.per.acre"
}
sppgrp<-function(m){
ifelse((m$"Small.log.trees.hardwood.proportion"+m$"Large.log.trees.hardwood.proportion")>1, 0, 1)
}
distBetweenTrees<-function(m){
(sqrt((43560/(m$"Small.log.trees.per.acre"+
m$"Large.log.trees.per.acre"+
m$"Chip.tree.per.acre"))/pi))*2
} ## Feet between trees
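# (43560 ft^2 per acre divided by the removed trees per acre is the ground
#  area available to one tree; the expression above is the diameter of a
#  circle with that area, used as the average distance between trees)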
twitchVol3<-function(m){
((m$"Large.log.trees.per.acre"*m$"Large.log.trees.average.vol.ft3.")+(m$"Small.log.trees.per.acre"*m$"Small.log.trees.average.volume.ft3.")+(m$"Chip.tree.per.acre"*m$"Chip.trees.average.volume.ft3"))/
(m$"Large.log.trees.per.acre"+m$"Small.log.trees.per.acre"+m$"Chip.tree.per.acre")
}
twitchVol2<-function(m){
((m$"Large.log.trees.per.acre"*(m$"Large.log.trees.average.vol.ft3."+(
m$Large.log.trees.average.vol.ft3.*(.01*m$Large.log.trees.residue.fraction))))+(
m$"Small.log.trees.per.acre"*(m$"Small.log.trees.average.volume.ft3."+(
m$Small.log.trees.average.volume.ft3.*(.01*m$Small.log.trees.residue.fraction))))+(
m$"Chip.tree.per.acre"*m$"Chip.trees.average.volume.ft3"))/
(m$"Large.log.trees.per.acre"+m$"Small.log.trees.per.acre"+m$"Chip.tree.per.acre")
}
twitchVol<-function(m){ifelse(m$Harvesting.System=="Ground-Based CTL"|
m$Harvesting.System=="Helicopter CTL"|
m$Harvesting.System=="Cable CTL",twitchVol3(m),ifelse(
m$Harvesting.System=="Cable Manual WT"|
m$Harvesting.System=="Ground-Based Man WT"|
m$Harvesting.System=="Ground-Based Mech WT"|
m$Harvesting.System=="Cable Manual WT/Log"|
m$Harvesting.System=="Cable Manual Log"|
m$Harvesting.System=="Ground-Based Manual Log"|
m$Harvesting.System=="Helicopter Manual WT",twitchVol2(m),NaN))}
twitchVolM<-function(m){
twitchVol(m)*0.0283168
}
twitchWeight<-function(m){
twitchDF<-data.frame(m[,"Large.log.trees.average.density.lbs.ft3."],
m[,"Small.log.trees.average.density.lbs.ft3."])
twitchDF[twitchDF==0]<-NA
avgDensity<-rowMeans(twitchDF, na.rm=TRUE)
avgDensity*twitchVol(m)
}
totalWeight<-function(m){
twitchDF<-data.frame(m[,"Large.log.trees.average.density.lbs.ft3."],
m[,"Small.log.trees.average.density.lbs.ft3."])
twitchDF[twitchDF==0]<-NA
avgDensity<-rowMeans(twitchDF, na.rm=TRUE)
avgDensity*totalVol(m)
}
cordsPerAcre<-function(m){
((m$"Large.log.trees.per.acre"*m$"Large.log.trees.average.vol.ft3.")+(m$"Small.log.trees.per.acre"*m$"Small.log.trees.average.volume.ft3."))/128
}
totalVol<-function(m){
(m$"Large.log.trees.per.acre"*m$"Large.log.trees.average.vol.ft3.")+(m$"Small.log.trees.per.acre"*m$"Small.log.trees.average.volume.ft3.")
}
totalVolM<-function(m){
totalVol(m)*0.0283168
}
## Equations
behjouSaw<-function(m){
-2.80+(0.051*(25.4*dbh(m)))+(0.039*(distBetweenTrees(m)/3.28084))
} ## Min/Tree
klepacSaw=function(m){
24.796+0.331419*(dbh(m)^2)
} ## Seconds/Tree
ghafSaw<-function(m){
-1.582+(0.099*dbh.cm(m))
} ##Min/Tree
hartsoughSaw<-function(m){
0.1+0.0111*(dbh(m)^1.496)
} ##Min/Tree
kluenderSaw<-function(m){
(0.016*((dbh(m)*2.54)^1.33))*
(distBetweenTrees(m)^0.083)*(.5^0.196)
} ## Min/Tree
spinelliSaw<-function(m){
30.04 + 0.2*((dbh(m))^2)+ 8.3
}
akaySaw<-function(m){
56.62*(log(twitchVolM(m)))+322.09
}
hansillChip<-function(m){
2.32+(-0.42*1.79)+(1.83*dbh.cm(m))
} ## Sec/Tree
hansillChip2<-function(m){
2.4+(-0.32*1.4)+(1.3*dbh.cm(m))
}
boldingChip<-function(m){
0.001*(totalVol(m)*25)
}
adebayoHar<-function(m){
30.04 + 0.2*((dbh(m))^2)+ 8.3
} ## centi-Minutes/Acre
boldingHar=function(m){
.1765521+(0.003786057*dbh(m))+(4.936639*m$"Percent.Slope"*(sqrt(treesRemoved(m))))
} ##****
hieslHarCord2=function(m){
exp((-0.826+.309*(dbh(m)))+0.386*sppgrp(m))
} ## Cords/PMH
karhaHar<-function(m){
0.288+(0.1004*(m$"Percent.Slope"/0.001))+(-0.00008*(twitchVolM(m)/0.001)^2)
} ##m^3/PMH
karhaHar2<-function(m){
0.181*m$"Percent.Slope"+(0.1315*(twitchVolM(m)*1000))
}
karhaHar3<-function(m){
0.422+(0.1126*(twitchVolM(m)*1000))
}
klepacSkid<-function(m){
abs(
(0.0076197*m$"One.way.Yarding.Distance"-0.6073183873)+
(0.00735*(distBetweenTrees(m)*4)+0.5438016119)+
(0.0121066*m$"One.way.Yarding.Distance"-1.651069636)
)
} ## 3 compounded functions
# NOTE: klepacHar is defined again further down with a distance-based formula;
# when this file is sourced top to bottom, that later definition is the one used.
klepacHar=function(m){
24.796+0.331419*(dbh(m)^2)
} ## Seconds/Tree
drewsHar<-function(m){
21.139+72.775*(totalVol(m))
}
jirousekHar<-function(m){
60.711*(twitchVolM(m)^0.6545)
} ## m^3/PMH
klepacHar<-function(m){
abs(
(0.0076197*m$"One.way.Yarding.Distance"-0.6073183873)+
(0.00735*(distBetweenTrees(m)*4)+0.5438016119)+
(0.0121066*m$"One.way.Yarding.Distance"-1.651069636)
)
}
## Feller Buncher
hartsoughFB<-function(m){
0.324+0.00138*(dbh(m)^2)
} ##minutes per tree
akayFB<-function(m){
56.62*(log(twitchVolM(m)))+322.09
}
stokesFB<-function(m){
2.80*(m$"One.way.Yarding.Distance")^0.574
}
drewsFB<-function(m){
21.139+72.775*(totalVol(m))
}
dykstraFB<-function(m){
2.39219+0.0019426*(m$"Percent.Slope")+
(m$"One.way.Yarding.Distance")+0.030463*(treesRemoved(m))
}
boldingFB=function(m){
.1765521+(0.003786057*dbh(m))+(4.936639*m$"Percent.Slope"*(sqrt(treesRemoved(m))))
}
karhaFB<-function(m){
0.422+(0.1126*(twitchVolM(m)*1000))
}
hieslFB=function(m){
exp((-0.826+.309*(dbh(m)))+0.386*sppgrp(m))
}
behjouFB<-function(m){
-2.80+(0.051*(25.4*dbh(m)))+(0.039*(distBetweenTrees(m)/3.28084))
}
adebayoFB<-function(m){
30.04 + 0.2*((dbh(m))^2)+ 8.3
}
## Skidder
hieslGrapCord=function(m){
exp(1.754*(-0.0005*m$"One.way.Yarding.Distance"))+(0.755*twitchVol(m))
} ##Cords/PMH
akaySkid<-function(m){
-0.1971+(1.1287*5)+((0.0045*twitchVolM(m)*5))+(0.0063*(m$"One.way.Yarding.Distance"*2))
}
ghafSkid<-function(m){
13.027+(0.035*(m$"One.way.Yarding.Distance"*2))+(0.847*(m$"Percent.Slope"))+(0.551*(twitchVolM(m)*5))
}
akaySkid2<-function(m){
-0.1971+(1.1287*5)+((0.0045*twitchVolM(m)*5))+(0.0063*(m$"One.way.Yarding.Distance"*2))
}
akaySkid2<-function(m){
0.012*(m$"One.way.Yarding.Distance"^-0.399)*(dbh(m)^2.041)*(5^0.766)
}
kluenderSkid<-function(m){
(0.017*((m$"One.way.Yarding.Distance"^-0.574))*2)*(dbh.cm(m)^2.002)
}
boldingSkid<-function(m){
.1761+(0.00357*dbh(m))+(4.93*m$"Percent.Slope"*(sqrt(treesRemoved(m))))
}
fisherSkid=function(m){
2.374+(0.00841141*(m$"One.way.Yarding.Distance"))+(0.72548570*(1.35))
}
##Forwarder
jirousekFor<-function(m){
-7.6881*log((m$"One.way.Yarding.Distance"*0.3048))+64.351
}
boldingFor<-function(m){
.1761+(0.00357*dbh(m))+(4.93*m$"Percent.Slope"*(sqrt(treesRemoved(m))))
}
jirousekFor2<-function(m){
10.5193*(m$"One.way.Yarding.Distance"^(24.9181/m$"One.way.Yarding.Distance"))
}
jirousekFor3<-function(m){
17.0068*(m$"One.way.Yarding.Distance"^(13.2533/m$"Percent.Slope"))
}
kluenderFor<-function(m){
(0.017*((m$"One.way.Yarding.Distance"^-0.574))*2)*(dbh.cm(m)^2.002)
}
fisherFor=function(m){
2.374+(0.00841141*(m$"One.way.Yarding.Distance"))+(0.72548570*(1.35))
}
dykstraFor<-function(m){
2.39219+0.0019426*(m$"Percent.Slope")+
(m$"One.way.Yarding.Distance")+0.030463*(treesRemoved(m))
}
iffFor<-function(m){
1.054+.00234*(m$One.way.Yarding.Distance)+0.01180*(97)+0.980*(
treesRemoved(m))+.00069*(totalWeight(m))
}
## Yarder
fisherYarder=function(m){
2.374+(0.00841141*(m$"One.way.Yarding.Distance"))+(0.72548570*(1.35))
} ## Turn Time Minutes
curtisYarding<-function(m){
23.755 + (2.7716*(1.5))-(0.63694*(m$"One.way.Yarding.Distance"))
} ## Logs/Hour
curtisYarding2<-function(m){
11.138+(7.1774*(1.5))-(0.59976*(m$"One.way.Yarding.Distance"))
} ## Logs/Hour
dykstraYarding<-function(m){
2.39219+0.0019426*(m$"Percent.Slope")+
(m$"One.way.Yarding.Distance")+0.030463*(treesRemoved(m))
}
aulerichYard3<-function(m){
1.210+0.009*(m$One.way.Yarding.Distance)+0.015*(
m$One.way.Yarding.Distance)+0.253*(treesRemoved(m))
}
iffYard<-function(m){
1.054+.00234*(m$One.way.Yarding.Distance)+0.01180*(97)+0.980*(
treesRemoved(m))+.00069*(totalWeight(m))
}
aulerichYard2<-function(m){
1.925+0.002*(m$One.way.Yarding.Distance)+0.017*(
m$One.way.Yarding.Distance)+0.909*(treesRemoved(m))
}
aulerichYard<-function(m){
0.826+0.006*(m$One.way.Yarding.Distance)+0.032*(
m$One.way.Yarding.Distance)+0.897*(treesRemoved(m))
}
## Slide Boom Processor
hartsoughSlide<-function(m){
0.141+0.0298*dbh(m)
}
ghafLoad<-function(m){
23.297/twitchVolM(m)
}
suchomelSlide<-function(m){
twitchVolM(m)/19.8
}
## Helicopter Yarding
flattenHeli<-function(m){
40.737274+(0.0168951*m$"One.way.Yarding.Distance")+((totalWeight(m)/12)*2.052894)+(22.658839)
}
dykstraHeli<-function(m){
1.3721152+(0.0126924*m$"Percent.Slope")+(0.00246741*m$"One.way.Yarding.Distance")+
(0.031200*(3))+(0.000060987*(totalVol(m)/12))-(0.000050837*(totalVol(m)/36))
}
curtisHeli<- function(m){
23.755 + 2.7716*treesRemoved(m)-0.63694*(m$"One.way.Yarding.Distance")
}
akayHeli<-function(m){
1.3721152+(0.0126924*m$"Percent.Slope")+(0.00246741*m$"One.way.Yarding.Distance")+
(0.031200*(3))+(0.000060987*(totalVol(m)/12))-(0.000050837*(totalVol(m)/36))
}
############## Time Per Acre Conversions
ghafSkidTime<-function(m){
ifelse(m$"Percent.Slope"<45, totalVolM(m)/(twitchVolM(m)*30)*ghafSkid(m)/60, NA)
}
mechBC<-function(m){
ifelse(is.na(m$BrushCutTPA), 0, ifelse(m$BrushCutTPA>0, ifelse(m$BrushCutAvgVol<4, m$BrushCutTPA/(10*60), m$BrushCutTPA/(5*60)), 0))
} ##Hours/Acre
adebayoHarTime<-function(m){
adebayoHar(m)*treesRemoved(m)/60/60
} ## Hours/Acre
hansillChip2Time<-function(m){
ifelse(dbh.cm(m)<76, ((chipTrees(m)*hansillChip2(m))/60), NA)
} ##Hours/Acre
curtisYardingTime<-function(m){
ifelse(m$"One.way.Yarding.Distance"<10,treesRemoved(m)/curtisYarding(m),NA)
} ## Hours/Acre
manBC2<-function(m){
ifelse(is.na(m$BrushCutTPA), 0, ifelse(m$BrushCutTPA>0 ,ifelse(m$BrushCutAvgVol<4, m$BrushCutTPA/(60), m$BrushCutTPA/(2*60)),0))
}
curtisYarding2Time<-function(m){
ifelse(m$"One.way.Yarding.Distance"<10,treesRemoved(m)/curtisYarding2(m),NA)
} ## Hours/Acre
fisherYarderTime<-function(m){
((((m$"Small.log.trees.per.acre"+m$"Large.log.trees.per.acre")/1.35)*fisherYarder(m))/120)
} ## Hours/Acre
kluenderSkidTime<-function(m){
ifelse(m$"Percent.Slope"<45, kluenderSkid(m), NA)
}
behjouSawTime<-function(m){
ifelse(dbh.cm(m)>40.00, ((treesRemoved(m)*behjouSaw(m))/60), NA)
}
dykstraHeliTime<-function(m){
ifelse((twitchWeight(m)<2900),
(((((totalVol(m)/3)*(m$"Large.log.trees.per.acre"+m$"Small.log.trees.per.acre"))/190)*dykstraHeli(m))/60),
NA)
} ##Hours/Acre
flattenHeliTime<-function(m){
ifelse((twitchWeight(m)<2900), flattenHeli(m)/3600, NA)
} ## Hours/Acre
ghafSawTime<-function(m){
ifelse(dbh.cm(m)>25, ((ghafSaw(m)*treesRemoved(m))/180), NA)
}
hansillChipTime<-function(m){
ifelse(dbh.cm(m)<76, ((chipTrees(m)*hansillChip(m))/60/60), NA)
} ## Hours/ Acre
hartsoughSawTime<-function(m){
(hartsoughSaw(m)*treesRemoved(m))/60
}
jirousekFor3Time<-function(m){
ifelse(m$"Percent.Slope"<45, totalVolM(m)/jirousekFor3(m), NA)
}
boldingChipTime<-function(m){
ifelse(dbh.cm(m)<50, ((boldingChip(m))/60/60), NA)
}
akaySkidTime<-function(m){
ifelse(m$"One.way.Yarding.Distance"<3500, akaySkid(m)*.4, NA)
}
jirousekFor2Time<-function(m){
ifelse(m$"Percent.Slope"<45, totalVolM(m)/jirousekFor2(m), NA)
}
hansillChip2Time<-function(m){
ifelse(dbh.cm(m)<76, ((chipTrees(m)*hansillChip2(m))/60), NA)
}
kluenderSawTime<-function(m){
(kluenderSaw(m)*treesRemoved(m))/60
}
curtisHeliTime<-function(m){
ifelse(dbh.cm(m)>20, curtisHeli(m)/60, NA)
}
hieslGrapTime<-function(m){
cordsPerAcre(m)/hieslGrapCord(m)
} ##Hours/Acre
hieslHarTime<-function(m){
cordsPerAcre(m)/hieslHarCord2(m)
} ##Hours/Acre
boldingForTime<-function(m){
ifelse(m$"Percent.Slope"<0, totalVolM(mslope)/boldingFor(m), NA)
}
karhaHar3Time<-function(m){
ifelse(twitchVolM(m)<40, (totalVolM(m))/karhaHar3(m), NA)
}
ghafLoadTime<-function(m){
ifelse(twitchVolM(m)<3, ghafLoad(m)/60, NA)
}
klepacSkidTime<-function(m){
ifelse(m$"Percent.Slope"<.01, treesRemoved(m)/(klepacSkid(m)*15), NA)
}
jirousekHarTime<-function(m){
ifelse(twitchVolM(m)<1.4, (totalVolM(m)/jirousekHar(m)), NA)
} ##Hours/Acre
jirousekForTime<-function(m){
totalVolM(m)/jirousekFor(m)
} ##Hours/Acre
klepacHarTime<-function(m){
((klepacHar(m)*treesRemoved(m))/60)/60
} ## Hours/Acre
stokesFBTime<-function(m){
(ifelse(m$"Percent.Slope"<45, stokesFB(m)/60, NA))
} ##Hours/Acre
akayFBTime<-function(m){
ifelse(twitchVolM(m)<.2, (totalVolM(m))/akayFB(m), NA)
}
karhaHarTime<-function(m){
  # NOTE: "< .0" can never be TRUE for a positive piece volume, so this term
  # always contributes NA and is dropped by rowMeans(..., na.rm = TRUE)
  ifelse(twitchVolM(m)<.0, (totalVol(m)*0.0283168)/karhaHar(m), NA)
} ## Hours/Acre
hartsoughFBTime<-function(m){
(hartsoughFB(m)*treesRemoved(m))/60
} ##Hours/Acre
hartsoughSlideTime<-function(m){
if(m$Harvesting.System=="Cable Manual WT"||m$Harvesting.System=="Cable Manual WT/Log"||m$Harvesting.System=="Cable Manual Log"||m$Harvesting.System=="Ground-Based Man WT"||m$Harvesting.System=="Cable CTL"){hartsoughSlide(m)*30}else{NA}
} ##Hours/Acre
suchomelSlideTime<-function(m){
suchomelSlide(m)
} ##Hours/Acre
karhaHar2Time<-function(m){
ifelse(twitchVolM(m)<40, (totalVolM(m))/karhaHar2(m), NA)
} ##Hours/Acre
############################### Cycle Time Analysis
## Chipping Analysis
chipDF<-function(m){
data.frame(
hansillChipTime(m),
hansillChip2Time(m),
boldingChipTime(m))
}
chipTime<-function(m){
rowMeans(chipDF(m), na.rm=TRUE)
}
## Feller Buncher Analysis
fbDF<-function(m){
data.frame(
hartsoughFBTime(m),
stokesFBTime(m),
akayFBTime(m))
}
fbTime<-function(m){
fbAvg(m)
}
fbAvg<-function(m){
ifelse(fbTime2(m)=="NaN", mechBC(m), fbTime2(m)+mechBC(m))
}
fbTime2<-function(m){
rowMeans(fbDF(m), na.rm=TRUE)
}
## Slideboom Processor
slideDF<-function(m){
data.frame(
ifelse(is.nan(suchomelSlideTime(m)),0,suchomelSlideTime(m)),
ifelse(is.nan(ghafLoadTime(m)),0,ghafLoadTime(m)),
ifelse(is.nan(hartsoughSlideTime(m)),0,hartsoughSlideTime(m))
)
}
slideTime<-function(m){
rowMeans(slideDF(m), na.rm=TRUE)
}
## Forwarder Analysis
forDF<-function(m){
data.frame(
jirousekForTime(m),
jirousekFor2Time(m),
boldingForTime(m),
jirousekFor3Time(m))
}
forTime<-function(m){
rowMeans(forDF(m), na.rm=TRUE)
}
## Skidder Analysis
skidderDF<-function(m){
data.frame(
hieslGrapTime(m),
akaySkidTime(m),
kluenderSkidTime(m),
ghafSkidTime(m),
klepacSkidTime(m))
}
skidderTime<-function(m){
rowMeans(skidderDF(m), na.rm=TRUE)
}
## Manual
sawDF<-function(m){
data.frame(
behjouSawTime(m),
ghafSawTime(m),
hartsoughSawTime(m),
kluenderSawTime(m))
}
sawTime2<-function(m){
ifelse((m$"Harvesting.System")=="Cable Manual WT/Log",
(m$"Small.log.trees.per.acre"+m$"Large.log.trees.per.acre")*0.013333+(rowMeans(sawDF(m), na.rm=TRUE)),
ifelse((m$"Harvesting.System")=="Ground-Based Manual Log",
((m$"Small.log.trees.per.acre"+m$"Large.log.trees.per.acre")*0.013333+(rowMeans(sawDF(m), na.rm=TRUE))),
(rowMeans(sawDF(m), na.rm=TRUE))))
}
sawAvg<-function(m){
ifelse(sawTime2(m)=="NaN", manBC2(m), sawTime2(m)+manBC2(m))
}
sawTime<-function(m){
sawAvg(m)}
## Hours/Acre
## Harvester
harvesterDF<-function(m){
data.frame(
klepacHarTime(m),
hieslHarTime(m),
adebayoHarTime(m),
karhaHarTime(m),
jirousekHarTime(m))
}
harTime2<-function(m){
rowMeans(harvesterDF(m), na.rm=TRUE)
}
harAvg<-function(m){
ifelse(harTime2(m)=="NaN", mechBC(m), harTime2(m)+mechBC(m))
}
harTime<-function(m){
harAvg(m)
}
## Helicopter Yarding Analysis
heliDF<-function(m){
data.frame(
flattenHeliTime(m),
dykstraHeliTime(m),
curtisHeliTime(m))
}
heliTime<-function(m){
rowMeans(heliDF(m), na.rm=TRUE)
}
## Cable Yarding Analysis
yarderDF<-function(m){
data.frame(
fisherYarderTime(m),
abs(curtisYardingTime(m)),
abs(curtisYarding2Time(m)))
}
yarderTime<-function(m){
rowMeans(yarderDF(m), na.rm=TRUE)
}
## Analyzing Time Conversions and Price Regression
highYardingTime<-function(m){
ifelse(yarderTime(m)>sawTime(m), yarderTime(m),
ifelse(sawTime(m)>yarderTime(m), sawTime(m), 1))
} ## Returns the limiting time/acre
highGroundHarSkidTime<-function(m){
ifelse(skidderTime(m)>harTime(m), skidderTime(m), ifelse(harTime(m)>skidderTime(m), harTime(m), 1))
} ## Returns the limiting time
highGroundFBSkidTime<-function(m){
ifelse(skidderTime(m)>fbTime(m), skidderTime(m), ifelse(fbTime(m)>skidderTime(m), fbTime(m), 1))
} ## Returns the limiting time
highGroundCTLTime<-function(m){
ifelse(forTime(m)>harTime(m), forTime(m), ifelse(harTime(m)>forTime(m), harTime(m), 1))
}
yarderEst<-function(m,p){
rowSums(cbind((yarderTime(m)*p["yarder",]),(sawTime(m)*p["chainsaw",]),(slideTime(m)*p["slideboom",]),((chipTime(m)*.2)*p["chipper",])),na.rm = TRUE)
}
yarderSawEst<-function(m,p){
rowSums(cbind((yarderTime(m)*p["yarder",]),(sawTime(m)*p["chainsaw",]),(slideTime(m)*p["slideboom",]),((chipTime(m)*.15)*p["chipper",])),na.rm=TRUE)
}
sawSkidEst<-function(m,p){
rowSums(cbind(((skidderTime(m)*p["grappleSkidderLarge",])),(sawTime(m)*p["chainsaw",]),(slideTime(m)*p["slideboom",]),((chipTime(m)*.1)*p["chipper",])),na.rm = TRUE)
}
sawSkidLogEst<-function(m,p){
rowSums(cbind((skidderTime(m)*p["grappleSkidderLarge",]*2.5),(sawTime(m)*p["chainsaw",]*2.5),(slideTime(m)*p["slideboom",]),((chipTime(m)*.1)*p["chipper",])),na.rm=TRUE)
}
harForEst<-function(m,p){
rowSums(cbind((((forTime(m)*.75)+2)*p["forwarder",]),((harTime(m)*p["harvester",])*5),(slideTime(m)*p["slideboom",]),((chipTime(m)*.2)*p["chipper",])),na.rm = TRUE)
}
harSkidEst<-function(m,p){
rowSums(cbind((harTime(m)*p["harvester",]),(skidderTime(m)*p["grappleSkidderLarge",]),(slideTime(m)*p["slideboom",]),((chipTime(m)*.1)*p["chipper",])),na.rm = TRUE)
}
heliHarEst<-function(m,p){
rowSums(cbind((heliTime(m)*p["helicopter",]),(harTime(m)*p["harvester",])),na.rm = TRUE)
}
yarderHarEst<-function(m,p){
rowSums(cbind((yarderTime(m)*p["yarder",]),(harTime(m)*p["harvester",]),((chipTime(m)*.15)*p["chipper",])),na.rm = TRUE)
}
fbSkidEst<-function(m,p){
rowSums(cbind((fbTime(m)*p["fellerBuncher",]),((skidderTime(m)*1.5*p["grappleSkidderLarge",])*1.3),(slideTime(m)*p["slideboom",]),((chipTime(m)*.15)*p["chipper",])),na.rm = TRUE)
}
yarderSawLogEst<-function(m,p){
rowSums(cbind((yarderTime(m)*p["yarder",]),(sawTime(m)*p["chainsaw",]),(slideTime(m)*p["slideboom",]),((chipTime(m)*.5)*p["chipper",])),na.rm = TRUE)
}
heliSawEst<-function(m,p){
rowSums(cbind((heliTime(m)*p["helicopter",]),(sawTime(m)*p["chainsaw",])),na.rm = TRUE)
}
################ Cost Data Frames
idahoCost<-data.frame(row.names=c("fellerBuncher","forwarder","harvester","grappleSkidderLarge","cableSkidderLarge","wheelFellerBuncher","crawlerFellerBuncher","yarder","slideboom","chainsaw","chipper"),values=c(
210.10,170.00,180.00,110.00,149.80,205.10,185.60,299.00,200.00,90.00,100.00))
washingtonCost<-data.frame(row.names=c("fellerBuncher","forwarder","harvester","grappleSkidderLarge","cableSkidderLarge","wheelFellerBuncher","crawlerFellerBuncher","yarder","slideboom","chainsaw","chipper"),values=c(190.10,170.00,170.00,155.00,190.00,195.10,180.60,305.00,200.00,90.00,100.00))
oregonCost<-data.frame(row.names=c("fellerBuncher","forwarder","harvester","grappleSkidderLarge","cableSkidderLarge","wheelFellerBuncher","crawlerFellerBuncher","yarder","slideboom","chainsaw","chipper"),values=c(175.10,180.00,175.00,150.00,150.90,190.10,162.60,303.00,200.00,90.00,100.00))
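## Example (illustrative): machine rates are looked up by row name, e.g.
## idahoCost["harvester", ] returns 180.00 and oregonCost["yarder", ] returns
## 303.00 ($/PMH); the *Est functions above multiply these rates by hours/acre.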
############### Common Harvest Costs
yarderPrice<-function(m){
m["yarder",]+m["chainsaw",]
}
groundMechHarSkidPrice<-function(m){
m["harvester",]+(m["grappleSkidderLarge",])
}
groundMechFBSkidPrice<-function(m){
m["fellerBuncher",]+(m["grappleSkidderLarge",])
}
groundCTLPrice<-function(m){
m["forwarder",]+m["harvester",]
}
exPriceFBSkid<-idahoCost["fellerBuncher",]+(idahoCost["grappleSkidderLarge",])
exPriceHarSkid<-idahoCost["harvester",]+(idahoCost["grappleSkidderLarge",])
TyardingPrice<-function(m){
yarderPrice(m)*highYardingTime(m)
}
TgroundMechHarSkidPrice<-function(m){
exPriceHarSkid*highGroundHarSkidTime(m)
}
TgroundMechFBSkidPrice<-function(m){
exPriceFBSkid*highGroundFBSkidTime(m)
}
TgroundCTLPrice<-function(m){
forTime(m)*groundCTLPrice(idahoCost)
}
############### Prescribed Analysis
prescribedEq<-function(m){
ifelse((m$"Harvesting.System")=="Cable Manual WT", highYardingTime(m),
ifelse((m$"Harvesting.System")=="Ground-Based Man WT", highGroundHarSkidTime(m),
ifelse((m$"Harvesting.System")=="Ground-Based Mech WT", highGroundFBSkidTime(m),
ifelse((m$"Harvesting.System")==30500, highGroundCTLTime(m), 1))))
}
labelEq1<-function(m){
ifelse((m$"Harvesting.System")=="Cable Manual WT", "Cable Manual WT",
ifelse((m$"Harvesting.System")=="Ground-Based Man WT", "Sawyer Skidder WT",
ifelse((m$"Harvesting.System")=="Ground-Based Mech WT", "Fellerbuncher Skidder WT",
ifelse((m$"Harvesting.System")=="Ground-Based CTL", "Harvester Forwarder CTL",
ifelse((m$"Harvesting.System")=="Helicopter CTL", "Helicopter Harvester CTL",
ifelse((m$"Harvesting.System")=="Cable Manual WT/Log", "Cable Man WT/Log",
ifelse((m$"Harvesting.System")=="Cable Manual Log", "Cable Manual Log",
ifelse((m$"Harvesting.System")=="Ground-Based Manual Log", "Manual Skidder CTL",
ifelse((m$"Harvesting.System")=="Helicopter Manual WT", "Helicopter Man WT",
ifelse((m$"Harvesting.System")=="Cable CTL", "Cable Mech CTL", 1))))))))))
}
cNames<-function(m){colnames(m)<-c("Stand ID", "Treatment Cost", "FVS OpCost Treatment Selection"); m}
############## Ideal Analysis
slopeEq<-function(m){
ifelse((m$"Percent.Slope")>45.001, highYardingTime(m), idealGround(m))
}
harFBChoice<-function(m){
ifelse(TgroundMechHarSkidPrice(m)>TgroundMechFBSkidPrice(m), highGroundFBSkidTime(m),
ifelse(TgroundMechFBSkidPrice(m)>TgroundMechHarSkidPrice(m), highGroundHarSkidTime(m), 1))
}
idealGround<-function(m){
ifelse((harFBChoice(m)*groundMechHarSkidPrice(idahoCost))>TgroundCTLPrice(m), harFBChoice(m),
ifelse((harFBChoice(m)*groundMechHarSkidPrice(idahoCost))<TgroundCTLPrice(m), highGroundCTLTime(m), 1))
}
print("896: OK")
labelEq2<-function(m,p){
ifelse(m$Percent.Slope>45,ifelse(
m$One.way.Yarding.Distance>8000, ifelse(
heliHarEst(m,m3)<heliSawEst(m,m3),heliHarEst(m,m3),heliSawEst(m,m3)),ifelse(
m$"Harvesting.System"=="Cable Manual WT"|m$"Harvesting.System"=="Cable Manual WT/Log",ifelse(
yarderEst(m,m3)<yarderSawEst(m,m3),yarderEst(m,m3),yarderSawEst(m,m3)),ifelse(
yarderSawLogEst(m,m3)<yarderHarEst(m,m3) & yarderSawLogEst(m,m3)<yarderSawEst(m,m3),yarderSawLogEst(m,m3),ifelse(
yarderHarEst(m,m3)<yarderSawEst(m,m3),yarderHarEst(m,m3),yarderSawEst(m,m3))))),ifelse(
m$"Harvesting.System"=="Ground-Based CTL"|m$"Harvesting.System"=="Ground-Based Manual Log",ifelse(
harForEst(m,m3)<sawSkidEst(m,m3),harForEst(m,m3),sawSkidEst(m,m3)),ifelse(
harSkidEst(m,m3)<fbSkidEst(m,m3),harSkidEst(m,m3),fbSkidEst(m,m3))))
}
print("labelEq2: OK")
labelEq3<-function(m,p){
ifelse(m$Percent.Slope>45,ifelse(
m$One.way.Yarding.Distance>8000, ifelse(
heliHarEst(m,m3)<heliSawEst(m,m3),"Helicopter CTL","Helicopter Manual WT"),ifelse(
m$"Harvesting.System"=="Cable Manual WT"|m$"Harvesting.System"=="Cable Manual WT/Log",ifelse(
yarderEst(m,m3)<yarderSawEst(m,m3),"Cable Manual WT","Cable Manual WT/Log"),ifelse(
yarderSawLogEst(m,m3)<yarderHarEst(m,m3) & yarderSawLogEst(m,m3)<yarderSawEst(m,m3),"Cable Manual Log",ifelse(
yarderHarEst(m,m3)<yarderSawEst(m,m3),"Cable CTL","Cable Manual WT/Log")))),ifelse(
m$"Harvesting.System"=="Ground-Based CTL"|m$"Harvesting.System"=="Ground-Based Manual Log",ifelse(
harForEst(m,m3)<sawSkidEst(m,m3),"Ground-Based CTL","Ground-Based Man WT"),ifelse(
harSkidEst(m,m3)<fbSkidEst(m,m3),"Ground-Based CTL","Ground-Based Mech WT")))
}
print("labelEq3: OK")
mic<-function(x){
(m3[x,]*.35)*((30/25)+(30/45))
}
mim<-function(m,m3){ifelse((m$"Harvesting.System")=="Cable Manual WT", mic("yarder")+mic("chainsaw"),
ifelse((m$"Harvesting.System")=="Ground-Based Man WT", mic("chainsaw")+mic("grappleSkidderLarge"),
ifelse((m$"Harvesting.System")=="Ground-Based Mech WT", mic("fellerBuncher")+mic("grappleSkidderLarge"),
ifelse((m$"Harvesting.System")=="Ground-Based CTL", mic("forwarder")+mic("harvester"),
ifelse((m$"Harvesting.System")=="Helicopter CTL", mic("helicopter"),
ifelse((m$"Harvesting.System")=="Cable Manual WT/Log", mic("yarder")+mic("chainsaw"),
ifelse((m$"Harvesting.System")=="Cable Manual Log", mic("yarder")+mic("chainsaw"),
ifelse((m$"Harvesting.System")=="Ground-Based Manual Log", mic("chainsaw")+mic("grappleSkidderLarge"),
ifelse((m$"Harvesting.System")=="Helicopter Manual WT", mic("helicopter"),
ifelse((m$"Harvesting.System")=="Cable CTL", mic("chainsaw")+mic("yarder"), 1))))))))))
}
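## Worked example (illustrative): mic() charges 35% of a machine's hourly rate
## times the fixed factor (30/25 + 30/45) = approx. 1.87 (apparently a move-in travel
## allowance), so mic("yarder") = 299.00*0.35*1.867 = approx. 195, and mim() for
## "Cable Manual WT" adds mic("chainsaw") = approx. 59, for roughly 254 total.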
chippingCost2<-function(m,p){ifelse((m$"Harvesting.System")=="Cable Manual WT", ((chipTime(m)*.2)*p["chipper",]),
ifelse((m$"Harvesting.System")=="Ground-Based Man WT", ((chipTime(m)*.1)*p["chipper",]),
ifelse((m$"Harvesting.System")=="Ground-Based Mech WT", ((chipTime(m)*.15)*p["chipper",]),
ifelse((m$"Harvesting.System")=="Ground-Based CTL", ((chipTime(m)*.2)*p["chipper",]),
ifelse((m$"Harvesting.System")=="Helicopter CTL", ((chipTime(m)*.2)*p["chipper",]),
ifelse((m$"Harvesting.System")=="Cable Manual WT/Log", ((chipTime(m)*.5)*p["chipper",]),
ifelse((m$"Harvesting.System")=="Cable Manual Log", ((chipTime(m)*.1)*p["chipper",]),
ifelse((m$"Harvesting.System")=="Ground-Based Manual Log", ((chipTime(m)*.1)*p["chipper",]),
ifelse((m$"Harvesting.System")=="Helicopter Manual WT", ((chipTime(m)*.2)*p["chipper",]),
ifelse((m$"Harvesting.System")=="Cable CTL", ((chipTime(m)*.15)*p["chipper",]), "NAN"))))))))))
}
############# Selection
|
/fvsopcostshiny.r
|
no_license
|
timgholland/ltw
|
R
| false | false | 30,714 |
r
|
prescribe<-function(m,m3){ifelse((m$"Harvesting.System")=="Cable Manual WT", yarderEst(m,m3),
ifelse((m$"Harvesting.System")=="Ground-Based Man WT", sawSkidEst(m,m3),
ifelse((m$"Harvesting.System")=="Ground-Based Mech WT", fbSkidEst(m,m3),
ifelse((m$"Harvesting.System")=="Ground-Based CTL", harForEst(m,m3),
ifelse((m$"Harvesting.System")=="Helicopter CTL", heliHarEst(m,m3),
ifelse((m$"Harvesting.System")=="Cable Manual WT/Log", yarderSawEst(m,m3),
ifelse((m$"Harvesting.System")=="Cable Manual Log", yarderSawLogEst(m,m3),
ifelse((m$"Harvesting.System")=="Ground-Based Manual Log", sawSkidLogEst(m,m3),
ifelse((m$"Harvesting.System")=="Helicopter Manual WT", heliSawEst(m,m3),
ifelse((m$"Harvesting.System")=="Cable CTL", yarderHarEst(m,m3), 1))))))))))
}
m3<-data.frame(row.names=c("fellerBuncher","forwarder","harvester","grappleSkidderLarge",
"cableSkidderLarge","wheelFellerBuncher","crawlerFellerBuncher","yarder","slideboom","chainsaw","chipper","helicopter"),values=c(210.10,170.00,180.00,110.00,149.80,205.10,185.60,299.00,200.00,90.00,100.00,600))
## The above costs assume a 40-hour week with standard operating and ownership costs
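## Example (illustrative): m3 adds a "helicopter" rate that the state cost tables
## above do not carry, e.g. m3["helicopter", ] is 600 $/PMH, so heliHarEst() and
## heliSawEst() must be called with m3 (or a price frame containing that row).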
dbh<-function(m){
sqrt((twitchVol(m)+8.4166)/.2679)
}
dbh.cm<-function(m){
dbh(m)*2.54
}
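## Worked example (illustrative): dbh() inverts the volume relation
## vol = 0.2679*dbh^2 - 8.4166, so a 60 ft3 average twitch gives
## dbh = sqrt((60 + 8.4166)/0.2679) = approx. 16.0 inches (about 40.6 cm).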
treesRemoved<-function(m){
(m$"Small.log.trees.per.acre"+
m$"Large.log.trees.per.acre"+
m$"Chip.tree.per.acre")
}
bcTpa<-function(m){
m$"BrushCutTPA"
}
bcVol<-function(m){
m$"BrushCutAvgVol"
}
chipTrees<-function(m){
m$"Chip.tree.per.acre"
}
sppgrp<-function(m){
ifelse((m$"Small.log.trees.hardwood.proportion"+m$"Large.log.trees.hardwood.proportion")>1, 0, 1)
}
distBetweenTrees<-function(m){
(sqrt((43560/(m$"Small.log.trees.per.acre"+
m$"Large.log.trees.per.acre"+
m$"Chip.tree.per.acre"))/pi))*2
} ## Feet between trees
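## Worked example (illustrative): with 100 cut trees/acre, each tree occupies
## 43560/100 = 435.6 ft2, so distBetweenTrees() returns 2*sqrt(435.6/pi) = approx. 23.6 ft.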
twitchVol3<-function(m){
((m$"Large.log.trees.per.acre"*m$"Large.log.trees.average.vol.ft3.")+(m$"Small.log.trees.per.acre"*m$"Small.log.trees.average.volume.ft3.")+(m$"Chip.tree.per.acre"*m$"Chip.trees.average.volume.ft3"))/
(m$"Large.log.trees.per.acre"+m$"Small.log.trees.per.acre"+m$"Chip.tree.per.acre")
}
twitchVol2<-function(m){
((m$"Large.log.trees.per.acre"*(m$"Large.log.trees.average.vol.ft3."+(
m$Large.log.trees.average.vol.ft3.*(.01*m$Large.log.trees.residue.fraction))))+(
m$"Small.log.trees.per.acre"*(m$"Small.log.trees.average.volume.ft3."+(
m$Small.log.trees.average.volume.ft3.*(.01*m$Small.log.trees.residue.fraction))))+(
m$"Chip.tree.per.acre"*m$"Chip.trees.average.volume.ft3"))/
(m$"Large.log.trees.per.acre"+m$"Small.log.trees.per.acre"+m$"Chip.tree.per.acre")
}
twitchVol<-function(m){ifelse(m$Harvesting.System=="Ground-Based CTL"|
m$Harvesting.System=="Helicopter CTL"|
m$Harvesting.System=="Cable CTL",twitchVol3(m),ifelse(
m$Harvesting.System=="Cable Manual WT"|
m$Harvesting.System=="Ground-Based Man WT"|
m$Harvesting.System=="Ground-Based Mech WT"|
m$Harvesting.System=="Cable Manual WT/Log"|
m$Harvesting.System=="Cable Manual Log"|
m$Harvesting.System=="Ground-Based Manual Log"|
m$Harvesting.System=="Helicopter Manual WT",twitchVol2(m),NaN))}
twitchVolM<-function(m){
twitchVol(m)*0.0283168
}
twitchWeight<-function(m){
twitchDF<-data.frame(m[,"Large.log.trees.average.density.lbs.ft3."],
m[,"Small.log.trees.average.density.lbs.ft3."])
twitchDF[twitchDF==0]<-NA
avgDensity<-rowMeans(twitchDF, na.rm=TRUE)
avgDensity*twitchVol(m)
}
totalWeight<-function(m){
twitchDF<-data.frame(m[,"Large.log.trees.average.density.lbs.ft3."],
m[,"Small.log.trees.average.density.lbs.ft3."])
twitchDF[twitchDF==0]<-NA
avgDensity<-rowMeans(twitchDF, na.rm=TRUE)
avgDensity*totalVol(m)
}
cordsPerAcre<-function(m){
((m$"Large.log.trees.per.acre"*m$"Large.log.trees.average.vol.ft3.")+(m$"Small.log.trees.per.acre"*m$"Small.log.trees.average.volume.ft3."))/128
}
totalVol<-function(m){
(m$"Large.log.trees.per.acre"*m$"Large.log.trees.average.vol.ft3.")+(m$"Small.log.trees.per.acre"*m$"Small.log.trees.average.volume.ft3.")
}
totalVolM<-function(m){
totalVol(m)*0.0283168
}
## Equations
behjouSaw<-function(m){
-2.80+(0.051*(25.4*dbh(m)))+(0.039*(distBetweenTrees(m)/3.28084))
} ## Min/Tree
klepacSaw=function(m){
24.796+0.331419*(dbh(m)^2)
} ## Seconds/Tree
ghafSaw<-function(m){
-1.582+(0.099*dbh.cm(m))
} ##Min/Tree
hartsoughSaw<-function(m){
0.1+0.0111*(dbh(m)^1.496)
} ##Min/Tree
kluenderSaw<-function(m){
(0.016*((dbh(m)*2.54)^1.33))*
(distBetweenTrees(m)^0.083)*(.5^0.196)
} ## Min/Tree
spinelliSaw<-function(m){
30.04 + 0.2*((dbh(m))^2)+ 8.3
}
akaySaw<-function(m){
56.62*(log(twitchVolM(m)))+322.09
}
hansillChip<-function(m){
2.32+(-0.42*1.79)+(1.83*dbh.cm(m))
} ## Sec/Tree
hansillChip2<-function(m){
2.4+(-0.32*1.4)+(1.3*dbh.cm(m))
}
boldingChip<-function(m){
0.001*(totalVol(m)*25)
}
adebayoHar<-function(m){
30.04 + 0.2*((dbh(m))^2)+ 8.3
} ## centi-Minutes/Acre
boldingHar=function(m){
.1765521+(0.003786057*dbh(m))+(4.936639*m$"Percent.Slope"*(sqrt(treesRemoved(m))))
} ##****
hieslHarCord2=function(m){
exp((-0.826+.309*(dbh(m)))+0.386*sppgrp(m))
} ## Cords/PMH
karhaHar<-function(m){
0.288+(0.1004*(m$"Percent.Slope"/0.001))+(-0.00008*(twitchVolM(m)/0.001)^2)
} ##m^3/PMH
karhaHar2<-function(m){
0.181*m$"Percent.Slope"+(0.1315*(twitchVolM(m)*1000))
}
karhaHar3<-function(m){
0.422+(0.1126*(twitchVolM(m)*1000))
}
klepacSkid<-function(m){
abs(
(0.0076197*m$"One.way.Yarding.Distance"-0.6073183873)+
(0.00735*(distBetweenTrees(m)*4)+0.5438016119)+
(0.0121066*m$"One.way.Yarding.Distance"-1.651069636)
)
} ## 3 compounded functions
klepacHar=function(m){
24.796+0.331419*(dbh(m)^2)
} ## Seconds/Tree
drewsHar<-function(m){
21.139+72.775*(totalVol(m))
}
jirousekHar<-function(m){
60.711*(twitchVolM(m)^0.6545)
} ## m^3/PMH
klepacHar<-function(m){
abs(
(0.0076197*m$"One.way.Yarding.Distance"-0.6073183873)+
(0.00735*(distBetweenTrees(m)*4)+0.5438016119)+
(0.0121066*m$"One.way.Yarding.Distance"-1.651069636)
)
}
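## Note: klepacHar() is defined twice; when this script is sourced top to bottom,
## this distance-based form replaces the seconds-per-tree form above, so
## klepacHarTime() uses the distance-based version.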
## Feller Buncher
hartsoughFB<-function(m){
0.324+0.00138*(dbh(m)^2)
} ##minutes per tree
akayFB<-function(m){
56.62*(log(twitchVolM(m)))+322.09
}
stokesFB<-function(m){
2.80*(m$"One.way.Yarding.Distance")^0.574
}
drewsFB<-function(m){
21.139+72.775*(totalVol(m))
}
dykstraFB<-function(m){
2.39219+0.0019426*(m$"Percent.Slope")+
(m$"One.way.Yarding.Distance")+0.030463*(treesRemoved(m))
}
boldingFB=function(m){
.1765521+(0.003786057*dbh(m))+(4.936639*m$"Percent.Slope"*(sqrt(treesRemoved(m))))
}
karhaFB<-function(m){
0.422+(0.1126*(twitchVolM(m)*1000))
}
hieslFB=function(m){
exp((-0.826+.309*(dbh(m)))+0.386*sppgrp(m))
}
behjouFB<-function(m){
-2.80+(0.051*(25.4*dbh(m)))+(0.039*(distBetweenTrees(m)/3.28084))
}
adebayoFB<-function(m){
30.04 + 0.2*((dbh(m))^2)+ 8.3
}
## Skidder
hieslGrapCord=function(m){
exp(1.754*(-0.0005*m$"One.way.Yarding.Distance"))+(0.755*twitchVol(m))
} ##Cords/PMH
akaySkid<-function(m){
-0.1971+(1.1287*5)+((0.0045*twitchVolM(m)*5))+(0.0063*(m$"One.way.Yarding.Distance"*2))
}
ghafSkid<-function(m){
13.027+(0.035*(m$"One.way.Yarding.Distance"*2))+(0.847*(m$"Percent.Slope"))+(0.551*(twitchVolM(m)*5))
}
akaySkid2<-function(m){
-0.1971+(1.1287*5)+((0.0045*twitchVolM(m)*5))+(0.0063*(m$"One.way.Yarding.Distance"*2))
}
akaySkid2<-function(m){
0.012*(m$"One.way.Yarding.Distance"^-0.399)*(dbh(m)^2.041)*(5^0.766)
}
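## Note: akaySkid2() is defined twice; this dbh/distance form replaces the
## turn-volume form directly above when the script is sourced top to bottom.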
kluenderSkid<-function(m){
(0.017*((m$"One.way.Yarding.Distance"^-0.574))*2)*(dbh.cm(m)^2.002)
}
boldingSkid<-function(m){
.1761+(0.00357*dbh(m))+(4.93*m$"Percent.Slope"*(sqrt(treesRemoved(m))))
}
fisherSkid=function(m){
2.374+(0.00841141*(m$"One.way.Yarding.Distance"))+(0.72548570*(1.35))
}
##Forwarder
jirousekFor<-function(m){
-7.6881*log((m$"One.way.Yarding.Distance"*0.3048))+64.351
}
boldingFor<-function(m){
.1761+(0.00357*dbh(m))+(4.93*m$"Percent.Slope"*(sqrt(treesRemoved(m))))
}
jirousekFor2<-function(m){
10.5193*(m$"One.way.Yarding.Distance"^(24.9181/m$"One.way.Yarding.Distance"))
}
jirousekFor3<-function(m){
17.0068*(m$"One.way.Yarding.Distance"^(13.2533/m$"Percent.Slope"))
}
kluenderFor<-function(m){
(0.017*((m$"One.way.Yarding.Distance"^-0.574))*2)*(dbh.cm(m)^2.002)
}
fisherFor=function(m){
2.374+(0.00841141*(m$"One.way.Yarding.Distance"))+(0.72548570*(1.35))
}
dykstraFor<-function(m){
2.39219+0.0019426*(m$"Percent.Slope")+
(m$"One.way.Yarding.Distance")+0.030463*(treesRemoved(m))
}
iffFor<-function(m){
1.054+.00234*(m$One.way.Yarding.Distance)+0.01180*(97)+0.980*(
treesRemoved(m))+.00069*(totalWeight(m))
}
## Yarder
fisherYarder=function(m){
2.374+(0.00841141*(m$"One.way.Yarding.Distance"))+(0.72548570*(1.35))
} ## Turn Time Minutes
curtisYarding<-function(m){
23.755 + (2.7716*(1.5))-(0.63694*(m$"One.way.Yarding.Distance"))
} ## Logs/Hour
curtisYarding2<-function(m){
11.138+(7.1774*(1.5))-(0.59976*(m$"One.way.Yarding.Distance"))
} ## Logs/Hour
dykstraYarding<-function(m){
2.39219+0.0019426*(m$"Percent.Slope")+
(m$"One.way.Yarding.Distance")+0.030463*(treesRemoved(m))
}
aulerichYard3<-function(m){
1.210+0.009*(m$One.way.Yarding.Distance)+0.015*(
m$One.way.Yarding.Distance)+0.253*(treesRemoved(m))
}
iffYard<-function(m){
1.054+.00234*(m$One.way.Yarding.Distance)+0.01180*(97)+0.980*(
treesRemoved(m))+.00069*(totalWeight(m))
}
aulerichYard2<-function(m){
1.925+0.002*(m$One.way.Yarding.Distance)+0.017*(
m$One.way.Yarding.Distance)+0.909*(treesRemoved(m))
}
aulerichYard<-function(m){
0.826+0.006*(m$One.way.Yarding.Distance)+0.032*(
m$One.way.Yarding.Distance)+0.897*(treesRemoved(m))
}
## Slide Boom Processor
hartsoughSlide<-function(m){
0.141+0.0298*dbh(m)
}
ghafLoad<-function(m){
23.297/twitchVolM(m)
}
suchomelSlide<-function(m){
twitchVolM(m)/19.8
}
## Helicopter Yarding
flattenHeli<-function(m){
40.737274+(0.0168951*m$"One.way.Yarding.Distance")+((totalWeight(m)/12)*2.052894)+(22.658839)
}
dykstraHeli<-function(m){
1.3721152+(0.0126924*m$"Percent.Slope")+(0.00246741*m$"One.way.Yarding.Distance")+
(0.031200*(3))+(0.000060987*(totalVol(m)/12))-(0.000050837*(totalVol(m)/36))
}
curtisHeli<- function(m){
23.755 + 2.7716*treesRemoved(m)-0.63694*(m$"One.way.Yarding.Distance")
}
akayHeli<-function(m){
1.3721152+(0.0126924*m$"Percent.Slope")+(0.00246741*m$"One.way.Yarding.Distance")+
(0.031200*(3))+(0.000060987*(totalVol(m)/12))-(0.000050837*(totalVol(m)/36))
}
############## Time Per Acre Converstions
ghafSkidTime<-function(m){
ifelse(m$"Percent.Slope"<45, totalVolM(m)/(twitchVolM(m)*30)*ghafSkid(m)/60, NA)
}
mechBC<-function(m){
ifelse(is.na(m$BrushCutTPA), 0, ifelse(m$BrushCutTPA>0, ifelse(m$BrushCutAvgVol<4, m$BrushCutTPA/(10*60), m$BrushCutTPA/(5*60)), 0))
} ##Hours/Acre
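## Worked example (illustrative): mechBC() adds mechanized brush-cutting time,
## e.g. 300 brush-cut stems/acre averaging under 4 ft3 gives 300/(10*60) = 0.5 hr/ac;
## stems of 4 ft3 or more are cut at half that rate (5 trees/min).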
adebayoHarTime<-function(m){
adebayoHar(m)*treesRemoved(m)/60/60
} ## Hours/Acre
hansillChip2Time<-function(m){
ifelse(dbh.cm(m)<76, ((chipTrees(m)*hansillChip2(m))/60), NA)
} ##Hours/Acre
curtisYardingTime<-function(m){
ifelse(m$"One.way.Yarding.Distance"<10,treesRemoved(m)/curtisYarding(m),NA)
} ## Hours/Acre
manBC2<-function(m){
ifelse(is.na(m$BrushCutTPA), 0, ifelse(m$BrushCutTPA>0 ,ifelse(m$BrushCutAvgVol<4, m$BrushCutTPA/(60), m$BrushCutTPA/(2*60)),0))
}
curtisYarding2Time<-function(m){
ifelse(m$"One.way.Yarding.Distance"<10,treesRemoved(m)/curtisYarding2(m),NA)
} ## Hours/Acre
fisherYarderTime<-function(m){
((((m$"Small.log.trees.per.acre"+m$"Large.log.trees.per.acre")/1.35)*fisherYarder(m))/120)
} ## Hours/Acre
kluenderSkidTime<-function(m){
ifelse(m$"Percent.Slope"<45, kluenderSkid(m), NA)
}
behjouSawTime<-function(m){
ifelse(dbh.cm(m)>40.00, ((treesRemoved(m)*behjouSaw(m))/60), NA)
}
dykstraHeliTime<-function(m){
ifelse((twitchWeight(m)<2900),
(((((totalVol(m)/3)*(m$"Large.log.trees.per.acre"+m$"Small.log.trees.per.acre"))/190)*dykstraHeli(m))/60),
NA)
} ##Hours/Acre
flattenHeliTime<-function(m){
ifelse((twitchWeight(m)<2900), flattenHeli(m)/3600, NA)
} ## Hours/Acre
ghafSawTime<-function(m){
ifelse(dbh.cm(m)>25, ((ghafSaw(m)*treesRemoved(m))/180), NA)
}
hansillChipTime<-function(m){
ifelse(dbh.cm(m)<76, ((chipTrees(m)*hansillChip(m))/60/60), NA)
} ## Hours/ Acre
hartsoughSawTime<-function(m){
(hartsoughSaw(m)*treesRemoved(m))/60
}
jirousekFor3Time<-function(m){
ifelse(m$"Percent.Slope"<45, totalVolM(m)/jirousekFor3(m), NA)
}
boldingChipTime<-function(m){
ifelse(dbh.cm(m)<50, ((boldingChip(m))/60/60), NA)
}
akaySkidTime<-function(m){
ifelse(m$"One.way.Yarding.Distance"<3500, akaySkid(m)*.4, NA)
}
jirousekFor2Time<-function(m){
ifelse(m$"Percent.Slope"<45, totalVolM(m)/jirousekFor2(m), NA)
}
hansillChip2Time<-function(m){
ifelse(dbh.cm(m)<76, ((chipTrees(m)*hansillChip2(m))/60), NA)
}
kluenderSawTime<-function(m){
(kluenderSaw(m)*treesRemoved(m))/60
}
curtisHeliTime<-function(m){
ifelse(dbh.cm(m)>20, curtisHeli(m)/60, NA)
}
hieslGrapTime<-function(m){
cordsPerAcre(m)/hieslGrapCord(m)
} ##Hours/Acre
hieslHarTime<-function(m){
cordsPerAcre(m)/hieslHarCord2(m)
} ##Hours/Acre
boldingForTime<-function(m){
ifelse(m$"Percent.Slope"<45, totalVolM(m)/boldingFor(m), NA)
}
karhaHar3Time<-function(m){
ifelse(twitchVolM(m)<40, (totalVolM(m))/karhaHar3(m), NA)
}
ghafLoadTime<-function(m){
ifelse(twitchVolM(m)<3, ghafLoad(m)/60, NA)
}
klepacSkidTime<-function(m){
ifelse(m$"Percent.Slope"<.01, treesRemoved(m)/(klepacSkid(m)*15), NA)
}
jirousekHarTime<-function(m){
ifelse(twitchVolM(m)<1.4, (totalVolM(m)/jirousekHar(m)), NA)
} ##Hours/Acre
jirousekForTime<-function(m){
totalVolM(m)/jirousekFor(m)
} ##Hours/Acre
klepacHarTime<-function(m){
((klepacHar(m)*treesRemoved(m))/60)/60
} ## Hours/Acre
stokesFBTime<-function(m){
(ifelse(m$"Percent.Slope"<45, stokesFB(m)/60, NA))
} ##Hours/Acre
akayFBTime<-function(m){
ifelse(twitchVolM(m)<.2, (totalVolM(m))/akayFB(m), NA)
}
karhaHarTime<-function(m){
ifelse(twitchVolM(m)<.0, (totalVol(m)*0.0283168)/karhaHar(m), NA)
} ## Hours/Acre
hartsoughFBTime<-function(m){
(hartsoughFB(m)*treesRemoved(m))/60
} ##Hours/Acre
hartsoughSlideTime<-function(m){
ifelse(m$Harvesting.System %in% c("Cable Manual WT","Cable Manual WT/Log","Cable Manual Log","Ground-Based Man WT","Cable CTL"), hartsoughSlide(m)*30, NA)
} ##Hours/Acre
|
## These functions cache the inverse of a matrix so that repeated requests
## for the inverse can be served from the cache instead of being recomputed.
## This function creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinver <- function(inver) m <<- inver
getinver <- function() m
list(set = set, get = get,
setinver = setinver,
getinver = getinver)
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then cacheSolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinver()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinver(m)
m
}
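## Example usage (illustrative):
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
cacheSolve(cm)   # computes the 2x2 inverse and caches it
cacheSolve(cm)   # second call prints "getting cached data" and reuses the cache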
|
/cachematrix.R
|
no_license
|
zysuper/ProgrammingAssignment2
|
R
| false | false | 1,092 |
r
|
library(dplyr)
library(DataExplorer)
library(ggplot2)
library(plotly)
library(data.table)
library(zipcode)
library(tidyverse)
library(stringr)
library(maps)
library(kableExtra)
library(RcppRoll)
library(plotly)
library(GGally)
### Parsing the numeric DRG code from the DRG (Diagnosis Related Group) definition ###
parse_num <- function(df){
df1 <- df %>%
mutate(DRG_Num=substr(x = DRG_Def, start=1, stop=3))
return (df1)
}
### Adding group_class ###
group_class<- function(df)
{
df1 <- df %>%
group_by(DRG_Def) %>%
mutate(n_row_group=n())%>%
ungroup()%>%
mutate(group_class= cut(n_row_group, breaks = 3, labels = c("1_to_945", "946_to_1890", "1891_to_2837")))
return(df1)
}
### Adding Standardized Total Payment ###
add_standard_payment <- function(df){
df1 <- df %>%
group_by(DRG_Num, Pro_State) %>%
mutate(Mu_DRG_State_Total_Payment=mean(Avg_Tot_Payments),
Sd_DRG_State_Total_Payment=sd(Avg_Tot_Payments),
Std_DRG_State_Total_Payment=(Avg_Tot_Payments-Mu_DRG_State_Total_Payment)/Sd_DRG_State_Total_Payment) %>%
ungroup()
return(df1)
}
### Adding Standardized Difference ###
add_standard<-function(df){
df1 <- df %>%
mutate(Diff_MedCov_MedPay=Avg_Cov_Charges-Avg_Med_Payments) %>%
group_by(DRG_Num) %>%
mutate(Mu_Diff=mean(Diff_MedCov_MedPay), Sd_Diff=sd(Diff_MedCov_MedPay)) %>%
mutate(Standard_Diff=(Diff_MedCov_MedPay-Mu_Diff)/Sd_Diff)%>%
ungroup()
return(df1)
}
### Adding Standardized Discharges ###
add_standard2 <- function(df){
df1 <- df %>%
group_by(DRG_Num, Pro_State)%>%
mutate(Mean_Discharge=mean(Tot_Discharges),
Stdv_Discharges=sd(Tot_Discharges),
Standardized_Discharges= (Tot_Discharges-Mean_Discharge)/Stdv_Discharges )%>%
ungroup()
return(df1)
}
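## Illustrative end-to-end check with toy data (column names assumed to match
## the CMS inpatient charge file used elsewhere in this project):
toy <- data.frame(
DRG_Def = rep("039 - EXTRACRANIAL PROCEDURES W/O CC/MCC", 4),
Pro_State = c("NY", "NY", "CA", "CA"),
Avg_Tot_Payments = c(5000, 5500, 6000, 6400),
Avg_Cov_Charges = c(20000, 21000, 25000, 26000),
Avg_Med_Payments = c(4000, 4300, 4800, 5100),
Tot_Discharges = c(20, 35, 15, 40),
stringsAsFactors = FALSE)
toy %>%
parse_num() %>%
group_class() %>%
add_standard_payment() %>%
add_standard() %>%
add_standard2()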
|
/week5/health-functions.R
|
no_license
|
yerin-flora/AnomalyDetection
|
R
| false | false | 1,793 |
r
|
############### sysVarInPlots
#' Produces plots for interpreting the results from sysVarIn.
#'
#' @param fullData A dataframe created by the "makeFullData" function.
#' @param sysVar_name The name of the variable in the dataframe that contains the system variable.
#' @param sysVarType Whether the system variable is "dyadic", which means both partners have the same score, or "indiv" which means the partners can have different scores
#' @param n_profiles The number of latent profiles.
#' @param testModel The name of the model that is being interpreted (e.g., sysIn$models$sysVarInteract). Only needed when the system variable is "indiv" (e.g., individual scores for each partner)
#' @param dist0name An optional name for the level-0 of the distinguishing variable (e.g., "Women"). Default is dist0.
#' @param dist1name An optional name for the level-1 of the distinguishing variable (e.g., "Men"). Default is dist1
#' @param printPlots If true (the default) plots are displayed on the screen.
#' @examples
#' data <- rties_ExampleDataShort
#' newData <- dataPrep(basedata=data, dyadId="couple", personId="person",
#' obs_name="dial", dist_name="female", time_name="time", time_lag=2)
#' ic <- indivInertCoord(prepData=newData, whichModel="inertCoord")
#' profiles <- inspectProfiles(whichModel="inertCoord", prepData=newData,
#' paramEst=ic$params, n_profiles=2)
#' fullData <- makeFullData(basedata=data, dyadId="couple", personId="person",
#' dist_name="female", lpaData=profiles, params=ic$params)
#' sysIn <- sysVarIn(fullData=fullData, sysVar_name="conflict", sysVarType="indiv", n_profiles=2)
#' sysVarInPlots(fullData=fullData, sysVar_name="conflict", sysVarType="indiv",
#' n_profiles=2, testModel=sysIn$models$sysVarInteract)
#'
#' @return Single plots or a list of plots (depending on the model that is being interpreted).
#' @import ggplot2
#' @export
sysVarInPlots <- function(fullData, sysVar_name, sysVarType, n_profiles, testModel=NULL, dist0name=NULL, dist1name=NULL, printPlots=T){
basedata <- fullData
basedata <- basedata[stats::complete.cases(basedata), ]
colnames(basedata)[colnames(basedata)== sysVar_name] <- "sysVar"
if(n_profiles > 4) {message("plots are not provided if there are more than 4 profiles") }
if(is.null(dist0name)){dist0name <- "dist0"}
if(is.null(dist1name)){dist1name <- "dist1"}
if(sysVarType == "dyadic"){
pAll <- dyadic(basedata, sysVar_name)
}
if(sysVarType == "indiv"){
vars1 <- c("dyad", "sysVar", "dist0", "profileN")
data1 <- basedata[vars1]
data2 <- stats::reshape(data1, idvar="dyad", timevar = "dist0", direction= "wide")
dyad <- sysVar.1 <- profileN.1 <- sysVar.0 <- profileN.0 <- NULL
data3 <- dplyr::rename(data2, dyad=dyad, sysVar0=sysVar.1, profileN1= profileN.1, sysVar1= sysVar.0, profileN=profileN.0)
basedata <- data3[stats::complete.cases(data3), ]
sysVar0name <- paste(sysVar_name, dist0name, sep="_")
sysVar1name <- paste(sysVar_name, dist1name, sep="_")
if(is.factor(basedata$sysVar0)){
if(n_profiles == 2){
pAll <- indiv2profilesCat(testModel, sysVar0name, sysVar1name)
}
if(n_profiles == 3){
pAll <- indiv3profilesCat(basedata, testModel, sysVar0name, sysVar1name)
}
if(n_profiles == 4){
pAll <- indiv4profilesCat(basedata, testModel, sysVar0name, sysVar1name)
}
}
if(is.numeric(basedata$sysVar0)){
if(n_profiles == 2){
pAll <- indiv2profilesCont(testModel, sysVar0name, sysVar1name)
}
if(n_profiles > 2){
sysVar0L <- mean(basedata$sysVar0, na.rm=T) - stats::sd(basedata$sysVar0, na.rm=T)
sysVar0H <- mean(basedata$sysVar0, na.rm=T) + stats::sd(basedata$sysVar0, na.rm=T)
sysVar1L <- mean(basedata$sysVar1, na.rm=T) - stats::sd(basedata$sysVar1, na.rm=T)
sysVar1H <- mean(basedata$sysVar1, na.rm=T) + stats::sd(basedata$sysVar1, na.rm=T)
dataTemp<- matrix(c(sysVar0L, sysVar0H, sysVar0L, sysVar0H, sysVar1L, sysVar1L, sysVar1H, sysVar1H), nrow=4, ncol=2)
dataTemp2 <- data.frame(dataTemp)
colnames(dataTemp2) <- c("sysVar0", "sysVar1")
prob <- data.frame(stats::predict(testModel, newdata=dataTemp2, type="probs"))
prob$sysVar0 <- c(1,2,1,2)
prob$sysVar1 <- c(1,1,2,2)
prob$sysVar0 <- factor(prob$sysVar0, levels=c(1,2), labels=c("Low", "High"))
prob$sysVar1 <- factor(prob$sysVar1, levels=c(1,2), labels=c("Low", "High"))
}
if(n_profiles == 3){
pAll <- indiv3profilesCont(prob, sysVar0name, sysVar1name)
}
if(n_profiles == 4){
pAll <- indiv4profilesCont(prob, sysVar0name, sysVar1name)
}
}
}
if(printPlots==T){print(pAll)}
return(pAll)
}
########################## The following functions are called by sysVarInPlots
####### dyadic
#' Produces plots for sysVarIn when sysVar is dyadic.
#'
#' @param basedata A dataframe created internally by the "sysVarInPlots" function.
#' @param sysVar_name The name of the variable in the dataframe that contains the system variable.
#'
#' @return A plot with the profiles on the y-axis and the system variable on the x-axis
dyadic <- function(basedata, sysVar_name){
basedata <- basedata[!duplicated(basedata$dyad), ]
sysVar <- ..prop.. <- profile <- profileN <- NULL
if(is.factor(basedata$sysVar)){
pAll <- ggplot(basedata) +
geom_bar(aes(x = sysVar, y = ..prop.., group = profile)) +
facet_wrap(~ profile) +
xlab(sysVar_name) +
ylab("Proportion in Each Profile")
} else {
pAll <- ggplot(basedata, aes(x=sysVar, y=profileN)) +
geom_point() +
xlab(sysVar_name) +
ylab("Profile")
}
return(pAll)
}
####### indiv2profilesCat
#' Produces plots for sysVarIn when sysVar is categorical and there are 2 profiles
#'
#' @param testModel The model object created by sysVarIn for the interaction model (e.g., sysVarInteract)
#' @param sysVar0name The name created by sysVarInPlots referring to the system variable for partner-0.
#' @param sysVar1name The name created by sysVarInPlots referring to the system variable for partner-1.
#'
#' @return A plot produced by the interactions package.
indiv2profilesCat <- function(testModel, sysVar0name, sysVar1name){
sysVar0 <- sysVar1 <- NULL
pAll <- interactions::cat_plot(testModel, pred=sysVar0, modx=sysVar1, y.label="Prob Profile = 2", x.label=sysVar0name, legend.main=sysVar1name, colors="Greys", interval=T)
return(pAll)
}
####### indiv2profilesCont
#' Produces plots for sysVarIn when sysVar is continuous and there are 2 profiles
#'
#' @param testModel The model object created by sysVarIn for the interaction model (e.g., sysVarInteract)
#' @param sysVar0name The name created by sysVarInPlots referring to the system variable for partner-0.
#' @param sysVar1name The name created by sysVarInPlots referring to the system variable for partner-1.
#'
#' @return A plot produced by the interactions package.
indiv2profilesCont <- function(testModel,sysVar0name, sysVar1name) {
sysVar0 <- sysVar1 <- NULL
pAll <- interactions::interact_plot(testModel, pred=sysVar0, modx=sysVar1, y.label="Prob Profile = 2", x.label=sysVar0name, legend.main=sysVar1name, colors="Greys", interval=T)
return(pAll)
}
####### indiv3profilesCat
#' Produces plots for sysVarIn when sysVar is categorical and there are 3 profiles
#' @param basedata A dataframe created internally by the "sysVarInPlots" function.
#' @param testModel The model object created by sysVarIn for the interaction model (e.g., sysVarInteract)
#' @param sysVar0name The name created by sysVarInPlots referring to the system variable for partner-0.
#' @param sysVar1name The name created by sysVarInPlots referring to the system variable for partner-1.
#'
#' @return A list of 3 plots showing the simple slopes for each of the profiles.
indiv3profilesCat <- function(basedata, testModel, sysVar0name, sysVar1name){
sysVar0 <- levels(basedata$sysVar0)
sysVar1 <- levels(basedata$sysVar1)
temp <- expand.grid(sysVar0, sysVar1)
colnames(temp) <- c("sysVar0", "sysVar1")
prob <- stats::predict(testModel, newdata=temp, "probs")
colnames(prob) <- c("P1","P2","P3")
temp2 <- cbind(prob, temp)
vars1 <- c("P1", "sysVar0", "sysVar1")
prob1 <- temp2[vars1]
colnames(prob1) <- c("P1", sysVar0name, sysVar1name)
vars2 <- c("P2", "sysVar0", "sysVar1")
prob2 <- temp2[vars2]
colnames(prob2) <- c("P2", sysVar0name, sysVar1name)
vars3 <- c("P3", "sysVar0", "sysVar1")
prob3 <- temp2[vars3]
colnames(prob3) <- c("P3", sysVar0name, sysVar1name)
pAll <- list()
pAll[[1]] <- ggplot(data=prob1, aes_string(x=sysVar0name, y="P1", fill=sysVar1name)) +
geom_bar(stat="identity", position=position_dodge()) +
scale_fill_grey() +
labs(title="Profile-1", y="Probabilty", x=sysVar0name) +
scale_linetype_discrete(name=sysVar1name)
pAll[[2]]<- ggplot(data=prob2, aes_string(x=sysVar0name, y="P2", fill=sysVar1name)) +
geom_bar(stat="identity", position=position_dodge()) +
scale_fill_grey() +
labs(title="Profile-2", y="Probabilty", x=sysVar0name) +
scale_linetype_discrete(name=sysVar1name)
pAll[[3]] <- ggplot(data=prob3, aes_string(x=sysVar0name, y="P3", fill=sysVar1name)) +
geom_bar(stat="identity", position=position_dodge()) +
scale_fill_grey() +
labs(title="Profile-3", y="Probabilty", x=sysVar0name) +
scale_linetype_discrete(name=sysVar1name)
return(pAll)
}
####### indiv4profilesCat
#' Produces plots for sysVarIn when sysVar is categorical and there are 4 profiles
#' @param basedata A dataframe created internally by the "sysVarInPlots" function.
#' @param testModel The model object created by sysVarIn for the interaction model (e.g., sysVarInteract)
#' @param sysVar0name The name created by sysVarInPlots referring to the system variable for partner-0.
#' @param sysVar1name The name created by sysVarInPlots referring to the system variable for partner-1.
#'
#' @return A list of 4 plots showing the simple slopes for each of the profiles.
indiv4profilesCat <- function(basedata, testModel, sysVar0name, sysVar1name){
sysVar0 <- levels(basedata$sysVar0)
sysVar1 <- levels(basedata$sysVar1)
temp <- expand.grid(sysVar0, sysVar1)
colnames(temp) <- c("sysVar0", "sysVar1")
prob <- stats::predict(testModel, newdata=temp, "probs")
colnames(prob) <- c("P1","P2","P3","P4")
temp2 <- cbind(prob, temp)
vars1 <- c("P1", "sysVar0", "sysVar1")
prob1 <- temp2[vars1]
colnames(prob1) <- c("P1", sysVar0name, sysVar1name)
vars2 <- c("P2", "sysVar0", "sysVar1")
prob2 <- temp2[vars2]
colnames(prob2) <- c("P2", sysVar0name, sysVar1name)
vars3 <- c("P3", "sysVar0", "sysVar1")
prob3 <- temp2[vars3]
colnames(prob3) <- c("P3", sysVar0name, sysVar1name)
vars4 <- c("P4", "sysVar0", "sysVar1")
prob4 <- temp2[vars4]
colnames(prob4) <- c("P4", sysVar0name, sysVar1name)
pAll <- list()
pAll[[1]] <- ggplot(data=prob1, aes_string(x=sysVar0name, y="P1", fill=sysVar1name)) +
geom_bar(stat="identity", position=position_dodge()) +
scale_fill_grey() +
labs(title="Profile-1", y="Probabilty", x=sysVar0name) +
scale_linetype_discrete(name=sysVar1name)
pAll[[2]] <- ggplot(data=prob2, aes_string(x=sysVar0name, y="P2", fill=sysVar1name)) +
geom_bar(stat="identity", position=position_dodge()) +
scale_fill_grey() +
labs(title="Profile-2", y="Probabilty", x=sysVar0name) +
scale_linetype_discrete(name=sysVar1name)
pAll[[3]] <- ggplot(data=prob3, aes_string(x=sysVar0name, y="P3", fill=sysVar1name)) +
geom_bar(stat="identity", position=position_dodge()) +
scale_fill_grey() +
labs(title="Profile-3", y="Probabilty", x=sysVar0name) +
scale_linetype_discrete(name=sysVar1name)
pAll[[4]] <- ggplot(data=prob4, aes_string(x=sysVar0name, y="P4", fill=sysVar1name)) +
geom_bar(stat="identity", position=position_dodge()) +
scale_fill_grey() +
labs(title="Profile-4", y="Probabilty", x=sysVar0name) +
scale_linetype_discrete(name=sysVar1name)
return(pAll)
}
####### indiv3profilesCont
#' Produces plots for sysVarIn when sysVar is continuous and there are 3 profiles
#' @param prob A dataframe created internally by the "sysVarInPlots" function.
#' @param sysVar0name The name created by sysVarInPlots referring to the system variable for partner-0.
#' @param sysVar1name The name created by sysVarInPlots referring to the system variable for partner-1.
#'
#' @return A list of 3 plots showing the simple slopes for each of the profiles.
#'
indiv3profilesCont <- function(prob, sysVar0name, sysVar1name){
vars1 <- c("X0", "sysVar0", "sysVar1")
prob1 <- prob[vars1]
vars2 <- c("X1", "sysVar0", "sysVar1")
prob2 <- prob[vars2]
vars3 <- c("X2", "sysVar0", "sysVar1")
prob3 <- prob[vars3]
pAll <- list()
pAll[[1]] <- ggplot(prob1, aes_string(x = "sysVar0", y="X0", group="sysVar1")) +
geom_line(aes_string(linetype="sysVar1")) +
ylim(0,1) +
labs(title="Profile-1",y="Probabilty", x=sysVar0name) +
scale_linetype_discrete(name=sysVar1name)
pAll[[2]] <- ggplot(prob2, aes_string(x = "sysVar0", y="X1", group="sysVar1")) +
geom_line(aes_string(linetype="sysVar1")) +
ylim(0,1) +
labs(title="Profile-2",y="Probabilty", x=sysVar0name) +
scale_linetype_discrete(name=sysVar1name)
pAll[[3]] <- ggplot(prob3, aes_string(x = "sysVar0", y="X2", group="sysVar1")) +
geom_line(aes_string(linetype="sysVar1")) +
ylim(0,1) +
labs(title="Profile-3",y="Probabilty", x=sysVar0name) +
scale_linetype_discrete(name=sysVar1name)
return(pAll)
}
####### indiv4profilesCont
#' Produces plots for sysVarIn when sysVar is continuous and there are 4 profiles
#' @param prob A dataframe created internally by the "sysVarInPlots" function.
#' @param sysVar0name The name created by sysVarInPlots referring to the system variable for partner-0.
#' @param sysVar1name The name created by sysVarInPlots referring to the system variable for partner-1.
#'
#' @return A list of 4 plots showing the simple slopes for each of the profiles.
#'
indiv4profilesCont <- function(prob, sysVar0name, sysVar1name){
vars1 <- c("X0", "sysVar0", "sysVar1")
prob1 <- prob[vars1]
vars2 <- c("X1", "sysVar0", "sysVar1")
prob2 <- prob[vars2]
vars3 <- c("X2", "sysVar0", "sysVar1")
prob3 <- prob[vars3]
vars4 <- c("X3", "sysVar0", "sysVar1")
prob4 <- prob[vars4]
pAll <- list()
pAll[[1]] <- ggplot(prob1, aes_string(x = "sysVar0", y="X0", group="sysVar1")) +
geom_line(aes_string(linetype="sysVar1")) +
ylim(0,1) +
labs(title="Profile-1",y="Probabilty", x=sysVar0name) +
scale_linetype_discrete(name=sysVar1name)
pAll[[2]] <- ggplot(prob2, aes_string(x = "sysVar0", y="X1", group="sysVar1")) +
geom_line(aes_string(linetype="sysVar1")) +
ylim(0,1) +
labs(title="Profile-2",y="Probabilty", x=sysVar0name) +
scale_linetype_discrete(name=sysVar1name)
pAll[[3]] <- ggplot(prob3, aes_string(x = "sysVar0", y="X2", group="sysVar1")) +
geom_line(aes_string(linetype="sysVar1")) +
ylim(0,1) +
labs(title="Profile-3",y="Probabilty", x=sysVar0name) +
scale_linetype_discrete(name=sysVar1name)
pAll[[4]] <- ggplot(prob4, aes_string(x = "sysVar0", y="X3", group="sysVar1")) +
geom_line(aes_string(linetype="sysVar1")) +
ylim(0,1) +
labs(title="Profile-4",y="Probabilty", x=sysVar0name) +
scale_linetype_discrete(name=sysVar1name)
return(pAll)
}
#######################################
############### sysVarOutPlots
#' Produces plots for interpreting the results from sysVarOut.
#'
#' @param fullData A dataframe created by the "makeFullData" function.
#' @param sysVar_name The name of the variable in the dataframe that contains the system variable.
#' @param sysVarType Whether the system variable is "dyadic", which means both partners have the same score, or "indiv" which means the partners can have different scores
#' @param testModel The name of the model that is being interpreted (e.g., sysIn$models$sysVarInteract).
#' @param dist0name An optional name for the level-0 of the distinguishing variable (e.g., "Women"). Default is dist0.
#' @param dist1name An optional name for the level-1 of the distinguishing variable (e.g., "Men"). Default is dist1
#' @param binomial Whether the system variable is binomial. Default is false.
#' @examples
#' # See vignettes for examples.
#'
#' @return Single plots or a list of plots (depending on the model that is being interpreted).
#' @import ggplot2
#' @export
sysVarOutPlots <- function(fullData, sysVar_name, sysVarType, testModel, dist0name=NULL, dist1name=NULL, binomial=F){
basedata <- fullData
basedata <- basedata[stats::complete.cases(basedata), ]
if(is.null(dist0name)){dist0name <- "dist0"}
if(is.null(dist1name)){dist1name <- "dist1"}
colnames(basedata)[colnames(basedata)== sysVar_name] <- "sysVar"
basedata$dist <- factor(basedata$dist0, labels=c(dist1name, dist0name))
profile <- sysVar <- ..prop.. <- NULL
if(binomial==F){
if(sysVarType == "dyadic")
{
resid <- data.frame(resid(testModel))
colnames(resid) <- "resid"
pAll <- list()
pAll[[1]] <- ggplot(resid, aes(x=resid)) +
geom_histogram(color="black", fill="grey")
pAll[[2]] <- ggplot(basedata, aes(x=profile, y=sysVar)) +
geom_boxplot() +
ylab(sysVar_name)
}
if(sysVarType == "indiv")
{
resid <- data.frame(resid(testModel))
colnames(resid) <- "resid"
pAll <- list()
pAll[[1]] <- ggplot(resid, aes(x=resid)) +
geom_histogram(color="black", fill="grey")
temp <- sjPlot::plot_model(testModel, type="pred", terms=c("profile", "dist"), colors="gs", y.label=sysVar_name)
pAll[[2]] <- temp + ylab(sysVar_name)
}
}
if(binomial==T){
if(sysVarType == "dyadic")
{
label <- paste("Proportions", sysVar_name, "= 0 or 1 in each profile", sep=" ")
pAll <- ggplot(basedata) +
geom_bar(aes(x = profile, y = ..prop.., group = sysVar)) +
facet_wrap(~ sysVar) +
ylab(label)
}
if(sysVarType == "indiv")
{
temp <- sjPlot::plot_model(testModel, type="pred", terms=c("profile", "dist"), colors="gs", y.label=sysVar_name)
pAll <- temp + ylab(sysVar_name)
}
}
return(pAll)
}
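## Hedged usage sketch for sysVarOutPlots (commented out). The argument values follow the
## @param descriptions above; the objects fullData and sysIn$models$sysVarInteract are assumed
## to come from earlier rties steps and are not created here.
# sysVarOutPlots(fullData = fullData, sysVar_name = "conflict", sysVarType = "indiv",
#                testModel = sysIn$models$sysVarInteract, dist0name = "Women",
#                dist1name = "Men", binomial = FALSE)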
|
/R/sysVarPlots.R
|
no_license
|
ebmtnprof/rties
|
R
| false | false | 18,480 |
r
|
library(TTR)  # provides SMA()

# Relies on objects created elsewhere in this project (final_return, init_budget,
# test_split_index, n_instances); the argument x is unused.
getMovingAverageResult <- function(x){
  recorded_close_test <- final_return[[6]]
  predicted_close_test <- final_return[[8]]
  ma_close <- SMA(append(final_return[[4]], final_return[[8]]), n=7)[test_split_index:n_instances]
budget <- init_budget
stocks_held <- 0
for(i in 1:length(predicted_close_test)){
currentAssetPrice <- recorded_close_test[i]
if(predicted_close_test[i] < ma_close[i]){
# buy
assetsToBuy <- as.integer(budget / currentAssetPrice)
stocks_held <- stocks_held + assetsToBuy
budget <- budget - (currentAssetPrice * assetsToBuy)
}
else{
# sell
budget <- budget + (currentAssetPrice * stocks_held)
stocks_held <- 0
}
}
dump <- stocks_held * recorded_close_test[length(recorded_close_test)]
final_budget <- budget + dump
final_profit <- ((final_budget - init_budget) / init_budget) * 100
return(final_profit)
}
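## Hedged usage sketch (commented out). The objects below are placeholder stand-ins for the
## globals the rest of this project builds; only their shape matters for the example.
# final_return <- list(NULL, NULL, NULL, train_close, NULL, test_close, NULL, predicted_close)
# init_budget <- 10000
# test_split_index <- length(train_close) + 1
# n_instances <- length(train_close) + length(test_close)
# getMovingAverageResult(NA)  # percent profit of trading predicted close against its 7-day SMA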
|
/ma.R
|
no_license
|
liamdx/GA-NN-AlgorithmicTrading
|
R
| false | false | 922 |
r
|
# Load and do basic data munging on fraternity surveys.
library(tidyverse)
library(here)
# Clean user table -----
user <- read_csv(here("data/PRIVATEDATA/user.csv")) %>%
select(
id, fbid = fb_id, zip, from = location_from, at = lives_in, age, gender, race,
collected = collected_friends_size, fraternity_id
) %>%
# useable records must have friends collected
mutate_all(funs(ifelse( . == "NULL", NA, .))) %>%
filter(id > 147) %>%
# variable cleanup
mutate(
race = ifelse(race != "White" | is.na(race), "Other", "White"),
from = tolower(from),
at = tolower(at),
zip = sprintf("%05d", zip)
) %>%
mutate_at(vars(age, collected), funs(as.numeric(.))) %>%
# attach fraternity information
left_join(read_csv(here("data/fraternities.csv"), col_types = "ccc"))
n_collected <- nrow(user)
# Clean trip data ------
trips <- read_csv(here("data/PRIVATEDATA/trip.csv")) %>%
tbl_df() %>%
select(
id = user_id,
purpose,
origin_airport = origin_airport_code,
destination_airport = destination_airport_code)
#saveRDS(trips, file = "../data/clean/trips.rds")
# calculate how close friends' cities are to Atlanta -----
# distance from Atlanta
get_miles_from_atl <- function (long2, lat2) {
long1 <- -84.38966 * pi / 180; lat1 <- 33.75449 * pi / 180
long2 <- long2 * pi / 180; lat2 <- lat2 * pi / 180
dsigma <- acos(sin(lat1) * sin(lat2) + cos(lat1) * cos(lat2) * cos(abs(long1 - long2)) )
dsigma * 6371 * 0.621371
}
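## Hedged example (commented out): approximate New York City coordinates, used only to
## illustrate the function; the result is roughly 750 miles.
# get_miles_from_atl(-74.006, 40.713)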
# coordinates for the friends' cities
cities <- read_csv(here("data/PRIVATEDATA/geocode/geocoded_datatables/allcities.csv")) %>%
tbl_df() %>%
mutate(
miles_from_atl = get_miles_from_atl(longitude, latitude),
# if missing coordinates, replace with the average
miles_from_atl = ifelse(is.na(miles_from_atl),
mean(miles_from_atl, na.rm = TRUE), miles_from_atl)
)
# Clean friends data -----
friends <- read_csv(here("data/PRIVATEDATA/user_friends_info.csv")) %>%
transmute(
fbid = fb_id,
id = user_id,
from = tolower(location_from),
at = tolower(lives_in)
) %>%
# FIXME: see Issue #1
left_join(cities, by = c("from" = "city"))
|
/rscripts/datacleaner.R
|
no_license
|
gregmacfarlane/facebookfraternities
|
R
| false | false | 2,193 |
r
|
context("Polling progress")
with_mock_HTTP({
test_that("If progress polling gives up, it tells you what to do", {
with(temp.option(crunch.timeout=0.5), {
expect_error(pollProgress("/api/progress/1.json", wait=0.25),
paste('Your process is still running on the server. It is',
'currently 22.5% complete. Check',
'`uncached(crGET("/api/progress/1.json"))` until it reports',
'100% complete'),
fixed=TRUE)
})
})
counter <- 1
with_mock(
## GET something slightly different each time through so we can
## approximate polling a changing resource
`httr::GET`=function (url, ...) {
if (is.null(url)) {
stop("No URL found", call.=FALSE)
}
url <- paste0(url, counter, ".json") ## Add counter
counter <<- counter + 1 ## Increment
url <- sub("^\\/", "", url) ## relative to cwd
out <- handleShoji(fromJSON(url, simplifyVector=FALSE))
return(list(
status_code=200,
times=structure(nchar(url), .Names="total"),
request=list(method="GET", url=url),
response=out
))
},
test_that("Progress polling goes until 100", {
expect_identical(pollProgress("/api/progress/", wait=.05), 100)
})
)
})
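## Hedged usage note (commented out): outside these mocks, pollProgress() takes a progress URL
## and a wait interval, and the crunch.timeout option bounds how long it keeps polling.
# options(crunch.timeout = 60)
# pollProgress("/api/progress/1.json", wait = 0.5)  # returns the final progress value (100 on success)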
|
/crunch/tests/testthat/test-progress.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 1,430 |
r
|
context("Polling progress")
with_mock_HTTP({
test_that("If progress polling gives up, it tells you what to do", {
with(temp.option(crunch.timeout=0.5), {
expect_error(pollProgress("/api/progress/1.json", wait=0.25),
paste('Your process is still running on the server. It is',
'currently 22.5% complete. Check',
'`uncached(crGET("/api/progress/1.json"))` until it reports',
'100% complete'),
fixed=TRUE)
})
})
counter <- 1
with_mock(
## GET something slightly different each time through so we can
## approximate polling a changing resource
`httr::GET`=function (url, ...) {
if (is.null(url)) {
stop("No URL found", call.=FALSE)
}
url <- paste0(url, counter, ".json") ## Add counter
counter <<- counter + 1 ## Increment
url <- sub("^\\/", "", url) ## relative to cwd
out <- handleShoji(fromJSON(url, simplifyVector=FALSE))
return(list(
status_code=200,
times=structure(nchar(url), .Names="total"),
request=list(method="GET", url=url),
response=out
))
},
test_that("Progress polling goes until 100", {
expect_identical(pollProgress("/api/progress/", wait=.05), 100)
})
)
})
|
require(matlab)
require(PEIP)
require(Matrix)
require(pracma)
fast_BSF_G_sampler = function(burn, sp, thin, b0, b1, h2_divisions, epsilon, priors, draw_iter, Y, Z_1, Z_2, X){
#% -- Daniel Runcie -- %
#% Gibbs sampler for genetic covariance estimation based on mixed effects
#% model, with missing data
#% Based on:
#% Runcie and Mukherjee (2013) Dissecting high-dimensional traits
#% with Bayesian sparse factor analysis of genetic covariance matrices.
#% GENETICS.
#% (c) April 22, 2013
#% code modified from original provided by Anirban Bhattacharya
#% This function implements the BSF-G partially collapsed Gibbs sampler. It loads all input data
#% and matrices from setup.mat in the current directory. Priors and control
#% parameters are passed to the function.
#% setup.mat is a struct with at least:
#% Y: data matrix
#% X: fixed effect design matrix
#% Z_1: random effect design matrix for factor model
#% Z_2: additional random effect design matrix
#% A: Additive genetic relationship matrix
#% For analysis of Ayroles et al 2009 data, can include:
#% Ayroles_results: struct holding gene names and correlations estimated in that paper
#% For analysis of simulations:
#% U_act: r x p matrix of true breeding values
#% E_act: n x p matrix of true model residuals
#% gen_factor_Lambda: p x k_G matrix of true genetic factor loadings
#% error_factor_Lambda: p x k matrix of true residual factor loadings
#% h2: p x 1 vector of true heritabilities
#% factor_h2s: k x 1 vector of true latent factor heritabilities
#% G, R: p x p matrix of true genetic and residual covariances
#% The function takes the following inputs:
#% burn: number of burnin samples
#% sp: total number of samples to collect
#% thin: thinning rate of chain
#% b0,b1: parameters controlling rate of adaptation of factor model size
#% h2_divisions: number of discrete steps for each factor heritability parameter
#% epsilon: truncation point for factor loadings during adaptation
#% draw_iter: frequency of updating diagnostic plots
#% priors: struct holding various prior hyperparameters:
#% k_init: initial number of factors to initialize
#% as, bs: inverse gamma hyperparameters for model residuals, as well as non-factor random effects
#% df: degrees of freedom for t-distribution of ARD prior on factor loadings
#% ad1, bd1: inverse gamma hyperparamters for first factor shrinkage multiplier (/delta_1)
#% ad2, bd2: inverse gamma hyperparamters for remaining factor shrinkage multiplier (/delta_i, i \in 2...k)
#% The function output is the struct, Posterior, with the following fields:
# % Lambda, U, no_f, ps, resid_ps, delta, G_h2: matrices with each column a
#% (vectorized if necessary) posterior sample of the:
# % Lambda: factor loading matrix
#% U: genetic random effect matrix
#% no_f: number of significant factors
#% ps: genetic residual precision after accounting for latent factors
#% resid_ps: phenotypic residual precision
#% delta: column shrinkage parameters for Lambda
#% G_h2: factor heritabilties
#% B, d, W: matrices of posterior means of fixed effect (B),
#% residual genetic (d), or 2nd random effect (W) coefficients. Some may be empty
#%
#% Several diagnostic plots are produced during the run.
#% Their interpretation is described within the source codes:
#% draw_simulation_diagnostics.m: For simulated data with known true values
#% draw_results_diagnostics.m: Otherwise
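  ## Hedged call sketch (commented out). The hyperparameter values below are illustrative
  ## placeholders, not the settings used in the paper; the priors fields follow the header
  ## comments above, and Y, Z_1, Z_2, X are assumed to already exist in the workspace.
  # priors <- list(k_init = 10, as = 2, bs = 1/10, df = 3, ad1 = 2.1, bd1 = 1,
  #                ad2 = 3.1, bd2 = 1)
  # out <- fast_BSF_G_sampler(burn = 1000, sp = 1000, thin = 10, b0 = 1, b1 = 0.0005,
  #                           h2_divisions = 50, epsilon = 1e-2, priors = priors,
  #                           draw_iter = 200, Y = Y, Z_1 = Z_1, Z_2 = Z_2, X = X)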
rm(Y)
rm(Z_1)
rm(Z_2)
rm(X)
#global Y #%n x p matrix of phenotypic data
#global Z_1 #%n x r incidence matrix for additive genetic effects
#global Z_2 #%n x r2 incidence matrix for another set of random effects
#global X #%n x b design matrix of fixed effects
nrun = burn + sp * thin #% number of posterior samples
k_min = 1e-1 #% minimum factor loading size to report in running status
prop = 1.00 #% proportion of redundant elements within columns necessary to drop column
#% ------read data--------%
load('../setup.mat')
#%Determine if 'setup.mat' contains output of a simulation, based on if
#%known factor loadings are included. Affects plotting functions
  simulation = TRUE
  if(!exists('gen_factor_Lambda')){
    simulation = FALSE
  }
  simulation
#%normalize Y to have zero mean and unit variances among observed values,
#%allowing for NaNs.
  n = nrow(Y)
  p = ncol(Y)
Y_full = Y
Mean_Y = zeros(1, ncol(Y))
VY = zeros(1, ncol(Y))
for(j in 1:p){
Mean_Y[j] = mean(Y[!(is.nan(Y[,j])),j])
VY[j] = var(Y[!(is.nan(Y[,j])),j])
if(is.nan(VY[j])){
VY[j] = 1
}
}
  Y = sweep(Y, 2, as.numeric(Mean_Y), '-')      #%center each trait
  Y = sweep(Y, 2, sqrt(as.numeric(VY)), '/')    #%scale each trait to unit variance
#%determine if a design matrix (X) exists (is loaded from setup.mat). If
#%not, make a dummy X-matrix with no columns.
if(!(exists('X'))){
X = zeros(0,n)
}
if(ncol(X) != n){
X = zeros(0,n)
}
#%Determine if a second random effects design matrix exists. If not, make a
#%dummy matrix
if(!(exists('Z_2'))){
Z_2 = zeros(0,n)
}
if(ncol(Z_2) != n){
Z_2 = zeros(0,n)
}
#%calculate model dimensions
r = nrow(Z_1)
r2 = nrow(Z_2)
b = nrow(X)
#% --- Initialize variables --- %
# %residual parameters. This structure holds the priors hyperparamters for
#%the gamma prior on the model residual variances. It also holds the current
#%estimate of the residual precision
  resid = list()
  resid$as = priors$as
resid$bs = priors$bs
resid$Y = Y
resid$p = p
resid$ps = rgamma(p, shape = resid$as, scale = 1/resid$bs) #%residual precision
#%Factors. This struct holds all information about the latent factors,
#%including the current number of factors, priors hyperparameters for the
#%factor Loadings, as well as current values of the factor loadings, their
#%precision, the factor scores, and the genetic heritability of each factor
k = priors$k_init #% initial number of factors
df = priors$df #% prior degrees of freedom for hierarchical t-prior on loadings
ad1 = priors$ad1 #% priors on delta
bd1 = priors$bd1
ad2 = priors$ad2
bd2 = priors$bd2
  Factors = list()
  Factors$r = n
Factors$n = n
Factors$p = p
Factors$k = k
Factors$df = df
Factors$ad1 = ad1
Factors$bd1 = bd1
Factors$ad2 = ad2
Factors$bd2 = bd2
Factors$psijh = matrix(rgamma(p*k, shape = df/2, scale = 2/df), nrow = p, ncol = k) #%individual loadings precisions
  Factors$delta = matrix(c(rgamma(1, shape = ad1+10, scale = 1/bd1),
                           rgamma(k-1, shape = ad2, scale = 1/bd2)), ncol = 1) #% components of tauh (k x 1)
  Factors$tauh = cumprod(Factors$delta) #%extra shrinkage of each loading column
  Factors$Plam = sweep(Factors$psijh, 2, Factors$tauh, '*') #%total precision of each loading (p x k)
  Factors$Lambda = matrix(rnorm(p*k), nrow = p, ncol = k) * sqrt(1/Factors$Plam) #%factor loadings
Factors$h2 = runif(k) #%factor heritability
Factors$h2_divisions = h2_divisions #%discretizations of heritability
Factors$num = 0
Factors$no_f = zeros(sp,1)
Factors$nofout = k * ones(nrun,1)
#%genetic_effects. This structure holds information about latent genetic
#%effects. U is latent genetic effects on factor traits. d is genetic
#%effects on residuals of the factor traits. Plus prior hyperparameters for
#%genetic effect precisions
  genetic_effects = list()
  genetic_effects$n = nrow(Z_1)
as = priors$as
bs = priors$bs
genetic_effects$as = as
genetic_effects$bs = bs
genetic_effects$ps = rgamma(p, shape = as, scale = 1/bs)
genetic_effects$U = matrix(rnorm(k*r),k,r) * sqrt(Factors$h2)
genetic_effects$d = matrix(rnorm(p*r),p,r) * 1/sqrt(genetic_effects$ps)
#%interaction_effects. Similar to genetic_effects structure except for
#%additional random effects that do not contribute to variation in the
#%factor traits
as = priors$as
bs = priors$bs
  interaction_effects = list()
  interaction_effects$as = as
interaction_effects$bs = bs
interaction_effects$ps = rgamma(p, shape = as, scale = 1/bs)
interaction_effects$mean = zeros(p,r2)
interaction_effects$n = r2
interaction_effects$W = matrix(rnorm(p*r2),p,r2) * 1/sqrt(interaction_effects$ps)
interaction_effects$W_out = zeros(p,r2)
#%fixed_effects hold B
  fixed_effects = list()
  fixed_effects$b = b
  fixed_effects$cov = zeros(b,b) #%inverse covariance of fixed effects
  fixed_effects$mean = zeros(p,b) #%mean of fixed effects
  fixed_effects$B = matrix(rnorm(p*b),p,b) #%current estimate of fixed effects
  Factors$scores = genetic_effects$U %*% Z_1 + matrix(rnorm(k*n),k,n) * sqrt(1-Factors$h2) #%initialize factor scores
  #%Posterior holds Posterior samples and Posterior means
  Posterior = list()
  Posterior$Lambda = zeros(0,sp)
Posterior$no_f = zeros(sp,1)
Posterior$ps = zeros(p,sp)
Posterior$resid_ps = zeros(nrow(resid$ps),sp)
Posterior$B = zeros(p,nrow(X))
Posterior$U = zeros(0,sp)
Posterior$d = zeros(p,nrow(Z_1))
Posterior$W = zeros(p,nrow(Z_2))
Posterior$delta = zeros(0,sp)
Posterior$G_h2 = zeros(0,sp)
#%save run parameters and hyperparameters
  params = list()
  params$p = p
params$n = n
params$r = r
params$Mean_Y = Mean_Y
params$VY = VY
params$b0 = b0
params$b1 = b1
params$epsilon = epsilon
params$prop = prop
params$as = priors$as
params$bs = priors$bs
params$df = priors$df
params$ad1 = priors$ad1
params$bd1 = priors$bd1
params$ad2 = priors$ad2
params$bd2 = priors$bd2
params$burn = burn
params$thin = thin
params$sp = sp
params$nrun = nrun
params$h2_divisions = h2_divisions
if(simulation){
params$U_act = U_act
params$Lambda = error_factor_Lambda
params$h2 = h2
params$G = G
params$R = R
params$B = B
params$factor_h2s = factor_h2s
params$name = name
}
#%precalculate some matrices
#%invert the random effect covariance matrices
Ainv = solve(A)
A_2_inv = diag(1, nrow(Z_2)) #%Z_2 random effects are assumed to have covariance proportional to the identity. Can be modified.
#%pre-calculate transformation parameters to diagonalize aI + bZAZ for fast
#%inversion: inv(aI + bZAZ) = 1/b*u*diag(1./(s+a/b))*u'
#%uses singular value decomposition of ZAZ for stability when ZAZ is low
#%rank
ZAZ = t(Z_1) %*% A %*% Z_1
  svd_ZAZ = svd(ZAZ)
  eig_ZAZ = list()
  eig_ZAZ$vectors = svd_ZAZ$u   #% singular vectors
  eig_ZAZ$values = svd_ZAZ$d    #% singular values (kept as a vector)
#%fixed effects + random effects 1
#%diagonalize mixed model equations for fast inversion:
#%inv(a*blkdiag(fixed_effects.cov,Ainv) + b*[X; Z_1][X; Z_1]') = Q*diag(1./(a.*s1+b.*s2))*Q'
Design = rbind(X, Z_1)
Design2 = Design %*% t(Design)
storeGSVD = GSVD(Cholesky(blkdiag(fixed_effects$cov, Ainv)), Cholesky(Design2))
q = storeGSVD$X
S1 = storeGSVD$C
S2 = storeGSVD$S
  svd_Design_Ainv = list()
  svd_Design_Ainv$Q = t(solve(q))
svd_Design_Ainv$s1 = diag(t(S1) %*% S1)
svd_Design_Ainv$s2 = diag(t(S2) %*% S2)
Qt_Design = t(svd_Design_Ainv$Q) %*% Design
#%random effects 2
#%as above, but for random effects 2. Here, fixed effects will be conditioned on, not sampled simultaneously. Otherwise identical.
Design = Z_2
Design2 = Design %*% t(Design)
storeGSVD = GSVD(Cholesky(A_2_inv), Cholesky(Design2))
q = storeGSVD$X
S1 = storeGSVD$C
S2 = storeGSVD$S
  svd_Z2_2_A2inv = list()
  svd_Z2_2_A2inv$Q = t(solve(q))
svd_Z2_2_A2inv$s1 = diag(t(S1) %*% S1)
svd_Z2_2_A2inv$s2 = diag(t(S2) %*%S2)
Qt_Z2 = t(svd_Z2_2_A2inv$Q) %*% Design
#%genetic effect variances of factor traits
#% diagonalizing a*Z_1*Z_1' + b*Ainv for fast inversion
#%diagonalize mixed model equations for fast inversion:
#% inv(a*Z_1*Z_1' + b*Ainv) = Q*diag(1./(a.*s1+b.*s2))*Q'
#%similar to fixed effects + random effects 1 above, but no fixed effects.
ZZt = Z_1 %*% t(Z_1)
storeGSVD = GSVD(Cholesky(ZZt), Cholesky(Ainv))
q = storeGSVD$X
S1 = storeGSVD$C
S2 = storeGSVD$S
  svd_ZZ_Ainv = list()
  svd_ZZ_Ainv$Q = t(solve(q))
svd_ZZ_Ainv$s1 = diag(t(S1) %*% S1)
svd_ZZ_Ainv$s2 = diag(t(S2) %*% S2)
#%------start gibbs sampling-----%
sp_num = 0
#tic
t1 = proc.time()
for(i in 1:nrun){
#%fill in missing phenotypes
#%conditioning on everything else
phenMissing = is.nan(Y_full) # which indices
if(sum(sum(phenMissing)) > 0){
      meanTraits = fixed_effects$B %*% X + genetic_effects$d %*% Z_1 +
        interaction_effects$W %*% Z_2 + Factors$Lambda %*% Factors$scores
      meanTraits = t(meanTraits)
      resids = sweep(matrix(rnorm(prod(dim(Y_full))), nrow(Y_full), ncol(Y_full)), 2, sqrt(resid$ps), '/')
      Y[phenMissing] = meanTraits[phenMissing] + resids[phenMissing]
}
#%sample Lambda
#%conditioning on W, X, F, marginalizing over D
Ytil = t(Y) - fixed_effects$B %*% X - interaction_effects$W %*% Z_2
source("sample_lambda.R")
Factors = sample_lambda(Ytil, Factors, resid, genetic_effects, eig_ZAZ)
#%sample fixed effects + random effects 1 ([B;D])
#%conditioning on W, F, L
Ytil = t(Y) - interaction_effects$W %*% Z_2 - Factors$Lambda %*% Factors$scores
N = genetic_effects$n + fixed_effects$b
source("sample_means.R")
location_sample = sample_means(Ytil, Qt_Design, N, resid, genetic_effects$ps, svd_Design_Ainv)
    fixed_effects$B = location_sample[, 1:fixed_effects$b]
    genetic_effects$d = location_sample[, (fixed_effects$b+1):(fixed_effects$b+genetic_effects$n)]
#%sample random effects 2
#%conditioning on B, D, F, L
Ytil = t(Y) - fixed_effects$B %*% X - genetic_effects$d %*% Z_1 - Factors$Lambda %*% Factors$scores
N = interaction_effects$n
if(N > 0){
location_sample = sample_means(Ytil, Qt_Z2, N, resid, interaction_effects$ps, svd_Z2_2_A2inv)
interaction_effects$W = location_sample
}
#%sample factor h2
#%conditioning on F, marginalizing over U
source("sample_h2s_discrete.R")
Factors = sample_h2s_discrete(Factors, eig_ZAZ)
#%sample genetic effects (U)
#%conditioning on F, Factor h2
source("sample_Us.R")
genetic_effects = sample_Us(Factors, genetic_effects, svd_ZZ_Ainv, Z_1)
#%sample F
#%conditioning on U, Lambda, B, D, W, factor h2s
Ytil = t(Y) - fixed_effects$B %*% X - genetic_effects$d %*% Z_1 - interaction_effects$W %*% Z_2
source("sample_faxtors_scores.R")
Factors = sample_factors_scores(Ytil, Factors, resid, genetic_effects, Z_1)
#% -- Update ps -- %
    Lambda2 = (Factors$Lambda)^2
    Factors$psijh = matrix(rgamma(prod(dim(Lambda2)), shape = Factors$df/2 + 0.5,
                                  rate = (Factors$df + sweep(Lambda2, 2, Factors$tauh, '*'))/2),
                           nrow(Lambda2), ncol(Lambda2)) #%individual loadings precisions
    #%continue from previous Y residual above
    Ytil = Ytil - Factors$Lambda %*% Factors$scores
    n = nrow(Y)
    resid$ps = rgamma(resid$p, shape = resid$as + 0.5*n, rate = resid$bs + 0.5*rowSums(Ytil^2)) #%model residual precision
    n = genetic_effects$n
    genetic_effects$ps = rgamma(p, shape = genetic_effects$as + 0.5*n,
                                rate = genetic_effects$bs + 0.5*rowSums(genetic_effects$d^2)) #%random effect 1 (D) residual precision
    n = interaction_effects$n
    interaction_effects$ps = rgamma(p, shape = interaction_effects$as + 0.5*n,
                                    rate = interaction_effects$bs + 0.5*rowSums(interaction_effects$W^2)) #%random effect 2 (W) residual precision
#%------Update delta & tauh------%
source("sample_delta.R")
    # Assumes sample_delta() returns a two-element list (delta, tauh).
    delta_tauh = sample_delta(Factors, Lambda2)
    Factors$delta = delta_tauh[[1]]
    Factors$tauh = delta_tauh[[2]]
    #%---update precision parameters----%
    Factors$Plam = sweep(Factors$psijh, 2, Factors$tauh, '*')
    #% ----- adapt number of factors to samples ----%
    source("update_k.R")
    # Assumes update_k() returns a two-element list (Factors, genetic_effects).
    k_update = update_k(Factors, genetic_effects, b0, b1, i, epsilon, prop)
    Factors = k_update[[1]]
    genetic_effects = k_update[[2]]
#% -- save sampled values (after thinning) -- %
if((i %% thin) == 0 && i > burn){
sp_num = (i-burn)/thin
source("save_posterior_samples.R")
Posterior = save_posterior_samples(sp_num, params, Posterior,
resid,fixed_effects, genetic_effects, Factors,
interaction_effects)
      if(mod(sp_num, 100) == 0){
        save(Posterior, params, file = 'Posterior.Rdata')
      }
}
#% -- provide run diagnostics and plots -- %
if((i %% draw_iter) == 0){
directory = getwd()
#strread(pwd, '%s', 'delimiter', '/') # CHECK
print(directory)
print(i)
Factors$nofout[i] - Factors$num
elapsed = unname((proc.time()-t1)[3])
#%output some running statistics on the current factors and their
#%genetic variances
c(Factors$delta, t(c(1:Factors$k)), Factors$h2, t(sum(t(Factors$scores)^2))/(nrow(Y)-1), t(sum(t(genetic_effects$U)^2))/(nrow(Z_1)-1))
print(strcat('Time remaining:', num2str((nrun-i) * (elapsed/i) * 1/60)))
#%make some plots of some running statistics
if(simulation){
source("draw_simulation_diagnostics")
draw_simulation_diagnostics(i,sp_num,params,Factors,genetic_effects,resid,Posterior,gen_factor_Lambda,error_factor_Lambda,G,R,h2)
}
else{
source("draw_results_diagnostics")
draw_results_diagnostics(i, sp_num, params, Factors, Posterior)
}
}
}
#toc
  print((proc.time()-t1)[3])
  save(Posterior, params, file = 'Posterior.Rdata')
  return(list(Posterior = Posterior, params = params))
}
|
/BSF-G_R/fast_BSF_G_sampler.R
|
no_license
|
lrshum17/MukherjeeRuncieRCode
|
R
| false | false | 18,346 |
r
|
require(matlab)
require(PEIP)
require(Matrix)
require(pracma)
fast_BSF_G_sampler = function(burn, sp, thin, b0, b1, h2_divisions, epsilon, priors, draw_iter, Y, Z_1, Z_2, X){
#% -- Daniel Runcie -- %
#% Gibbs sampler for genetic covariance estimation based on mixed effects
#% model, with missing data
#% Based on:
#% Runcie and Mukherjee (2013) Dissecting high-dimensional traits
#% with Bayesian sparse factor analysis of genetic covariance matrices.
#% GENETICS.
#% (c) April 22, 2013
#% code modified from original provided by Anirban Bhattacharya
#% This function implements the BSF-G partially collapsed Gibbs sampler. It loads all input data
#% and matrices from setup.mat in the current directory. Priors and control
#% parameters are passed to the function.
#% setup.mat is a struct with at least:
#% Y: data matrix
#% X: fixed effect design matrix
#% Z_1: random effect design matrix for factor model
#% Z_2: additional random effect design matrix
#% A: Additive genetic relationship matrix
#% For analysis of Ayroles et al 2009 data, can include:
#% Ayroles_results: struct holding gene names and correlations estimated in that paper
#% For analysis of simulations:
#% U_act: r x p matrix of true breeding values
#% E_act: n x p matrix of true model residuals
#% gen_factor_Lambda: p x k_G matrix of true genetic factor loadings
#% error_factor_Lambda: p x k matrix of true residual factor loadings
#% h2: p x 1 vector of true heritabilities
#% factor_h2s: k x 1 vector of true latent factor heritabilities
#% G, R: p x p matrix of true genetic and residual covariances
#% The function takes the following inputs:
#% burn: number of burnin samples
#% sp: total number of samples to collect
#% thin: thinning rate of chain
#% b0,b1: parameters controlling rate of adaptation of factor model size
#% h2_divisions: number of discrete steps for each factor heritability parameter
#% epsilon: truncation point for factor loadings during adaptation
#% draw_iter: frequency of updating diagnostic plots
#% priors: struct holding various prior hyperparameters:
#% k_init: initial number of factors to initialize
#% as, bs: inverse gamma hyperparameters for model residuals, as well as non-factor random effects
#% df: degrees of freedom for t-distribution of ARD prior on factor loadings
#% ad1, bd1: inverse gamma hyperparamters for first factor shrinkage multiplier (/delta_1)
#% ad2, bd2: inverse gamma hyperparamters for remaining factor shrinkage multiplier (/delta_i, i \in 2...k)
#% The function output is the struct, Posterior, with the following fields:
# % Lambda, U, no_f, ps, resid_ps, delta, G_h2: matrices with each column a
#% (vectorized if necessary) posterior sample of the:
# % Lambda: factor loading matrix
#% U: genetic random effect matrix
#% no_f: number of significant factors
#% ps: genetic residual precision after accounting for latent factors
#% resid_ps: phenotypic residual precision
#% delta: column shrinkage parameters for Lambda
#% G_h2: factor heritabilties
#% B, d, W: matrices of posterior means of fixed effect (B),
#% residual genetic (d), or 2nd random effect (W) coefficients. Some may be empty
#%
#% Several diagnostic plots are produced during the run.
#% Their interpretation is described within the source codes:
#% draw_simulation_diagnostics.m: For simulated data with known true values
#% draw_results_diagnostics.m: Otherwise
rm(Y)
rm(Z_1)
rm(Z_2)
rm(X)
#global Y #%n x p matrix of phenotypic data
#global Z_1 #%n x r incidence matrix for additive genetic effects
#global Z_2 #%n x r2 incidence matrix for another set of random effects
#global X #%n x b design matrix of fixed effects
nrun = burn + sp * thin #% number of posterior samples
k_min = 1e-1 #% minimum factor loading size to report in running status
prop = 1.00 #% proportion of redundant elements within columns necessary to drop column
#% ------read data--------%
load('../setup.mat')
#%Determine if 'setup.mat' contains output of a simulation, based on if
#%known factor loadings are included. Affects plotting functions
simulation = true
if(isempty(objects('gen_factor_Lambda'))){
simulation = false
}
simulation
#%normalize Y to have zero mean and unit variances among observed values,
#%allowing for NaNs.
c(n,p) = dim(Y)
Y_full = Y
Mean_Y = zeros(1, ncol(Y))
VY = zeros(1, ncol(Y))
for(j in 1:p){
Mean_Y[j] = mean(Y[!(is.nan(Y[,j])),j])
VY[j] = var(Y[!(is.nan(Y[,j])),j])
if(is.nan(VY[j])){
VY[j] = 1
}
}
Y = Y - Mean_y
Y = Y * (1/sqrt(VY))
#%determine if a design matrix (X) exists (is loaded from setup.mat). If
#%not, make a dummy X-matrix with no columns.
if(!(exists('X'))){
X = zeros(0,n)
}
if(ncol(X) != n){
X = zeros(0,n)
}
#%Determine if a second random effects design matrix exists. If not, make a
#%dummy matrix
if(!(exists('Z_2'))){
Z_2 = zeros(0,n)
}
if(ncol(Z_2) != n){
Z_2 = zeros(0,n)
}
#%calculate model dimensions
r = nrow(Z_1)
r2 = nrow(Z_2)
b = nrow(X)
#% --- Initialize variables --- %
# %residual parameters. This structure holds the priors hyperparamters for
#%the gamma prior on the model residual variances. It also holds the current
#%estimate of the residual precision
resid$as = priors$as
resid$bs = priors$bs
resid$Y = Y
resid$p = p
resid$ps = rgamma(p, shape = resid$as, scale = 1/resid$bs) #%residual precision
#%Factors. This struct holds all information about the latent factors,
#%including the current number of factors, priors hyperparameters for the
#%factor Loadings, as well as current values of the factor loadings, their
#%precision, the factor scores, and the genetic heritability of each factor
k = priors$k_init #% initial number of factors
df = priors$df #% prior degrees of freedom for hierarchical t-prior on loadings
ad1 = priors$ad1 #% priors on delta
bd1 = priors$bd1
ad2 = priors$ad2
bd2 = priors$bd2
Factors$r = n
Factors$n = n
Factors$p = p
Factors$k = k
Factors$df = df
Factors$ad1 = ad1
Factors$bd1 = bd1
Factors$ad2 = ad2
Factors$bd2 = bd2
Factors$psijh = matrix(rgamma(p*k, shape = df/2, scale = 2/df), nrow = p, ncol = k) #%individual loadings precisions
Factors$delta = rbind(rgamma(1, shape = ad1+10, scale = 1/bd1),
t(t(rgamma(k-1, shape = ad2, scale = 1/bd2)))) #% components of tauh !!!!!! NEED SIZE
Factors$tauh = cumprod(Factors$delta) #%extra shrinkage of each loading column
Factors$Plam = Factors$psijh * t(Factors$tauh) #%total precision of each loading
Factors$Lambda = zeros(p,k) + matrix(runif(m*n), nrow = m, ncol = n) * reshape(sqrt(1/Factors$Plam),p,k) #%factor loadings
Factors$h2 = runif(k) #%factor heritability
Factors$h2_divisions = h2_divisions #%discretizations of heritability
Factors$num = 0
Factors$no_f = zeros(sp,1)
Factors$nofout = k * ones(nrun,1)
#%genetic_effects. This structure holds information about latent genetic
#%effects. U is latent genetic effects on factor traits. d is genetic
#%effects on residuals of the factor traits. Plus prior hyperparameters for
#%genetic effect precisions
genetic_effects$n = nrow(Z_1)
as = priors$as
bs = priors$bs
genetic_effects$as = as
genetic_effects$bs = bs
genetic_effects$ps = rgamma(p, shape = as, scale = 1/bs)
genetic_effects$U = matrix(rnorm(k*r),k,r) * sqrt(Factors$h2)
genetic_effects$d = matrix(rnorm(p*r),p,r) * 1/sqrt(genetic_effects$ps)
#%interaction_effects. Similar to genetic_effects structure except for
#%additional random effects that do not contribute to variation in the
#%factor traits
as = priors$as
bs = priors$bs
interaction_effects$as = as
interaction_effects$bs = bs
interaction_effects$ps = rgamma(p, shape = as, scale = 1/bs)
interaction_effects$mean = zeros(p,r2)
interaction_effects$n = r2
interaction_effects$W = matrix(rnorm(p*r2),p,r2) * 1/sqrt(interaction_effects$ps)
interaction_effects$W_out = zeros(p,r2)
#%fixed_effects hold B
fixed_effects$b = b
fixed_effects$cov = zeros(b,b) #%inverse covariance of fixed effects
fixed_effects$mean = zeros(p,b) #%mean of fixed effects
fixed_effects$B = matrix(rnorm(p*b),p,b) #%current estimate of fixed effects
Factors$scores = genetic_effects$U * Z_1 + matrix(rnorm(k*n),k,n) * sqrt(1-Factors$h2) #%initialize factor scores
#%Posterior holds Posterior samples and Posterior means
Posterior$Lambda = zeros(0,sp)
Posterior$no_f = zeros(sp,1)
Posterior$ps = zeros(p,sp)
Posterior$resid_ps = zeros(nrow(resid$ps),sp)
Posterior$B = zeros(p,nrow(X))
Posterior$U = zeros(0,sp)
Posterior$d = zeros(p,nrow(Z_1))
Posterior$W = zeros(p,nrow(Z_2))
Posterior$delta = zeros(0,sp)
Posterior$G_h2 = zeros(0,sp)
#%save run parameters and hyperparameters
params$p = p
params$n = n
params$r = r
params$Mean_Y = Mean_Y
params$VY = VY
params$b0 = b0
params$b1 = b1
params$epsilon = epsilon
params$prop = prop
params$as = priors$as
params$bs = priors$bs
params$df = priors$df
params$ad1 = priors$ad1
params$bd1 = priors$bd1
params$ad2 = priors$ad2
params$bd2 = priors$bd2
params$burn = burn
params$thin = thin
params$sp = sp
params$nrun = nrun
params$h2_divisions = h2_divisions
if(simulation){
params$U_act = U_act
params$Lambda = error_factor_Lambda
params$h2 = h2
params$G = G
params$R = R
params$B = B
params$factor_h2s = factor_h2s
params$name = name
}
#%precalculate some matrices
#%invert the random effect covariance matrices
Ainv = solve(A)
A_2_inv = diag(1, nrow(Z_2)) #%Z_2 random effects are assumed to have covariance proportional to the identity. Can be modified.
#%pre-calculate transformation parameters to diagonalize aI + bZAZ for fast
#%inversion: inv(aI + bZAZ) = 1/b*u*diag(1./(s+a/b))*u'
#%uses singular value decomposition of ZAZ for stability when ZAZ is low
#%rank
ZAZ = t(Z_1) %*% A %*% Z_1
u = svd(ZAZ)$d
s = svd(ZAZ)$u
eig_ZAZ$vectors = u
eig_ZAZ$values = diag(s)
#%fixed effects + random effects 1
#%diagonalize mixed model equations for fast inversion:
#%inv(a*blkdiag(fixed_effects.cov,Ainv) + b*[X; Z_1][X; Z_1]') = Q*diag(1./(a.*s1+b.*s2))*Q'
Design = rbind(X, Z_1)
Design2 = Design %*% t(Design)
storeGSVD = GSVD(Cholesky(blkdiag(fixed_effects$cov, Ainv)), Cholesky(Design2))
q = storeGSVD$X
S1 = storeGSVD$C
S2 = storeGSVD$S
svd_Design_Ainv$Q = t(solve(q))
svd_Design_Ainv$s1 = diag(t(S1) %*% S1)
svd_Design_Ainv$s2 = diag(t(S2) %*% S2)
Qt_Design = t(svd_Design_Ainv$Q) %*% Design
#%random effects 2
#%as above, but for random effects 2. Here, fixed effects will be conditioned on, not sampled simultaneously. Otherwise identical.
Design = Z_2
Design2 = Design %*% t(Design)
storeGSVD = GSVD(Cholesky(A_2_inv), Cholesky(Design2))
q = storeGSVD$X
S1 = storeGSVD$C
S2 = storeGSVD$S
svd_Z2_2_A2inv$Q = t(solve(q))
svd_Z2_2_A2inv$s1 = diag(t(S1) %*% S1)
svd_Z2_2_A2inv$s2 = diag(t(S2) %*%S2)
Qt_Z2 = t(svd_Z2_2_A2inv$Q) %*% Design
#%genetic effect variances of factor traits
#% diagonalizing a*Z_1*Z_1' + b*Ainv for fast inversion
#%diagonalize mixed model equations for fast inversion:
#% inv(a*Z_1*Z_1' + b*Ainv) = Q*diag(1./(a.*s1+b.*s2))*Q'
#%similar to fixed effects + random effects 1 above, but no fixed effects.
ZZt = Z_1 %*% t(Z_1)
storeGSVD = GSVD(Cholesky(ZZt), Cholesky(Ainv))
q = storeGSVD$X
S1 = storeGSVD$C
S2 = storeGSVD$S
svd_ZZ_Ainv$Q = t(solve(q))
svd_ZZ_Ainv$s1 = diag(t(S1) %*% S1)
svd_ZZ_Ainv$s2 = diag(t(S2) %*% S2)
#%------start gibbs sampling-----%
sp_num = 0
#tic
t1 = proc.time()
for(i in 1:nrun){
#%fill in missing phenotypes
#%conditioning on everything else
phenMissing = is.nan(Y_full) # which indices
if(sum(sum(phenMissing)) > 0){
meanTraits = fixed_effects$B %*% X + genetic_effects$d %*% Z_1
+ interaction_effects$W %*% Z_2 + Factors$Lambda %*% Factors$scores
meanTraits = t(meanTraits)
resids = matrix(rnorm(dim(Y_full)), nrow(Y_full), ncol(Y_full)) * 1/sqrt(t(resid$ps))
Y[phenMissing] = meanTraits[phenMissing] + resids[phenMissing]
}
#%sample Lambda
#%conditioning on W, X, F, marginalizing over D
Ytil = t(Y) - fixed_effects$B %*% X - interaction_effects$W %*% Z_2
source("sample_lambda.R")
Factors = sample_lambda(Ytil, Factors, resid, genetic_effects, eig_ZAZ)
#%sample fixed effects + random effects 1 ([B;D])
#%conditioning on W, F, L
Ytil = t(Y) - interaction_effects$W %*% Z_2 - Factors$Lambda %*% Factors$scores
N = genetic_effects$n + fixed_effects$b
source("sample_means.R")
location_sample = sample_means(Ytil, Qt_Design, N, resid, genetic_effects$ps, svd_Design_Ainv)
fixed_effects$B = location_sample[,1:fixed_effects$b]
genetic_effects$d = location_sample[,fixed_effects$b+1:fixed_effects$b+genetic_effects$n]
#%sample random effects 2
#%conditioning on B, D, F, L
Ytil = t(Y) - fixed_effects$B %*% X - genetic_effects$d %*% Z_1 - Factors$Lambda %*% Factors$scores
N = interaction_effects$n
if(N > 0){
location_sample = sample_means(Ytil, Qt_Z2, N, resid, interaction_effects$ps, svd_Z2_2_A2inv)
interaction_effects$W = location_sample
}
#%sample factor h2
#%conditioning on F, marginalizing over U
source("sample_h2s_discrete.R")
Factors = sample_h2s_discrete(Factors, eig_ZAZ)
#%sample genetic effects (U)
#%conditioning on F, Factor h2
source("sample_Us.R")
genetic_effects = sample_Us(Factors, genetic_effects, svd_ZZ_Ainv, Z_1)
#%sample F
#%conditioning on U, Lambda, B, D, W, factor h2s
Ytil = t(Y) - fixed_effects$B %*% X - genetic_effects$d %*% Z_1 - interaction_effects$W %*% Z_2
source("sample_faxtors_scores.R")
Factors = sample_factors_scores(Ytil, Factors, resid, genetic_effects, Z_1)
#% -- Update ps -- %
Lambda2 = (Factors$Lambda)^2
Factors$psijh = matrix(rgamma(nrow(Factors$df)*ncol(Factors$df), shape = Factors$df/2 + 0.5, scale = 2/(Factors$df + Lambda2 * t(Factors$tauh))), nrow(Factors$df), ncol(Factors$df))
#%continue from previous Y residual above
Ytil = Ytil - Factors$Lambda %*% Factors$scores
n = nrow(Y)
resid$ps = matrix(rgamma(nrow(resid$as)*ncol(resid$as), shape = resid$as + 0.5*n, scale = 1/(resid$bs+0.5 %*% t(rowSums(Ytil^2)))), nrow(resid$as), ncol(resid$as)) #%model residual precision
n = genetic_effects$n
genetic_effects$ps = matrix(rgamma(nrow(genetic_effects$as)*ncol(genetic_effects$as), shape = genetic_effects$as + 0.5*n, scale = 1/(genetic_effects$bs+ 0.5 %*% t(rowSums(genetic_effects$d^2)))),
nrow(genetic_effects$as), ncol(genetic_effects$as)) #%random effect 1 (D) residual precision
n = interaction_effects$n
interaction_effects$ps = matrix(rgamma(nrow(interaction_effects$as)*ncol(interaction_effects$as), shape = interaction_effects$as + 0.5*n, scale = 1/(interaction_effects$bs+0.5 %*% t(rowSums(interaction_effects$W^2)))),
nrow(interaction_effects$as), ncol(interaction_effects$as)) #%random effect 2 (W) residual precision
#%------Update delta & tauh------%
source("sample_delta.R")
c(delta, tauh) = sample_delta(Factors, Lambda2)
Factors$delta = delta
Factors$tauh = tauh
#%---update precision parameters----%
Factors$Plam = Factors$psijh * t(Factors$tauh)
#% ----- adapt number of factors to samples ----%
source("update_k.R")
# update_k is assumed to return the updated Factors and genetic_effects as a list, in that order
kupdate = update_k(Factors, genetic_effects, b0, b1, i, epsilon, prop)
Factors = kupdate[[1]]
genetic_effects = kupdate[[2]]
#% -- save sampled values (after thinning) -- %
if((i %% thin) == 0 && i > burn){
sp_num = (i-burn)/thin
source("save_posterior_samples.R")
Posterior = save_posterior_samples(sp_num, params, Posterior,
resid,fixed_effects, genetic_effects, Factors,
interaction_effects)
if((sp_num %% 100) == 0){
  save(Posterior, params, file = 'Posterior.Rdata')
}
}
#% -- provide run diagnostics and plots -- %
if((i %% draw_iter) == 0){
directory = getwd()
#strread(pwd, '%s', 'delimiter', '/') # CHECK
print(directory)
print(i)
print(Factors$nofout[i] - Factors$num)  # diagnostic output
elapsed = unname((proc.time()-t1)[3])
#%output some running statistics on the current factors and their
#%genetic variances
print(c(Factors$delta, t(c(1:Factors$k)), Factors$h2, t(sum(t(Factors$scores)^2))/(nrow(Y)-1), t(sum(t(genetic_effects$U)^2))/(nrow(Z_1)-1)))
print(paste('Time remaining:', round((nrun-i) * (elapsed/i) / 60, 1), 'minutes'))
#%make some plots of some running statistics
if(simulation){
source("draw_simulation_diagnostics")
draw_simulation_diagnostics(i,sp_num,params,Factors,genetic_effects,resid,Posterior,gen_factor_Lambda,error_factor_Lambda,G,R,h2)
}
else{
source("draw_results_diagnostics")
draw_results_diagnostics(i, sp_num, params, Factors, Posterior)
}
}
}
#toc
print((proc.time()-t1)[3])
save(Posterior, params, file = 'Posterior.Rdata')
return(list(Posterior = Posterior, params = params))
}
|
#' This R script will process all R markdown files (those with in_ext file extension,
#' .Rmd by default) in the current working directory. Files with a status of
#' 'process' will be converted to markdown (with out_ext file extension, '.md'
#' by default). It will change the published parameter to 'true' and change the
#' status parameter to 'publish'.
#'
#' @param dir the directory to process R Markdown files.
#' @param out_ext the file extension to use for processed files.
#' @param in_ext the file extension of input files to process.
#' @return nothing.
#' @author Jason Bryer <jason@bryer.org>, Jarad Niemi <
convertRMarkdown <- function(dir=getwd(), images.dir=dir, images.url='/images/',
out_ext='.md', in_ext='.Rmd') {
require(knitr, quietly=TRUE, warn.conflicts=FALSE)
files <- list.files(path=dir, pattern=in_ext, ignore.case=TRUE)
for(f in files) {
message(paste("Processing ", f, sep=''))
content <- readLines(f)
frontMatter <- which(substr(content, 1, 3) == '---')
if(length(frontMatter) == 2) {
statusLine <- which(substr(content, 1, 7) == 'status:')
publishedLine <- which(substr(content, 1, 10) == 'published:')
if(statusLine > frontMatter[1] & statusLine < frontMatter[2]) {
status <- unlist(strsplit(content[statusLine], ':'))[2]
status <- sub('[[:space:]]+$', '', status)
status <- sub('^[[:space:]]+', '', status)
if(tolower(status) == 'process') {
					#This is a bit of a hack but if a line has zero length (i.e. a
					#blank line), it will be removed in the resulting markdown file.
#This will ensure that all line returns are retained.
content[nchar(content) == 0] <- ' '
message(paste('Processing ', f, sep=''))
content[statusLine] <- 'status: publish'
content[publishedLine] <- 'published: true'
outFile <- paste(substr(f, 1, (nchar(f)-(nchar(in_ext)))), out_ext, sep='')
render_markdown(strict=TRUE)
opts_knit$set(out.format='markdown')
opts_knit$set(base.dir=images.dir)
opts_knit$set(base.url=images.url)
try(knit(text=content, output=outFile), silent=FALSE)
} else {
warning(paste("Not processing ", f, ", status is '", status,
"'. Set status to 'process' to convert.", sep=''))
}
} else {
warning("Status not found in front matter.")
}
} else {
warning("No front matter found. Will not process this file.")
}
}
invisible()
}
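# Hedged usage sketch (added; not part of the original script): a minimal call to
# convertRMarkdown() assuming a Jekyll-style layout where .Rmd posts live in '_posts'
# and figures are written under 'images'. The paths are illustrative. Wrapped in
# if(FALSE) so sourcing this file does not trigger a run.
if(FALSE){
	convertRMarkdown(dir='_posts', images.dir='images', images.url='/images/')
}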
|
/rmarkdown.r
|
no_license
|
jarad/jarad.github.com
|
R
| false | false | 2,551 |
r
|
|
#' Simulated group markers
#'
#' This function gives you the ranked list of group markers at the specified
#' proportion of top markers (specificity)
#'
#' @param rank_df The data frame with the ranked group genes as returned by the
#' [rank_sim()] function.
#' @param spec The proportion of top ranked genes. It has to be a number between
#' 0 and 1.
#' @param n_clust The number of simulated cell groups.
#'
#' @return A list with `n_clust` elements representing their corresponding group
#' markers. Each element of the list contains the relative set of marker indexes
#' as ordered in the original `rowData(sim)`.
#' The list doesn't keep the order of the ranking.
#'
#' @export
#'
#' @examples
#' # TODO
markers_by_specificity <- function(rank_df,
spec,
n_clust) {
k <- 5 + n_clust
info <- rank_df[, c(k:(k + n_clust - 1))] ## order
p <- grep("^Rank", names(rank_df))
R <- rank_df[, p] ## rank
sorted <- sapply(1:n_clust, function(x) order(R[, x]))
k <- 10 * n_clust
length <- sapply(1:n_clust, function(x) length(which(info[, x] < k)))
l <- sapply(1:n_clust, function(x) round(spec * length[x]))
markers <- lapply(1:n_clust, function(x) sorted[1:l[x], x])
return(markers)
}
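# Hedged toy example (added; not from the original package): a synthetic rank_df with
# the column layout the indexing above implies for n_clust = 2 -- six leading columns,
# then one order/info column per cluster, then per-cluster "Rank*" columns. All names
# and values below are illustrative.
if (FALSE) {
  set.seed(1)
  n_genes <- 50
  toy <- data.frame(matrix(0, nrow = n_genes, ncol = 6)) # filler columns 1-6
  toy$info1 <- sample(1:40, n_genes, replace = TRUE)     # column 7: order info, cluster 1
  toy$info2 <- sample(1:40, n_genes, replace = TRUE)     # column 8: order info, cluster 2
  toy$Rank1 <- sample(n_genes)                           # rank of each gene in cluster 1
  toy$Rank2 <- sample(n_genes)                           # rank of each gene in cluster 2
  markers_by_specificity(rank_df = toy, spec = 0.5, n_clust = 2)
}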
|
/R/markers_by_specificity.R
|
permissive
|
crsky1023/matchSCore2
|
R
| false | false | 1,273 |
r
|
|
## Preliminaries
rm(list=ls())
# Change working directory to where you've stored ZTRAX
path<- "P:/Peter/Hedonics/Groundwater/"
#install.packages("dplyr", repos = "http://mran.revolutionanalytics.com")
## This function will check if a package is installed, and if not, install it
pkgTest <- function(x) {
if (!require(x, character.only = TRUE))
{
install.packages(x, dep = TRUE)
if(!require(x, character.only = TRUE)) stop("Package not found")
}
}
## These lines load the required packages
packages <- c("readxl","Hmisc","DescTools","qgam","quantreg","sphet","mgcv","McSpatial","pastecs","rdd","Matrix","psych","xtable","splines","ck37r","data.table","matrixStats","tmle","xgboost", "MatchIt","gtools","statar","foreign","multiwayvcov","lmtest","readstata13","xlsx", "data.table","doSNOW","parallel","compare","doParallel","devtools","foreach","spdep","reshape2","sm","plyr","utils","tcltk","geosphere", "matrixcalc", "dplyr","ExPosition", "randomForest","lfe", "hdm", "rdrobust", "stargazer", "ggplot2", "outliers","rpart","e1071")
lapply(packages, pkgTest)
#library(statar)
## These lines set several options
options(scipen = 999) # Do not print scientific notation
options(stringsAsFactors = FALSE) ## Do not load strings as factors
memory.limit(10000000000000)
NPL<-readRDS(paste(path,'NPLfull.rds', sep=""), refhook = NULL)
pNPL<-subset(NPL,rat_name=="PROPOSAL TO NPL")
fNPL<-subset(NPL,rat_name=="FINAL LISTING ON NPL")
dNPL<-subset(NPL,rat_name=="DELETION FROM NPL"|rat_name=="PARTIAL NPL DELETION")
pdNPL<-subset(NPL,rat_name=="PARTIAL NPL DELETION")
opNPL<- pNPL#[order(pNPL$date),]
ofNPL<- fNPL#[order(fNPL$date),]
odNPL<- dNPL#[order(dNPL$date),]
opdNPL<- pdNPL#[order(pdNPL$date),]
odNPL<-odNPL[odNPL$rstate_code == "NY",]
##set up superlearner for TMLE
num_cores = RhpcBLASctl::get_num_cores()
# How many cores does this computer have?
num_cores
# Use all of those cores for parallel SuperLearner.
options(mc.cores = num_cores)
# Check how many parallel workers we are using:
getOption("mc.cores")
# Set multicore compatible seed.
set.seed(1, "L'Ecuyer-CMRG")
# multicore superlearner
# different configurations.
xgboost.tune <- list(ntrees = c(50, 100),
max_depth = c(5,10,15),
shrinkage = c( 0.01,0.1),
minobspernode = c(10))
# Set detailed names = T so we can see the configuration for each function.
# Also shorten the name prefix.
xgboost <- create.Learner("SL.xgboost", tune = xgboost.tune, detailed_names = T, name_prefix = "xgb")
# configurations
length(xgboost$names)
xgboost$names
xgboost.tune2 <- list(ntrees = c(50),
max_depth = c(5,15),
shrinkage = c( 0.1),
minobspernode = c(10))
# Set detailed names = T so we can see the configuration for each function.
# Also shorten the name prefix.
xgboost2 <- create.Learner("SL.xgboost", tune = xgboost.tune2, detailed_names = T, name_prefix = "xgb")
# configurations
length(xgboost2$names)
xgboost2$names
# different configurations.
glmnet.tune <- list(alpha = c(0,.1, .25,.5,.75,.9,1))
# Set detailed names = T so we can see the configuration for each function.
# Also shorten the name prefix.
glmnet <- create.Learner("SL.glmnet", tune = glmnet.tune, detailed_names = T, name_prefix = "glmnet")
# configurations
length(glmnet$names)
glmnet$names
glmnet.tune2 <- list(alpha = c(0,1))
# Set detailed names = T so we can see the configuration for each function.
# Also shorten the name prefix.
glmnet2 <- create.Learner("SL.glmnet", tune = glmnet.tune2, detailed_names = T, name_prefix = "glmnet")
# configurations
length(glmnet2$names)
glmnet2$names
# different configurations.
randomForest.tune <- list(ntree = c(500))
# Set detailed names = T so we can see the configuration for each function.
# Also shorten the name prefix.
randomForest <- create.Learner("SL.randomForest", tune = randomForest.tune,
detailed_names = T, name_prefix = "randomForest")
# configurations
length(randomForest$names)
randomForest$names
# different configurations.
gam.tune <- list(spatialsp= c("ts","gp","tp"))#, cts.num = 10)
# Set detailed names = T so we can see the configuration for each function.
# Also shorten the name prefix.
gam <- create.Learner("SL.gam", tune = gam.tune, detailed_names = T, name_prefix = "gam")
# configurations
length(gam$names)
gam$names
expandingList <- function(capacity = 10) {
buffer <- vector('list', capacity)
length <- 0
methods <- list()
methods$double.size <- function() {
buffer <<- c(buffer, vector('list', capacity))
capacity <<- capacity * 2
}
methods$add <- function(val) {
if(length == capacity) {
methods$double.size()
}
length <<- length + 1
buffer[[length]] <<- val
}
methods$as.list <- function() {
b <- buffer[0:length]
return(b)
}
methods
}
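# Hedged usage sketch (added): the closure above acts as a growable list; add()
# appends (doubling the buffer when full) and as.list() returns the filled portion.
if(FALSE){
  lrns <- expandingList(capacity = 2)
  lrns$add("SL.mean")
  lrns$add("SL.glmnet")
  lrns$add("SL.xgboost")   # third add() triggers double.size()
  lrns$as.list()           # list("SL.mean", "SL.glmnet", "SL.xgboost")
}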
if(FALSE){
results.gam<-mgcv::gam(logprice~treatind+X+s(day,lat,long,bs="tp",m=3,k=50),data=sample)
mgcv::summary.gam(results.gam)
Xs<-cbind(X,lat,long)
}
SL.gam <- function(Y, X, newX, family, obsWeights,ms=3,ks=50,ksi=10,kt=10,spatialsp,temporalsp="gp",
deg.gam =2 , cts.num=4 ,slat=lat,slong=long, ...) {
.SL.require('mgcv')
s=mgcv:::s
cts.x <- apply(X, 2, function(x) (length(unique(x)) > cts.num))
cts.x["lat"] <- FALSE
cts.x["long"]<- FALSE
cts.x["day"]<- FALSE
if (sum(!cts.x) > 0) {
gam.model <- as.formula(paste("Y~", paste(colnames(X[, cts.x, drop = FALSE]),
collapse = "+"), "+s(lat,long,bs='",spatialsp,"',m=",ms,",k=",ks,")",sep=""))
# "+s(day,bs='",temporalsp,"',m=",ms,",k=",kt,")+",
#paste(paste("s(", colnames(X[, cts.x, drop = FALSE]),",lat,long,bs='",spatialsp,"',m=",ms,",k=",ksi,")", sep=""),
# collapse = "+"),
#"+s(day,lat,long,bs='",spatialsp,"',m=",ms,",k=",ks,")", sep=""))
} else {
gam.model <- as.formula(paste("Y~", paste(paste("s(", colnames(X[, cts.x, drop = FALSE]), ",", deg.gam, ")", sep=""), collapse = "+")))
}
# fix for when all variables are binomial
if (sum(!cts.x) == length(cts.x)) {
gam.model <- as.formula(paste("Y~", paste(colnames(X), collapse = "+"), sep = ""))
}
fit.gam <- mgcv::gam(gam.model, data = X, family = family)
pred <-mgcv::predict.gam(fit.gam, newdata = newX, type = "response")
fit <- list(object = fit.gam)
out <- list(pred = pred, fit = fit)
class(out$fit) <- c("SL.gam")
return(out)
}
predict.SL.gam <- function(object, newdata, ...){
  .SL.require('mgcv')
  # the stored fit comes from mgcv::gam above, so predict with mgcv rather than the gam package
  pred <- mgcv::predict.gam(object = object$object, newdata = newdata, type = "response")
  return(pred)
}
.SL.require <- function(package, message = paste('loading required package (', package, ') failed', sep = '')) {
if(!require(package, character.only = TRUE)) {
stop(message, call. = FALSE)
}
invisible(TRUE)
}
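# Hedged sketch (added): fitting the custom spatial SL.gam wrapper through SuperLearner
# on a toy data set. The wrapper assumes X carries 'lat', 'long' and 'day' columns, and
# gam$names[3] is assumed to be the bs='tp' variant created above; all toy data and
# names are illustrative, not part of the original analysis.
if(FALSE){
  set.seed(1)
  n <- 200
  Xtoy <- data.frame(lat  = runif(n, 40, 41),
                     long = runif(n, -75, -74),
                     day  = sample(1:1000, n, replace = TRUE),
                     sqft = rnorm(n, 1500, 300))
  ytoy <- 0.001*Xtoy$sqft + sin(10*Xtoy$lat) + rnorm(n, 0, 0.1)
  sl.toy <- SuperLearner(Y = ytoy, X = Xtoy, family = gaussian(),
                         SL.library = c("SL.mean", gam$names[3]))
  sl.toy
}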
SL.library<-expandingList()
PS.library<-expandingList()
SL.library$add("SL.mean")
PS.library$add("SL.mean")
SL.library2<-expandingList()
PS.library2<-expandingList()
SL.library2$add("SL.mean")
PS.library2$add("SL.mean")
#SL.library2$add(randomForest$names)
#SL.library2$add("SL.nnls")
#SL.library2$add("SL.gam")
SL.library$add("SL.randomForest")
SL.library$add("SL.xgboost")
#if(FALSE){
for(i in 1:length(glmnet$names)){
SL.library$add(glmnet$names[i])
}
for(i in 1:length(glmnet$names)){
PS.library$add(glmnet$names[i])
}
for(i in 1:length(glmnet2$names)){
SL.library2$add(glmnet2$names[i])
}
for(i in 1:length(glmnet2$names)){
PS.library2$add(glmnet2$names[i])
}
#}
#for(i in 1:length(gam$names)){
# SL.library$add(gam$names[i])
#}
for(i in 1:length(gam$names)){
SL.library2$add(gam$names[i])
}
for(i in 1:length(xgboost$names)){
SL.library$add(c(xgboost$names[i]))
}
#for(i in 1:length(xgboost2$names)){
# SL.library2$add(c(xgboost2$names[i]))
#}
for(i in 1:length(randomForest$names)){
SL.library$add(c(randomForest$names[i]))
}
SL.library$as.list()
SL.library2$as.list()
PS.library$as.list()
PS.library2$as.list()
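# Hedged sketch (added): the outcome (SL.library*) and propensity-score (PS.library*)
# lists built above are presumably meant to be passed to tmle(); a minimal call could
# look like the following, with Y/A/W placeholders for outcome, treatment and
# covariates (column names illustrative).
if(FALSE){
  tmle.fit <- tmle(Y = sample$logprice,
                   A = sample$treatst,
                   W = sample[, c("sqfeet", "YearBuilt", "LotSizeSquareFeet")],
                   Q.SL.library = unlist(SL.library$as.list()),
                   g.SL.library = unlist(PS.library$as.list()))
  summary(tmle.fit)
}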
#############################################################################
#potential sites
psites<-expandingList()
sample<-NULL
for(k in c("full")){
for(i in psite){ # NOTE: 'psite' must already be defined; it is only assigned further down in this script
#k<-"full"
#i<-73
if (file.exists(paste(path,k,'deletionbajgw',i,'.rds', sep=""))){
sample.1<-readRDS(paste(path,k,'deletionbajgw',i,'.rds', sep=""), refhook = NULL)
print(paste0(k,"and",i,"sample size = ", dim(sample.1[sample.1$treatmentgroup>0,])))
print(paste0(k,"and",i,"treated = ", dim(sample.1[sample.1$treatdgw>0,])))
print(paste0(k,"and",i,"control = ", dim(sample.1[sample.1$control>0,])))
sample.1$treatgwWL<- sample.1$treatdgw * sample.1$WaterStndCode.fWL
print(paste0(k,"and",i,"treat well = ", dim(sample.1[sample.1$treatgwWL>0,])[1]))
sample.1$treatgwMU<- sample.1$treatdgw * sample.1$WaterStndCode.fMU
print(paste0(k,"and",i,"treat public = ", dim(sample.1[sample.1$treatgwMU>0,])[1]))
cut<-100
if(dim(sample.1[sample.1$treatgwWL>0,])[1]-cut>0 & dim(sample.1[sample.1$treatgwMU>0,])[1]-cut>0 &
dim(sample.1[sample.1$control>0,])[1]-cut>0 &
dim(sample.1[sample.1$treatmentgroup>0,])[1]-dim(sample.1[sample.1$treatdgw>0,])[1]-cut>0 &
dim(sample.1)[2]-54275>0){
psites$add(paste0("site",i))
#sample<-rbind(sample,sample.1)
}
}
}
}
psites$as.list()
saveRDS(psites$as.list(), file = paste(path,'repeat5wellslist.rds', sep=""), ascii = FALSE, version = NULL,
compress = TRUE, refhook = NULL)
psitelist<-readRDS(paste(path,'repeat5wellslist.rds', sep=""), refhook = NULL)
###############################################################################################
psite<-c("209","210","214","217","223","224","227","228","254","262")
#sample.1<-readRDS(paste(path,k,'deletiongw',i,'.rds', sep=""), refhook = NULL)
for(k in c("full")){
sample.1<-readRDS(paste(path,k,'deletion',psite[1],'.rds', sep=""), refhook = NULL)
for(i in psite[2:10]){
if (file.exists(paste(path,k,'deletion',i,'.rds', sep=""))){
sample.2<-readRDS(paste(path,k,'deletion',i,'.rds', sep=""), refhook = NULL)
sample.1<-smartbind(sample.1,sample.2)
}
}
}
saveRDS(sample.1, file = paste(path,'repeat5wells.rds', sep=""), ascii = FALSE, version = NULL,
compress = TRUE, refhook = NULL)
####################################################################
#sample.1<-readRDS(paste(path,'repeat5wells.rds', sep=""), refhook = NULL)
dNPL$row<-seq(1:dim(dNPL)[1])
psitel<-dNPL[dNPL$rsitinc_desc=="LANDFILL","row"]
psitel<-c(2,4,11,12,15,16,19,20,21)
psitel<-c(2,12,15,16)
for(psite in psitel){
data<-readRDS(paste(path,'fulldeletionbajgw',psite,'.rds', sep=""), refhook = NULL)
#repeat sales Bajari
#d.sample.data<-sample1
#rm(sample1)
gc()
data<-data[data$price>0,]
data<-data[!duplicated(data[,c("date","HHID")]),]
sample.data<-data[,c("date","HHID","TransId","price","logprice")]
quants<-20
sample.data$indx<- factor(as.numeric(cut2(as.numeric(sample.data$HHID), g=quants)))
sample<-NULL
for(i in 1:quants){
#i<-1
d.sample.data<-sample.data[indx==i,]
rep.row<-function(x,n){
matrix(rep(x,each=n),nrow=n)
}
D<-rep.row(as.numeric(d.sample.data$HHID),nrow(d.sample.data))
D<-t(D)-D
D[D>0]<-2
D[D<0]<-2
D[D==0]<-1
D[D==2]<-0
sameHouse<-D
D<-rep.row(as.numeric(d.sample.data$TransId),nrow(d.sample.data))
D<-t(D)-D
D[D>0]<-2
D[D<0]<-2
D[D==0]<-1
D[D==2]<-0
sameSale<-D
otherSales<-sameHouse-sameSale
rm(sameHouse,sameSale)
gc()
D<-rep.row(d.sample.data$date,nrow(d.sample.data))
D<-t(D)-D
D[D<0]<-0
diffDates<-D
rm(D)
gc()
library(matrixStats)
dumDiffDates<-diffDates*otherSales
dumDiffDates[dumDiffDates==0]<- 10000000000000000
#dumDiffDates[dumDiffDates-rowMins(dumDiffDates)>0]<- -1
dumDiffDates[dumDiffDates-rowMins(dumDiffDates,na.rm = TRUE)==0]<-1
dumDiffDates[dumDiffDates<0]<-0
dumDiffDates[dumDiffDates>1]<-0
dumDiffDates[dumDiffDates==10000000000000000]<-0
dumDiffDates[rowSums(dumDiffDates)-dim(dumDiffDates)[1]==0]<-0
rm(diffDates,otherSales)
gc()
d.sample.data$preprice<-dumDiffDates%*%d.sample.data$price
d.sample.data$prelogprice<-dumDiffDates%*%d.sample.data$logprice
d.sample.data$predate<-dumDiffDates%*%as.numeric(d.sample.data$date)
d.sample.data$prediffdate<-as.numeric(d.sample.data$date)-d.sample.data$predate
d.sample.data$presstatusd<-ifelse(d.sample.data$predate-as.numeric(odNPL$date[psite])>0,1,0)   # index by site, not by the quantile chunk i
d.sample.data$presstatuscc<-ifelse(d.sample.data$predate-as.numeric(odNPL$ControlsComplete[psite])>0,1,0)
d.sample.data<-d.sample.data[d.sample.data$predate>0,]
#sample1<-sample1[sample1$presstatus<1 ,]
#sample1<-sample1[sample1$treatdgw<1 ,]
d.sample.data$difflogprice<-d.sample.data$logprice-d.sample.data$prelogprice
sample<-rbind(sample,d.sample.data)
}
library(data.table)
sample<-sample[,c("TransId","preprice","prelogprice","predate","prediffdate","presstatusd","presstatuscc","difflogprice")]
data.dt<-data.table(data)
sample.dt<-data.table(sample)
sample.new<-merge(data.dt,sample.dt,all.x=TRUE,by="TransId")
saveRDS(sample.new, file = paste(path,'fullbaj',psite,'.rds', sep=""), ascii = FALSE, version = NULL,
compress = TRUE, refhook = NULL)
}
if(TRUE){
dist<-c('10k','8k','6k','4k','2k')#,'1k','500m')
#dist<-c('4k','2k')#,'1k','500m')
laglead<-c("")
llc<-laglead[1]
treatl<-c('TATE','MUATE','WLATE')
dNPL$row<-seq(1:dim(dNPL)[1])
psitel<-dNPL[dNPL$rsitinc_desc=="LANDFILL","row"]
psitel<-psitel[psitel!=3]
psite<-psitel[1]
samplefull<-readRDS(paste(path,'fullbaj',psite,'.rds', sep=""), refhook = NULL)
samplefull<-samplefull[predate>0,]
samplefull$treatst<-samplefull[[paste0('treatdgw',psite)]] #*sample[[paste0('dist',dist[[5]])]]
buf<-1
#samplefull$buffer<-ifelse(samplefull$date-odNPL$date[psite]+(buf*365)>0&samplefull$date-odNPL$date[psite]<0,1,0)
samplefull$buffer<-ifelse(abs(samplefull$date-odNPL$date[psite])-(buf*365)<0,1,0)
samplefull<-samplefull[buffer<1,]
samplefull$timetotreat<-samplefull$date-odNPL$date[psite]
samplefull$lsite<-psite
samplefull$demlogprice<-demeanlist(samplefull$logprice,
list(as.factor(samplefull$PropertyAddressCensusTractAndBlock)))
# sample$logprice<-sample$logprice.x
#Total Average Treatment Effect
samplefull$aftpropnpl<-samplefull[[paste0('aftpropnpl',psite)]]
#sample$aftfinalnpl<-sample[[paste0('aftfinalnpl',fnplsite)]]
#sample$timefinalnplfe<-sample[[paste0('timefinalnplfe',fnplsite)]]
samplefull$treatControlsComplete<-samplefull[[paste0('treatControlsComplete',psite)]]
samplefull$timefed<-samplefull[[paste0('timefed',psite)]]
samplefull$timefedControlsComplete<-samplefull[[paste0('timefedControlsComplete',psite)]]
samplefull$treatexCC<-ifelse(samplefull$treatControlsComplete==1 &samplefull$treatst==0,1,0)
#Municipal ATE
samplefull$treatdgwMU<- samplefull[[paste0('treatd',llc,'gw',psite)]]* samplefull$WaterStndCode.fMU
samplefull$treatgroupMU<-samplefull$treatmentgroup * samplefull$WaterStndCode.fMU
samplefull$controlMU<-samplefull$control*samplefull$WaterStndCode.fMU
samplefull$sample.MUATE<-samplefull$control+samplefull$treatgroupMU
samplefull$aftpropnpl<-samplefull[[paste0('aftpropnpl',psite)]]
#sample$aftfinalnpl<-sample[[paste0('aftfinalnpl',fnplsite)]]
#sample$timefinalnplfe<-sample[[paste0('timefinalnplfe',fnplsite)]]
samplefull$treatControlsComplete<-samplefull[[paste0('treatControlsComplete',psite)]]
samplefull$timefed<-samplefull[[paste0('timefed',psite)]]
samplefull$timefedControlsComplete<-samplefull[[paste0('timefedControlsComplete',psite)]]
#sample$treatst<-sample[[paste0('treatd',llc,'gw',psite)]] #*sample[[paste0('dist',dist[[5]])]]
samplefull$treatexCC<-ifelse(samplefull$treatControlsComplete==1 &samplefull$treatst==0,1,0)
#Private well ATE
samplefull$treatdgwWL<- samplefull[[paste0('treatd',llc,'gw',psite)]]* samplefull$WaterStndCode.fWL
samplefull$treatgroupWL<-samplefull$treatmentgroup * samplefull$WaterStndCode.fWL
samplefull$controlWL<-samplefull$control*samplefull$WaterStndCode.fWL
samplefull$sample.WLATE<-samplefull$control+samplefull$treatgroupWL
samplefull$aftpropnpl<-samplefull[[paste0('aftpropnpl',psite)]]
#sample$aftfinalnpl<-sample[[paste0('aftfinalnpl',fnplsite)]]
#sample$timefinalnplfe<-sample[[paste0('timefinalnplfe',fnplsite)]]
samplefull$treatControlsComplete<-samplefull[[paste0('treatControlsComplete',psite)]]
samplefull$timefed<-samplefull[[paste0('timefed',psite)]]
samplefull$timefedControlsComplete<-samplefull[[paste0('timefedControlsComplete',psite)]]
#sample$treatst<-sample[[paste0('treatd',llc,'gw',psite)]] #*sample[[paste0('dist',dist[[5]])]]
samplefull$treatexCC<-ifelse(samplefull$treatControlsComplete==1 &samplefull$treatst==0,1,0)
for(psite in psitel[2:length(psitel)]){
sample2<-readRDS(paste(path,'fullbaj',psite,'.rds', sep=""), refhook = NULL)
sample2<-sample2[predate>0,]
sample2$treatst<-sample2[[paste0('treatdgw',psite)]] #*sample[[paste0('dist',dist[[5]])]]
buf<-1
#sample2$buffer<-ifelse(sample2$date-odNPL$date[psite]+(buf*365)>0&sample2$date-odNPL$date[psite]<0,1,0)
sample2$buffer<-ifelse(abs(sample2$date-odNPL$date[psite])-(buf*365)<0,1,0)
sample2<-sample2[buffer<1,]
sample2$timetotreat<-sample2$date-odNPL$date[psite]
sample2$lsite<-psite
sample2$demlogprice<-demeanlist(sample2$logprice,
list(as.factor(sample2$PropertyAddressCensusTractAndBlock)))
sample2$aftpropnpl<-sample2[[paste0('aftpropnpl',psite)]]
#sample$aftfinalnpl<-sample[[paste0('aftfinalnpl',fnplsite)]]
#sample$timefinalnplfe<-sample[[paste0('timefinalnplfe',fnplsite)]]
sample2$treatControlsComplete<-sample2[[paste0('treatControlsComplete',psite)]]
sample2$timefed<-sample2[[paste0('timefed',psite)]]
sample2$timefedControlsComplete<-sample2[[paste0('timefedControlsComplete',psite)]]
sample2$treatexCC<-ifelse(sample2$treatControlsComplete==1 &sample2$treatst==0,1,0)
#Municipal ATE
sample2$treatdgwMU<- sample2[[paste0('treatd',llc,'gw',psite)]]* sample2$WaterStndCode.fMU
sample2$treatgroupMU<-sample2$treatmentgroup * sample2$WaterStndCode.fMU
sample2$controlMU<-sample2$control*sample2$WaterStndCode.fMU
sample2$sample.MUATE<-sample2$control+sample2$treatgroupMU
sample2$aftpropnpl<-sample2[[paste0('aftpropnpl',psite)]]
#sample$aftfinalnpl<-sample[[paste0('aftfinalnpl',fnplsite)]]
#sample$timefinalnplfe<-sample[[paste0('timefinalnplfe',fnplsite)]]
sample2$treatControlsComplete<-sample2[[paste0('treatControlsComplete',psite)]]
sample2$timefed<-sample2[[paste0('timefed',psite)]]
sample2$timefedControlsComplete<-sample2[[paste0('timefedControlsComplete',psite)]]
#sample$treatst<-sample[[paste0('treatd',llc,'gw',psite)]] #*sample[[paste0('dist',dist[[5]])]]
sample2$treatexCC<-ifelse(sample2$treatControlsComplete==1 &sample2$treatst==0,1,0)
#Private well ATE
sample2$treatdgwWL<- sample2[[paste0('treatd',llc,'gw',psite)]]* sample2$WaterStndCode.fWL
sample2$treatgroupWL<-sample2$treatmentgroup * sample2$WaterStndCode.fWL
sample2$controlWL<-sample2$control*sample2$WaterStndCode.fWL
sample2$sample.WLATE<-sample2$control+sample2$treatgroupWL
sample2$aftpropnpl<-sample2[[paste0('aftpropnpl',psite)]]
#sample$aftfinalnpl<-sample[[paste0('aftfinalnpl',fnplsite)]]
#sample$timefinalnplfe<-sample[[paste0('timefinalnplfe',fnplsite)]]
sample2$treatControlsComplete<-sample2[[paste0('treatControlsComplete',psite)]]
sample2$timefed<-sample2[[paste0('timefed',psite)]]
sample2$timefedControlsComplete<-sample2[[paste0('timefedControlsComplete',psite)]]
#sample$treatst<-sample[[paste0('treatd',llc,'gw',psite)]] #*sample[[paste0('dist',dist[[5]])]]
sample2$treatexCC<-ifelse(sample2$treatControlsComplete==1 &sample2$treatst==0,1,0)
samplefull<-rbind(samplefull,sample2)
}
#psite<-2
samplefull$timetotreat<-samplefull$timetotreat/365
samplefull<-samplefull[abs(timetotreat)-10<0,]
samplefull$treatd0gw<-samplefull$treatdgw
#samplefull<-samplefull[samplefull$price>15000,]
#samplefull$price<-Winsorize(samplefull$price)
}
if(TRUE){
treatc<-treatl[[1]]
#for(ll in 1:length(laglead)){
dic<-dist[[1]]
ll<-1
llc<-laglead[[ll]]
sample<-samplefull[samplefull[[paste0('dist',dic)]]>0,]
#sample$treatst<-sample$treatst-sample$presstatusd
#sample<-sample[treatst>0,]
sample<-sample[presstatusd==0,]
upper.spatial.range<-c(20)
lower.spatial.range<-c(0)
spatial.power.range<-c(10)
temporal.cut.range<-c(30)
temporal.power.range<-c(10)
urange<-upper.spatial.range
lrange<-lower.spatial.range
prange<-spatial.power.range
crange<-temporal.cut.range
qrange<-temporal.power.range
d.sample.data<-sample
W.trend.lag.variables<-function(urange,lrange,prange,crange,qrange,path){
denom<-0
for(c in crange){
for(q in qrange){
denom<-denom+1
}
}
for(u in urange){
for(l in lrange){
for(p in prange){
if(u>l){
denom<-denom+1
}
}
}
}
dist.mat<-distm (cbind(d.sample.data$PropertyAddressLongitude, d.sample.data$PropertyAddressLatitude), fun = distHaversine)
Wtime<- function(c,q){
  rep.row<-function(x,n){
    matrix(rep(x,each=n),nrow=n)
  }
  t<-d.sample.data$predate
  T1<-rep.row(t,nrow(d.sample.data))
  T2<-T1
  T1<-t(T1)
  Tdp<-t(t(T1-T2))
  Tdp[Tdp >= c*365] = -2
  Tdp[Tdp<=-365] = -2
  Wtp<-1/(1+abs(Tdp))
  Wtp[Tdp == -2]<-0   # drop pairs outside the temporal window (the -2 sentinel)
  #Wtp[Wtp == 'Inf'] = 0
  Wt<-Wtp^q
  return(Wt)
}
num<-0
for(c in crange){
for(q in qrange){
assign(paste('wt',c,'m',q,sep=""),Wtime(c,q))
num<-num+1
print(paste(num, 'of', denom,sep=" "))
}
}
Wspat<- function(u,l,p){
  Sp<-dist.mat
  Sp[Sp>= u*500] = -2
  Sp[Sp <= l*500] = -2
  Wsp<-1/(1+Sp)
  Wsp[Wsp<0]<-0
  #Wsp[Wsp == 'Inf'] = 0
  Ws<-Wsp^p
  return(Ws)
}
for(u in urange){
for(l in lrange){
for(p in prange){
assign(paste('ws',u,'m',l, 'm',p,sep=""),Wspat(u,l,p))
if(u>l){
num<-num+1
print(paste(num,'of',denom,sep=" "))
}
else{num<-num}
}
}
}
#Dummies
rep.row<-function(x,n){
matrix(rep(x,each=n),nrow=n)
}
if(FALSE){
A<-rep.row(d.sample.data$YearBuilt,nrow(d.sample.data))
At<-t(A)
D<-At-A
D[D<10000000000]<-1
D1<-D
}
Wst<-hadamard.prod(get(paste('ws',u,'m',l, 'm',p,sep="")), get(paste('wt',c,'m',q,sep="")))
#Wst<-Wst*get(paste('D',d, sep=""))
weight<-rowSums(Wst)
for(i in 1:nrow(d.sample.data)){
if(weight[i]==0){
weight[i]<-1
}
}
Wst<-Wst*(1/weight)
#diag(Wst)<-0
#Wst<-mat2listw(Wst, style="W")
sample[[paste0('lagu',u,'l',l, 'sp',p,'c',c, 'tp',q,sep="")]]<-Wst %*% sample$price
sample[[paste0('lnlagu',u,'l',l, 'sp',p,'c',c, 'tp',q,sep="")]]<-Wst %*%sample$logprice
saveRDS(sample, file = path, ascii = FALSE, version = NULL,
compress = TRUE, refhook = NULL)
#stop cluster
}
start.time <- Sys.time()
W.trend.lag.variables(upper.spatial.range,lower.spatial.range,spatial.power.range,
temporal.cut.range,temporal.power.range,paste0(path,"pretreatlag.rds"))
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
}
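# Hedged toy illustration (added) of the weighting scheme in W.trend.lag.variables():
# an inverse-distance spatial kernel and an inverse-gap temporal kernel are combined
# elementwise (Hadamard product) and each row is normalized before forming the
# spatio-temporal lag of price. Numbers and the power of 2 are purely illustrative.
if(FALSE){
  d  <- matrix(c(0, 300, 900, 300, 0, 600, 900, 600, 0), 3, 3)   # pairwise distances (metres)
  dt <- matrix(c(0, 100, 400, 100, 0, 200, 400, 200, 0), 3, 3)   # gaps between prior-sale dates (days)
  Ws  <- (1/(1 + d))^2                     # spatial kernel
  Wt  <- (1/(1 + dt))^2                    # temporal kernel
  Wst <- Ws * Wt                           # Hadamard product
  diag(Wst) <- 0                           # no self-weighting in this toy
  Wst <- Wst / pmax(rowSums(Wst), 1e-12)   # row-normalize
  prices <- c(100000, 150000, 120000)
  Wst %*% prices                           # spatio-temporal lag of price
}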
dist<-c('10k','8k','6k','4k','2k')#,'1k','500m')
#dist<-c('4k','2k')#,'1k','500m')
laglead<-c("")
treatl<-c('TATE','MUATE','WLATE')
#sample<-readRDS(paste0(path,"pretreatlag.rds"), refhook = NULL)
sample<-readRDS(paste0(path,"fullcen.rds"), refhook = NULL)
samplefull<-sample
quant<-10
qcut<-cut2(samplefull$preprice, g=quant, onlycuts = TRUE)
p <- ggplot(samplefull, aes(x=logprice, fill= as.character(treatmentgroup))) +
geom_density(alpha=.3) +
xlab("Log Price") +
ylab("Density")+
guides(fill=guide_legend(title="Treatment Group"))
lp <- ggplot(samplefull, aes(x=prelogprice, fill= as.character(treatmentgroup))) +
geom_density(alpha=.3) +
xlab("Pretreatment Log Price") +
ylab("Density")+
guides(fill=guide_legend(title="Treatment Group"))
sf <- ggplot(samplefull, aes(x=log(sqfeet), fill= as.character(treatmentgroup))) +
geom_density(alpha=.3) +
xlab("Log(Square Feet)") +
ylab("Density")+
guides(fill=guide_legend(title="Treatment Group"))
da <- ggplot(samplefull, aes(x=as.Date(RecordingDate), fill= as.character(treatmentgroup))) +
geom_density(alpha=.5,adjust=2) +
xlab("Date") +
ylab("Density")+
guides(fill=guide_legend(title="Treatment Group"))
yb <- ggplot(samplefull, aes(x=YearBuilt, fill= as.character(treatmentgroup))) +
geom_density(alpha=.5,adjust=2) +
xlab("Year Built") +
ylab("Density")+
guides(fill=guide_legend(title="Treatment Group"))
fb <- ggplot(samplefull, aes(x=FullBath, fill= as.character(treatmentgroup))) +
geom_density(alpha=.5,adjust=2) +
xlab("Full Bath") +
ylab("Density")+
guides(fill=guide_legend(title="Treatment Group"))
ls <- ggplot(samplefull, aes(x=LotSizeSquareFeet, fill= as.character(treatmentgroup))) +
geom_density(alpha=.5,adjust=2) +
xlab("Full Bath") +
ylab("Density")+
guides(fill=guide_legend(title="Treatment Group"))
sumvar<-c("price","sqfeet","TotalRooms","YearBuilt","FullBath")
ts<-sample[treatmentgroup==1,c("price","sqfeet","YearBuilt","RecordingDate","FullBath","LotSizeSquareFeet")]
ts$year <- as.numeric(as.character(factor(format(as.Date(ts$RecordingDate),'%Y'))))
cs<-sample[treatmentgroup==0,c("price","sqfeet","YearBuilt","RecordingDate","FullBath","LotSizeSquareFeet")]
cs$year <- as.numeric(as.character(factor(format(as.Date(cs$RecordingDate),'%Y'))))
sum25<-rbind("25th percentile",quantile(ts$price)[2],quantile(ts$sqfeet)[2],quantile(ts$YearBuilt)[2],quantile(ts$year)[2],quantile(ts$FullBath)[2],quantile(ts$LotSizeSquareFeet)[2],
quantile(cs$price)[2],quantile(cs$sqfeet)[2],quantile(cs$YearBuilt)[2],quantile(cs$year)[2],quantile(cs$FullBath)[2],quantile(cs$LotSizeSquareFeet)[2])
summed<-rbind("Median",median(ts$price),median(ts$sqfeet),median(ts$YearBuilt),median(ts$year),median(ts$FullBath),median(ts$LotSizeSquareFeet),
median(cs$price),median(cs$sqfeet),median(cs$YearBuilt),median(cs$year),median(cs$FullBath),median(cs$LotSizeSquareFeet))
summean<-rbind("Mean",floor(mean(ts$price)),floor(mean(ts$sqfeet)),floor(mean(ts$YearBuilt)),floor(mean(ts$year)),floor(mean(ts$FullBath)),floor(mean(ts$LotSizeSquareFeet)),
floor(mean(cs$price)),floor(mean(cs$sqfeet)),floor(mean(cs$YearBuilt)),floor(mean(cs$year)),floor(mean(cs$FullBath)),floor(mean(cs$LotSizeSquareFeet)))
sum75<-rbind("75th percentile",quantile(ts$price)[4],quantile(ts$sqfeet)[4],quantile(ts$YearBuilt)[4],quantile(ts$year)[4],quantile(ts$FullBath)[4],quantile(ts$LotSizeSquareFeet)[4],
quantile(cs$price)[4],quantile(cs$sqfeet)[4],quantile(cs$YearBuilt)[4],quantile(cs$year)[4],quantile(cs$FullBath)[4],quantile(cs$LotSizeSquareFeet)[4])
tdiff<-rbind("T-Test",base::round(as.numeric(t.test(ts$price,cs$price)["statistic"]),digits=2),
base::round(as.numeric(t.test(ts$sqfeet,cs$sqfeet)["statistic"]),digits=2),
base::round(as.numeric(t.test(ts$year,cs$year)["statistic"]),digits=2),
base::round(as.numeric(t.test(ts$YearBuilt,cs$YearBuilt)["statistic"]),digits=2),
base::round(as.numeric(t.test(ts$FullBath,cs$FullBath)["statistic"]),digits=2),
base::round(as.numeric(t.test(ts$LotSizeSquareFeet,cs$LotSizeSquareFeet)["statistic"]),digits=2),
base::round(as.numeric(t.test(cs$price,ts$price)["statistic"]),digits=2),
base::round(as.numeric(t.test(cs$sqfeet,ts$sqfeet)["statistic"]),digits=2),
base::round(as.numeric(t.test(cs$year,ts$year)["statistic"]),digits=2),
base::round(as.numeric(t.test(cs$YearBuilt,ts$YearBuilt)["statistic"]),digits=2),
base::round(as.numeric(t.test(cs$FullBath,ts$FullBath)["statistic"]),digits=2),
base::round(as.numeric(t.test(cs$LotSizeSquareFeet,ts$LotSizeSquareFeet)["statistic"]),digits=2))
rlab<-rbind(" ","Price","Square Feet","Year Built","Year","Full Bathroom","Lot Size","Price","Square Feet","Year Built","Year","Full Bathroom","Lot Size")
rlab2<-rbind(" ","Treatment","Group"," "," "," "," ","Control","Group"," "," "," "," ")
sumtab<-cbind(rlab2,rlab,sum25,summed,summean,sum75,tdiff)
rownames(sumtab)<-NULL
colnames(sumtab)<-NULL
sumtab
xtab<-xtable(sumtab)
align(xtab) <- "rl|l|rrrr|r"
print.xtable(xtab,include.rownames=FALSE, hline.after = c(0,1,7,dim(sumtab)[1]),
include.colnames=FALSE, sanitize.text.function = identity,
caption = "Summary Statistics",
label = "tab:summary",
type="latex", file=paste0(path,'latex/sumtab.tex'))
sumvar<-c("price","sqfeet","TotalRooms","YearBuilt","FullBath")
pts<-sample[treatmentgroup==1&timetotreat<0,c("price","sqfeet","YearBuilt","RecordingDate","FullBath","LotSizeSquareFeet")]
pts$year <- as.numeric(as.character(factor(format(as.Date(pts$RecordingDate),'%Y'))))
pcs<-sample[treatmentgroup==0&timetotreat<0,c("price","sqfeet","YearBuilt","RecordingDate","FullBath","LotSizeSquareFeet")]
pcs$year <- as.numeric(as.character(factor(format(as.Date(pcs$RecordingDate),'%Y'))))
ats<-sample[treatmentgroup==1&timetotreat>0,c("price","sqfeet","YearBuilt","RecordingDate","FullBath","LotSizeSquareFeet")]
ats$year <- as.numeric(as.character(factor(format(as.Date(ats$RecordingDate),'%Y'))))
acs<-sample[treatmentgroup==0&timetotreat>0,c("price","sqfeet","YearBuilt","RecordingDate","FullBath","LotSizeSquareFeet")]
acs$year <- as.numeric(as.character(factor(format(as.Date(acs$RecordingDate),'%Y'))))
sum25<-rbind("25th percentile",quantile(pts$price)[2],quantile(pts$sqfeet)[2],quantile(pts$YearBuilt)[2],quantile(pts$year)[2],quantile(pts$LotSizeSquareFeet)[2],
quantile(pcs$price)[2],quantile(pcs$sqfeet)[2],quantile(pcs$YearBuilt)[2],quantile(pcs$year)[2],quantile(pcs$LotSizeSquareFeet)[2],
quantile(ats$price)[2],quantile(ats$sqfeet)[2],quantile(ats$YearBuilt)[2],quantile(ats$year)[2],quantile(ats$LotSizeSquareFeet)[2],
quantile(acs$price)[2],quantile(acs$sqfeet)[2],quantile(acs$YearBuilt)[2],quantile(acs$year)[2],quantile(acs$LotSizeSquareFeet)[2])
summed<-rbind("Median",median(pts$price),median(pts$sqfeet),median(pts$YearBuilt),median(pts$year),median(pts$LotSizeSquareFeet),
median(pcs$price),median(pcs$sqfeet),median(pcs$YearBuilt),median(pcs$year),median(pcs$LotSizeSquareFeet),
median(ats$price),median(ats$sqfeet),median(ats$YearBuilt),median(ats$year),median(ats$LotSizeSquareFeet),
median(acs$price),median(acs$sqfeet),median(acs$YearBuilt),median(acs$year),median(acs$LotSizeSquareFeet))
summean<-rbind("Mean",mean(pts$price),mean(pts$sqfeet),mean(pts$YearBuilt),mean(pts$year),mean(pts$LotSizeSquareFeet),
mean(pcs$price),mean(pcs$sqfeet),mean(pcs$YearBuilt),mean(pcs$year),mean(pcs$LotSizeSquareFeet),
mean(ats$price),mean(ats$sqfeet),mean(ats$YearBuilt),mean(ats$year),mean(ats$LotSizeSquareFeet),
mean(acs$price),mean(acs$sqfeet),mean(acs$YearBuilt),mean(acs$year),mean(acs$LotSizeSquareFeet))
sum75<-rbind("75th percentile",quantile(pts$price)[4],quantile(pts$sqfeet)[4],quantile(pts$YearBuilt)[4],quantile(pts$year)[4],quantile(pts$LotSizeSquareFeet)[4],
quantile(pcs$price)[4],quantile(pcs$sqfeet)[4],quantile(pcs$YearBuilt)[4],quantile(pcs$year)[4],quantile(pcs$LotSizeSquareFeet)[4],
quantile(ats$price)[4],quantile(ats$sqfeet)[4],quantile(ats$YearBuilt)[4],quantile(ats$year)[4],quantile(ats$LotSizeSquareFeet)[4],
quantile(acs$price)[4],quantile(acs$sqfeet)[4],quantile(acs$YearBuilt)[4],quantile(acs$year)[4],quantile(acs$LotSizeSquareFeet)[4])
#rlab<-rbind(" ","Price","Square Feet","Year Built","Year","Lot Size","Price","Square Feet","Year Built","Year","Full Bathroom","Lot Size")
#rlab2<-rbind(" ","Treatment","Group"," "," "," "," ","Control","Group"," "," "," "," ")
sumtab1<-cbind(sum25,summed,summean,sum75)
#sample$date<-sample$date.x
#dist<-c('10k','8k','6k','5k','4k','3k','2k')#,'1k','500m')
dist<-c('8k','6k','4k','2k')#,'1k','500m')
dist<-c('10k','8k','6k','4k','2k')#,'1k','500m')
#dist<-c('4k','2k')#,'1k','500m')
laglead<-c("")
treatl<-c('TATE','MUATE','WLATE')
#di<-5
#ll<-1
#for(buf in 1:2){
specl<-c("i","a","s","y","")
#matrices
for(i in c("lm","gam","sp","match")){
for(j in c("t","wl","mu")){
for(k in c("did")){
for(treat in treatl){
for(spec in specl){
assign(paste0('betas.',i,'.',j,'.',k,'.',treat,'.',spec),matrix(ncol = length(dist),nrow=quant))
assign(paste0('ses.',i,'.',j,'.',k,'.',treat,'.',spec),matrix(ncol = length(dist),nrow=quant))
assign(paste0('ps.',i,'.',j,'.',k,'.',treat,'.',spec),matrix(ncol = length(dist),nrow=quant))
assign(paste0('betas.',i,'.',k),matrix(ncol = length(dist),nrow=length(treatl)))
assign(paste0('ses.',i,'.',k),matrix(ncol = length(dist),nrow=length(treatl)))
assign(paste0('ps.',i,'.',k),matrix(ncol = length(dist),nrow=length(treatl)))
}
}
}
}
}
di<-1
treat<-1
#psite<-21
for(treat in 1:length(treatl)){
treatc<-treatl[[treat]]
#for(ll in 1:length(laglead)){
for(di in 1:length(dist)){
for(match in c("","match")){
#match<-""
dic<-dist[[di]]
ll<-1
llc<-laglead[[ll]]
sample<-samplefull[samplefull[[paste0('dist',dic)]]>0,]
sample<-sample[presstatusd==0,]
if(match=="match"){
#LotSizeSquareFeet + YearBuilt + FullBath + HalfBath + sqfeet+day+prediffdate+predate
samplem<-sample[,c("TransId","date","treatmentgroup","LotSizeSquareFeet", "sqfeet","YearBuilt", "FullBath","day","predate")]
mdm<- matchit(treatmentgroup~LotSizeSquareFeet+ YearBuilt + FullBath +sqfeet+day+predate,data=samplem, method = "nearest", distance = "mahalanobis")
mdm.treat <- match.data(mdm, group = "treat")
mdm.control <- match.data(mdm, group = "control")
mdm<-rbind(mdm.treat,mdm.control)
mdm<-mdm[,c("TransId","date")]
mdm.full<-merge(mdm,sample,all.x = TRUE, by = "TransId")
sample<-mdm.full
}
#sample$treatst<-sample$treatst-sample$presstatusd
#sample<-sample[treatst>0,]
# sample$logprice<-sample$logprice.x
if(treatc=='TATE'){
#Total Average Treatment Effect
}
if(treatc=='MUATE'){
#Municipal ATE
sample<-subset(sample, sample.MUATE==1)
}
if(treatc=='WLATE'){
#Private well ATE
sample<-subset(sample, sample.WLATE==1)
}
if(mean(sample$treatst)>0){
#TATE<-as.formula(logprice ~ treatdgw+ treatmentgroup+data.matrix(treatgroupm)+ data.matrix(timefe)+ data.matrix(year[,3:25])+ as.factor(HHID))
sdf<-20
lat<-sample$PropertyAddressLatitude
long<-sample$PropertyAddressLongitude
splat<-bs(lat, df = sdf)
splong<-bs(long, df = sdf)
spint<-model.matrix(~splat:splong)
spTATE<-cbind(splat,splong,spint,lat,long)
#Wst<-readRDS(paste0(path,"Wmat",treatc,dic,".rds"), refhook = NULL)
#sample$wlp<-Wst%*%sample$prelogprice
#sample$wp<-Wst%*%sample$preprice
sample$preyear<-as.numeric(format(as.Date(sample$predate),"%Y"))
sample$premonth<-as.numeric(format(as.Date(sample$predate),"%m"))
sample$year<-as.numeric(format(as.Date(sample$RecordingDate),"%Y"))
sample$month<-as.numeric(format(as.Date(sample$RecordingDate),"%m"))
sample$quarter<-ifelse(sample$month==1|sample$month==2|sample$month==3,1,0)
sample$quarter<-ifelse(sample$month==4|sample$month==5|sample$month==6,2,sample$quarter)
sample$quarter<-ifelse(sample$month==7|sample$month==8|sample$month==9,3,sample$quarter)
sample$quarter<-ifelse(sample$month==10|sample$month==11|sample$month==12,4,sample$quarter)
sample$prequarter<-ifelse(sample$premonth==1|sample$premonth==2|sample$premonth==3,1,0)
sample$prequarter<-ifelse(sample$premonth==4|sample$premonth==5|sample$premonth==6,2,sample$prequarter)
sample$prequarter<-ifelse(sample$premonth==7|sample$premonth==8|sample$premonth==9,3,sample$prequarter)
sample$prequarter<-ifelse(sample$premonth==10|sample$premonth==11|sample$premonth==12,4,sample$prequarter)
sample$prelogpricewlag<-sample$lnlagu20l0sp10c30tp10
sample$tprelprice<-sample$prelogprice*ifelse(sample$treatst==1 & sample$presstatusd==0,1,0)
sample$treatfin<-ifelse(sample$treatst==1 & sample$presstatusd==0,1,0)
indx<- factor(as.numeric(cut2(sample$preprice,cuts=qcut,minmax=TRUE)))
indx2<- factor(as.numeric(cut2(exp(sample$prelogpricewlag),cuts=qcut,minmax=TRUE)))
#indx<-ifelse(as.numeric(indx)!=as.numeric(levels(indx)[1])&
# as.numeric(indx)!=as.numeric(levels(indx)[length(levels(indx))]),"med",indx)
#indx<-ifelse(indx==1,"bottom",indx)
#indx<-ifelse(indx==quant,"top",indx)
treatind<-model.matrix(~treatst:indx-1,sample)
treatind<-treatind[,1:(length(unique(indx)))]
X<-model.matrix(~ treatmentgroup+indx:treatmentgroup+#:indx+#bs(timetotreat,5)+bs(day,5)+#as.factor(round(timetotreat,1))+
#treatmentgroup:as.factor(lsite)+indx:treatmentgroup:as.factor(lsite)+#:indx+#bs(timetotreat,5)+bs(day,5)+#as.factor(round(timetotreat,1))+
#poly(timetotreat,3)+poly(day,3)+
#treatmentgroup*timetotreat+#treatmentgroup*day+
LotSizeSquareFeet + YearBuilt + FullBath + HalfBath +
timetotreat+
sqfeet+ prediffdate+predate+prelogprice+indx+#presstatusd+
#as.factor(floor(timetotreat))+
-1,sample)
qr.X <- qr(X, tol=1e-2, LAPACK = FALSE)
(rnkX <- qr.X$rank) ## 4 (number of non-collinear columns)
(keep <- qr.X$pivot[seq_len(rnkX)])
## 1 2 4 5
X <- X[,keep]
Xlag<-model.matrix(~ treatmentgroup+indx:treatmentgroup+#:indx+#bs(timetotreat,5)+bs(day,5)+#as.factor(round(timetotreat,1))+
#treatmentgroup:as.factor(lsite)+indx:treatmentgroup:as.factor(lsite)+#:indx+#bs(timetotreat,5)+bs(day,5)+#as.factor(round(timetotreat,1))+
#poly(timetotreat,3)+poly(day,3)+
#treatmentgroup*timetotreat+#treatmentgroup*day+
timetotreat+
LotSizeSquareFeet + YearBuilt + FullBath + HalfBath +
sqfeet+ prediffdate+predate+prelogprice+indx+
prelogpricewlag+indx2+#as.factor(floor(timetotreat))+
-1,sample)
qr.Xlag <- qr(Xlag, tol=1e-2, LAPACK = FALSE)
(rnkXlag <- qr.Xlag$rank) ## 4 (number of non-collinear columns)
(keeplag <- qr.Xlag$pivot[seq_len(rnkXlag)])
## 1 2 4 5
Xlag <- Xlag[,keeplag]
results.lm.t.did.i<-felm(logprice ~treatind+
X|as.factor(cbl)+
as.factor(year):as.factor(quarter)+
as.factor(preyear):as.factor(prequarter)| 0 | lsite:year,sample) #as.factor(HHID)+
summary(results.lm.t.did.i)
results.lm.t.did.a<-felm(logprice ~treatind+
X|as.factor(cbg)+
as.factor(year):as.factor(quarter)+
as.factor(preyear):as.factor(prequarter)| 0 | lsite:year,sample) #as.factor(HHID)+
summary(results.lm.t.did.a)
results.lm.t.did.s<-felm(logprice ~treatind+X|as.factor(ctr)+
as.factor(year):as.factor(quarter)+
as.factor(preyear):as.factor(prequarter)| 0 | lsite:year,sample) #as.factor(HHID)+
summary(results.lm.t.did.s)
results.lm.t.did.y<-felm(logprice ~treatind+X|as.factor(lsite)+
as.factor(year):as.factor(quarter)+
as.factor(preyear):as.factor(prequarter)| 0 | lsite:year,sample) #as.factor(HHID)+
summary(results.lm.t.did.y)
#################################
#Spatial
results.sp.t.did.i<-felm(logprice ~treatind+
Xlag|as.factor(cbl)+
as.factor(year):as.factor(quarter)+
as.factor(preyear):as.factor(prequarter)| 0 | lsite:year,sample) #as.factor(HHID)+
summary(results.sp.t.did.i)
results.sp.t.did.a<-felm(logprice ~treatind+
Xlag|as.factor(cbg)+
as.factor(year):as.factor(quarter)+
as.factor(preyear):as.factor(prequarter)| 0 | lsite:year,sample) #as.factor(HHID)+
summary(results.sp.t.did.a)
results.sp.t.did.s<-felm(logprice ~treatind+Xlag|as.factor(ctr)+
as.factor(year):as.factor(quarter)+
as.factor(preyear):as.factor(prequarter)| 0 | lsite:year,sample) #as.factor(HHID)+
summary(results.sp.t.did.s)
results.sp.t.did.y<-felm(logprice ~treatind+Xlag|as.factor(lsite)+
as.factor(year):as.factor(quarter)+
as.factor(preyear):as.factor(prequarter)| 0 | lsite:year,sample) #as.factor(HHID)+
summary(results.sp.t.did.y)
#results.lm.t.did.z<-felm(logprice ~treatind+X|as.factor(PropertyZip)+as.factor(year):as.factor(quarter)+
# as.factor(preyear):as.factor(prequarter)| 0 | lsite:year,sample) #as.factor(HHID)+
#summary(results.lm.t.did.z)
#results.lm.t.did.c<-felm(logprice ~treatind+X|as.factor(PropertyCity)| 0 | lsite:year,sample) #as.factor(HHID)+
#summary(results.lm.t.did.c)
Xg<-model.matrix(~ treatmentgroup+#:indx+#bs(timetotreat,5)+bs(day,5)+#as.factor(round(timetotreat,1))+
#poly(timetotreat,3)+poly(day,3)+
#treatmentgroup*timetotreat+#treatmentgroup*day+
LotSizeSquareFeet + YearBuilt + FullBath + HalfBath +
sqfeet+ prediffdate+predate+prelogprice+ indx+#indx+#presstatusd+
#as.factor(floor(timetotreat))+
as.factor(year):as.factor(quarter)+
as.factor(preyear):as.factor(prequarter)-1,sample)
Xglag<-model.matrix(~ treatmentgroup+#:indx+#bs(timetotreat,5)+bs(day,5)+#as.factor(round(timetotreat,1))+
#poly(timetotreat,3)+poly(day,3)+
#treatmentgroup*timetotreat+#treatmentgroup*day+
LotSizeSquareFeet + YearBuilt + FullBath + HalfBath +
sqfeet+ prediffdate+predate+prelogprice+ indx+
prelogpricewlag+indx2+#indx+#presstatusd+
#as.factor(floor(timetotreat))+
as.factor(year):as.factor(quarter)+
as.factor(preyear):as.factor(prequarter)-1,sample)
sample$tttyear<-as.factor(ceiling(as.numeric(sample$timetotreat)))
sample1<-subset(sample,tttyear!=1)
ptlag<-model.matrix(~treatmentgroup:as.factor(ceiling(as.numeric(timetotreat))),sample1)
#as.factor(cut2(prediffdate, g=quant))+
resid.ptlag<-felm(logprice~ptlag[,c(-10)]+#treatmentgroup+
LotSizeSquareFeet + YearBuilt + FullBath + HalfBath +
sqfeet+timetotreat+#treatmentgroup:timetotreat+#as.factor(tttyear)+
prelogprice|as.factor(lsite):as.factor(year):as.factor(quarter)+
as.factor(lsite):as.factor(preyear):as.factor(prequarter)+
as.factor(ctr)|0|lsite:year,sample1)
summary(resid.ptlag)
qu<-c("Bottom","Middle","Top")
yearstest<-9
qun<-c(-yearstest:-2,2:yearstest)
allModelFrame <- data.frame(Variable = qun,
Coefficient = as.numeric(coef(summary(resid.ptlag))[,"Estimate"][(11-yearstest):(8+yearstest)]),
SE = as.numeric(coef(summary(resid.ptlag))[,"Cluster s.e."][(11-yearstest):(8+yearstest)]),
modelName = "ptlag")
interval2 <- -qnorm((1-0.95)/2) # 95% multiplier
leg<-c('10k','8k','6k','4k','2k')
allModelFrame<-allModelFrame[allModelFrame$modelName!="2k",]
allModelFrame$modelName<-as.factor( allModelFrame$modelName)
allModelFrame$modelName<-factor(allModelFrame$modelName,levels(allModelFrame$modelName)[c(2:5,1)])
# Plot
zp1 <- ggplot(allModelFrame, aes(colour = modelName ))
zp1 <- zp1 + geom_hline(yintercept = 0, colour = gray(1/2), lty = 2)
zp1 <- zp1 + geom_pointrange(aes(x = Variable, y = Coefficient, ymin = Coefficient - SE*interval2,
ymax = Coefficient + SE*interval2,color =modelName),
lwd = 1/2, position = position_dodge(width = 1/2),
shape = 21, fill = "WHITE")
#zp1 <- zp1 + coord_flip() + theme_bw()
#zp1 <- zp1 + geom_line(data = allModelFrame, aes(linetype =modelName ), size = 1) +
zp1 <- zp1 +theme(legend.position="none")+ ggtitle("Test of Difference in Treatment and Control Groups")+xlab('Year From Treatment')
#zp1 <- zp1 + scale_x_continuous(labels=c("1"="B","2"="M","3"="T"))
zp1 <- zp1 + scale_x_continuous(breaks=qun)
print(zp1) # The trick to these is position_dodge().
if(match==""){
ggsave(file=paste(path,'latex/','pretrends',treatc,dic, 'h5.png', sep=""),height = 5,width =9)
}
resid.lm.t.did<-felm(logprice ~Xg[,-1]|as.factor(cbl),sample) #as.factor(HHID)+
summary(resid.lm.t.did)
coefdid<-as.matrix(resid.lm.t.did$coefficients)
coefdid[is.na(coefdid),]<-0
#Xp<-X
#Xp[,1]<-0
#sample$crdid<-sample$demlogprice-cbind(treatind,X)%*%coefdid
sample$crdid<-sample$demlogprice-Xg[,-1]%*%coefdid
sample$crdid<-sample$crdid-mean(sample$crdid)
sample$post<-ifelse(sample$timetotreat>0,1,0)
treatment <- aggregate(crdid ~ treatmentgroup+as.factor(round(timetotreat,2)), data=sample, FUN=mean, na.rm=TRUE)
names(treatment)[2]<-"timetotreat"
treatment$timetotreat<-as.numeric(as.character(treatment$timetotreat))
#treatment<-treatment[as.numeric(treatment$timetotreat)!=0.5,]
ggplot() +
geom_point(data=subset(treatment,treatmentgroup==1), aes(x=timetotreat, y=crdid, color= "Treatment Group")) +
geom_point(data=subset(treatment,treatmentgroup==0), aes(x=timetotreat, y=crdid, color= "Control Group")) +
stat_smooth(method = 'loess', formula = y ~ x ,data=subset(sample,treatmentgroup==1&post==1),se= TRUE, aes(x=timetotreat, y=crdid),color= "blue") +
stat_smooth(method = 'loess', formula = y ~ x ,data=subset(sample,treatmentgroup==1&post==0),se= TRUE, aes(x=timetotreat, y=crdid),color= "blue") +
stat_smooth(method = 'loess', formula = y ~ x ,data=subset(sample,treatmentgroup==0&post==1),se= TRUE, aes(x=timetotreat, y=crdid),color= "red") +
stat_smooth(method = 'loess', formula = y ~ x ,data=subset(sample,treatmentgroup==0&post==0),se= TRUE, aes(x=timetotreat, y=crdid),color= "red") +
ggtitle("Common Trends Assumption")+
xlab('Years from Deletion') + labs(color="Legend") + geom_vline(xintercept=0)+
ylab('Monthly Average Residuals')
if(match==""){
ggsave(file=paste(path,'latex/','lmdidavg',treatc,dic, '.png', sep=""),height = 6,width =10)
}
results.lm.t.did.agg<-felm(logprice ~treatst+Xg|
as.factor(PropertyAddressCensusTractAndBlock),sample) #as.factor(HHID)+
summary(results.lm.t.did.agg)
if(match==""){
betas.lm.did[treat,di]<-as.numeric(coef(summary(results.lm.t.did.agg))[,"Estimate"][1])
ses.lm.did[treat,di]<-as.numeric(coef(summary(results.lm.t.did.agg))[,"Std. Error"][1])
ps.lm.did[treat,di]<-as.numeric(coef(summary(results.lm.t.did.agg))[,"Pr(>|t|)"][1])
results.sp.t.did.agg<-felm(logprice ~treatst+Xglag|
as.factor(PropertyAddressCensusTractAndBlock),sample) #as.factor(HHID)+
summary(results.sp.t.did.agg)
betas.sp.did[treat,di]<-as.numeric(coef(summary(results.sp.t.did.agg))[,"Estimate"][1])
ses.sp.did[treat,di]<-as.numeric(coef(summary(results.sp.t.did.agg))[,"Std. Error"][1])
ps.sp.did[treat,di]<-as.numeric(coef(summary(results.sp.t.did.agg))[,"Pr(>|t|)"][1])
}
if(match=="match"){
betas.match.did[treat,di]<-as.numeric(coef(summary(results.lm.t.did.agg))[,"Estimate"][1])
ses.match.did[treat,di]<-as.numeric(coef(summary(results.lm.t.did.agg))[,"Std. Error"][1])
ps.match.did[treat,di]<-as.numeric(coef(summary(results.lm.t.did.agg))[,"Pr(>|t|)"][1])
}
if(match==""){
results.gam<-mgcv::gam(logprice~treatst+Xg+#s(prelogprice,bs="cr")+
#as.factor(year):as.factor(quarter)+
#as.factor(preyear):as.factor(prequarter)+
as.factor(lsite)+s(lat,long,bs="tp",m=3,k=300),data=sample)
gam.model.t<-mgcv::summary.gam(results.gam)
betas.gam.did[treat,di]<-as.numeric(gam.model.t$p.table[,"Estimate"])[2]
ses.gam.did[treat,di]<-as.numeric(gam.model.t$p.table[,"Std. Error"])[2]
ps.gam.did[treat,di]<-as.numeric(gam.model.t$p.table[,"Pr(>|t|)"])[2]
}
#results.spline<-lm(logprice~treatind+X+spTATE,sample)
#summary(results.spline)
#results.semip<-semip(logprice~treatind+X[,1:8]+preprice,nonpar=~lat+long,window1 = .5, window2 = .5,
# kern="tcub",distance="Mahal",targetfull=NULL, print.summary=TRUE, data=sample)
#summary(results.semip)
#library(gam)
library(mgcv)
if(match==""){
s=mgcv:::s
intsp<-model.matrix(~as.factor(sample$lsite):lat+as.factor(sample$lsite):long-1)
#s1<-sample[lsite!=19,]
#X1<-X[sample$lsite!=19,]
#treatind1<-treatind[sample$lsite!=19,]
results.gam<-mgcv::gam(logprice~treatind+X+#s(prelogprice,bs="cr")+as.factor(lsite):
#s(day,bs="cr")+s(predate,bs="cr")+
as.factor(year):as.factor(quarter)+
as.factor(preyear):as.factor(prequarter)+
as.factor(lsite)+
s(PropertyAddressLatitude,PropertyAddressLongitude,bs="tp",m=3,k=300),data=sample)
gam.model<-mgcv::summary.gam(results.gam)
mgcv::summary.gam(results.gam)
#results.gam<-mgcv::gam(logprice~treatind+X+#s(prelogprice,bs="cr")+
#s(predate,bs="gp")+
#s(day,bs="gp")+
#s(lat,long,bs="ts",m=3,k=500),data=sample)
#mgcv::summary.gam(results.gam)
}
if(FALSE){
vcov_both_formula <- cluster.vcov(results.lm.t.did.t, ~ lsite + year)
dim(results.gam$R)
model.matrix(results.gam$R)
vcov.HC = solve(t(X)%*%X) %*% t(X)%*%diag(ehat^2)%*%X %*% solve(t(X)%*%X)
mg$Vp <- vcov.HC
summary(mg)
all.equal(as.numeric(predict(mg,se.fit=TRUE)$se.fit),se.yhat.HC)
results.gam<-mgcv::gam(logprice~treatind+X+#s(prelogprice,bs="cr")+
#s(day,bs="gp")+
s(lsite,bs="re")+s(year,bs="re")+
s(lat,long,bs="tp",m=3,k=300),data=sample)
mgcv::summary.gam(results.gam)
results.gam<-mgcv::gamm(logprice~treatind+X+#s(prelogprice,bs="cr")+
#s(day,bs="gp")+
s(lat,long,bs="tp",m=3,k=200),
correlation=corSymm(form=~1|PropertyAddressCensusTractAndBlock),data=sample)
mgcv::summary.gam(results.gam)
qgam.fit<-qgam(logprice~treatst+nX+#s(prelogprice,bs="cr")+
#s(day,bs="gp")+
s(lat,long,bs="tp",m=3,k=300), lsig = -1,
data=sample,qu=.5, err = 0.05,control = list("tol" = 0.01))
summary.gam(qgam.fit)
neX<-model.matrix(~ treatmentgroup+#indx:treatmentgroup:as.factor(lsite)+#:indx+#bs(timetotreat,5)+bs(day,5)+#as.factor(round(timetotreat,1))+
#poly(timetotreat,3)+poly(day,3)+
#treatmentgroup*timetotreat+#treatmentgroup*day+
LotSizeSquareFeet + YearBuilt + FullBath + HalfBath +
sqfeet+as.factor(year)+as.factor(preyear)+
as.factor(quarter)+as.factor(prequarter)-1,sample)
nX<-neX
qr.nX <- qr(nX, tol=1e-2, LAPACK = FALSE)
(rnknX <- qr.nX$rank) ## 4 (number of non-collinear columns)
(keepnX <- qr.nX$pivot[seq_len(rnknX)])
## 1 2 4 5
nX <- nX[,keepnX]
rq.o<-rq.fit.sfn(as.matrix.csr(nX),y,
tmpmax=floor(10000+exp(-12.1)*(dim(nX)[1]*20-1)^2.35))
fit.qr<-rq(logprice~nX,tau=.5, data=sample,method="sfn",na.action = na.omit)
fit.qr
summary(fit.qr,se = "boot")
plot(indx,sample$logprice,xlab="Treatind", ylab="Log Price")
taus <- c(.1,.9)
abline(rq(logprice~X2,tau=.5,data=sample),col="blue")
abline(lm(logprice~X2,data=sample),lty = 3,col="red")
for( i in 1:length(taus)){
abline(rq(logprice~X2,tau=taus[i],data=sample),col="gray")
}
#PropertyAddressLatitude PropertyAddressLongitude
Wst<-readRDS(paste0(path,"Wmat",treatc,dic,".rds"), refhook = NULL)
summary(rowSums(Wst))
if(mean(rowSums(Wst))==1){
Wst<-mat2listw(Wst, style="W")
#finalb.lag.2sls.robust2 <- gstslshet(logprice~treatind+X,Wst, data = sample,
# initial.value = 0.2, eps =1e-2, inverse=FALSE,sarar=FALSE)
#summary(finalb.lag.2sls.robust2)
#effects.finalb.lag.2sls.robust2<- impacts(finalb.lag.2sls.robust2, listw= Wst, R=100)
#summary(effects.finalb.lag.2sls.robust2, zstats=TRUE, short=TRUE)
results.stsls<-sacsarlm(logprice~treatind+X, data = sample, listw=Wst, zero.policy = NULL,
na.action = na.fail)
summary(results.stsls)
}
# effects.finalb.lag.2sls.robust2<- impacts(results.stsls, listw= Wst, R=100)
#summary(effects.finalb.lag.2sls.robust2, zstats=TRUE, short=TRUE)
}
if(match==""){
if(treatc=="TATE"){
betas.lm.t.did.TATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Estimate"][1:quant])
betas.lm.t.did.TATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Estimate"][1:quant])
betas.lm.t.did.TATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Estimate"][1:quant])
betas.lm.t.did.TATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Estimate"][1:quant])
betas.sp.t.did.TATE.i[,di]<-as.numeric(coef(summary(results.sp.t.did.i))[,"Estimate"][1:quant])
betas.sp.t.did.TATE.a[,di]<-as.numeric(coef(summary(results.sp.t.did.a))[,"Estimate"][1:quant])
betas.sp.t.did.TATE.s[,di]<-as.numeric(coef(summary(results.sp.t.did.s))[,"Estimate"][1:quant])
betas.sp.t.did.TATE.y[,di]<-as.numeric(coef(summary(results.sp.t.did.y))[,"Estimate"][1:quant])
betas.gam.t.did.TATE.[,di]<-as.numeric(gam.model$p.table[,"Estimate"])[2:(quant+1)]
#betas.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Estimate"][1:quant])
ses.lm.t.did.TATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Cluster s.e."][1:quant])
ses.lm.t.did.TATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Cluster s.e."][1:quant])
ses.lm.t.did.TATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Cluster s.e."][1:quant])
ses.lm.t.did.TATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Cluster s.e."][1:quant])
ses.sp.t.did.TATE.i[,di]<-as.numeric(coef(summary(results.sp.t.did.i))[,"Cluster s.e."][1:quant])
ses.sp.t.did.TATE.a[,di]<-as.numeric(coef(summary(results.sp.t.did.a))[,"Cluster s.e."][1:quant])
ses.sp.t.did.TATE.s[,di]<-as.numeric(coef(summary(results.sp.t.did.s))[,"Cluster s.e."][1:quant])
ses.sp.t.did.TATE.y[,di]<-as.numeric(coef(summary(results.sp.t.did.y))[,"Cluster s.e."][1:quant])
ses.gam.t.did.TATE.[,di]<-as.numeric(gam.model$p.table[,"Std. Error"])[2:(quant+1)]
#ses.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Std. Error"][1:quant])
ps.lm.t.did.TATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Pr(>|t|)"][1:quant])
ps.lm.t.did.TATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Pr(>|t|)"][1:quant])
ps.lm.t.did.TATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Pr(>|t|)"][1:quant])
ps.lm.t.did.TATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.TATE.i[,di]<-as.numeric(coef(summary(results.sp.t.did.i))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.TATE.a[,di]<-as.numeric(coef(summary(results.sp.t.did.a))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.TATE.s[,di]<-as.numeric(coef(summary(results.sp.t.did.s))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.TATE.y[,di]<-as.numeric(coef(summary(results.sp.t.did.y))[,"Pr(>|t|)"][1:quant])
ps.gam.t.did.TATE.[,di]<-as.numeric(gam.model$p.table[,"Pr(>|t|)"])[2:(quant+1)]
#ps.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Pr(>|t|)"][1:quant])
}
if(treatc=="MUATE"){
betas.lm.t.did.MUATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Estimate"][1:quant])
betas.lm.t.did.MUATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Estimate"][1:quant])
betas.lm.t.did.MUATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Estimate"][1:quant])
betas.lm.t.did.MUATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Estimate"][1:quant])
betas.sp.t.did.MUATE.i[,di]<-as.numeric(coef(summary(results.sp.t.did.i))[,"Estimate"][1:quant])
betas.sp.t.did.MUATE.a[,di]<-as.numeric(coef(summary(results.sp.t.did.a))[,"Estimate"][1:quant])
betas.sp.t.did.MUATE.s[,di]<-as.numeric(coef(summary(results.sp.t.did.s))[,"Estimate"][1:quant])
betas.sp.t.did.MUATE.y[,di]<-as.numeric(coef(summary(results.sp.t.did.y))[,"Estimate"][1:quant])
betas.gam.t.did.MUATE.[,di]<-as.numeric(gam.model$p.table[,"Estimate"])[2:(quant+1)]
#betas.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Estimate"][1:quant])
ses.lm.t.did.MUATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Cluster s.e."][1:quant])
ses.lm.t.did.MUATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Cluster s.e."][1:quant])
ses.lm.t.did.MUATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Cluster s.e."][1:quant])
ses.lm.t.did.MUATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Cluster s.e."][1:quant])
ses.sp.t.did.MUATE.i[,di]<-as.numeric(coef(summary(results.sp.t.did.i))[,"Cluster s.e."][1:quant])
ses.sp.t.did.MUATE.a[,di]<-as.numeric(coef(summary(results.sp.t.did.a))[,"Cluster s.e."][1:quant])
ses.sp.t.did.MUATE.s[,di]<-as.numeric(coef(summary(results.sp.t.did.s))[,"Cluster s.e."][1:quant])
ses.sp.t.did.MUATE.y[,di]<-as.numeric(coef(summary(results.sp.t.did.y))[,"Cluster s.e."][1:quant])
ses.gam.t.did.MUATE.[,di]<-as.numeric(gam.model$p.table[,"Std. Error"])[2:(quant+1)]
#ses.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Std. Error"][1:quant])
ps.lm.t.did.MUATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Pr(>|t|)"][1:quant])
ps.lm.t.did.MUATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Pr(>|t|)"][1:quant])
ps.lm.t.did.MUATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Pr(>|t|)"][1:quant])
ps.lm.t.did.MUATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.MUATE.i[,di]<-as.numeric(coef(summary(results.sp.t.did.i))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.MUATE.a[,di]<-as.numeric(coef(summary(results.sp.t.did.a))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.MUATE.s[,di]<-as.numeric(coef(summary(results.sp.t.did.s))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.MUATE.y[,di]<-as.numeric(coef(summary(results.sp.t.did.y))[,"Pr(>|t|)"][1:quant])
ps.gam.t.did.MUATE.[,di]<-as.numeric(gam.model$p.table[,"Pr(>|t|)"])[2:(quant+1)]
#ps.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Pr(>|t|)"][1:quant])
}
if(treatc=="WLATE"){
betas.lm.t.did.WLATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Estimate"][1:quant])
betas.lm.t.did.WLATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Estimate"][1:quant])
betas.lm.t.did.WLATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Estimate"][1:quant])
betas.lm.t.did.WLATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Estimate"][1:quant])
betas.sp.t.did.WLATE.i[,di]<-as.numeric(coef(summary(results.sp.t.did.i))[,"Estimate"][1:quant])
betas.sp.t.did.WLATE.a[,di]<-as.numeric(coef(summary(results.sp.t.did.a))[,"Estimate"][1:quant])
betas.sp.t.did.WLATE.s[,di]<-as.numeric(coef(summary(results.sp.t.did.s))[,"Estimate"][1:quant])
betas.sp.t.did.WLATE.y[,di]<-as.numeric(coef(summary(results.sp.t.did.y))[,"Estimate"][1:quant])
betas.gam.t.did.WLATE.[,di]<-as.numeric(gam.model$p.table[,"Estimate"])[2:(quant+1)]
#betas.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Estimate"][1:quant])
ses.lm.t.did.WLATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Cluster s.e."][1:quant])
ses.lm.t.did.WLATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Cluster s.e."][1:quant])
ses.lm.t.did.WLATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Cluster s.e."][1:quant])
ses.lm.t.did.WLATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Cluster s.e."][1:quant])
ses.sp.t.did.WLATE.i[,di]<-as.numeric(coef(summary(results.sp.t.did.i))[,"Cluster s.e."][1:quant])
ses.sp.t.did.WLATE.a[,di]<-as.numeric(coef(summary(results.sp.t.did.a))[,"Cluster s.e."][1:quant])
ses.sp.t.did.WLATE.s[,di]<-as.numeric(coef(summary(results.sp.t.did.s))[,"Cluster s.e."][1:quant])
ses.sp.t.did.WLATE.y[,di]<-as.numeric(coef(summary(results.sp.t.did.y))[,"Cluster s.e."][1:quant])
ses.gam.t.did.WLATE.[,di]<-as.numeric(gam.model$p.table[,"Std. Error"])[2:(quant+1)]
#ses.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Std. Error"][1:quant])
ps.lm.t.did.WLATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Pr(>|t|)"][1:quant])
ps.lm.t.did.WLATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Pr(>|t|)"][1:quant])
ps.lm.t.did.WLATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Pr(>|t|)"][1:quant])
ps.lm.t.did.WLATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.WLATE.i[,di]<-as.numeric(coef(summary(results.sp.t.did.i))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.WLATE.a[,di]<-as.numeric(coef(summary(results.sp.t.did.a))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.WLATE.s[,di]<-as.numeric(coef(summary(results.sp.t.did.s))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.WLATE.y[,di]<-as.numeric(coef(summary(results.sp.t.did.y))[,"Pr(>|t|)"][1:quant])
ps.gam.t.did.WLATE.[,di]<-as.numeric(gam.model$p.table[,"Pr(>|t|)"])[2:(quant+1)]
#ps.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Pr(>|t|)"][1:quant])
}
}
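# Aside: the storage block above repeats the same extraction for every
# specification; a hypothetical helper of the following form would return the
# first `quant` estimates, clustered s.e.'s and p-values from one fitted model
# (sketch only, not called anywhere in this script).
if(FALSE){
  extract.did.stats <- function(fit, quant){
    ct <- coef(summary(fit))
    list(beta = as.numeric(ct[, "Estimate"][1:quant]),
         se   = as.numeric(ct[, "Cluster s.e."][1:quant]),
         p    = as.numeric(ct[, "Pr(>|t|)"][1:quant]))
  }
}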
if(match=="match"){
if(treatc=="TATE"){
betas.match.t.did.TATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Estimate"][1:quant])
betas.match.t.did.TATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Estimate"][1:quant])
betas.match.t.did.TATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Estimate"][1:quant])
betas.match.t.did.TATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Estimate"][1:quant])
#betas.gam.t.did.TATE.[,di]<-as.numeric(gam.model$p.table[,"Estimate"])[2:(quant+1)]
#betas.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Estimate"][1:quant])
ses.match.t.did.TATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Cluster s.e."][1:quant])
ses.match.t.did.TATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Cluster s.e."][1:quant])
ses.match.t.did.TATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Cluster s.e."][1:quant])
ses.match.t.did.TATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Cluster s.e."][1:quant])
#ses.gam.t.did.TATE.[,di]<-as.numeric(gam.model$p.table[,"Std. Error"])[2:(quant+1)]
#ses.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Std. Error"][1:quant])
ps.match.t.did.TATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Pr(>|t|)"][1:quant])
ps.match.t.did.TATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Pr(>|t|)"][1:quant])
ps.match.t.did.TATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Pr(>|t|)"][1:quant])
ps.match.t.did.TATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Pr(>|t|)"][1:quant])
#ps.gam.t.did.TATE.[,di]<-as.numeric(gam.model$p.table[,"Pr(>|t|)"])[2:(quant+1)]
#ps.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Pr(>|t|)"][1:quant])
}
if(treatc=="MUATE"){
betas.match.t.did.MUATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Estimate"][1:quant])
betas.match.t.did.MUATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Estimate"][1:quant])
betas.match.t.did.MUATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Estimate"][1:quant])
betas.match.t.did.MUATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Estimate"][1:quant])
#betas.gam.t.did.MUATE.[,di]<-as.numeric(gam.model$p.table[,"Estimate"])[2:(quant+1)]
#betas.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Estimate"][1:quant])
ses.match.t.did.MUATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Cluster s.e."][1:quant])
ses.match.t.did.MUATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Cluster s.e."][1:quant])
ses.match.t.did.MUATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Cluster s.e."][1:quant])
ses.match.t.did.MUATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Cluster s.e."][1:quant])
#ses.gam.t.did.MUATE.[,di]<-as.numeric(gam.model$p.table[,"Std. Error"])[2:(quant+1)]
#ses.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Std. Error"][1:quant])
ps.match.t.did.MUATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Pr(>|t|)"][1:quant])
ps.match.t.did.MUATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Pr(>|t|)"][1:quant])
ps.match.t.did.MUATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Pr(>|t|)"][1:quant])
ps.match.t.did.MUATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Pr(>|t|)"][1:quant])
#ps.gam.t.did.MUATE.[,di]<-as.numeric(gam.model$p.table[,"Pr(>|t|)"])[2:(quant+1)]
#ps.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Pr(>|t|)"][1:quant])
}
if(treatc=="WLATE"){
betas.match.t.did.WLATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Estimate"][1:quant])
betas.match.t.did.WLATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Estimate"][1:quant])
betas.match.t.did.WLATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Estimate"][1:quant])
betas.match.t.did.WLATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Estimate"][1:quant])
#betas.gam.t.did.WLATE.[,di]<-as.numeric(gam.model$p.table[,"Estimate"])[2:(quant+1)]
#betas.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Estimate"][1:quant])
ses.match.t.did.WLATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Cluster s.e."][1:quant])
ses.match.t.did.WLATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Cluster s.e."][1:quant])
ses.match.t.did.WLATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Cluster s.e."][1:quant])
ses.match.t.did.WLATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Cluster s.e."][1:quant])
#ses.gam.t.did.WLATE.[,di]<-as.numeric(gam.model$p.table[,"Std. Error"])[2:(quant+1)]
#ses.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Std. Error"][1:quant])
ps.match.t.did.WLATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Pr(>|t|)"][1:quant])
ps.match.t.did.WLATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Pr(>|t|)"][1:quant])
ps.match.t.did.WLATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Pr(>|t|)"][1:quant])
ps.match.t.did.WLATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Pr(>|t|)"][1:quant])
#ps.gam.t.did.WLATE.[,di]<-as.numeric(gam.model$p.table[,"Pr(>|t|)"])[2:(quant+1)]
#ps.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Pr(>|t|)"][1:quant])
}
}
if(FALSE){
sdf<-10
lat<-sample$PropertyAddressLatitude
long<-sample$PropertyAddressLongitude
splat<-bs(lat, df = sdf)
splong<-bs(long, df = sdf)
spint<-model.matrix(~splat:splong)
xcTATE<-cbind(splat,splong,spint,lat,long,poly(sample$day,5),bs(sample$day, df = 10))
year<-dplyr::select(sample, starts_with('year'))
feTATE<-model.matrix(~ treatind+indx+
#prelogprice+prediffdate+predate+
data.matrix(year[,4:25]),sample)#+#timefedControlsComplete+timefed +
#aftfinalnpl+timefinalnplfe+
#data.matrix(exdum)+
#data.matrix(year[,25]),sample)
feTATE<-as.matrix(feTATE[,SD(feTATE)>0])
feTATE<-as.matrix(feTATE[,!duplicated(cor(feTATE))])
qr.X <- qr(feTATE, tol=1e-3, LAPACK = FALSE)
(rnkX <- qr.X$rank) ## 4 (number of non-collinear columns)
(keep <- qr.X$pivot[seq_len(rnkX)])
## 1 2 4 5
feTATE <- feTATE[,keep]
W<-cbind(sample$day,lat,long,X)
colnames(W)[1]<-"day"
colnames(W)[2]<-"lat"
colnames(W)[3]<-"long"
A<-sample$treatmentgroup
V<-X
Time<-sample$day
results.tmle.t.did <- tmleMSM(Y = sample$logprice, A = A, W = W, V = V, #T= Time,
MSM = "A + V",family="gaussian",
Q.SL.library = SL.library2$as.list(),
g.SL.library = SL.library2$as.list(),
#Qform = Y ~ A+V+W,
#gform = A~1,
#hAVform = A~ 1,
ub = 20,
V_SL =5,
alpha = 0.90,
inference = TRUE,
verbose=TRUE)
print(results.tmle.t.did)
summary(results.tmle.t.did)
sdf<-5
lat<-sample$PropertyAddressLatitude
long<-sample$PropertyAddressLongitude
splat<-bs(lat, df = sdf)
splong<-bs(long, df = sdf)
spint<-model.matrix(~splat:splong)
xcTATE<-cbind(splat,splong,spint,lat,long,poly(sample$day,5),bs(sample$day, df = 10))
feTATEes<-model.matrix(~ treatexCC+#aftfinalnpl+
#as.matrix(exdum)
-1,tsample)
feTATEes<-as.matrix(feTATEes[,SD(feTATEes)>0])
if(dim(feTATEes)[2]>1){
feTATEes<-feTATEes[,!duplicated(cor(feTATEes))]
}
qr.X <- qr(feTATEes, tol=1e-3, LAPACK = FALSE)
(rnkX <- qr.X$rank) ## 4 (number of non-collinear columns)
(keep <- qr.X$pivot[seq_len(rnkX)])
## 1 2 4 5
feTATEes <- feTATEes[,keep]
if(nocc==0&dim(as.matrix(feTATEes))[2]>0){
if(dim(data.matrix(tsample))[1]>40){
W<-cbind(xcTATE[sample$control==0,],xTATE[sample$control==0,],feTATEes)
A<-sample[control==0,treatst]
V<-feTATEes
Time<-sample[control==0,day]
results.tmle.t.es <- tmleMSM(Y = tsample$logprice, A = A, W = W, V = V, #T= Time,
MSM = "A + V",family="gaussian",
Q.SL.library = SL.library2$as.list(),
g.SL.library = PS.library2$as.list(),
#Qform = Y ~ A+V+W,
#gform = A~1,
#hAVform = A~ 1,
ub = 20,
V_SL =5,
alpha = 0.90,
inference = TRUE,
verbose=TRUE)
print(results.tmle.t.es)
betas.tmle.t.es[di,treat]<-results.tmle.t.es$psi["A"]
ses.tmle.t.es[di,treat]<-results.tmle.t.es$se["A"]
ps.tmle.t.es[di,treat]<-results.tmle.t.es$pvalue["A"]
cc.betas.tmle.t.es[di,treat]<-results.tmle.t.es$psi["V"]
cc.ses.tmle.t.es[di,treat]<-results.tmle.t.es$se["V"]
cc.ps.tmle.t.es[di,treat]<-results.tmle.t.es$pvalue["V"]
}}
betas.tmle.t.did[di,treat]<-results.tmle.t.did$psi["A"]
ses.tmle.t.did[di,treat]<-results.tmle.t.did$se["A"]
ps.tmle.t.did[di,treat]<-results.tmle.t.did$pvalue["A"]
if(nocc==0 & notg==0){
cc.betas.tmle.t.did[di,treat]<-results.tmle.t.did$psi["VtreatexCC"]
cc.ses.tmle.t.did[di,treat]<-results.tmle.t.did$se["VtreatexCC"]
cc.ps.tmle.t.did[di,treat]<-results.tmle.t.did$pvalue["VtreatexCC"]
}
}
print(paste0('distance = ',dic))
print(paste0('treat = ',treatc))
}
}
}
}
for(statchange in c('')){
for(meth in c('lm','gam','match','sp')){
for(inf in c('did')){
for(treat in treatl){
for(spec in specl){
#treat<-"TATE"
p<-get(paste0(statchange,'ps.',meth,'.t.',inf,'.',treat,'.',spec))
mystars <- ifelse(p < .001, "***", ifelse(p < .01, "** ", ifelse(p < .05, "* ", ifelse(p < .1, "^\\bullet ", " "))))
if(!is.na(p[1,1])){
#pb<-exp(get(paste0(statchange,'betas.',meth,'.t.',inf,'.',treat)))-1
#rpb<-round(pb,3)
#se<-round(exp(get(paste0(statchange,'ses.',meth,'.t.',inf,'.',treat)))-1,3)
pb<-get(paste0(statchange,'betas.',meth,'.t.',inf,'.',treat,'.',spec))
rpb<-round(pb,3)
se<-round(get(paste0(statchange,'ses.',meth,'.t.',inf,'.',treat,'.',spec)),3)
srpb <- matrix(paste(rpb, mystars, sep=""), ncol=dim(pb)[2] )
nsrpb<-rbind(c("",laglead),cbind(dist,srpb))
#colnames(srpb)<-laglead
#rownames(srpb)<-dist
results.mat<-matrix(nrow= 2*dim(srpb)[1],ncol= dim(srpb)[2])
for(i in 1:dim(results.mat)[1]){
if(i %% 2 != 0){
results.mat[i,]<-srpb[ceiling(i/2),]
# rownames(ols.mat)[i]<-rownames(srpb)[ceiling(i/2)]
}
if(i %% 2 == 0){
results.mat[i,]<-paste0('(',se[ceiling(i/2),],')')
}
}
results.mat<-rbind(c('10k','8k','6k','4k','2k'),results.mat)
if(meth=='lm'){
results.mat<-rbind(c('OLS','','','',''),results.mat)
}
if(meth=='gam'){
results.mat<-rbind(c('GAM','','','',''),results.mat)
}
if(meth=='match'){
results.mat<-rbind(c('Matching','','','',''),results.mat)
}
if(meth=='sp'){
results.mat<-rbind(c('Spatial Lag','','','',''),results.mat)
}
# rn<-c(paste0("(",qcut[1],","),paste0(qcut[2],"]"),paste0("(",qcut[2],","),paste0(qcut[3],"]"),
# paste0("(",qcut[3],","),paste0(qcut[4],"]"),
# paste0("(",qcut[4],","),paste0(qcut[5],"]"),paste0("(",qcut[5],","),
# paste0(qcut[6],"]"),paste0("(",qcut[6],","),paste0(qcut[7],"]"),paste0("(",qcut[7],","),
# paste0(qcut[8],"]"),paste0("(",qcut[8],","),paste0(qcut[9],"]"),paste0("(",qcut[9],","),
# paste0(qcut[10],"]"),
# paste0("(",qcut[10],","),paste0(qcut[11],"]"))
rn<-c(paste0("(",qcut[1],",",qcut[2],"]"),"Bottom Ten Percentile" ,
paste0("(",qcut[2],",",qcut[quant],"]"),"Middle 80 Percentile" ,
paste0("(",qcut[quant],",",qcut[quant+1],"]"), "Top Ten Percentile")
rn2<-c(" "," ",paste0("(",qcut[1],", ",qcut[2],"]")," ",paste0("(",qcut[2],", ",qcut[3],"]")," ",
paste0("(",qcut[3],", ",qcut[4],"]")," ",
paste0("(",qcut[4],",",qcut[5],"]")," ",paste0("(",qcut[5],",",qcut[6],"]")," ",
paste0("(",qcut[6],",",qcut[7],"]")," ",paste0("(",qcut[7],",",qcut[8],"]")," ",
paste0("(",qcut[8],",",qcut[9],"]")," ", paste0("(",qcut[9],",",qcut[10],"]")," ",
paste0("(",qcut[10],",",qcut[11],"]")," ")
#rownames(results.mat)<-rn2
results.mat<-cbind(rn2,results.mat)
xtab<-xtable(results.mat)
align(xtab) <- "rl|rrrrr"
print.xtable(xtab,include.rownames=FALSE, hline.after = c(0,1,2,dim(results.mat)[1]),
include.colnames=FALSE, sanitize.text.function = identity,
#caption = "example",
label = paste0("tab:",meth,inf,statchange,treat,spec),
type="latex", file=paste0(path,'latex/',meth,inf,statchange,treat,spec,".tex"))
qu<-c("Bottom","Middle","Top")
qun<-c(1:10)
allModelFrame <- data.frame(Variable = qun,
Coefficient = pb[,1],
SE = se[, 1],
modelName = "10k")
for(i in 2:length(dist)){
di<-dist[i]
modelFrame <- data.frame(Variable = qun,
Coefficient = pb[,i],
SE = se[, i],
modelName = di)
allModelFrame <- data.frame(rbind(allModelFrame,modelFrame))
}
interval2 <- -qnorm((1-0.90)/2) # 90% multiplier (~1.645)
leg<-c('10k','8k','6k','4k','2k')
allModelFrame<-allModelFrame[allModelFrame$modelName!="2k",]
allModelFrame$modelName<-as.factor( allModelFrame$modelName)
allModelFrame$modelName<-factor(allModelFrame$modelName,levels(allModelFrame$modelName)[c(2:5,1)])
# Plot
zp1 <- ggplot(allModelFrame, aes(colour = modelName ))
zp1 <- zp1 + geom_hline(yintercept = 0, colour = gray(1/2), lty = 2)
zp1 <- zp1 + geom_pointrange(aes(x = Variable, y = Coefficient, ymin = Coefficient - SE*interval2,
ymax = Coefficient + SE*interval2,color =modelName),
lwd = 1/2, position = position_dodge(width = 1/2),
shape = 21, fill = "WHITE")
#zp1 <- zp1 + coord_flip() + theme_bw()
#zp1 <- zp1 + geom_line(data = allModelFrame, aes(linetype =modelName ), size = 1) +
zp1 <- zp1 + ggtitle("Comparing distance cut-offs")+xlab('Quantile')
#zp1 <- zp1 + scale_x_continuous(labels=c("1"="B","2"="M","3"="T"))
zp1 <- zp1 + scale_x_continuous(breaks=qun)
print(zp1) # The trick to these is position_dodge().
ggsave(file=paste(path,'latex/','coeff',meth,treat,spec, '.png', sep=""),height = 7,width =9)
}
}
}
}
}
}
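# Aside (sketch): base R's symnum() reproduces the significance stars built
# with nested ifelse() above; the cutpoints mirror the thresholds used in the
# table loops (the LaTeX bullet is left as plain text here).
if(FALSE){
  p.toy <- c(0.0005, 0.004, 0.03, 0.08, 0.5)
  as.character(symnum(p.toy, corr = FALSE, na = FALSE,
                      cutpoints = c(0, .001, .01, .05, .1, 1),
                      symbols = c("***", "** ", "* ", ". ", " ")))
}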
for(meth in c("lm","gam","match",'sp')){
p<-get(paste0('ps.',meth,'.did'))
mystars <- ifelse(p < .001, "***", ifelse(p < .01, "** ", ifelse(p < .05, "* ", ifelse(p < .1, "^\\bullet ", " "))))
#pb<-exp(get(paste0(statchange,'betas.',meth,'.t.',inf,'.',treat)))-1
#rpb<-round(pb,3)
#se<-round(exp(get(paste0(statchange,'ses.',meth,'.t.',inf,'.',treat)))-1,3)
pb<-get(paste0('betas.',meth,'.did'))
rpb<-round(pb,3)
se<-round(get(paste0('ses.',meth,'.did')),3)
srpb <- matrix(paste(rpb, mystars, sep=""), ncol=dim(pb)[2] )
nsrpb<-rbind(c("",laglead),cbind(dist,srpb))
#colnames(srpb)<-laglead
#rownames(srpb)<-dist
results.mat<-matrix(nrow= 2*dim(srpb)[1],ncol= dim(srpb)[2])
for(i in 1:dim(results.mat)[1]){
if(i %% 2 != 0){
results.mat[i,]<-srpb[ceiling(i/2),]
# rownames(ols.mat)[i]<-rownames(srpb)[ceiling(i/2)]
}
if(i %% 2 == 0){
results.mat[i,]<-paste0('(',se[ceiling(i/2),],')')
}
}
results.mat<-rbind(c('10k','8k','6k','4k','2k'),results.mat)
if(meth=='lm'){
results.mat<-rbind(c('OLS','','','',''),results.mat)
}
if(meth=='gam'){
results.mat<-rbind(c('GAM','','','',''),results.mat)
}
if(meth=='match'){
results.mat<-rbind(c('Matching','','','',''),results.mat)
}
if(meth=='sp'){
results.mat<-rbind(c('Spatial Lag','','','',''),results.mat)
}
# rn<-c(paste0("(",qcut[1],","),paste0(qcut[2],"]"),paste0("(",qcut[2],","),paste0(qcut[3],"]"),
# paste0("(",qcut[3],","),paste0(qcut[4],"]"),
# paste0("(",qcut[4],","),paste0(qcut[5],"]"),paste0("(",qcut[5],","),
# paste0(qcut[6],"]"),paste0("(",qcut[6],","),paste0(qcut[7],"]"),paste0("(",qcut[7],","),
# paste0(qcut[8],"]"),paste0("(",qcut[8],","),paste0(qcut[9],"]"),paste0("(",qcut[9],","),
# paste0(qcut[10],"]"),
# paste0("(",qcut[10],","),paste0(qcut[11],"]"))
rn<-c(" "," ", "Total"," ","Municipal Water"," ","Well Water"," ")
#rownames(results.mat)<-rn
results.mat<-cbind(rn,results.mat)
xtab<-xtable(results.mat)
align(xtab) <- "rl|rrrrr"
print.xtable(xtab,include.rownames=FALSE, hline.after = c(0,1,2,dim(results.mat)[1]),
include.colnames=FALSE, sanitize.text.function = identity,
#caption = "example",
label = paste0("tab:",meth,"water"),
type="latex", file=paste0(path,'latex/ATEcomp',meth,'.tex'))
qu<-c("Bottom","Middle","Top")
if(FALSE){
allModelFrame <- data.frame(Variable = qu,
Coefficient = pb[,1],
SE = se[, 1],
modelName = "10k")
for(i in 2:length(dist)){
di<-dist[i]
modelFrame <- data.frame(Variable = qu,
Coefficient = pb[,i],
SE = se[, i],
modelName = di)
allModelFrame <- data.frame(rbind(allModelFrame,modelFrame))
}
interval2 <- -qnorm((1-0.90)/2) # 90% multiplier (~1.645)
leg<-c('10k','8k','6k','4k','2k')
allModelFrame<-allModelFrame[allModelFrame$modelName!="2k",]
allModelFrame$modelName<-as.factor( allModelFrame$modelName)
allModelFrame$modelName<-factor(allModelFrame$modelName,levels(allModelFrame$modelName)[c(2:5,1)])
# Plot
zp1 <- ggplot(allModelFrame, aes(colour = modelName ))
zp1 <- zp1 + geom_hline(yintercept = 0, colour = gray(1/2), lty = 2)
zp1 <- zp1 + geom_pointrange(aes(x = Variable, y = Coefficient, ymin = Coefficient - SE*interval2,
ymax = Coefficient + SE*interval2,color =modelName),
lwd = 1/2, position = position_dodge(width = 1/2),
shape = 21, fill = "WHITE")
#zp1 <- zp1 + coord_flip() + theme_bw()
#zp1 <- zp1 + geom_line(data = allModelFrame, aes(linetype =modelName ), size = 1) +
zp1 <- zp1 + ggtitle("Comparing distance cut-offs")+xlab('Quantile')
#zp1 <- zp1 + scale_x_discrete(breaks=qu)
print(zp1) # The trick to these is position_dodge().
ggsave(file=paste(path,'latex/','coeff',meth,treat, '.png', sep=""),height = 7,width =9)
}
}
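# Aside (sketch): the error bars in the coefficient plots use a normal critical
# value; for a 90% interval the multiplier is qnorm(0.95), for 95% it is
# qnorm(0.975).
if(FALSE){
  -qnorm((1 - 0.90)/2)   # 1.644854
  -qnorm((1 - 0.95)/2)   # 1.959964
}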
for(statchange in c('')){
for(meth in c('lm',"match",'sp')){
for(inf in c('did')){
for(treat in treatl){
for(di in dist){
#treat<-"TATE"
#di<-1
#meth<-'lm'
p1<-get(paste0(statchange,'ps.',meth,'.t.',inf,'.',treat,'.',specl[1]))
p2<-get(paste0(statchange,'ps.',meth,'.t.',inf,'.',treat,'.',specl[2]))
p3<-get(paste0(statchange,'ps.',meth,'.t.',inf,'.',treat,'.',specl[3]))
p4<-get(paste0(statchange,'ps.',meth,'.t.',inf,'.',treat,'.',specl[4]))
p<-rbind(p1,p2,p3,p4)
mystars <- ifelse(p < .001, "***", ifelse(p < .01, "** ", ifelse(p < .05, "* ", ifelse(p < .1, "^\\bullet ", " "))))
if(!is.na(p[1,1])){
#pb<-exp(get(paste0(statchange,'betas.',meth,'.t.',inf,'.',treat)))-1
#rpb<-round(pb,3)
#se<-round(exp(get(paste0(statchange,'ses.',meth,'.t.',inf,'.',treat)))-1,3)
pb1<-get(paste0(statchange,'betas.',meth,'.t.',inf,'.',treat,'.',specl[1]))
pb2<-get(paste0(statchange,'betas.',meth,'.t.',inf,'.',treat,'.',specl[2]))
pb3<-get(paste0(statchange,'betas.',meth,'.t.',inf,'.',treat,'.',specl[3]))
pb4<-get(paste0(statchange,'betas.',meth,'.t.',inf,'.',treat,'.',specl[4]))
pb<-rbind(pb1,pb2,pb3,pb4)
pb10k<-cbind(pb[1:10,1],pb[11:20,1],pb[21:30,1],pb[31:40,1])
pb8k<-cbind(pb[1:10,2],pb[11:20,2],pb[21:30,2],pb[31:40,2])
pb6k<-cbind(pb[1:10,3],pb[11:20,3],pb[21:30,3],pb[31:40,3])
pb4k<-cbind(pb[1:10,4],pb[11:20,4],pb[21:30,4],pb[31:40,4])
rpb<-round(pb,3)
se1<-round(get(paste0(statchange,'ses.',meth,'.t.',inf,'.',treat,'.',specl[1])),3)
se2<-round(get(paste0(statchange,'ses.',meth,'.t.',inf,'.',treat,'.',specl[2])),3)
se3<-round(get(paste0(statchange,'ses.',meth,'.t.',inf,'.',treat,'.',specl[3])),3)
se4<-round(get(paste0(statchange,'ses.',meth,'.t.',inf,'.',treat,'.',specl[4])),3)
se<-rbind(se1,se2,se3,se4)
se10k<-cbind(se[1:10,1],se[11:20,1],se[21:30,1],se[31:40,1])
se8k<-cbind(se[1:10,2],se[11:20,2],se[21:30,2],se[31:40,2])
se6k<-cbind(se[1:10,3],se[11:20,3],se[21:30,3],se[31:40,3])
se4k<-cbind(se[1:10,4],se[11:20,4],se[21:30,4],se[31:40,4])
srpb <- matrix(paste(rpb, mystars, sep=""), ncol=dim(pb)[2] )
nsrpb<-rbind(c("",laglead),cbind(dist,srpb))
#colnames(srpb)<-laglead
#rownames(srpb)<-dist
#10k
results.mat<-matrix(nrow= 2*dim(srpb)[1],ncol= dim(srpb)[2])
for(i in 1:dim(results.mat)[1]){
if(i %% 2 != 0){
results.mat[i,]<-srpb[ceiling(i/2),]
# rownames(ols.mat)[i]<-rownames(srpb)[ceiling(i/2)]
}
if(i %% 2 == 0){
results.mat[i,]<-paste0('(',se[ceiling(i/2),],')')
}
}
colnames(results.mat)<-c('10k','8k','6k','4k','2k')
# rn<-c(paste0("(",qcut[1],","),paste0(qcut[2],"]"),paste0("(",qcut[2],","),paste0(qcut[3],"]"),
# paste0("(",qcut[3],","),paste0(qcut[4],"]"),
# paste0("(",qcut[4],","),paste0(qcut[5],"]"),paste0("(",qcut[5],","),
# paste0(qcut[6],"]"),paste0("(",qcut[6],","),paste0(qcut[7],"]"),paste0("(",qcut[7],","),
# paste0(qcut[8],"]"),paste0("(",qcut[8],","),paste0(qcut[9],"]"),paste0("(",qcut[9],","),
# paste0(qcut[10],"]"),
# paste0("(",qcut[10],","),paste0(qcut[11],"]"))
results10k<-cbind(results.mat[1:20,1],results.mat[21:40,1],results.mat[41:60,1],results.mat[61:80,1])
results8k<-cbind(results.mat[1:20,2],results.mat[21:40,2],results.mat[41:60,2],results.mat[61:80,2])
results6k<-cbind(results.mat[1:20,3],results.mat[21:40,3],results.mat[41:60,3],results.mat[61:80,3])
results4k<-cbind(results.mat[1:20,4],results.mat[21:40,4],results.mat[41:60,4],results.mat[61:80,4])
FEmatrix<- c("Census Tract","Census Tract","Superfund Site","Superfund Site")
Cluster<-c("Tract by Year","Site by Year","Tract by Year","Site by Year")
rn2<-c(paste0("(",qcut[1],",",qcut[2],"]")," ",paste0("(",qcut[2],",",qcut[3],"]")," ",
paste0("(",qcut[3],",",qcut[4],"]")," ",
paste0("(",qcut[4],",",qcut[5],"]")," ",paste0("(",qcut[5],",",qcut[6],"]")," ",
paste0("(",qcut[6],",",qcut[7],"]")," ",paste0("(",qcut[7],",",qcut[8],"]")," ",
paste0("(",qcut[8],",",qcut[9],"]")," ", paste0("(",qcut[9],",",qcut[10],"]")," ",
paste0("(",qcut[10],",",qcut[11],"]")," ", "Fixed Effects", "Cluster")
results10k<-rbind(results10k,FEmatrix,Cluster)
results8k<-rbind(results8k,FEmatrix,Cluster)
results6k<-rbind(results6k,FEmatrix,Cluster)
results4k<-rbind(results4k,FEmatrix,Cluster)
rownames(results10k)<-rn2
rownames(results8k)<-rn2
rownames(results6k)<-rn2
rownames(results4k)<-rn2
rn<-c(paste0("(",qcut[1],",",qcut[2],"]"),"Bottom Ten Percentile" ,
paste0("(",qcut[2],",",qcut[quant],"]"),"Middle 80 Percentile" ,
paste0("(",qcut[quant],",",qcut[quant+1],"]"), "Top Ten Percentile")
xtable(results10k)
print.xtable(xtable(results10k),include.rownames=TRUE,
include.colnames=FALSE, sanitize.text.function = identity,
type="latex", file=paste0(path,'latex/',meth,inf,statchange,treat,"10k.tex"))
xtable(results8k)
print.xtable(xtable(results8k),include.rownames=TRUE,
include.colnames=FALSE, sanitize.text.function = identity,
type="latex", file=paste0(path,'latex/',meth,inf,statchange,treat,"8k.tex"))
xtable(results6k)
print.xtable(xtable(results6k),include.rownames=TRUE,
include.colnames=FALSE, sanitize.text.function = identity,
type="latex", file=paste0(path,'latex/',meth,inf,statchange,treat,"6k.tex"))
xtable(results4k)
print.xtable(xtable(results4k),include.rownames=TRUE,
include.colnames=FALSE, sanitize.text.function = identity,
type="latex", file=paste0(path,'latex/',meth,inf,statchange,treat,"4k.tex"))
qu<-c("Bottom","Middle","Top")
leg<-c('Block','Block Group','Tract','Site')
qun<-c(1:10)
for(j in c("4k","6k","8k","10k")){
pb<-get(paste0("pb",j))
se<-get(paste0("se",j))
allModelFrame <- data.frame(Variable = qun,
Coefficient = pb[,1],
SE = se[, 1],
modelName = 'Block')
for(i in 2:length(leg)){
le<-leg[i]
modelFrame <- data.frame(Variable = qun,
Coefficient = pb[,i],
SE = se[, i],
modelName = le)
allModelFrame <- data.frame(rbind(allModelFrame,modelFrame))
}
interval2 <- -qnorm((1-0.95)/2) # 95% multiplier
allModelFrame<-allModelFrame[allModelFrame$modelName!="2k",]
allModelFrame$modelName<-factor( allModelFrame$modelName,ordered = TRUE)
allModelFrame$modelName<-factor(allModelFrame$modelName,levels(allModelFrame$modelName)[c(1,2,4,3)],ordered = TRUE)
# Plot
zp1 <- ggplot(allModelFrame, aes(colour = modelName ))
zp1 <- zp1 + geom_hline(yintercept = 0, colour = gray(1/2), lty = 2)
zp1 <- zp1 + geom_pointrange(aes(x = Variable, y = Coefficient, ymin = Coefficient - SE*interval2,
ymax = Coefficient + SE*interval2,color =modelName),
lwd = 1/2, position = position_dodge(width = 1/2),
shape = 21, fill = "WHITE")
#zp1 <- zp1 + coord_flip() + theme_bw()
#zp1 <- zp1 + geom_line(data = allModelFrame, aes(linetype =modelName ), size = 1) +
zp1 <- zp1 + ggtitle("Comparing distance cut-offs")+xlab('Quantile')
#zp1 <- zp1 + scale_x_continuous(labels=c("1"="B","2"="M","3"="T"))
#scale_color_discrete(breaks=c("1","3","10")
zp1 <- zp1 + scale_x_continuous(breaks=qun)
zp1 <- zp1 + labs(color="Fixed Effects")
print(zp1) # The trick to these is position_dodge().
ggsave(file=paste(path,'latex/','coeff',meth,treat,j, 'ols.png', sep=""),height = 7,width =9)
}
}
}
}
}
}
}
|
/AnalyzeNYall.R
|
no_license
|
astevens186/hedonics
|
R
| false | false | 98,734 |
r
|
## Preliminaries
rm(list=ls())
# Change working directory to where you've stored ZTRAX
path<- "P:/Peter/Hedonics/Groundwater/"
#install.packages("dplyr", repos = "http://mran.revolutionanalytics.com")
## This function will check if a package is installed, and if not, install it
pkgTest <- function(x) {
if (!require(x, character.only = TRUE))
{
install.packages(x, dep = TRUE)
if(!require(x, character.only = TRUE)) stop("Package not found")
}
}
## These lines load the required packages
packages <- c("readxl","Hmisc","DescTools","qgam","quantreg","sphet","mgcv","McSpatial","pastecs","rdd","Matrix","psych","xtable","splines","ck37r","data.table","matrixStats","tmle","xgboost", "MatchIt","gtools","statar","foreign","multiwayvcov","lmtest","readstata13","xlsx", "data.table","doSNOW","parallel","compare","doParallel","devtools","foreach","spdep","reshape2","sm","plyr","utils","tcltk","geosphere", "matrixcalc", "dplyr","ExPosition", "randomForest","lfe", "hdm", "rdrobust", "stargazer", "ggplot2", "outliers","rpart","e1071")
lapply(packages, pkgTest)
#library(statar)
## These lines set several options
options(scipen = 999) # Do not print scientific notation
options(stringsAsFactors = FALSE) ## Do not load strings as factors
memory.limit(10000000000000)
NPL<-readRDS(paste(path,'NPLfull.rds', sep=""), refhook = NULL)
pNPL<-subset(NPL,rat_name=="PROPOSAL TO NPL")
fNPL<-subset(NPL,rat_name=="FINAL LISTING ON NPL")
dNPL<-subset(NPL,rat_name=="DELETION FROM NPL"|rat_name=="PARTIAL NPL DELETION")
pdNPL<-subset(NPL,rat_name=="PARTIAL NPL DELETION")
opNPL<- pNPL#[order(pNPL$date),]
ofNPL<- fNPL#[order(fNPL$date),]
odNPL<- dNPL#[order(dNPL$date),]
opdNPL<- pdNPL#[order(pdNPL$date),]
odNPL<-odNPL[odNPL$rstate_code == "NY",]
##set up superlearner for TMLE
num_cores = RhpcBLASctl::get_num_cores()
# How many cores does this computer have?
num_cores
# Use all of those cores for parallel SuperLearner.
options(mc.cores = num_cores)
# Check how many parallel workers we are using:
getOption("mc.cores")
# Set multicore compatible seed.
set.seed(1, "L'Ecuyer-CMRG")
# multicore superlearner
# different configurations.
xgboost.tune <- list(ntrees = c(50, 100),
max_depth = c(5,10,15),
shrinkage = c( 0.01,0.1),
minobspernode = c(10))
# Set detailed names = T so we can see the configuration for each function.
# Also shorten the name prefix.
xgboost <- create.Learner("SL.xgboost", tune = xgboost.tune, detailed_names = T, name_prefix = "xgb")
# configurations
length(xgboost$names)
xgboost$names
xgboost.tune2 <- list(ntrees = c(50),
max_depth = c(5,15),
shrinkage = c( 0.1),
minobspernode = c(10))
# Set detailed names = T so we can see the configuration for each function.
# Also shorten the name prefix.
xgboost2 <- create.Learner("SL.xgboost", tune = xgboost.tune2, detailed_names = T, name_prefix = "xgb")
# configurations
length(xgboost2$names)
xgboost2$names
# different configurations.
glmnet.tune <- list(alpha = c(0,.1, .25,.5,.75,.9,1))
# Set detailed names = T so we can see the configuration for each function.
# Also shorten the name prefix.
glmnet <- create.Learner("SL.glmnet", tune = glmnet.tune, detailed_names = T, name_prefix = "glmnet")
# configurations
length(glmnet$names)
glmnet$names
glmnet.tune2 <- list(alpha = c(0,1))
# Set detailed names = T so we can see the configuration for each function.
# Also shorten the name prefix.
glmnet2 <- create.Learner("SL.glmnet", tune = glmnet.tune2, detailed_names = T, name_prefix = "glmnet")
# configurations
length(glmnet2$names)
glmnet2$names
# different configurations.
randomForest.tune <- list(ntree = c(500))
# Set detailed names = T so we can see the configuration for each function.
# Also shorten the name prefix.
randomForest <- create.Learner("SL.randomForest", tune = randomForest.tune,
detailed_names = T, name_prefix = "randomForest")
# configurations
length(randomForest$names)
randomForest$names
# different configurations.
gam.tune <- list(spatialsp= c("ts","gp","tp"))#, cts.num = 10)
# Set detailed names = T so we can see the configuration for each function.
# Also shorten the name prefix.
gam <- create.Learner("SL.gam", tune = gam.tune, detailed_names = T, name_prefix = "gam")
# configurations
length(gam$names)
gam$names
expandingList <- function(capacity = 10) {
buffer <- vector('list', capacity)
length <- 0
methods <- list()
methods$double.size <- function() {
buffer <<- c(buffer, vector('list', capacity))
capacity <<- capacity * 2
}
methods$add <- function(val) {
if(length == capacity) {
methods$double.size()
}
length <<- length + 1
buffer[[length]] <<- val
}
methods$as.list <- function() {
b <- buffer[0:length]
return(b)
}
methods
}
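# Usage sketch for expandingList(): the closure grows an internal buffer as
# needed and $as.list() returns only the filled entries; this is how the
# SL.library / PS.library learner lists are assembled below.
if(FALSE){
  demo.list <- expandingList(2)
  demo.list$add("SL.mean")
  demo.list$add("SL.glmnet")
  demo.list$add("SL.xgboost")   # third add triggers double.size()
  demo.list$as.list()           # list("SL.mean", "SL.glmnet", "SL.xgboost")
}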
if(FALSE){
results.gam<-mgcv::gam(logprice~treatind+X+s(day,lat,long,bs="tp",m=3,k=50),data=sample)
mgcv::summary.gam(results.gam)
Xs<-cbind(X,lat,long)
}
SL.gam <- function(Y, X, newX, family, obsWeights,ms=3,ks=50,ksi=10,kt=10,spatialsp,temporalsp="gp",
deg.gam =2 , cts.num=4 ,slat=lat,slong=long, ...) {
.SL.require('mgcv')
s=mgcv:::s
cts.x <- apply(X, 2, function(x) (length(unique(x)) > cts.num))
cts.x["lat"] <- FALSE
cts.x["long"]<- FALSE
cts.x["day"]<- FALSE
if (sum(!cts.x) > 0) {
gam.model <- as.formula(paste("Y~", paste(colnames(X[, cts.x, drop = FALSE]),
collapse = "+"), "+s(lat,long,bs='",spatialsp,"',m=",ms,",k=",ks,")",sep=""))
# "+s(day,bs='",temporalsp,"',m=",ms,",k=",kt,")+",
#paste(paste("s(", colnames(X[, cts.x, drop = FALSE]),",lat,long,bs='",spatialsp,"',m=",ms,",k=",ksi,")", sep=""),
# collapse = "+"),
#"+s(day,lat,long,bs='",spatialsp,"',m=",ms,",k=",ks,")", sep=""))
} else {
gam.model <- as.formula(paste("Y~", paste(paste("s(", colnames(X[, cts.x, drop = FALSE]), ",", deg.gam, ")", sep=""), collapse = "+")))
}
# fix for when all variables are binomial
if (sum(!cts.x) == length(cts.x)) {
gam.model <- as.formula(paste("Y~", paste(colnames(X), collapse = "+"), sep = ""))
}
fit.gam <- mgcv::gam(gam.model, data = X, family = family)
pred <-mgcv::predict.gam(fit.gam, newdata = newX, type = "response")
fit <- list(object = fit.gam)
out <- list(pred = pred, fit = fit)
class(out$fit) <- c("SL.gam")
return(out)
}
predict.SL.gam <- function(object, newdata, ...){
  # the stored fit is an mgcv::gam object (see SL.gam above), so predict with mgcv as well
  .SL.require('mgcv')
  pred <- mgcv::predict.gam(object = object$object, newdata = newdata, type = "response")
  return(pred)
}
.SL.require <- function(package, message = paste('loading required package (', package, ') failed', sep = '')) {
if(!require(package, character.only = TRUE)) {
stop(message, call. = FALSE)
}
invisible(TRUE)
}
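# Hedged sketch: a custom wrapper such as SL.gam above is passed to
# SuperLearner by the names generated with create.Learner() (e.g. gam$names),
# alongside the built-in learners collected in the libraries below. The data
# frame, outcome and learner choice here are hypothetical; the wrapper expects
# lat, long and day columns in X.
if(FALSE){
  toy <- data.frame(x1 = rnorm(200), lat = runif(200),
                    long = runif(200), day = 1:200)
  toy.y <- rnorm(200)
  toy.sl <- SuperLearner::SuperLearner(Y = toy.y, X = toy, family = gaussian(),
                                       SL.library = c("SL.mean", gam$names[1]))
  toy.sl$coef   # ensemble weights on each learner
}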
SL.library<-expandingList()
PS.library<-expandingList()
SL.library$add("SL.mean")
PS.library$add("SL.mean")
SL.library2<-expandingList()
PS.library2<-expandingList()
SL.library2$add("SL.mean")
PS.library2$add("SL.mean")
#SL.library2$add(randomForest$names)
#SL.library2$add("SL.nnls")
#SL.library2$add("SL.gam")
SL.library$add("SL.randomForest")
SL.library$add("SL.xgboost")
#if(FALSE){
for(i in 1:length(glmnet$names)){
SL.library$add(glmnet$names[i])
}
for(i in 1:length(glmnet$names)){
PS.library$add(glmnet$names[i])
}
for(i in 1:length(glmnet2$names)){
SL.library2$add(glmnet2$names[i])
}
for(i in 1:length(glmnet2$names)){
PS.library2$add(glmnet2$names[i])
}
#}
#for(i in 1:length(gam$names)){
# SL.library$add(gam$names[i])
#}
for(i in 1:length(gam$names)){
SL.library2$add(gam$names[i])
}
for(i in 1:length(xgboost$names)){
SL.library$add(c(xgboost$names[i]))
}
#for(i in 1:length(xgboost2$names)){
# SL.library2$add(c(xgboost2$names[i]))
#}
for(i in 1:length(randomForest$names)){
SL.library$add(c(randomForest$names[i]))
}
SL.library$as.list()
SL.library2$as.list()
PS.library$as.list()
PS.library2$as.list()
#############################################################################
#potential sites
psites<-expandingList()
sample<-NULL
for(k in c("full")){
for(i in psite){
#k<-"full"
#i<-73
if (file.exists(paste(path,k,'deletionbajgw',i,'.rds', sep=""))){
sample.1<-readRDS(paste(path,k,'deletionbajgw',i,'.rds', sep=""), refhook = NULL)
print(paste0(k,"and",i,"sample size = ", dim(sample.1[sample.1$treatmentgroup>0,])))
print(paste0(k,"and",i,"treated = ", dim(sample.1[sample.1$treatdgw>0,])))
print(paste0(k,"and",i,"control = ", dim(sample.1[sample.1$control>0,])))
sample.1$treatgwWL<- sample.1$treatdgw * sample.1$WaterStndCode.fWL
print(paste0(k,"and",i,"treat well = ", dim(sample.1[sample.1$treatgwWL>0,])[1]))
sample.1$treatgwMU<- sample.1$treatdgw * sample.1$WaterStndCode.fMU
print(paste0(k,"and",i,"treat public = ", dim(sample.1[sample.1$treatgwMU>0,])[1]))
cut<-100
if(dim(sample.1[sample.1$treatgwWL>0,])[1]-cut>0 & dim(sample.1[sample.1$treatgwMU>0,])[1]-cut>0 &
dim(sample.1[sample.1$control>0,])[1]-cut>0 &
dim(sample.1[sample.1$treatmentgroup>0,])[1]-dim(sample.1[sample.1$treatdgw>0,])[1]-cut>0 &
dim(sample.1)[2]-54275>0){
psites$add(paste0("site",i))
#sample<-rbind(sample,sample.1)
}
}
}
}
psites$as.list()
saveRDS(psites$as.list(), file = paste(path,'repeat5wellslist.rds', sep=""), ascii = FALSE, version = NULL,
compress = TRUE, refhook = NULL)
psitelist<-readRDS(paste(path,'repeat5wellslist.rds', sep=""), refhook = NULL)
###############################################################################################
psite<-c("209","210","214","217","223","224","227","228","254","262")
#sample.1<-readRDS(paste(path,k,'deletiongw',i,'.rds', sep=""), refhook = NULL)
for(k in c("full")){
sample.1<-readRDS(paste(path,k,'deletion',psite[1],'.rds', sep=""), refhook = NULL)
for(i in psite[2:10]){
if (file.exists(paste(path,k,'deletion',i,'.rds', sep=""))){
sample.2<-readRDS(paste(path,k,'deletion',i,'.rds', sep=""), refhook = NULL)
sample.1<-smartbind(sample.1,sample.2)
}
}
}
saveRDS(sample.1, file = paste(path,'repeat5wells.rds', sep=""), ascii = FALSE, version = NULL,
compress = TRUE, refhook = NULL)
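# Note (sketch): gtools::smartbind() row-binds data frames whose columns do not
# fully overlap, filling the missing cells with NA, which is why it is used in
# the loop above instead of rbind() when stacking the per-site extracts.
if(FALSE){
  gtools::smartbind(data.frame(a = 1, b = 2), data.frame(a = 3, c = 4))
  # columns a, b, c with NA where a column is missing from one piece
}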
####################################################################
#sample.1<-readRDS(paste(path,'repeat5wells.rds', sep=""), refhook = NULL)
dNPL$row<-seq(1:dim(dNPL)[1])
psitel<-dNPL[dNPL$rsitinc_desc=="LANDFILL","row"]
psitel<-c(2,4,11,12,15,16,19,20,21)
psitel<-c(2,12,15,16)
for(psite in psitel){
data<-readRDS(paste(path,'fulldeletionbajgw',psite,'.rds', sep=""), refhook = NULL)
#repeat sales Bajari
#d.sample.data<-sample1
#rm(sample1)
gc()
data<-data[data$price>0,]
data<-data[!duplicated(data[,c("date","HHID")]),]
sample.data<-data[,c("date","HHID","TransId","price","logprice")]
quants<-20
sample.data$indx<- factor(as.numeric(cut2(as.numeric(sample.data$HHID), g=quants)))
sample<-NULL
for(i in 1:quants){
#i<-1
d.sample.data<-sample.data[indx==i,]
rep.row<-function(x,n){
matrix(rep(x,each=n),nrow=n)
}
D<-rep.row(as.numeric(d.sample.data$HHID),nrow(d.sample.data))
D<-t(D)-D
D[D>0]<-2
D[D<0]<-2
D[D==0]<-1
D[D==2]<-0
sameHouse<-D
D<-rep.row(as.numeric(d.sample.data$TransId),nrow(d.sample.data))
D<-t(D)-D
D[D>0]<-2
D[D<0]<-2
D[D==0]<-1
D[D==2]<-0
sameSale<-D
otherSales<-sameHouse-sameSale
rm(sameHouse,sameSale)
gc()
D<-rep.row(d.sample.data$date,nrow(d.sample.data))
D<-t(D)-D
D[D<0]<-0
diffDates<-D
rm(D)
gc()
library(matrixStats)
dumDiffDates<-diffDates*otherSales
dumDiffDates[dumDiffDates==0]<- 10000000000000000
#dumDiffDates[dumDiffDates-rowMins(dumDiffDates)>0]<- -1
dumDiffDates[dumDiffDates-rowMins(dumDiffDates,na.rm = TRUE)==0]<-1
dumDiffDates[dumDiffDates<0]<-0
dumDiffDates[dumDiffDates>1]<-0
dumDiffDates[dumDiffDates==10000000000000000]<-0
dumDiffDates[rowSums(dumDiffDates)-dim(dumDiffDates)[1]==0]<-0
rm(diffDates,otherSales)
gc()
d.sample.data$preprice<-dumDiffDates%*%d.sample.data$price
d.sample.data$prelogprice<-dumDiffDates%*%d.sample.data$logprice
d.sample.data$predate<-dumDiffDates%*%as.numeric(d.sample.data$date)
d.sample.data$prediffdate<-as.numeric(d.sample.data$date)-d.sample.data$predate
d.sample.data$presstatusd<-ifelse(d.sample.data$predate-as.numeric(odNPL$date[i])>0,1,0)
d.sample.data$presstatuscc<-ifelse(d.sample.data$predate-as.numeric(odNPL$ControlsComplete[i])>0,1,0)
d.sample.data<-d.sample.data[d.sample.data$predate>0,]
#sample1<-sample1[sample1$presstatus<1 ,]
#sample1<-sample1[sample1$treatdgw<1 ,]
d.sample.data$difflogprice<-d.sample.data$logprice-d.sample.data$prelogprice
sample<-rbind(sample,d.sample.data)
}
library(data.table)
sample<-sample[,c("TransId","preprice","prelogprice","predate","prediffdate","presstatusd","presstatuscc","difflogprice")]
data.dt<-data.table(data)
sample.dt<-data.table(sample)
sample.new<-merge(data.dt,sample.dt,all.x=TRUE,by="TransId")
saveRDS(sample.new, file = paste(path,'fullbaj',psite,'.rds', sep=""), ascii = FALSE, version = NULL,
compress = TRUE, refhook = NULL)
}
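# Aside (hypothetical alternative, not used above): when only the immediately
# preceding sale of the same parcel is needed, an ordered data.table shift() by
# HHID returns the previous price and date directly, avoiding the dense
# pairwise date matrices built in the loop above.
if(FALSE){
  dt <- data.table(HHID = c(1, 1, 2, 2, 2),
                   date = as.Date("2000-01-01") + c(0, 400, 0, 200, 900),
                   logprice = rnorm(5))
  setorder(dt, HHID, date)
  dt[, `:=`(prelogprice = shift(logprice), predate = shift(date)), by = HHID]
  dt[, prediffdate := as.numeric(date - predate)]
}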
if(TRUE){
dist<-c('10k','8k','6k','4k','2k')#,'1k','500m')
#dist<-c('4k','2k')#,'1k','500m')
laglead<-c("")
llc<-laglead[1]
treatl<-c('TATE','MUATE','WLATE')
dNPL$row<-seq(1:dim(dNPL)[1])
psitel<-dNPL[dNPL$rsitinc_desc=="LANDFILL","row"]
psitel<-psitel[psitel!=3]
psite<-psitel[1]
samplefull<-readRDS(paste(path,'fullbaj',psite,'.rds', sep=""), refhook = NULL)
samplefull<-samplefull[predate>0,]
samplefull$treatst<-samplefull[[paste0('treatdgw',psite)]] #*sample[[paste0('dist',dist[[5]])]]
buf<-1
#samplefull$buffer<-ifelse(samplefull$date-odNPL$date[psite]+(buf*365)>0&samplefull$date-odNPL$date[psite]<0,1,0)
samplefull$buffer<-ifelse(abs(samplefull$date-odNPL$date[psite])-(buf*365)<0,1,0)
samplefull<-samplefull[buffer<1,]
samplefull$timetotreat<-samplefull$date-odNPL$date[psite]
samplefull$lsite<-psite
samplefull$demlogprice<-demeanlist(samplefull$logprice,
list(as.factor(samplefull$PropertyAddressCensusTractAndBlock)))
# sample$logprice<-sample$logprice.x
#Total Average Treatment Effect
samplefull$aftpropnpl<-samplefull[[paste0('aftpropnpl',psite)]]
#sample$aftfinalnpl<-sample[[paste0('aftfinalnpl',fnplsite)]]
#sample$timefinalnplfe<-sample[[paste0('timefinalnplfe',fnplsite)]]
samplefull$treatControlsComplete<-samplefull[[paste0('treatControlsComplete',psite)]]
samplefull$timefed<-samplefull[[paste0('timefed',psite)]]
samplefull$timefedControlsComplete<-samplefull[[paste0('timefedControlsComplete',psite)]]
samplefull$treatexCC<-ifelse(samplefull$treatControlsComplete==1 &samplefull$treatst==0,1,0)
#Municipal ATE
samplefull$treatdgwMU<- samplefull[[paste0('treatd',llc,'gw',psite)]]* samplefull$WaterStndCode.fMU
samplefull$treatgroupMU<-samplefull$treatmentgroup * samplefull$WaterStndCode.fMU
samplefull$controlMU<-samplefull$control*samplefull$WaterStndCode.fMU
samplefull$sample.MUATE<-samplefull$control+samplefull$treatgroupMU
samplefull$aftpropnpl<-samplefull[[paste0('aftpropnpl',psite)]]
#sample$aftfinalnpl<-sample[[paste0('aftfinalnpl',fnplsite)]]
#sample$timefinalnplfe<-sample[[paste0('timefinalnplfe',fnplsite)]]
samplefull$treatControlsComplete<-samplefull[[paste0('treatControlsComplete',psite)]]
samplefull$timefed<-samplefull[[paste0('timefed',psite)]]
samplefull$timefedControlsComplete<-samplefull[[paste0('timefedControlsComplete',psite)]]
#sample$treatst<-sample[[paste0('treatd',llc,'gw',psite)]] #*sample[[paste0('dist',dist[[5]])]]
samplefull$treatexCC<-ifelse(samplefull$treatControlsComplete==1 &samplefull$treatst==0,1,0)
#Municipal ATE
samplefull$treatdgwWL<- samplefull[[paste0('treatd',llc,'gw',psite)]]* samplefull$WaterStndCode.fWL
samplefull$treatgroupWL<-samplefull$treatmentgroup * samplefull$WaterStndCode.fWL
samplefull$controlWL<-samplefull$control*samplefull$WaterStndCode.fWL
samplefull$sample.WLATE<-samplefull$control+samplefull$treatgroupWL
samplefull$aftpropnpl<-samplefull[[paste0('aftpropnpl',psite)]]
#sample$aftfinalnpl<-sample[[paste0('aftfinalnpl',fnplsite)]]
#sample$timefinalnplfe<-sample[[paste0('timefinalnplfe',fnplsite)]]
samplefull$treatControlsComplete<-samplefull[[paste0('treatControlsComplete',psite)]]
samplefull$timefed<-samplefull[[paste0('timefed',psite)]]
samplefull$timefedControlsComplete<-samplefull[[paste0('timefedControlsComplete',psite)]]
#sample$treatst<-sample[[paste0('treatd',llc,'gw',psite)]] #*sample[[paste0('dist',dist[[5]])]]
samplefull$treatexCC<-ifelse(samplefull$treatControlsComplete==1 &samplefull$treatst==0,1,0)
for(psite in psitel[2:length(psitel)]){
sample2<-readRDS(paste(path,'fullbaj',psite,'.rds', sep=""), refhook = NULL)
sample2<-sample2[predate>0,]
sample2$treatst<-sample2[[paste0('treatdgw',psite)]] #*sample[[paste0('dist',dist[[5]])]]
buf<-1
#sample2$buffer<-ifelse(sample2$date-odNPL$date[psite]+(buf*365)>0&sample2$date-odNPL$date[psite]<0,1,0)
sample2$buffer<-ifelse(abs(sample2$date-odNPL$date[psite])-(buf*365)<0,1,0)
sample2<-sample2[buffer<1,]
sample2$timetotreat<-sample2$date-odNPL$date[psite]
sample2$lsite<-psite
sample2$demlogprice<-demeanlist(sample2$logprice,
list(as.factor(sample2$PropertyAddressCensusTractAndBlock)))
sample2$aftpropnpl<-sample2[[paste0('aftpropnpl',psite)]]
#sample$aftfinalnpl<-sample[[paste0('aftfinalnpl',fnplsite)]]
#sample$timefinalnplfe<-sample[[paste0('timefinalnplfe',fnplsite)]]
sample2$treatControlsComplete<-sample2[[paste0('treatControlsComplete',psite)]]
sample2$timefed<-sample2[[paste0('timefed',psite)]]
sample2$timefedControlsComplete<-sample2[[paste0('timefedControlsComplete',psite)]]
sample2$treatexCC<-ifelse(sample2$treatControlsComplete==1 &sample2$treatst==0,1,0)
#Municipal ATE
sample2$treatdgwMU<- sample2[[paste0('treatd',llc,'gw',psite)]]* sample2$WaterStndCode.fMU
sample2$treatgroupMU<-sample2$treatmentgroup * sample2$WaterStndCode.fMU
sample2$controlMU<-sample2$control*sample2$WaterStndCode.fMU
sample2$sample.MUATE<-sample2$control+sample2$treatgroupMU
sample2$aftpropnpl<-sample2[[paste0('aftpropnpl',psite)]]
#sample$aftfinalnpl<-sample[[paste0('aftfinalnpl',fnplsite)]]
#sample$timefinalnplfe<-sample[[paste0('timefinalnplfe',fnplsite)]]
sample2$treatControlsComplete<-sample2[[paste0('treatControlsComplete',psite)]]
sample2$timefed<-sample2[[paste0('timefed',psite)]]
sample2$timefedControlsComplete<-sample2[[paste0('timefedControlsComplete',psite)]]
#sample$treatst<-sample[[paste0('treatd',llc,'gw',psite)]] #*sample[[paste0('dist',dist[[5]])]]
sample2$treatexCC<-ifelse(sample2$treatControlsComplete==1 &sample2$treatst==0,1,0)
#Municipal ATE
sample2$treatdgwWL<- sample2[[paste0('treatd',llc,'gw',psite)]]* sample2$WaterStndCode.fWL
sample2$treatgroupWL<-sample2$treatmentgroup * sample2$WaterStndCode.fWL
sample2$controlWL<-sample2$control*sample2$WaterStndCode.fWL
sample2$sample.WLATE<-sample2$control+sample2$treatgroupWL
sample2$aftpropnpl<-sample2[[paste0('aftpropnpl',psite)]]
#sample$aftfinalnpl<-sample[[paste0('aftfinalnpl',fnplsite)]]
#sample$timefinalnplfe<-sample[[paste0('timefinalnplfe',fnplsite)]]
sample2$treatControlsComplete<-sample2[[paste0('treatControlsComplete',psite)]]
sample2$timefed<-sample2[[paste0('timefed',psite)]]
sample2$timefedControlsComplete<-sample2[[paste0('timefedControlsComplete',psite)]]
#sample$treatst<-sample[[paste0('treatd',llc,'gw',psite)]] #*sample[[paste0('dist',dist[[5]])]]
sample2$treatexCC<-ifelse(sample2$treatControlsComplete==1 &sample2$treatst==0,1,0)
samplefull<-rbind(samplefull,sample2)
}
#psite<-2
samplefull$timetotreat<-samplefull$timetotreat/365
samplefull<-samplefull[abs(timetotreat)-10<0,]
samplefull$treatd0gw<-samplefull$treatdgw
#samplefull<-samplefull[samplefull$price>15000,]
#samplefull$price<-Winsorize(samplefull$price)
}
if(TRUE){
treatc<-treatl[[1]]
#for(ll in 1:length(laglead)){
dic<-dist[[1]]
ll<-1
llc<-laglead[[ll]]
sample<-samplefull[samplefull[[paste0('dist',dic)]]>0,]
#sample$treatst<-sample$treatst-sample$presstatusd
#sample<-sample[treatst>0,]
sample<-sample[presstatusd==0,]
upper.spatial.range<-c(20)
lower.spatial.range<-c(0)
spatial.power.range<-c(10)
temporal.cut.range<-c(30)
temporal.power.range<-c(10)
urange<-upper.spatial.range
lrange<-lower.spatial.range
prange<-spatial.power.range
crange<-temporal.cut.range
qrange<-temporal.power.range
d.sample.data<-sample
W.trend.lag.variables<-function(urange,lrange,prange,crange,qrange,path){
denom<-0
for(c in crange){
for(q in qrange){
denom<-denom+1
}
}
for(u in urange){
for(l in lrange){
for(p in prange){
if(u>l){
denom<-denom+1
}
}
}
}
dist.mat<-distm (cbind(d.sample.data$PropertyAddressLongitude, d.sample.data$PropertyAddressLatitude), fun = distHaversine)
Wtime<- function(c,q){
t<-d.sample.data$predate
rep.row<-function(x,n){
matrix(rep(x,each=n),nrow=n)
}
t<-d.sample.data$predate
T1<-rep.row(t,nrow(d.sample.data))
T2<-T1
T1<-t(T1)
Tdp<-t(t(T1-T2))
Tdp[Tdp >= c*365] = -2
Tdp[Tdp<=-365] = -2
Wtp<-1/(1+abs(Tdp))
Wtp[Wtp<0]<-0
#Wtp[Wtp == 'Inf'] = 0
Wt<-Wtp^q
Wt<-return(Wt)
print(Wt)
rm(t,T1,T2,Tdp,Wtp)
gc()
}
num<-0
for(c in crange){
for(q in qrange){
assign(paste('wt',c,'m',q,sep=""),Wtime(c,q))
num<-num+1
print(paste(num, 'of', denom,sep=" "))
}
}
Wspat<- function(u,l,p){
Sp<-dist.mat
Sp[Sp>= u*500] = -2
Sp[Sp <= l*500] = -2
Wsp<-1/(1+Sp)
Wsp[Wsp<0]<-0
#Wsp[Wsp == 'Inf'] = 0
Ws<-Wsp^p
Ws<-return(Ws)
print(Ws)
rm(Sp,Wsp)
gc()
}
for(u in urange){
for(l in lrange){
for(p in prange){
assign(paste('ws',u,'m',l, 'm',p,sep=""),Wspat(u,l,p))
if(u>l){
num<-num+1
print(paste(num,'of',denom,sep=" "))
}
else{num<-num}
}
}
}
#Dummies
rep.row<-function(x,n){
matrix(rep(x,each=n),nrow=n)
}
if(FALSE){
A<-rep.row(d.sample.data$YearBuilt,nrow(d.sample.data))
At<-t(A)
D<-At-A
D[D<10000000000]<-1
D1<-D
}
Wst<-hadamard.prod(get(paste('ws',u,'m',l, 'm',p,sep="")), get(paste('wt',c,'m',q,sep="")))
#Wst<-Wst*get(paste('D',d, sep=""))
weight<-rowSums(Wst)
for(i in 1:nrow(d.sample.data)){
if(weight[i]==0){
weight[i]<-1
}
}
Wst<-Wst*(1/weight)
#diag(Wst)<-0
#Wst<-mat2listw(Wst, style="W")
sample[[paste0('lagu',u,'l',l, 'sp',p,'c',c, 'tp',q,sep="")]]<-Wst %*% sample$price
sample[[paste0('lnlagu',u,'l',l, 'sp',p,'c',c, 'tp',q,sep="")]]<-Wst %*%sample$logprice
saveRDS(sample, file = path, ascii = FALSE, version = NULL,
compress = TRUE, refhook = NULL)
#stop cluster
}
start.time <- Sys.time()
W.trend.lag.variables(upper.spatial.range,lower.spatial.range,spatial.power.range,
temporal.cut.range,temporal.power.range,paste0(path,"pretreatlag.rds"))
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
}
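# Aside (sketch): W.trend.lag.variables() row-standardizes the spatio-temporal
# weight matrix, with empty rows given a divisor of 1 so they stay all-zero.
# A compact equivalent for a generic weight matrix W:
if(FALSE){
  W <- matrix(runif(25), 5, 5); diag(W) <- 0
  rs <- rowSums(W)
  W.std <- W / ifelse(rs == 0, 1, rs)   # rows now sum to 1 (or remain all zero)
}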
dist<-c('10k','8k','6k','4k','2k')#,'1k','500m')
#dist<-c('4k','2k')#,'1k','500m')
laglead<-c("")
treatl<-c('TATE','MUATE','WLATE')
#sample<-readRDS(paste0(path,"pretreatlag.rds"), refhook = NULL)
sample<-readRDS(paste0(path,"fullcen.rds"), refhook = NULL)
samplefull<-sample
quant<-10
qcut<-cut2(samplefull$preprice, g=quant, onlycuts = TRUE)
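# Note (sketch): Hmisc::cut2(..., g = quant, onlycuts = TRUE) returns the
# quant + 1 breakpoints (here, deciles of the pre-treatment price) rather than
# a factor; these breakpoints label the quantile rows of the tables and plots
# further down.
if(FALSE){
  Hmisc::cut2(1:100, g = 4, onlycuts = TRUE)   # breakpoints including min and max
}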
p <- ggplot(samplefull, aes(x=logprice, fill= as.character(treatmentgroup))) +
geom_density(alpha=.3) +
xlab("Log Price") +
ylab("Density")+
guides(fill=guide_legend(title="Treatment Group"))
lp <- ggplot(samplefull, aes(x=prelogprice, fill= as.character(treatmentgroup))) +
geom_density(alpha=.3) +
xlab("Pretreatment Log Price") +
ylab("Density")+
guides(fill=guide_legend(title="Treatment Group"))
sf <- ggplot(samplefull, aes(x=log(sqfeet), fill= as.character(treatmentgroup))) +
geom_density(alpha=.3) +
xlab("Log(Square Feet)") +
ylab("Density")+
guides(fill=guide_legend(title="Treatment Group"))
da <- ggplot(samplefull, aes(x=as.Date(RecordingDate), fill= as.character(treatmentgroup))) +
geom_density(alpha=.5,adjust=2) +
xlab("Date") +
ylab("Density")+
guides(fill=guide_legend(title="Treatment Group"))
yb <- ggplot(samplefull, aes(x=YearBuilt, fill= as.character(treatmentgroup))) +
geom_density(alpha=.5,adjust=2) +
xlab("Year Built") +
ylab("Density")+
guides(fill=guide_legend(title="Treatment Group"))
fb <- ggplot(samplefull, aes(x=FullBath, fill= as.character(treatmentgroup))) +
geom_density(alpha=.5,adjust=2) +
xlab("Full Bath") +
ylab("Density")+
guides(fill=guide_legend(title="Treatment Group"))
ls <- ggplot(samplefull, aes(x=LotSizeSquareFeet, fill= as.character(treatmentgroup))) +
geom_density(alpha=.5,adjust=2) +
xlab("Full Bath") +
ylab("Density")+
guides(fill=guide_legend(title="Treatment Group"))
sumvar<-c("price","sqfeet","TotalRooms","YearBuilt","FullBath")
ts<-sample[treatmentgroup==1,c("price","sqfeet","YearBuilt","RecordingDate","FullBath","LotSizeSquareFeet")]
ts$year <- as.numeric(as.character(factor(format(as.Date(ts$RecordingDate),'%Y'))))
cs<-sample[treatmentgroup==0,c("price","sqfeet","YearBuilt","RecordingDate","FullBath","LotSizeSquareFeet")]
cs$year <- as.numeric(as.character(factor(format(as.Date(cs$RecordingDate),'%Y'))))
sum25<-rbind("25th percentile",quantile(ts$price)[2],quantile(ts$sqfeet)[2],quantile(ts$YearBuilt)[2],quantile(ts$year)[2],quantile(ts$FullBath)[2],quantile(ts$LotSizeSquareFeet)[2],
quantile(cs$price)[2],quantile(cs$sqfeet)[2],quantile(cs$YearBuilt)[2],quantile(cs$year)[2],quantile(cs$FullBath)[2],quantile(cs$LotSizeSquareFeet)[2])
summed<-rbind("Median",median(ts$price),median(ts$sqfeet),median(ts$YearBuilt),median(ts$year),median(ts$FullBath),median(ts$LotSizeSquareFeet),
median(cs$price),median(cs$sqfeet),median(cs$YearBuilt),median(cs$year),median(cs$FullBath),median(cs$LotSizeSquareFeet))
summean<-rbind("Mean",floor(mean(ts$price)),floor(mean(ts$sqfeet)),floor(mean(ts$YearBuilt)),floor(mean(ts$year)),floor(mean(ts$FullBath)),floor(mean(ts$LotSizeSquareFeet)),
floor(mean(cs$price)),floor(mean(cs$sqfeet)),floor(mean(cs$YearBuilt)),floor(mean(cs$year)),floor(mean(cs$FullBath)),floor(mean(cs$LotSizeSquareFeet)))
sum75<-rbind("75th percentile",quantile(ts$price)[4],quantile(ts$sqfeet)[4],quantile(ts$YearBuilt)[4],quantile(ts$year)[4],quantile(ts$FullBath)[4],quantile(ts$LotSizeSquareFeet)[4],
quantile(cs$price)[4],quantile(cs$sqfeet)[4],quantile(cs$YearBuilt)[4],quantile(cs$year)[4],quantile(cs$FullBath)[4],quantile(cs$LotSizeSquareFeet)[4])
tdiff<-rbind("T-Test",base::round(as.numeric(t.test(ts$price,cs$price)["statistic"]),digits=2),
base::round(as.numeric(t.test(ts$sqfeet,cs$sqfeet)["statistic"]),digits=2),
             base::round(as.numeric(t.test(ts$YearBuilt,cs$YearBuilt)["statistic"]),digits=2),
             base::round(as.numeric(t.test(ts$year,cs$year)["statistic"]),digits=2),
base::round(as.numeric(t.test(ts$FullBath,cs$FullBath)["statistic"]),digits=2),
base::round(as.numeric(t.test(ts$LotSizeSquareFeet,cs$LotSizeSquareFeet)["statistic"]),digits=2),
base::round(as.numeric(t.test(cs$price,ts$price)["statistic"]),digits=2),
base::round(as.numeric(t.test(cs$sqfeet,ts$sqfeet)["statistic"]),digits=2),
             base::round(as.numeric(t.test(cs$YearBuilt,ts$YearBuilt)["statistic"]),digits=2),
             base::round(as.numeric(t.test(cs$year,ts$year)["statistic"]),digits=2),
base::round(as.numeric(t.test(cs$FullBath,ts$FullBath)["statistic"]),digits=2),
base::round(as.numeric(t.test(cs$LotSizeSquareFeet,ts$LotSizeSquareFeet)["statistic"]),digits=2))
rlab<-rbind(" ","Price","Square Feet","Year Built","Year","Full Bathroom","Lot Size","Price","Square Feet","Year Built","Year","Full Bathroom","Lot Size")
rlab2<-rbind(" ","Treatment","Group"," "," "," "," ","Control","Group"," "," "," "," ")
sumtab<-cbind(rlab2,rlab,sum25,summed,summean,sum75,tdiff)
rownames(sumtab)<-NULL
colnames(sumtab)<-NULL
sumtab
xtab<-xtable(sumtab)
align(xtab) <- "rl|l|rrrr|r"
print.xtable(xtab,include.rownames=FALSE, hline.after = c(0,1,7,dim(sumtab)[1]),
include.colnames=FALSE, sanitize.text.function = identity,
caption = "Summary Statistics",
label = "tab:summary",
type="latex", file=paste0(path,'latex/sumtab.tex'))
sumvar<-c("price","sqfeet","TotalRooms","YearBuilt","FullBath")
pts<-sample[treatmentgroup==1&timetotreat<0,c("price","sqfeet","YearBuilt","RecordingDate","FullBath","LotSizeSquareFeet")]
pts$year <- as.numeric(as.character(factor(format(as.Date(pts$RecordingDate),'%Y'))))
pcs<-sample[treatmentgroup==0&timetotreat<0,c("price","sqfeet","YearBuilt","RecordingDate","FullBath","LotSizeSquareFeet")]
pcs$year <- as.numeric(as.character(factor(format(as.Date(pcs$RecordingDate),'%Y'))))
ats<-sample[treatmentgroup==1&timetotreat>0,c("price","sqfeet","YearBuilt","RecordingDate","FullBath","LotSizeSquareFeet")]
ats$year <- as.numeric(as.character(factor(format(as.Date(ats$RecordingDate),'%Y'))))
acs<-sample[treatmentgroup==0&timetotreat>0,c("price","sqfeet","YearBuilt","RecordingDate","FullBath","LotSizeSquareFeet")]
acs$year <- as.numeric(as.character(factor(format(as.Date(acs$RecordingDate),'%Y'))))
sum25<-rbind("25th percentile",quantile(pts$price)[2],quantile(pts$sqfeet)[2],quantile(pts$YearBuilt)[2],quantile(pts$year)[2],quantile(pts$LotSizeSquareFeet)[2],
quantile(pcs$price)[2],quantile(pcs$sqfeet)[2],quantile(pcs$YearBuilt)[2],quantile(pcs$year)[2],quantile(pcs$LotSizeSquareFeet)[2],
quantile(ats$price)[2],quantile(ats$sqfeet)[2],quantile(ats$YearBuilt)[2],quantile(ats$year)[2],quantile(ats$LotSizeSquareFeet)[2],
quantile(acs$price)[2],quantile(acs$sqfeet)[2],quantile(acs$YearBuilt)[2],quantile(acs$year)[2],quantile(acs$LotSizeSquareFeet)[2])
summed<-rbind("Median",median(pts$price),median(pts$sqfeet),median(pts$YearBuilt),median(pts$year),median(pts$LotSizeSquareFeet),
median(pcs$price),median(pcs$sqfeet),median(pcs$YearBuilt),median(pcs$year),median(pcs$LotSizeSquareFeet),
median(ats$price),median(ats$sqfeet),median(ats$YearBuilt),median(ats$year),median(ats$LotSizeSquareFeet),
median(acs$price),median(acs$sqfeet),median(acs$YearBuilt),median(acs$year),median(acs$LotSizeSquareFeet))
summean<-rbind("Mean",mean(pts$price),mean(pts$sqfeet),mean(pts$YearBuilt),mean(pts$year),mean(pts$LotSizeSquareFeet),
mean(pcs$price),mean(pcs$sqfeet),mean(pcs$YearBuilt),mean(pcs$year),mean(pcs$LotSizeSquareFeet),
mean(ats$price),mean(ats$sqfeet),mean(ats$YearBuilt),mean(ats$year),mean(ats$LotSizeSquareFeet),
mean(acs$price),mean(acs$sqfeet),mean(acs$YearBuilt),mean(acs$year),mean(acs$LotSizeSquareFeet))
sum75<-rbind("75th percentile",quantile(pts$price)[4],quantile(pts$sqfeet)[4],quantile(pts$YearBuilt)[4],quantile(pts$year)[4],quantile(pts$LotSizeSquareFeet)[4],
quantile(pcs$price)[4],quantile(pcs$sqfeet)[4],quantile(pcs$YearBuilt)[4],quantile(pcs$year)[4],quantile(pcs$LotSizeSquareFeet)[4],
quantile(ats$price)[4],quantile(ats$sqfeet)[4],quantile(ats$YearBuilt)[4],quantile(ats$year)[4],quantile(ats$LotSizeSquareFeet)[4],
quantile(acs$price)[4],quantile(acs$sqfeet)[4],quantile(acs$YearBuilt)[4],quantile(acs$year)[4],quantile(acs$LotSizeSquareFeet)[4])
#rlab<-rbind(" ","Price","Square Feet","Year Built","Year","Lot Size","Price","Square Feet","Year Built","Year","Full Bathroom","Lot Size")
#rlab2<-rbind(" ","Treatment","Group"," "," "," "," ","Control","Group"," "," "," "," ")
sumtab1<-cbind(sum25,summed,summean,sum75)
#sample$date<-sample$date.x
#dist<-c('10k','8k','6k','5k','4k','3k','2k')#,'1k','500m')
dist<-c('8k','6k','4k','2k')#,'1k','500m')
dist<-c('10k','8k','6k','4k','2k')#,'1k','500m')
#dist<-c('4k','2k')#,'1k','500m')
laglead<-c("")
treatl<-c('TATE','MUATE','WLATE')
#di<-5
#ll<-1
#for(buf in 1:2){
specl<-c("i","a","s","y","")
#matrices
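# Pre-allocate result matrices: rows = pretreatment-price deciles (quant), columns = distance buffers (dist),
# one set per estimator (lm / gam / sp = spatial lag / match), treatment definition, and fixed-effect spec;
# the aggregate betas/ses/ps.<estimator>.did matrices hold one row per treatment definition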
for(i in c("lm","gam","sp","match")){
for(j in c("t","wl","mu")){
for(k in c("did")){
for(treat in treatl){
for(spec in specl){
assign(paste0('betas.',i,'.',j,'.',k,'.',treat,'.',spec),matrix(ncol = length(dist),nrow=quant))
assign(paste0('ses.',i,'.',j,'.',k,'.',treat,'.',spec),matrix(ncol = length(dist),nrow=quant))
assign(paste0('ps.',i,'.',j,'.',k,'.',treat,'.',spec),matrix(ncol = length(dist),nrow=quant))
assign(paste0('betas.',i,'.',k),matrix(ncol = length(dist),nrow=length(treatl)))
assign(paste0('ses.',i,'.',k),matrix(ncol = length(dist),nrow=length(treatl)))
assign(paste0('ps.',i,'.',k),matrix(ncol = length(dist),nrow=length(treatl)))
}
}
}
}
}
di<-1
treat<-1
#psite<-21
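# Main estimation loop: for each treatment definition (TATE / MUATE / WLATE), distance buffer,
# and matched vs. unmatched sample, build the design matrices and fit the DiD models below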
for(treat in 1:length(treatl)){
treatc<-treatl[[treat]]
#for(ll in 1:length(laglead)){
for(di in 1:length(dist)){
for(match in c("","match")){
#match<-""
dic<-dist[[di]]
ll<-1
llc<-laglead[[ll]]
sample<-samplefull[samplefull[[paste0('dist',dic)]]>0,]
sample<-sample[presstatusd==0,]
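# Optional pre-processing: nearest-neighbour Mahalanobis matching of treated and control sales
# on lot size, year built, baths, square footage, and sale timing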
if(match=="match"){
#LotSizeSquareFeet + YearBuilt + FullBath + HalfBath + sqfeet+day+prediffdate+predate
samplem<-sample[,c("TransId","date","treatmentgroup","LotSizeSquareFeet", "sqfeet","YearBuilt", "FullBath","day","predate")]
mdm<- matchit(treatmentgroup~LotSizeSquareFeet+ YearBuilt + FullBath +sqfeet+day+predate,data=samplem, method = "nearest", distance = "mahalanobis")
mdm.treat <- match.data(mdm, group = "treat")
mdm.control <- match.data(mdm, group = "control")
mdm<-rbind(mdm.treat,mdm.control)
mdm<-mdm[,c("TransId","date")]
mdm.full<-merge(mdm,sample,all.x = TRUE, by = "TransId")
sample<-mdm.full
}
#sample$treatst<-sample$treatst-sample$presstatusd
#sample<-sample[treatst>0,]
# sample$logprice<-sample$logprice.x
if(treatc=='TATE'){
#Total Average Treatment Effect
}
if(treatc=='MUATE'){
#Municipal ATE
sample<-subset(sample, sample.MUATE==1)
}
if(treatc=='WLATE'){
#Well Water ATE
sample<-subset(sample, sample.WLATE==1)
}
if(mean(sample$treatst)>0){
#TATE<-as.formula(logprice ~ treatdgw+ treatmentgroup+data.matrix(treatgroupm)+ data.matrix(timefe)+ data.matrix(year[,3:25])+ as.factor(HHID))
sdf<-20
lat<-sample$PropertyAddressLatitude
long<-sample$PropertyAddressLongitude
splat<-bs(lat, df = sdf)
splong<-bs(long, df = sdf)
spint<-model.matrix(~splat:splong)
spTATE<-cbind(splat,splong,spint,lat,long)
#Wst<-readRDS(paste0(path,"Wmat",treatc,dic,".rds"), refhook = NULL)
#sample$wlp<-Wst%*%sample$prelogprice
#sample$wp<-Wst%*%sample$preprice
sample$preyear<-as.numeric(format(as.Date(sample$predate),"%Y"))
sample$premonth<-as.numeric(format(as.Date(sample$predate),"%m"))
sample$year<-as.numeric(format(as.Date(sample$RecordingDate),"%Y"))
sample$month<-as.numeric(format(as.Date(sample$RecordingDate),"%m"))
sample$quarter<-ifelse(sample$month==1|sample$month==2|sample$month==3,1,0)
sample$quarter<-ifelse(sample$month==4|sample$month==5|sample$month==6,2,sample$quarter)
sample$quarter<-ifelse(sample$month==7|sample$month==8|sample$month==9,3,sample$quarter)
sample$quarter<-ifelse(sample$month==10|sample$month==11|sample$month==12,4,sample$quarter)
sample$prequarter<-ifelse(sample$premonth==1|sample$premonth==2|sample$premonth==3,1,0)
sample$prequarter<-ifelse(sample$premonth==4|sample$premonth==5|sample$premonth==6,2,sample$prequarter)
sample$prequarter<-ifelse(sample$premonth==7|sample$premonth==8|sample$premonth==9,3,sample$prequarter)
sample$prequarter<-ifelse(sample$premonth==10|sample$premonth==11|sample$premonth==12,4,sample$prequarter)
sample$prelogpricewlag<-sample$lnlagu20l0sp10c30tp10
sample$tprelprice<-sample$prelogprice*ifelse(sample$treatst==1 & sample$presstatusd==0,1,0)
sample$treatfin<-ifelse(sample$treatst==1 & sample$presstatusd==0,1,0)
indx<- factor(as.numeric(cut2(sample$preprice,cuts=qcut,minmax=TRUE)))
indx2<- factor(as.numeric(cut2(exp(sample$prelogpricewlag),cuts=qcut,minmax=TRUE)))
#indx<-ifelse(as.numeric(indx)!=as.numeric(levels(indx)[1])&
# as.numeric(indx)!=as.numeric(levels(indx)[length(levels(indx))]),"med",indx)
#indx<-ifelse(indx==1,"bottom",indx)
#indx<-ifelse(indx==quant,"top",indx)
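# Treatment-by-price-decile dummies (treatind) and covariate design matrices:
# X is the baseline specification, Xlag adds the spatially lagged pretreatment log price;
# collinear columns are dropped via the QR decompositions below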
treatind<-model.matrix(~treatst:indx-1,sample)
treatind<-treatind[,1:(length(unique(indx)))]
X<-model.matrix(~ treatmentgroup+indx:treatmentgroup+#:indx+#bs(timetotreat,5)+bs(day,5)+#as.factor(round(timetotreat,1))+
#treatmentgroup:as.factor(lsite)+indx:treatmentgroup:as.factor(lsite)+#:indx+#bs(timetotreat,5)+bs(day,5)+#as.factor(round(timetotreat,1))+
#poly(timetotreat,3)+poly(day,3)+
#treatmentgroup*timetotreat+#treatmentgroup*day+
LotSizeSquareFeet + YearBuilt + FullBath + HalfBath +
timetotreat+
sqfeet+ prediffdate+predate+prelogprice+indx+#presstatusd+
#as.factor(floor(timetotreat))+
-1,sample)
qr.X <- qr(X, tol=1e-2, LAPACK = FALSE)
(rnkX <- qr.X$rank)                  ## rank = number of non-collinear columns
(keep <- qr.X$pivot[seq_len(rnkX)])  ## indices of the columns to keep
X <- X[,keep]
Xlag<-model.matrix(~ treatmentgroup+indx:treatmentgroup+#:indx+#bs(timetotreat,5)+bs(day,5)+#as.factor(round(timetotreat,1))+
#treatmentgroup:as.factor(lsite)+indx:treatmentgroup:as.factor(lsite)+#:indx+#bs(timetotreat,5)+bs(day,5)+#as.factor(round(timetotreat,1))+
#poly(timetotreat,3)+poly(day,3)+
#treatmentgroup*timetotreat+#treatmentgroup*day+
timetotreat+
LotSizeSquareFeet + YearBuilt + FullBath + HalfBath +
sqfeet+ prediffdate+predate+prelogprice+indx+
prelogpricewlag+indx2+#as.factor(floor(timetotreat))+
-1,sample)
qr.Xlag <- qr(Xlag, tol=1e-2, LAPACK = FALSE)
(rnkXlag <- qr.Xlag$rank)                     ## rank = number of non-collinear columns
(keeplag <- qr.Xlag$pivot[seq_len(rnkXlag)])  ## indices of the columns to keep
Xlag <- Xlag[,keeplag]
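# Decile-specific DiD estimates from felm(), varying the spatial fixed effect:
# .i = census block (cbl), .a = block group (cbg), .s = census tract (ctr), .y = Superfund site (lsite);
# all standard errors clustered by site-by-year (lsite:year)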
results.lm.t.did.i<-felm(logprice ~treatind+
X|as.factor(cbl)+
as.factor(year):as.factor(quarter)+
as.factor(preyear):as.factor(prequarter)| 0 | lsite:year,sample) #as.factor(HHID)+
summary(results.lm.t.did.i)
results.lm.t.did.a<-felm(logprice ~treatind+
X|as.factor(cbg)+
as.factor(year):as.factor(quarter)+
as.factor(preyear):as.factor(prequarter)| 0 | lsite:year,sample) #as.factor(HHID)+
summary(results.lm.t.did.a)
results.lm.t.did.s<-felm(logprice ~treatind+X|as.factor(ctr)+
as.factor(year):as.factor(quarter)+
as.factor(preyear):as.factor(prequarter)| 0 | lsite:year,sample) #as.factor(HHID)+
summary(results.lm.t.did.s)
results.lm.t.did.y<-felm(logprice ~treatind+X|as.factor(lsite)+
as.factor(year):as.factor(quarter)+
as.factor(preyear):as.factor(prequarter)| 0 | lsite:year,sample) #as.factor(HHID)+
summary(results.lm.t.did.y)
#################################
#Spatial
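# Same four specifications re-estimated with the spatially lagged pretreatment log price (Xlag)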
results.sp.t.did.i<-felm(logprice ~treatind+
Xlag|as.factor(cbl)+
as.factor(year):as.factor(quarter)+
as.factor(preyear):as.factor(prequarter)| 0 | lsite:year,sample) #as.factor(HHID)+
summary(results.sp.t.did.i)
results.sp.t.did.a<-felm(logprice ~treatind+
Xlag|as.factor(cbg)+
as.factor(year):as.factor(quarter)+
as.factor(preyear):as.factor(prequarter)| 0 | lsite:year,sample) #as.factor(HHID)+
summary(results.sp.t.did.a)
results.sp.t.did.s<-felm(logprice ~treatind+Xlag|as.factor(ctr)+
as.factor(year):as.factor(quarter)+
as.factor(preyear):as.factor(prequarter)| 0 | lsite:year,sample) #as.factor(HHID)+
summary(results.sp.t.did.s)
results.sp.t.did.y<-felm(logprice ~treatind+Xlag|as.factor(lsite)+
as.factor(year):as.factor(quarter)+
as.factor(preyear):as.factor(prequarter)| 0 | lsite:year,sample) #as.factor(HHID)+
summary(results.sp.t.did.y)
#results.lm.t.did.z<-felm(logprice ~treatind+X|as.factor(PropertyZip)+as.factor(year):as.factor(quarter)+
# as.factor(preyear):as.factor(prequarter)| 0 | lsite:year,sample) #as.factor(HHID)+
#summary(results.lm.t.did.z)
#results.lm.t.did.c<-felm(logprice ~treatind+X|as.factor(PropertyCity)| 0 | lsite:year,sample) #as.factor(HHID)+
#summary(results.lm.t.did.c)
Xg<-model.matrix(~ treatmentgroup+#:indx+#bs(timetotreat,5)+bs(day,5)+#as.factor(round(timetotreat,1))+
#poly(timetotreat,3)+poly(day,3)+
#treatmentgroup*timetotreat+#treatmentgroup*day+
LotSizeSquareFeet + YearBuilt + FullBath + HalfBath +
sqfeet+ prediffdate+predate+prelogprice+ indx+#indx+#presstatusd+
#as.factor(floor(timetotreat))+
as.factor(year):as.factor(quarter)+
as.factor(preyear):as.factor(prequarter)-1,sample)
Xglag<-model.matrix(~ treatmentgroup+#:indx+#bs(timetotreat,5)+bs(day,5)+#as.factor(round(timetotreat,1))+
#poly(timetotreat,3)+poly(day,3)+
#treatmentgroup*timetotreat+#treatmentgroup*day+
LotSizeSquareFeet + YearBuilt + FullBath + HalfBath +
sqfeet+ prediffdate+predate+prelogprice+ indx+
prelogpricewlag+indx2+#indx+#presstatusd+
#as.factor(floor(timetotreat))+
as.factor(year):as.factor(quarter)+
as.factor(preyear):as.factor(prequarter)-1,sample)
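# Pre-trend / event-study check: treatment-group-by-year-to-treatment interactions (one reference year dropped),
# plotted below with 95% confidence bars and saved as the 'pretrends' figure for the unmatched sample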
sample$tttyear<-as.factor(ceiling(as.numeric(sample$timetotreat)))
sample1<-subset(sample,tttyear!=1)
ptlag<-model.matrix(~treatmentgroup:as.factor(ceiling(as.numeric(timetotreat))),sample1)
#as.factor(cut2(prediffdate, g=quant))+
resid.ptlag<-felm(logprice~ptlag[,c(-10)]+#treatmentgroup+
LotSizeSquareFeet + YearBuilt + FullBath + HalfBath +
sqfeet+timetotreat+#treatmentgroup:timetotreat+#as.factor(tttyear)+
prelogprice|as.factor(lsite):as.factor(year):as.factor(quarter)+
as.factor(lsite):as.factor(preyear):as.factor(prequarter)+
as.factor(ctr)|0|lsite:year,sample1)
summary(resid.ptlag)
qu<-c("Bottom","Middle","Top")
yearstest<-9
qun<-c(-yearstest:-2,2:yearstest)
allModelFrame <- data.frame(Variable = qun,
Coefficient = as.numeric(coef(summary(resid.ptlag))[,"Estimate"][(11-yearstest):(8+yearstest)]),
SE = as.numeric(coef(summary(resid.ptlag))[,"Cluster s.e."][(11-yearstest):(8+yearstest)]),
modelName = "ptlag")
interval2 <- -qnorm((1-0.95)/2) # 95% multiplier
leg<-c('10k','8k','6k','4k','2k')
allModelFrame<-allModelFrame[allModelFrame$modelName!="2k",]
allModelFrame$modelName<-as.factor( allModelFrame$modelName)
allModelFrame$modelName<-factor(allModelFrame$modelName,levels(allModelFrame$modelName)[c(2:5,1)])
# Plot
zp1 <- ggplot(allModelFrame, aes(colour = modelName ))
zp1 <- zp1 + geom_hline(yintercept = 0, colour = gray(1/2), lty = 2)
zp1 <- zp1 + geom_pointrange(aes(x = Variable, y = Coefficient, ymin = Coefficient - SE*interval2,
ymax = Coefficient + SE*interval2,color =modelName),
lwd = 1/2, position = position_dodge(width = 1/2),
shape = 21, fill = "WHITE")
#zp1 <- zp1 + coord_flip() + theme_bw()
#zp1 <- zp1 + geom_line(data = allModelFrame, aes(linetype =modelName ), size = 1) +
zp1 <- zp1 +theme(legend.position="none")+ ggtitle("Test of Difference in Treatment and Control Groups")+xlab('Year From Treatment')
#zp1 <- zp1 + scale_x_continuous(labels=c("1"="B","2"="M","3"="T"))
zp1 <- zp1 + scale_x_continuous(breaks=qun)
print(zp1) # The trick to these is position_dodge().
if(match==""){
ggsave(file=paste(path,'latex/','pretrends',treatc,dic, 'h5.png', sep=""),height = 5,width =9)
}
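# Common-trends diagnostic: residualize log price on the covariates (treatment indicator excluded),
# then plot average residuals for treated and control sales against time to treatment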
resid.lm.t.did<-felm(logprice ~Xg[,-1]|as.factor(cbl),sample) #as.factor(HHID)+
summary(resid.lm.t.did)
coefdid<-as.matrix(resid.lm.t.did$coefficients)
coefdid[is.na(coefdid),]<-0
#Xp<-X
#Xp[,1]<-0
#sample$crdid<-sample$demlogprice-cbind(treatind,X)%*%coefdid
sample$crdid<-sample$demlogprice-Xg[,-1]%*%coefdid
sample$crdid<-sample$crdid-mean(sample$crdid)
sample$post<-ifelse(sample$timetotreat>0,1,0)
treatment <- aggregate(crdid ~ treatmentgroup+as.factor(round(timetotreat,2)), data=sample, FUN=mean, na.rm=TRUE)
names(treatment)[2]<-"timetotreat"
treatment$timetotreat<-as.numeric(as.character(treatment$timetotreat))
#treatment<-treatment[as.numeric(treatment$timetotreat)!=0.5,]
ggplot() +
geom_point(data=subset(treatment,treatmentgroup==1), aes(x=timetotreat, y=crdid, color= "Treatment Group")) +
geom_point(data=subset(treatment,treatmentgroup==0), aes(x=timetotreat, y=crdid, color= "Control Group")) +
stat_smooth(method = 'loess', formula = y ~ x ,data=subset(sample,treatmentgroup==1&post==1),se= TRUE, aes(x=timetotreat, y=crdid),color= "blue") +
stat_smooth(method = 'loess', formula = y ~ x ,data=subset(sample,treatmentgroup==1&post==0),se= TRUE, aes(x=timetotreat, y=crdid),color= "blue") +
stat_smooth(method = 'loess', formula = y ~ x ,data=subset(sample,treatmentgroup==0&post==1),se= TRUE, aes(x=timetotreat, y=crdid),color= "red") +
stat_smooth(method = 'loess', formula = y ~ x ,data=subset(sample,treatmentgroup==0&post==0),se= TRUE, aes(x=timetotreat, y=crdid),color= "red") +
ggtitle("Common Trends Assumption")+
xlab('Years from Deletion') + labs(color="Legend") + geom_vline(xintercept=0)+
ylab('Monthly Average Residuals')
if(match==""){
ggsave(file=paste(path,'latex/','lmdidavg',treatc,dic, '.png', sep=""),height = 6,width =10)
}
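# Aggregate (single-coefficient) DiD estimates of treatst, stored for the ATE comparison tables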
results.lm.t.did.agg<-felm(logprice ~treatst+Xg|
as.factor(PropertyAddressCensusTractAndBlock),sample) #as.factor(HHID)+
summary(results.lm.t.did.agg)
if(match==""){
betas.lm.did[treat,di]<-as.numeric(coef(summary(results.lm.t.did.agg))[,"Estimate"][1])
ses.lm.did[treat,di]<-as.numeric(coef(summary(results.lm.t.did.agg))[,"Std. Error"][1])
ps.lm.did[treat,di]<-as.numeric(coef(summary(results.lm.t.did.agg))[,"Pr(>|t|)"][1])
results.sp.t.did.agg<-felm(logprice ~treatst+Xglag|
as.factor(PropertyAddressCensusTractAndBlock),sample) #as.factor(HHID)+
summary(results.sp.t.did.agg)
betas.sp.did[treat,di]<-as.numeric(coef(summary(results.sp.t.did.agg))[,"Estimate"][1])
ses.sp.did[treat,di]<-as.numeric(coef(summary(results.sp.t.did.agg))[,"Std. Error"][1])
ps.sp.did[treat,di]<-as.numeric(coef(summary(results.sp.t.did.agg))[,"Pr(>|t|)"][1])
}
if(match=="match"){
betas.match.did[treat,di]<-as.numeric(coef(summary(results.lm.t.did.agg))[,"Estimate"][1])
ses.match.did[treat,di]<-as.numeric(coef(summary(results.lm.t.did.agg))[,"Std. Error"][1])
ps.match.did[treat,di]<-as.numeric(coef(summary(results.lm.t.did.agg))[,"Pr(>|t|)"][1])
}
if(match==""){
results.gam<-mgcv::gam(logprice~treatst+Xg+#s(prelogprice,bs="cr")+
#as.factor(year):as.factor(quarter)+
#as.factor(preyear):as.factor(prequarter)+
as.factor(lsite)+s(lat,long,bs="tp",m=3,k=300),data=sample)
gam.model.t<-mgcv::summary.gam(results.gam)
betas.gam.did[treat,di]<-as.numeric(gam.model.t$p.table[,"Estimate"])[2]
ses.gam.did[treat,di]<-as.numeric(gam.model.t$p.table[,"Std. Error"])[2]
ps.gam.did[treat,di]<-as.numeric(gam.model.t$p.table[,"Pr(>|t|)"])[2]
}
#results.spline<-lm(logprice~treatind+X+spTATE,sample)
#summary(results.spline)
#results.semip<-semip(logprice~treatind+X[,1:8]+preprice,nonpar=~lat+long,window1 = .5, window2 = .5,
# kern="tcub",distance="Mahal",targetfull=NULL, print.summary=TRUE, data=sample)
#summary(results.semip)
#library(gam)
library(mgcv)
if(match==""){
s=mgcv:::s
intsp<-model.matrix(~as.factor(sample$lsite):lat+as.factor(sample$lsite):long-1)
#s1<-sample[lsite!=19,]
#X1<-X[sample$lsite!=19,]
#treatind1<-treatind[sample$lsite!=19,]
results.gam<-mgcv::gam(logprice~treatind+X+#s(prelogprice,bs="cr")+as.factor(lsite):
#s(day,bs="cr")+s(predate,bs="cr")+
as.factor(year):as.factor(quarter)+
as.factor(preyear):as.factor(prequarter)+
as.factor(lsite)+
s(PropertyAddressLatitude,PropertyAddressLongitude,bs="tp",m=3,k=300),data=sample)
gam.model<-mgcv::summary.gam(results.gam)
mgcv::summary.gam(results.gam)
#results.gam<-mgcv::gam(logprice~treatind+X+#s(prelogprice,bs="cr")+
#s(predate,bs="gp")+
#s(day,bs="gp")+
#s(lat,long,bs="ts",m=3,k=500),data=sample)
#mgcv::summary.gam(results.gam)
}
if(FALSE){
vcov_both_formula <- cluster.vcov(results.lm.t.did.t, ~ lsite + year)
dim(results.gam$R)
model.matrix(results.gam$R)
vcov.HC = solve(t(X)%*%X) %*% t(X)%*%diag(ehat^2)%*%X %*% solve(t(X)%*%X)
mg$Vp <- vcov.HC
summary(mg)
all.equal(as.numeric(predict(mg,se.fit=TRUE)$se.fit),se.yhat.HC)
results.gam<-mgcv::gam(logprice~treatind+X+#s(prelogprice,bs="cr")+
#s(day,bs="gp")+
s(lsite,bs="re")+s(year,bs="re")+
s(lat,long,bs="tp",m=3,k=300),data=sample)
mgcv::summary.gam(results.gam)
results.gam<-mgcv::gamm(logprice~treatind+X+#s(prelogprice,bs="cr")+
#s(day,bs="gp")+
s(lat,long,bs="tp",m=3,k=200),
correlation=corSymm(form=~1|PropertyAddressCensusTractAndBlock),data=sample)
mgcv::summary.gam(results.gam)
qgam.fit<-qgam(logprice~treatst+nX+#s(prelogprice,bs="cr")+
#s(day,bs="gp")+
s(lat,long,bs="tp",m=3,k=300), lsig = -1,
data=sample,qu=.5, err = 0.05,control = list("tol" = 0.01))
summary.gam(qgam.fit)
neX<-model.matrix(~ treatmentgroup+#indx:treatmentgroup:as.factor(lsite)+#:indx+#bs(timetotreat,5)+bs(day,5)+#as.factor(round(timetotreat,1))+
#poly(timetotreat,3)+poly(day,3)+
#treatmentgroup*timetotreat+#treatmentgroup*day+
LotSizeSquareFeet + YearBuilt + FullBath + HalfBath +
sqfeet+as.factor(year)+as.factor(preyear)+
as.factor(quarter)+as.factor(prequarter)-1,sample)
nX<-neX
qr.nX <- qr(nX, tol=1e-2, LAPACK = FALSE)
(rnknX <- qr.nX$rank) ## 4 (number of non-collinear columns)
(keepnX <- qr.nX$pivot[seq_len(rnknX)])
## 1 2 4 5
nX <- nX[,keepnX]
rq.o<-rq.fit.sfn(as.matrix.csr(nX),y,
tmpmax=floor(10000+exp(-12.1)*(dim(nX)[1]*20-1)^2.35))
fit.qr<-rq(logprice~nX,tau=.5, data=sample,method="sfn",na.action = na.omit)
fit.qr
summary(fit.qr,se = "boot")
plot(indx,sample$logprice,xlab="Treatind", ylab="Log Price")
taus <- c(.1,.9)
abline(rq(logprice~X2,tau=.5,data=sample),col="blue")
abline(lm(logprice~X2,data=sample),lty = 3,col="red")
for( i in 1:length(taus)){
abline(rq(logprice~X2,tau=taus[i],data=sample),col="gray")
}
#PropertyAddressLatitude PropertyAddressLongitude
Wst<-readRDS(paste0(path,"Wmat",treatc,dic,".rds"), refhook = NULL)
summary(rowSums(Wst))
if(mean(rowSums(Wst))==1){
Wst<-mat2listw(Wst, style="W")
#finalb.lag.2sls.robust2 <- gstslshet(logprice~treatind+X,Wst, data = sample,
# initial.value = 0.2, eps =1e-2, inverse=FALSE,sarar=FALSE)
#summary(finalb.lag.2sls.robust2)
#effects.finalb.lag.2sls.robust2<- impacts(finalb.lag.2sls.robust2, listw= Wst, R=100)
#summary(effects.finalb.lag.2sls.robust2, zstats=TRUE, short=TRUE)
results.stsls<-sacsarlm(logprice~treatind+X, data = sample, listw=Wst, zero.policy = NULL,
na.action = na.fail)
summary(results.stsls)
}
# effects.finalb.lag.2sls.robust2<- impacts(results.stsls, listw= Wst, R=100)
#summary(effects.finalb.lag.2sls.robust2, zstats=TRUE, short=TRUE)
}
if(match==""){
if(treatc=="TATE"){
betas.lm.t.did.TATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Estimate"][1:quant])
betas.lm.t.did.TATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Estimate"][1:quant])
betas.lm.t.did.TATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Estimate"][1:quant])
betas.lm.t.did.TATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Estimate"][1:quant])
betas.sp.t.did.TATE.i[,di]<-as.numeric(coef(summary(results.sp.t.did.i))[,"Estimate"][1:quant])
betas.sp.t.did.TATE.a[,di]<-as.numeric(coef(summary(results.sp.t.did.a))[,"Estimate"][1:quant])
betas.sp.t.did.TATE.s[,di]<-as.numeric(coef(summary(results.sp.t.did.s))[,"Estimate"][1:quant])
betas.sp.t.did.TATE.y[,di]<-as.numeric(coef(summary(results.sp.t.did.y))[,"Estimate"][1:quant])
betas.gam.t.did.TATE.[,di]<-as.numeric(gam.model$p.table[,"Estimate"])[2:(quant+1)]
#betas.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Estimate"][1:quant])
ses.lm.t.did.TATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Cluster s.e."][1:quant])
ses.lm.t.did.TATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Cluster s.e."][1:quant])
ses.lm.t.did.TATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Cluster s.e."][1:quant])
ses.lm.t.did.TATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Cluster s.e."][1:quant])
ses.sp.t.did.TATE.i[,di]<-as.numeric(coef(summary(results.sp.t.did.i))[,"Cluster s.e."][1:quant])
ses.sp.t.did.TATE.a[,di]<-as.numeric(coef(summary(results.sp.t.did.a))[,"Cluster s.e."][1:quant])
ses.sp.t.did.TATE.s[,di]<-as.numeric(coef(summary(results.sp.t.did.s))[,"Cluster s.e."][1:quant])
ses.sp.t.did.TATE.y[,di]<-as.numeric(coef(summary(results.sp.t.did.y))[,"Cluster s.e."][1:quant])
ses.gam.t.did.TATE.[,di]<-as.numeric(gam.model$p.table[,"Std. Error"])[2:(quant+1)]
#ses.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Std. Error"][1:quant])
ps.lm.t.did.TATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Pr(>|t|)"][1:quant])
ps.lm.t.did.TATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Pr(>|t|)"][1:quant])
ps.lm.t.did.TATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Pr(>|t|)"][1:quant])
ps.lm.t.did.TATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.TATE.i[,di]<-as.numeric(coef(summary(results.sp.t.did.i))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.TATE.a[,di]<-as.numeric(coef(summary(results.sp.t.did.a))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.TATE.s[,di]<-as.numeric(coef(summary(results.sp.t.did.s))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.TATE.y[,di]<-as.numeric(coef(summary(results.sp.t.did.y))[,"Pr(>|t|)"][1:quant])
ps.gam.t.did.TATE.[,di]<-as.numeric(gam.model$p.table[,"Pr(>|t|)"])[2:(quant+1)]
#ps.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Pr(>|t|)"][1:quant])
}
if(treatc=="MUATE"){
betas.lm.t.did.MUATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Estimate"][1:quant])
betas.lm.t.did.MUATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Estimate"][1:quant])
betas.lm.t.did.MUATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Estimate"][1:quant])
betas.lm.t.did.MUATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Estimate"][1:quant])
betas.sp.t.did.MUATE.i[,di]<-as.numeric(coef(summary(results.sp.t.did.i))[,"Estimate"][1:quant])
betas.sp.t.did.MUATE.a[,di]<-as.numeric(coef(summary(results.sp.t.did.a))[,"Estimate"][1:quant])
betas.sp.t.did.MUATE.s[,di]<-as.numeric(coef(summary(results.sp.t.did.s))[,"Estimate"][1:quant])
betas.sp.t.did.MUATE.y[,di]<-as.numeric(coef(summary(results.sp.t.did.y))[,"Estimate"][1:quant])
betas.gam.t.did.MUATE.[,di]<-as.numeric(gam.model$p.table[,"Estimate"])[2:(quant+1)]
#betas.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Estimate"][1:quant])
ses.lm.t.did.MUATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Cluster s.e."][1:quant])
ses.lm.t.did.MUATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Cluster s.e."][1:quant])
ses.lm.t.did.MUATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Cluster s.e."][1:quant])
ses.lm.t.did.MUATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Cluster s.e."][1:quant])
ses.sp.t.did.MUATE.i[,di]<-as.numeric(coef(summary(results.sp.t.did.i))[,"Cluster s.e."][1:quant])
ses.sp.t.did.MUATE.a[,di]<-as.numeric(coef(summary(results.sp.t.did.a))[,"Cluster s.e."][1:quant])
ses.sp.t.did.MUATE.s[,di]<-as.numeric(coef(summary(results.sp.t.did.s))[,"Cluster s.e."][1:quant])
ses.sp.t.did.MUATE.y[,di]<-as.numeric(coef(summary(results.sp.t.did.y))[,"Cluster s.e."][1:quant])
ses.gam.t.did.MUATE.[,di]<-as.numeric(gam.model$p.table[,"Std. Error"])[2:(quant+1)]
#ses.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Std. Error"][1:quant])
ps.lm.t.did.MUATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Pr(>|t|)"][1:quant])
ps.lm.t.did.MUATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Pr(>|t|)"][1:quant])
ps.lm.t.did.MUATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Pr(>|t|)"][1:quant])
ps.lm.t.did.MUATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.MUATE.i[,di]<-as.numeric(coef(summary(results.sp.t.did.i))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.MUATE.a[,di]<-as.numeric(coef(summary(results.sp.t.did.a))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.MUATE.s[,di]<-as.numeric(coef(summary(results.sp.t.did.s))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.MUATE.y[,di]<-as.numeric(coef(summary(results.sp.t.did.y))[,"Pr(>|t|)"][1:quant])
ps.gam.t.did.MUATE.[,di]<-as.numeric(gam.model$p.table[,"Pr(>|t|)"])[2:(quant+1)]
#ps.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Pr(>|t|)"][1:quant])
}
if(treatc=="WLATE"){
betas.lm.t.did.WLATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Estimate"][1:quant])
betas.lm.t.did.WLATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Estimate"][1:quant])
betas.lm.t.did.WLATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Estimate"][1:quant])
betas.lm.t.did.WLATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Estimate"][1:quant])
betas.sp.t.did.WLATE.i[,di]<-as.numeric(coef(summary(results.sp.t.did.i))[,"Estimate"][1:quant])
betas.sp.t.did.WLATE.a[,di]<-as.numeric(coef(summary(results.sp.t.did.a))[,"Estimate"][1:quant])
betas.sp.t.did.WLATE.s[,di]<-as.numeric(coef(summary(results.sp.t.did.s))[,"Estimate"][1:quant])
betas.sp.t.did.WLATE.y[,di]<-as.numeric(coef(summary(results.sp.t.did.y))[,"Estimate"][1:quant])
betas.gam.t.did.WLATE.[,di]<-as.numeric(gam.model$p.table[,"Estimate"])[2:(quant+1)]
#betas.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Estimate"][1:quant])
ses.lm.t.did.WLATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Cluster s.e."][1:quant])
ses.lm.t.did.WLATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Cluster s.e."][1:quant])
ses.lm.t.did.WLATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Cluster s.e."][1:quant])
ses.lm.t.did.WLATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Cluster s.e."][1:quant])
ses.sp.t.did.WLATE.i[,di]<-as.numeric(coef(summary(results.sp.t.did.i))[,"Cluster s.e."][1:quant])
ses.sp.t.did.WLATE.a[,di]<-as.numeric(coef(summary(results.sp.t.did.a))[,"Cluster s.e."][1:quant])
ses.sp.t.did.WLATE.s[,di]<-as.numeric(coef(summary(results.sp.t.did.s))[,"Cluster s.e."][1:quant])
ses.sp.t.did.WLATE.y[,di]<-as.numeric(coef(summary(results.sp.t.did.y))[,"Cluster s.e."][1:quant])
ses.gam.t.did.WLATE.[,di]<-as.numeric(gam.model$p.table[,"Std. Error"])[2:(quant+1)]
#ses.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Std. Error"][1:quant])
ps.lm.t.did.WLATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Pr(>|t|)"][1:quant])
ps.lm.t.did.WLATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Pr(>|t|)"][1:quant])
ps.lm.t.did.WLATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Pr(>|t|)"][1:quant])
ps.lm.t.did.WLATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.WLATE.i[,di]<-as.numeric(coef(summary(results.sp.t.did.i))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.WLATE.a[,di]<-as.numeric(coef(summary(results.sp.t.did.a))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.WLATE.s[,di]<-as.numeric(coef(summary(results.sp.t.did.s))[,"Pr(>|t|)"][1:quant])
ps.sp.t.did.WLATE.y[,di]<-as.numeric(coef(summary(results.sp.t.did.y))[,"Pr(>|t|)"][1:quant])
ps.gam.t.did.WLATE.[,di]<-as.numeric(gam.model$p.table[,"Pr(>|t|)"])[2:(quant+1)]
#ps.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Pr(>|t|)"][1:quant])
}
}
if(match=="match"){
if(treatc=="TATE"){
betas.match.t.did.TATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Estimate"][1:quant])
betas.match.t.did.TATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Estimate"][1:quant])
betas.match.t.did.TATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Estimate"][1:quant])
betas.match.t.did.TATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Estimate"][1:quant])
#betas.gam.t.did.TATE.[,di]<-as.numeric(gam.model$p.table[,"Estimate"])[2:(quant+1)]
#betas.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Estimate"][1:quant])
ses.match.t.did.TATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Cluster s.e."][1:quant])
ses.match.t.did.TATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Cluster s.e."][1:quant])
ses.match.t.did.TATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Cluster s.e."][1:quant])
ses.match.t.did.TATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Cluster s.e."][1:quant])
#ses.gam.t.did.TATE.[,di]<-as.numeric(gam.model$p.table[,"Std. Error"])[2:(quant+1)]
#ses.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Std. Error"][1:quant])
ps.match.t.did.TATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Pr(>|t|)"][1:quant])
ps.match.t.did.TATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Pr(>|t|)"][1:quant])
ps.match.t.did.TATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Pr(>|t|)"][1:quant])
ps.match.t.did.TATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Pr(>|t|)"][1:quant])
#ps.gam.t.did.TATE.[,di]<-as.numeric(gam.model$p.table[,"Pr(>|t|)"])[2:(quant+1)]
#ps.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Pr(>|t|)"][1:quant])
}
if(treatc=="MUATE"){
betas.match.t.did.MUATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Estimate"][1:quant])
betas.match.t.did.MUATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Estimate"][1:quant])
betas.match.t.did.MUATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Estimate"][1:quant])
betas.match.t.did.MUATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Estimate"][1:quant])
#betas.gam.t.did.MUATE.[,di]<-as.numeric(gam.model$p.table[,"Estimate"])[2:(quant+1)]
#betas.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Estimate"][1:quant])
ses.match.t.did.MUATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Cluster s.e."][1:quant])
ses.match.t.did.MUATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Cluster s.e."][1:quant])
ses.match.t.did.MUATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Cluster s.e."][1:quant])
ses.match.t.did.MUATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Cluster s.e."][1:quant])
#ses.gam.t.did.MUATE.[,di]<-as.numeric(gam.model$p.table[,"Std. Error"])[2:(quant+1)]
#ses.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Std. Error"][1:quant])
ps.match.t.did.MUATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Pr(>|t|)"][1:quant])
ps.match.t.did.MUATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Pr(>|t|)"][1:quant])
ps.match.t.did.MUATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Pr(>|t|)"][1:quant])
ps.match.t.did.MUATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Pr(>|t|)"][1:quant])
#ps.gam.t.did.MUATE.[,di]<-as.numeric(gam.model$p.table[,"Pr(>|t|)"])[2:(quant+1)]
#ps.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Pr(>|t|)"][1:quant])
}
if(treatc=="WLATE"){
betas.match.t.did.WLATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Estimate"][1:quant])
betas.match.t.did.WLATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Estimate"][1:quant])
betas.match.t.did.WLATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Estimate"][1:quant])
betas.match.t.did.WLATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Estimate"][1:quant])
#betas.gam.t.did.WLATE.[,di]<-as.numeric(gam.model$p.table[,"Estimate"])[2:(quant+1)]
#betas.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Estimate"][1:quant])
ses.match.t.did.WLATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Cluster s.e."][1:quant])
ses.match.t.did.WLATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Cluster s.e."][1:quant])
ses.match.t.did.WLATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Cluster s.e."][1:quant])
ses.match.t.did.WLATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Cluster s.e."][1:quant])
#ses.gam.t.did.WLATE.[,di]<-as.numeric(gam.model$p.table[,"Std. Error"])[2:(quant+1)]
#ses.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Std. Error"][1:quant])
ps.match.t.did.WLATE.i[,di]<-as.numeric(coef(summary(results.lm.t.did.i))[,"Pr(>|t|)"][1:quant])
ps.match.t.did.WLATE.a[,di]<-as.numeric(coef(summary(results.lm.t.did.a))[,"Pr(>|t|)"][1:quant])
ps.match.t.did.WLATE.s[,di]<-as.numeric(coef(summary(results.lm.t.did.s))[,"Pr(>|t|)"][1:quant])
ps.match.t.did.WLATE.y[,di]<-as.numeric(coef(summary(results.lm.t.did.y))[,"Pr(>|t|)"][1:quant])
#ps.gam.t.did.WLATE.[,di]<-as.numeric(gam.model$p.table[,"Pr(>|t|)"])[2:(quant+1)]
#ps.lm.t.es.TATE[,di]<-as.numeric(coef(summary(results.lm.t.es))[,"Pr(>|t|)"][1:quant])
}
}
if(FALSE){
sdf<-10
lat<-sample$PropertyAddressLatitude
long<-sample$PropertyAddressLongitude
splat<-bs(lat, df = sdf)
splong<-bs(long, df = sdf)
spint<-model.matrix(~splat:splong)
xcTATE<-cbind(splat,splong,spint,lat,long,poly(sample$day,5),bs(sample$day, df = 10))
year<-dplyr::select(sample, starts_with('year'))
feTATE<-model.matrix(~ treatind+indx+
#prelogprice+prediffdate+predate+
data.matrix(year[,4:25]),sample)#+#timefedControlsComplete+timefed +
#aftfinalnpl+timefinalnplfe+
#data.matrix(exdum)+
#data.matrix(year[,25]),sample)
feTATE<-as.matrix(feTATE[,SD(feTATE)>0])
feTATE<-as.matrix(feTATE[,!duplicated(cor(feTATE))])
qr.X <- qr(feTATE, tol=1e-3, LAPACK = FALSE)
(rnkX <- qr.X$rank) ## 4 (number of non-collinear columns)
(keep <- qr.X$pivot[seq_len(rnkX)])
## 1 2 4 5
feTATE <- feTATE[,keep]
W<-cbind(sample$day,lat,long,X)
colnames(W)[1]<-"day"
colnames(W)[2]<-"lat"
colnames(W)[3]<-"long"
A<-sample$treatmentgroup
V<-X
Time<-sample$day
results.tmle.t.did <- tmleMSM(Y = sample$logprice, A = A, W = W, V = V, #T= Time,
MSM = "A + V",family="gaussian",
Q.SL.library = SL.library2$as.list(),
g.SL.library = SL.library2$as.list(),
#Qform = Y ~ A+V+W,
#gform = A~1,
#hAVform = A~ 1,
ub = 20,
V_SL =5,
alpha = 0.90,
inference = TRUE,
verbose=TRUE)
print(results.tmle.t.did)
summary(results.tmle.t.did)
sdf<-5
lat<-sample$PropertyAddressLatitude
long<-sample$PropertyAddressLongitude
splat<-bs(lat, df = sdf)
splong<-bs(long, df = sdf)
spint<-model.matrix(~splat:splong)
xcTATE<-cbind(splat,splong,spint,lat,long,poly(sample$day,5),bs(sample$day, df = 10))
feTATEes<-model.matrix(~ treatexCC+#aftfinalnpl+
#as.matrix(exdum)
-1,tsample)
feTATEes<-as.matrix(feTATEes[,SD(feTATEes)>0])
if(dim(feTATEes)[2]>1){
feTATEes<-feTATEes[,!duplicated(cor(feTATEes))]
}
qr.X <- qr(feTATEes, tol=1e-3, LAPACK = FALSE)
(rnkX <- qr.X$rank) ## 4 (number of non-collinear columns)
(keep <- qr.X$pivot[seq_len(rnkX)])
## 1 2 4 5
feTATEes <- feTATEes[,keep]
if(nocc==0&dim(as.matrix(feTATEes))[2]>0){
if(dim(data.matrix(tsample))[1]>40){
W<-cbind(xcTATE[sample$control==0,],xTATE[sample$control==0,],feTATEes)
A<-sample[control==0,treatst]
V<-feTATEes
Time<-sample[control==0,day]
results.tmle.t.es <- tmleMSM(Y = tsample$logprice, A = A, W = W, V = V, #T= Time,
MSM = "A + V",family="gaussian",
Q.SL.library = SL.library2$as.list(),
g.SL.library = PS.library2$as.list(),
#Qform = Y ~ A+V+W,
#gform = A~1,
#hAVform = A~ 1,
ub = 20,
V_SL =5,
alpha = 0.90,
inference = TRUE,
verbose=TRUE)
print(results.tmle.t.es)
betas.tmle.t.es[di,treat]<-results.tmle.t.es$psi["A"]
ses.tmle.t.es[di,treat]<-results.tmle.t.es$se["A"]
ps.tmle.t.es[di,treat]<-results.tmle.t.es$pvalue["A"]
cc.betas.tmle.t.es[di,treat]<-results.tmle.t.es$psi["V"]
cc.ses.tmle.t.es[di,treat]<-results.tmle.t.es$se["V"]
cc.ps.tmle.t.es[di,treat]<-results.tmle.t.es$pvalue["V"]
}}
betas.tmle.t.did[di,treat]<-results.tmle.t.did$psi["A"]
ses.tmle.t.did[di,treat]<-results.tmle.t.did$se["A"]
ps.tmle.t.did[di,treat]<-results.tmle.t.did$pvalue["A"]
if(nocc==0 & notg==0){
cc.betas.tmle.t.did[di,treat]<-results.tmle.t.did$psi["VtreatexCC"]
cc.ses.tmle.t.did[di,treat]<-results.tmle.t.did$se["VtreatexCC"]
cc.ps.tmle.t.did[di,treat]<-results.tmle.t.did$pvalue["VtreatexCC"]
}
}
print(paste0('distance = ',dic))
print(paste0('treat = ',treatc))
}
}
}
}
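# Output stage: LaTeX tables of decile-specific estimates (stars mark p-value thresholds)
# and coefficient plots comparing distance cut-offs, for each estimator and treatment definition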
for(statchange in c('')){
for(meth in c('lm','gam','match','sp')){
for(inf in c('did')){
for(treat in treatl){
for(spec in specl){
#treat<-"TATE"
p<-get(paste0(statchange,'ps.',meth,'.t.',inf,'.',treat,'.',spec))
mystars <- ifelse(p < .001, "***", ifelse(p < .01, "** ", ifelse(p < .05, "* ", ifelse(p < .1, "^\\bullet ", " "))))
if(!is.na(p[1,1])){
#pb<-exp(get(paste0(statchange,'betas.',meth,'.t.',inf,'.',treat)))-1
#rpb<-round(pb,3)
#se<-round(exp(get(paste0(statchange,'ses.',meth,'.t.',inf,'.',treat)))-1,3)
pb<-get(paste0(statchange,'betas.',meth,'.t.',inf,'.',treat,'.',spec))
rpb<-round(pb,3)
se<-round(get(paste0(statchange,'ses.',meth,'.t.',inf,'.',treat,'.',spec)),3)
srpb <- matrix(paste(rpb, mystars, sep=""), ncol=dim(pb)[2] )
nsrpb<-rbind(c("",laglead),cbind(dist,srpb))
#colnames(srpb)<-laglead
#rownames(srpb)<-dist
results.mat<-matrix(nrow= 2*dim(srpb)[1],ncol= dim(srpb)[2])
for(i in 1:dim(results.mat)[1]){
if(i %% 2 != 0){
results.mat[i,]<-srpb[ceiling(i/2),]
# rownames(ols.mat)[i]<-rownames(srpb)[ceiling(i/2)]
}
if(i %% 2 == 0){
results.mat[i,]<-paste0('(',se[ceiling(i/2),],')')
}
}
results.mat<-rbind(c('10k','8k','6k','4k','2k'),results.mat)
if(meth=='lm'){
results.mat<-rbind(c('OLS','','','',''),results.mat)
}
if(meth=='gam'){
results.mat<-rbind(c('GAM','','','',''),results.mat)
}
if(meth=='match'){
results.mat<-rbind(c('Matching','','','',''),results.mat)
}
if(meth=='sp'){
results.mat<-rbind(c('Spatial Lag','','','',''),results.mat)
}
# rn<-c(paste0("(",qcut[1],","),paste0(qcut[2],"]"),paste0("(",qcut[2],","),paste0(qcut[3],"]"),
# paste0("(",qcut[3],","),paste0(qcut[4],"]"),
# paste0("(",qcut[4],","),paste0(qcut[5],"]"),paste0("(",qcut[5],","),
# paste0(qcut[6],"]"),paste0("(",qcut[6],","),paste0(qcut[7],"]"),paste0("(",qcut[7],","),
# paste0(qcut[8],"]"),paste0("(",qcut[8],","),paste0(qcut[9],"]"),paste0("(",qcut[9],","),
# paste0(qcut[10],"]"),
# paste0("(",qcut[10],","),paste0(qcut[11],"]"))
rn<-c(paste0("(",qcut[1],",",qcut[2],"]"),"Bottom Ten Percentile" ,
paste0("(",qcut[2],",",qcut[quant],"]"),"Middle 80 Percentile" ,
paste0("(",qcut[quant],",",qcut[quant+1],"]"), "Top Ten Percentile")
rn2<-c(" "," ",paste0("(",qcut[1],", ",qcut[2],"]")," ",paste0("(",qcut[2],", ",qcut[3],"]")," ",
paste0("(",qcut[3],", ",qcut[4],"]")," ",
paste0("(",qcut[4],",",qcut[5],"]")," ",paste0("(",qcut[5],",",qcut[6],"]")," ",
paste0("(",qcut[6],",",qcut[7],"]")," ",paste0("(",qcut[7],",",qcut[8],"]")," ",
paste0("(",qcut[8],",",qcut[9],"]")," ", paste0("(",qcut[9],",",qcut[10],"]")," ",
paste0("(",qcut[10],",",qcut[11],"]")," ")
#rownames(results.mat)<-rn2
results.mat<-cbind(rn2,results.mat)
xtab<-xtable(results.mat)
align(xtab) <- "rl|rrrrr"
print.xtable(xtab,include.rownames=FALSE, hline.after = c(0,1,2,dim(results.mat)[1]),
include.colnames=FALSE, sanitize.text.function = identity,
#caption = "example",
label = paste0("tab:",meth,inf,statchange,treat,spec),
type="latex", file=paste0(path,'latex/',meth,inf,statchange,treat,spec,".tex"))
qu<-c("Bottom","Middle","Top")
qun<-c(1:10)
allModelFrame <- data.frame(Variable = qun,
Coefficient = pb[,1],
SE = se[, 1],
modelName = "10k")
for(i in 2:length(dist)){
di<-dist[i]
modelFrame <- data.frame(Variable = qun,
Coefficient = pb[,i],
SE = se[, i],
modelName = di)
allModelFrame <- data.frame(rbind(allModelFrame,modelFrame))
}
interval2 <- -qnorm((1-0.90)/2) # 90% multiplier
leg<-c('10k','8k','6k','4k','2k')
allModelFrame<-allModelFrame[allModelFrame$modelName!="2k",]
allModelFrame$modelName<-as.factor( allModelFrame$modelName)
allModelFrame$modelName<-factor(allModelFrame$modelName,levels(allModelFrame$modelName)[c(2:5,1)])
# Plot
zp1 <- ggplot(allModelFrame, aes(colour = modelName ))
zp1 <- zp1 + geom_hline(yintercept = 0, colour = gray(1/2), lty = 2)
zp1 <- zp1 + geom_pointrange(aes(x = Variable, y = Coefficient, ymin = Coefficient - SE*interval2,
ymax = Coefficient + SE*interval2,color =modelName),
lwd = 1/2, position = position_dodge(width = 1/2),
shape = 21, fill = "WHITE")
#zp1 <- zp1 + coord_flip() + theme_bw()
#zp1 <- zp1 + geom_line(data = allModelFrame, aes(linetype =modelName ), size = 1) +
zp1 <- zp1 + ggtitle("Comparing distance cut-offs")+xlab('Quantile')
#zp1 <- zp1 + scale_x_continuous(labels=c("1"="B","2"="M","3"="T"))
zp1 <- zp1 + scale_x_continuous(breaks=qun)
print(zp1) # The trick to these is position_dodge().
ggsave(file=paste(path,'latex/','coeff',meth,treat,spec, '.png', sep=""),height = 7,width =9)
}
}
}
}
}
}
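# ATE comparison tables: aggregate estimates by water source (Total / Municipal Water / Well Water) across distance buffers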
for(meth in c("lm","gam","match",'sp')){
p<-get(paste0('ps.',meth,'.did'))
mystars <- ifelse(p < .001, "***", ifelse(p < .01, "** ", ifelse(p < .05, "* ", ifelse(p < .1, "^\\bullet ", " "))))
#pb<-exp(get(paste0(statchange,'betas.',meth,'.t.',inf,'.',treat)))-1
#rpb<-round(pb,3)
#se<-round(exp(get(paste0(statchange,'ses.',meth,'.t.',inf,'.',treat)))-1,3)
pb<-get(paste0('betas.',meth,'.did'))
rpb<-round(pb,3)
se<-round(get(paste0('ses.',meth,'.did')),3)
srpb <- matrix(paste(rpb, mystars, sep=""), ncol=dim(pb)[2] )
nsrpb<-rbind(c("",laglead),cbind(dist,srpb))
#colnames(srpb)<-laglead
#rownames(srpb)<-dist
results.mat<-matrix(nrow= 2*dim(srpb)[1],ncol= dim(srpb)[2])
for(i in 1:dim(results.mat)[1]){
if(i %% 2 != 0){
results.mat[i,]<-srpb[ceiling(i/2),]
# rownames(ols.mat)[i]<-rownames(srpb)[ceiling(i/2)]
}
if(i %% 2 == 0){
results.mat[i,]<-paste0('(',se[ceiling(i/2),],')')
}
}
results.mat<-rbind(c('10k','8k','6k','4k','2k'),results.mat)
if(meth=='lm'){
results.mat<-rbind(c('OLS','','','',''),results.mat)
}
if(meth=='gam'){
results.mat<-rbind(c('GAM','','','',''),results.mat)
}
if(meth=='match'){
results.mat<-rbind(c('Matching','','','',''),results.mat)
}
if(meth=='sp'){
results.mat<-rbind(c('Spatial Lag','','','',''),results.mat)
}
# rn<-c(paste0("(",qcut[1],","),paste0(qcut[2],"]"),paste0("(",qcut[2],","),paste0(qcut[3],"]"),
# paste0("(",qcut[3],","),paste0(qcut[4],"]"),
# paste0("(",qcut[4],","),paste0(qcut[5],"]"),paste0("(",qcut[5],","),
# paste0(qcut[6],"]"),paste0("(",qcut[6],","),paste0(qcut[7],"]"),paste0("(",qcut[7],","),
# paste0(qcut[8],"]"),paste0("(",qcut[8],","),paste0(qcut[9],"]"),paste0("(",qcut[9],","),
# paste0(qcut[10],"]"),
# paste0("(",qcut[10],","),paste0(qcut[11],"]"))
rn<-c(" "," ", "Total"," ","Municipal Water"," ","Well Water"," ")
#rownames(results.mat)<-rn
results.mat<-cbind(rn,results.mat)
xtab<-xtable(results.mat)
align(xtab) <- "rl|rrrrr"
print.xtable(xtab,include.rownames=FALSE, hline.after = c(0,1,2,dim(results.mat)[1]),
include.colnames=FALSE, sanitize.text.function = identity,
#caption = "example",
label = paste0("tab:",meth,"water"),
type="latex", file=paste0(path,'latex/ATEcomp',meth,'.tex'))
qu<-c("Bottom","Middle","Top")
if(FALSE){
allModelFrame <- data.frame(Variable = qu,
Coefficient = pb[,1],
SE = se[, 1],
modelName = "10k")
for(i in 2:length(dist)){
di<-dist[i]
modelFrame <- data.frame(Variable = qu,
Coefficient = pb[,i],
SE = se[, i],
modelName = di)
allModelFrame <- data.frame(rbind(allModelFrame,modelFrame))
}
interval2 <- -qnorm((1-0.90)/2) # 90% multiplier
leg<-c('10k','8k','6k','4k','2k')
allModelFrame<-allModelFrame[allModelFrame$modelName!="2k",]
allModelFrame$modelName<-as.factor( allModelFrame$modelName)
allModelFrame$modelName<-factor(allModelFrame$modelName,levels(allModelFrame$modelName)[c(2:5,1)])
# Plot
zp1 <- ggplot(allModelFrame, aes(colour = modelName ))
zp1 <- zp1 + geom_hline(yintercept = 0, colour = gray(1/2), lty = 2)
zp1 <- zp1 + geom_pointrange(aes(x = Variable, y = Coefficient, ymin = Coefficient - SE*interval2,
ymax = Coefficient + SE*interval2,color =modelName),
lwd = 1/2, position = position_dodge(width = 1/2),
shape = 21, fill = "WHITE")
#zp1 <- zp1 + coord_flip() + theme_bw()
#zp1 <- zp1 + geom_line(data = allModelFrame, aes(linetype =modelName ), size = 1) +
zp1 <- zp1 + ggtitle("Comparing distance cut-offs")+xlab('Quantile')
#zp1 <- zp1 + scale_x_discrete(breaks=qu)
print(zp1) # The trick to these is position_dodge().
ggsave(file=paste(path,'latex/','coeff',meth,treat, '.png', sep=""),height = 7,width =9)
}
}
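# Robustness output: tables and coefficient plots comparing fixed-effect specifications
# (census block, block group, tract, Superfund site) within each distance buffer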
for(statchange in c('')){
for(meth in c('lm',"match",'sp')){
for(inf in c('did')){
for(treat in treatl){
for(di in dist){
#treat<-"TATE"
#di<-1
#meth<-'lm'
p1<-get(paste0(statchange,'ps.',meth,'.t.',inf,'.',treat,'.',specl[1]))
p2<-get(paste0(statchange,'ps.',meth,'.t.',inf,'.',treat,'.',specl[2]))
p3<-get(paste0(statchange,'ps.',meth,'.t.',inf,'.',treat,'.',specl[3]))
p4<-get(paste0(statchange,'ps.',meth,'.t.',inf,'.',treat,'.',specl[4]))
p<-rbind(p1,p2,p3,p4)
mystars <- ifelse(p < .001, "***", ifelse(p < .01, "** ", ifelse(p < .05, "* ", ifelse(p < .1, "^\\bullet ", " "))))
if(!is.na(p[1,1])){
#pb<-exp(get(paste0(statchange,'betas.',meth,'.t.',inf,'.',treat)))-1
#rpb<-round(pb,3)
#se<-round(exp(get(paste0(statchange,'ses.',meth,'.t.',inf,'.',treat)))-1,3)
pb1<-get(paste0(statchange,'betas.',meth,'.t.',inf,'.',treat,'.',specl[1]))
pb2<-get(paste0(statchange,'betas.',meth,'.t.',inf,'.',treat,'.',specl[2]))
pb3<-get(paste0(statchange,'betas.',meth,'.t.',inf,'.',treat,'.',specl[3]))
pb4<-get(paste0(statchange,'betas.',meth,'.t.',inf,'.',treat,'.',specl[4]))
pb<-rbind(pb1,pb2,pb3,pb4)
pb10k<-cbind(pb[1:10,1],pb[11:20,1],pb[21:30,1],pb[31:40,1])
pb8k<-cbind(pb[1:10,2],pb[11:20,2],pb[21:30,2],pb[31:40,2])
pb6k<-cbind(pb[1:10,3],pb[11:20,3],pb[21:30,3],pb[31:40,3])
pb4k<-cbind(pb[1:10,4],pb[11:20,4],pb[21:30,4],pb[31:40,4])
rpb<-round(pb,3)
se1<-round(get(paste0(statchange,'ses.',meth,'.t.',inf,'.',treat,'.',specl[1])),3)
se2<-round(get(paste0(statchange,'ses.',meth,'.t.',inf,'.',treat,'.',specl[2])),3)
se3<-round(get(paste0(statchange,'ses.',meth,'.t.',inf,'.',treat,'.',specl[3])),3)
se4<-round(get(paste0(statchange,'ses.',meth,'.t.',inf,'.',treat,'.',specl[4])),3)
se<-rbind(se1,se2,se3,se4)
se10k<-cbind(se[1:10,1],se[11:20,1],se[21:30,1],se[31:40,1])
se8k<-cbind(se[1:10,2],se[11:20,2],se[21:30,2],se[31:40,2])
se6k<-cbind(se[1:10,3],se[11:20,3],se[21:30,3],se[31:40,3])
se4k<-cbind(se[1:10,4],se[11:20,4],se[21:30,4],se[31:40,4])
srpb <- matrix(paste(rpb, mystars, sep=""), ncol=dim(pb)[2] )
nsrpb<-rbind(c("",laglead),cbind(dist,srpb))
#colnames(srpb)<-laglead
#rownames(srpb)<-dist
#10k
results.mat<-matrix(nrow= 2*dim(srpb)[1],ncol= dim(srpb)[2])
for(i in 1:dim(results.mat)[1]){
if(i %% 2 != 0){
results.mat[i,]<-srpb[ceiling(i/2),]
# rownames(ols.mat)[i]<-rownames(srpb)[ceiling(i/2)]
}
if(i %% 2 == 0){
results.mat[i,]<-paste0('(',se[ceiling(i/2),],')')
}
}
colnames(results.mat)<-c('10k','8k','6k','4k','2k')
# rn<-c(paste0("(",qcut[1],","),paste0(qcut[2],"]"),paste0("(",qcut[2],","),paste0(qcut[3],"]"),
# paste0("(",qcut[3],","),paste0(qcut[4],"]"),
# paste0("(",qcut[4],","),paste0(qcut[5],"]"),paste0("(",qcut[5],","),
# paste0(qcut[6],"]"),paste0("(",qcut[6],","),paste0(qcut[7],"]"),paste0("(",qcut[7],","),
# paste0(qcut[8],"]"),paste0("(",qcut[8],","),paste0(qcut[9],"]"),paste0("(",qcut[9],","),
# paste0(qcut[10],"]"),
# paste0("(",qcut[10],","),paste0(qcut[11],"]"))
results10k<-cbind(results.mat[1:20,1],results.mat[21:40,1],results.mat[41:60,1],results.mat[61:80,1])
results8k<-cbind(results.mat[1:20,2],results.mat[21:40,2],results.mat[41:60,2],results.mat[61:80,2])
results6k<-cbind(results.mat[1:20,3],results.mat[21:40,3],results.mat[41:60,3],results.mat[61:80,3])
results4k<-cbind(results.mat[1:20,4],results.mat[21:40,4],results.mat[41:60,4],results.mat[61:80,4])
FEmatrix<- c("Census Tract","Census Tract","Superfund Site","Superfund Site")
Cluster<-c("Tract by Year","Site by Year","Tract by Year","Site by Year")
rn2<-c(paste0("(",qcut[1],",",qcut[2],"]")," ",paste0("(",qcut[2],",",qcut[3],"]")," ",
paste0("(",qcut[3],",",qcut[4],"]")," ",
paste0("(",qcut[4],",",qcut[5],"]")," ",paste0("(",qcut[5],",",qcut[6],"]")," ",
paste0("(",qcut[6],",",qcut[7],"]")," ",paste0("(",qcut[7],",",qcut[8],"]")," ",
paste0("(",qcut[8],",",qcut[9],"]")," ", paste0("(",qcut[9],",",qcut[10],"]")," ",
paste0("(",qcut[10],",",qcut[11],"]")," ", "Fixed Effects", "Cluster")
results10k<-rbind(results10k,FEmatrix,Cluster)
results8k<-rbind(results8k,FEmatrix,Cluster)
results6k<-rbind(results6k,FEmatrix,Cluster)
results4k<-rbind(results4k,FEmatrix,Cluster)
rownames(results10k)<-rn2
rownames(results8k)<-rn2
rownames(results6k)<-rn2
rownames(results4k)<-rn2
rn<-c(paste0("(",qcut[1],",",qcut[2],"]"),"Bottom Ten Percentile" ,
paste0("(",qcut[2],",",qcut[quant],"]"),"Middle 80 Percentile" ,
paste0("(",qcut[quant],",",qcut[quant+1],"]"), "Top Ten Percentile")
xtable(results10k)
print.xtable(xtable(results10k),include.rownames=TRUE,
include.colnames=FALSE, sanitize.text.function = identity,
type="latex", file=paste0(path,'latex/',meth,inf,statchange,treat,"10k.tex"))
xtable(results8k)
print.xtable(xtable(results8k),include.rownames=TRUE,
include.colnames=FALSE, sanitize.text.function = identity,
type="latex", file=paste0(path,'latex/',meth,inf,statchange,treat,"8k.tex"))
xtable(results6k)
print.xtable(xtable(results6k),include.rownames=TRUE,
include.colnames=FALSE, sanitize.text.function = identity,
type="latex", file=paste0(path,'latex/',meth,inf,statchange,treat,"6k.tex"))
xtable(results4k)
print.xtable(xtable(results4k),include.rownames=TRUE,
include.colnames=FALSE, sanitize.text.function = identity,
type="latex", file=paste0(path,'latex/',meth,inf,statchange,treat,"4k.tex"))
qu<-c("Bottom","Middle","Top")
leg<-c('Block','Block Group','Tract','Site')
qun<-c(1:10)
for(j in c("4k","6k","8k","10k")){
pb<-get(paste0("pb",j))
se<-get(paste0("se",j))
allModelFrame <- data.frame(Variable = qun,
Coefficient = pb[,1],
SE = se[, 1],
modelName = 'Block')
for(i in 2:length(leg)){
le<-leg[i]
modelFrame <- data.frame(Variable = qun,
Coefficient = pb[,i],
SE = se[, i],
modelName = le)
allModelFrame <- data.frame(rbind(allModelFrame,modelFrame))
}
interval2 <- -qnorm((1-0.95)/2) # 95% multiplier
allModelFrame<-allModelFrame[allModelFrame$modelName!="2k",]
allModelFrame$modelName<-factor( allModelFrame$modelName,ordered = TRUE)
allModelFrame$modelName<-factor(allModelFrame$modelName,levels(allModelFrame$modelName)[c(1,2,4,3)],ordered = TRUE)
# Plot
zp1 <- ggplot(allModelFrame, aes(colour = modelName ))
zp1 <- zp1 + geom_hline(yintercept = 0, colour = gray(1/2), lty = 2)
zp1 <- zp1 + geom_pointrange(aes(x = Variable, y = Coefficient, ymin = Coefficient - SE*interval2,
ymax = Coefficient + SE*interval2,color =modelName),
lwd = 1/2, position = position_dodge(width = 1/2),
shape = 21, fill = "WHITE")
#zp1 <- zp1 + coord_flip() + theme_bw()
#zp1 <- zp1 + geom_line(data = allModelFrame, aes(linetype =modelName ), size = 1) +
zp1 <- zp1 + ggtitle("Comparing distance cut-offs")+xlab('Quantile')
#zp1 <- zp1 + scale_x_continuous(labels=c("1"="B","2"="M","3"="T"))
#scale_color_discrete(breaks=c("1","3","10")
zp1 <- zp1 + scale_x_continuous(breaks=qun)
zp1 <- zp1 + labs(color="Fixed Effects")
print(zp1) # The trick to these is position_dodge().
ggsave(file=paste(path,'latex/','coeff',meth,treat,j, 'ols.png', sep=""),height = 7,width =9)
}
}
}
}
}
}
}
|
x=c(0,1,2,3,4,5)
y=x*2
plot(x,y)
|
/Lesson 1 code.R
|
no_license
|
jboyd8/r_footbalanalytics_course
|
R
| false | false | 35 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MethComp-package.R
\docType{data}
\name{sbp.MC}
\alias{sbp.MC}
\title{A MCmcmc object from the sbp data}
\format{
The format is a \code{\link{MCmcmc}} object.
}
\description{
This object is included for illustrative purposes. It is a result of using
\code{\link{MCmcmc}}, with \code{n.iter=100000} on the dataset
\code{\link{sbp}} from this package.
}
\details{
The basic data are measurements of systolic blood pressure from the
\code{\link{sbp}} dataset. Measurements are taken to be linked within
replicate. The code used to generate the object was: \preformatted{
  library(MethComp)
  data( sbp )
  sbp <- Meth( sbp )
  sbp.MC <- MCmcmc( sbp, linked=TRUE, n.iter=100000, program="JAGS" ) }
}
\examples{
data(sbp.MC)
# How was the data generated
attr(sbp.MC,"mcmc.par")
# Traceplots
trace.MCmcmc(sbp.MC)
trace.MCmcmc(sbp.MC,"beta")
# A MCmcmc object also has class mcmc.list, so we can use the
# standard coda functions for convergence diagnostics:
# acfplot( subset.MCmcmc(sbp.MC,subset="sigma") )
# Have a look at the correlation between the 9 variance parameters
pairs( sbp.MC )
# Have a look at whether the MxI variance components are the same between methods:
\dontrun{
pairs( sbp.MC, subset=c("mi"), eq=TRUE,
panel=function(x,y,...)
{
abline(0,1)
abline(v=median(x),h=median(y),col="gray")
points(x,y,...)
}
) }
}
\keyword{datasets}
|
/man/sbp.MC.Rd
|
no_license
|
ekstroem/MethComp
|
R
| false | true | 1,511 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MethComp-package.R
\docType{data}
\name{sbp.MC}
\alias{sbp.MC}
\title{A MCmcmc object from the sbp data}
\format{
The format is a \code{\link{MCmcmc}} object.
}
\description{
This object is included for illustrative purposes. It is a result of using
\code{\link{MCmcmc}}, with \code{n.iter=100000} on the dataset
\code{\link{sbp}} from this package.
}
\details{
The basic data are measurements of systolic blood pressure from the
\code{\link{sbp}} dataset. Measurements are taken to be linked within
replicate. The code used to generate the object was: \preformatted{
library(MethComp) data( sbp ) sbp <- Meth( sbp ) sbp.MC <- MCmcmc( sbp,
linked=TRUE, n.iter=100000, program="JAGS" ) }
}
\examples{
data(sbp.MC)
# How was the data generated
attr(sbp.MC,"mcmc.par")
# Traceplots
trace.MCmcmc(sbp.MC)
trace.MCmcmc(sbp.MC,"beta")
# A MCmcmc object also has class mcmc.list, so we can use the
# standard coda functions for convergence diagnostics:
# acfplot( subset.MCmcmc(sbp.MC,subset="sigma") )
# Have a look at the correlation between the 9 variance parameters
pairs( sbp.MC )
# Have a look at whether the MxI variance components are the same between methods:
\dontrun{
pairs( sbp.MC, subset=c("mi"), eq=TRUE,
panel=function(x,y,...)
{
abline(0,1)
abline(v=median(x),h=median(y),col="gray")
points(x,y,...)
}
) }
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/asciiart.R
\name{asciiart}
\alias{asciiart}
\title{Create asciiart image}
\usage{
asciiart(
file,
width = 80,
text_scaling = 1,
out_width = 8,
out_name = NULL,
text_col = "black",
chars = c("@", "\%", "#", "*", "+", "=", "-", ":", ".", " "),
print_text = FALSE
)
}
\arguments{
\item{file}{Path to image file or array returned from \code{jpeg::readJPEG(), png::readPNG()}}
\item{width}{Width of output image in text characters. Use NA for original image width}
\item{text_scaling}{Text scaling factor for tweaking text size (default = 1)}
\item{out_width}{Output image width in inches}
\item{out_name}{File path including name for saved image}
\item{text_col}{Colour of text (default = "black")}
\item{chars}{Character vector set for ascii art (ordered from dark to light)}
\item{print_text}{Logical. Should the text making the picture be printed to the console?
This is hacky, every other row is removed from the image to roughly correct for vertical stretching. This
text output will not have the same aspect ratio as the original image}
}
\description{
Creates an ASCII-art/text-art image where pixel intensity is mapped to the density of character glyphs
}
\details{
A uniform grid image can be written to a device (png, pdf etc...) and an approximation of the same image
can be printed to the console (or the output of calling \code{asciiart()} could be written to a text
file with \code{readr::write_lines()}). The text written to the console by setting \code{print_text = TRUE} will not
maintain the exact same aspect ratio as the original image
}
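\examples{
## Illustrative sketch only: the input file and the output name below are placeholder
## paths, not files shipped with the package.
\dontrun{
asciiart("portrait.jpg", width = 100, out_name = "portrait_ascii.png")
}
}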
|
/man/asciiart.Rd
|
no_license
|
cj-holmes/asciiart
|
R
| false | true | 1,656 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/asciiart.R
\name{asciiart}
\alias{asciiart}
\title{Create asciiart image}
\usage{
asciiart(
file,
width = 80,
text_scaling = 1,
out_width = 8,
out_name = NULL,
text_col = "black",
chars = c("@", "\%", "#", "*", "+", "=", "-", ":", ".", " "),
print_text = FALSE
)
}
\arguments{
\item{file}{Path to image file or array returned from \code{jpeg::readJPEG(), png::readPNG()}}
\item{width}{Width of output image in text characters. Use NA for original image width}
\item{text_scaling}{Text scaling factor for tweaking text size (default = 1)}
\item{out_width}{Output image width in inches}
\item{out_name}{File path including name for saved image}
\item{text_col}{Colour of text (default = "black")}
\item{chars}{Character vector set for ascii art (ordered from dark to light)}
\item{print_text}{Logical. Should the text making the picture be printed to the console?
This is hacky, every other row is removed from the image to roughly correct for vertical stretching. This
text output will not have the same aspect ratio as the original image}
}
\description{
Creates an ASCII-art/text-art image where pixel intensity is mapped to the density of character glyphs
}
\details{
A uniform grid image can be written to a device (png, pdf etc...) and an approximation of the same image
can be printed to the console (or the output of calling \code{asciiart()} could be written to a text
file with \code{readr::write_lines()}). The text written to the console by setting \code{print_text = TRUE} will not
maintain the exact same aspect ratio as the original image
}
|
#-------------------------------------------------------------------------------
# Copyright (c) 2012 University of Illinois, NCSA.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the
# University of Illinois/NCSA Open Source License
# which accompanies this distribution, and is available at
# http://opensource.ncsa.illinois.edu/license.html
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
##' Reads output from model ensemble
##'
##' Reads output for an ensemble of length specified by \code{ensemble.size} and bounded by \code{start.year} and \code{end.year}
##' @title Read ensemble output
##' @return a list of ensemble model output
##' @param ensemble.size the number of ensemble members run
##' @param pecandir specifies where pecan writes its configuration files
##' @param outdir directory with model output to use in ensemble analysis
##' @param start.year first year to include in ensemble analysis
##' @param end.year last year to include in ensemble analysis
##' @param variables target variables for ensemble analysis
##' @param model ecosystem model run
##' @export
#--------------------------------------------------------------------------------------------------#
read.ensemble.output <- function(ensemble.size, pecandir, outdir,
start.year, end.year, variables, model){
if (exists('runs.samples')) {
ensemble.runs <- runs.samples$ensemble
} else {
ensemble.runs <- list()
samples.file <- file.path(pecandir, 'samples.Rdata')
print(samples.file)
if(file.exists(samples.file)){
load(samples.file)
ensemble.runs <- runs.samples$ensemble
} else {
stop(samples.file, "not found required by read.ensemble.output")
}
}
ensemble.output <- list()
for(row in rownames(ensemble.runs)) {
run.id <- ensemble.runs[row, 'id']
print(run.id)
ensemble.output[[row]] <- sapply(read.output(run.id, file.path(outdir, run.id), model,
start.year, end.year, variables),
mean,na.rm=TRUE)
}
return(ensemble.output)
}
#==================================================================================================#
##' Get parameter values used in ensemble
##'
##' Returns a matrix of trait values sampled quasi-randomly based on the Halton sequence
##' to be assigned to traits over several model runs.
##' given the number of model runs and a list of sample distributions for traits
##' The model run is indexed first by model run, then by trait
##'
##' @title Get Ensemble Samples
##' @name get.ensemble.samples
##' @param ensemble.size number of runs in model ensemble
##' @param pft.samples random samples from parameter distribution, e.g. from a MCMC chain or a
##' @param env.samples env samples
##' @param method the method used to generate the ensemble samples. default = halton
##' @return matrix of quasi-random (overdispersed) samples from trait distributions
##' @export
##' @import randtoolbox
##' @references Halton, J. (1964), Algorithm 247: Radical-inverse quasi-random point sequence, ACM, p. 701, doi:10.1145/355588.365104.
##' @author David LeBauer
get.ensemble.samples <- function(ensemble.size, pft.samples,env.samples,method="halton") {
##force as numeric for compatibility with Fortran code in halton()
ensemble.size <- as.numeric(ensemble.size)
if(ensemble.size <= 0){
ans <- NULL
} else if (ensemble.size == 1) {
ans <- get.sa.sample.list(pft.samples,env.samples,0.50)
} else {
pft.samples[[length(pft.samples)+1]] = env.samples
names(pft.samples)[length(pft.samples)] <- 'env'
pft2col <- NULL
for(i in 1:length(pft.samples)){
pft2col <- c(pft2col,rep(i,length(pft.samples[[i]])))
}
halton.samples <- NULL
if(method == "halton"){
halton.samples <- halton(n = ensemble.size, dim=length(pft2col))
##force as a matrix in case length(samples)=1
halton.samples <- as.matrix(halton.samples)
} else {
#uniform random
halton.samples <- matrix(runif(ensemble.size*length(pft2col))
,ensemble.size,length(pft2col))
}
total.sample.num <- sum(sapply(pft.samples, length))
halton.samples <- NULL
if(method == "halton"){
halton.samples <- halton(n = ensemble.size, dim=total.sample.num)
##force as a matrix in case length(samples)=1
halton.samples <- as.matrix(halton.samples)
} else {
#uniform random
halton.samples <- matrix(runif(ensemble.size*total.sample.num),
ensemble.size, total.sample.num)
}
ensemble.samples <- list()
col.i <- 0
for(pft.i in seq(pft.samples)){
ensemble.samples[[pft.i]] <-
matrix(nrow=ensemble.size,ncol=length(pft.samples[[pft.i]]))
for(trait.i in seq(pft.samples[[pft.i]])) {
col.i<-col.i+1
ensemble.samples[[pft.i]][, trait.i] <-
quantile(pft.samples[[pft.i]][[trait.i]],
halton.samples[, col.i])
} # end trait
ensemble.samples[[pft.i]] <- as.data.frame(ensemble.samples[[pft.i]])
colnames(ensemble.samples[[pft.i]]) <- names(pft.samples[[pft.i]])
} #end pft
names(ensemble.samples) <- names(pft.samples)
ans <- ensemble.samples
}
return(ans)
} ### End of function: get.ensemble.samples
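## Minimal illustration of the sampling idea used above (not part of the PEcAn API;
## the prior samples below are fabricated). Wrapped in `if (FALSE)` so that sourcing
## this file does not execute it.
if (FALSE) {
  library(randtoolbox)
  priors <- list(Vcmax = rnorm(1000, 40, 5), SLA = rlnorm(1000, 3, 0.3))
  u <- as.matrix(halton(n = 10, dim = length(priors)))   # quasi-random points in [0,1]
  ens <- sapply(seq_along(priors),
                function(j) quantile(priors[[j]], u[, j]))  # map points to trait quantiles
  colnames(ens) <- names(priors)
  ens   # 10 overdispersed parameter sets, one column per trait
}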
#==================================================================================================#
##' Write ensemble config files
##'
##' Writes config files for use in meta-analysis and returns a list of run ids.
##' Given a pft.xml object, a list of lists as supplied by get.sa.samples,
##' a name to distinguish the output files, and the directory to place the files.
##' @title Write ensemble configs
##' @param defaults pft
##' @param ensemble.samples list of lists supplied by \link{get.ensemble.samples}
##' @param settings list of PEcAn settings
##' @param model name of the model; used to dispatch the model-specific write.config function, e.g. \link{write.config.ED}
##' @param clean remove old output first?
##' @param write.to.db logical: should the run and ensemble information be written to the database?
##' @return data frame of runids, writes ensemble configuration files as a side effect
##' @export
##' @author David LeBauer, Carl Davidson
write.ensemble.configs <- function(defaults, ensemble.samples, settings,
model, clean=FALSE, write.to.db = TRUE){
my.write.config <- paste("write.config.",model,sep="")
if(!exists(my.write.config)){
print(paste(my.write.config,"does not exist"))
print(paste("please make sure that the PEcAn interface is loaded for",model))
stop()
}
# TODO RK : fix this since names have changed
if(clean){
## Remove old files
if(settings$run$host$name == 'localhost') {
if("ENS" %in% dir(settings$run$host$rundir)){
file.remove(paste(settings$run$host$rundir, '*',
get.run.id('ENS', '', pft.name=pft.name), '*"', sep=''))
}
} else {
ssh(settings$run$host$name, 'rm -f ', settings$run$host$rundir, '*',
get.run.id('ENS', '', pft.name=pft.name, '*'))
}
}
if(is.null(ensemble.samples)) return(NULL)
# Open connection to database so we can store all run/ensemble information
if(write.to.db){
con <- try(db.open(settings$database), silent=TRUE)
if(is.character(con)){
con <- NULL
}
} else {
con <- NULL
}
# Get the workflow id
if ("workflow" %in% names(settings)) {
workflow.id <- settings$workflow$id
} else {
workflow.id <- -1
}
# create an ensemble id
if (!is.null(con)) {
# write ensemble first
query.base(paste("INSERT INTO ensembles (created_at, runtype, workflow_id) values (NOW(), 'ensemble', ", workflow.id, ")", sep=''), con)
ensemble.id <- query.base(paste("SELECT LAST_INSERT_ID() AS ID"), con)[['ID']]
} else {
ensemble.id <- "NA"
}
# write configuration for each run of the ensemble
runs <- data.frame()
for(counter in 1:settings$ensemble$size) {
if (!is.null(con)) {
paramlist <- paste("ensemble=", counter, sep='')
query.base(paste("INSERT INTO runs (model_id, site_id, start_time, finish_time, outdir, created_at, ensemble_id, parameter_list) values ('", settings$model$id, "', '", settings$run$site$id, "', '", settings$run$start.date, "', '", settings$run$end.date, "', '",settings$run$outdir , "', NOW(), ", ensemble.id, ", '", paramlist, "')", sep=''), con)
run.id <- query.base(paste("SELECT LAST_INSERT_ID() AS ID"), con)[['ID']]
} else {
run.id <- get.run.id('ENS', left.pad.zeros(counter, 5))
}
runs[counter, 'id'] <- run.id
# create folders (cleaning up old ones if needed)
if(clean) {
unlink(file.path(settings$rundir, run.id))
unlink(file.path(settings$modeloutdir, run.id))
}
dir.create(file.path(settings$rundir, run.id), recursive=TRUE)
dir.create(file.path(settings$modeloutdir, run.id), recursive=TRUE)
# write run information to disk
cat("runtype : ensemble\n",
"workflow id : ", workflow.id, "\n",
"ensemble id : ", ensemble.id, "\n",
"run : ", counter, "/", settings$ensemble$size, "\n",
"run id : ", run.id, "\n",
"pft names : ", as.character(lapply(settings$pfts, function(x) x[['name']])), "\n",
"model : ", model, "\n",
"model id : ", settings$model$id, "\n",
"site : ", settings$run$site$name, "\n",
"site id : ", settings$run$site$id, "\n",
"met data : ", settings$run$site$met, "\n",
"start date : ", settings$run$start.date, "\n",
"end date : ", settings$run$end.date, "\n",
"hostname : ", settings$run$host$name, "\n",
"rundir : ", file.path(settings$run$host$rundir, run.id), "\n",
"outdir : ", file.path(settings$run$host$outdir, run.id), "\n",
file=file.path(settings$rundir, run.id, "README.txt"), sep='')
do.call(my.write.config,args=list(defaults,
lapply(ensemble.samples,function(x,n){x[n,]},n=counter),
settings, run.id))
cat(run.id, file=file.path(settings$rundir, "runs.txt"), sep="\n", append=TRUE)
}
if (!is.null(con)) {
db.close(con)
}
invisible(runs)
} ### End of function: write.ensemble.configs
#==================================================================================================#
|
/utils/R/ensemble.R
|
permissive
|
jingxia/pecan
|
R
| false | false | 10,648 |
r
|
#-------------------------------------------------------------------------------
# Copyright (c) 2012 University of Illinois, NCSA.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the
# University of Illinois/NCSA Open Source License
# which accompanies this distribution, and is available at
# http://opensource.ncsa.illinois.edu/license.html
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
##' Reads output from model ensemble
##'
##' Reads output for an ensemble of length specified by \code{ensemble.size} and bounded by \code{start.year} and \code{end.year}
##' @title Read ensemble output
##' @return a list of ensemble model output
##' @param ensemble.size the number of ensemble members run
##' @param pecandir specifies where pecan writes its configuration files
##' @param outdir directory with model output to use in ensemble analysis
##' @param start.year first year to include in ensemble analysis
##' @param end.year last year to include in ensemble analysis
##' @param variables target variables for ensemble analysis
##' @param model ecosystem model run
##' @export
#--------------------------------------------------------------------------------------------------#
read.ensemble.output <- function(ensemble.size, pecandir, outdir,
start.year, end.year, variables, model){
if (exists('runs.samples')) {
ensemble.runs <- runs.samples$ensemble
} else {
ensemble.runs <- list()
samples.file <- file.path(pecandir, 'samples.Rdata')
print(samples.file)
if(file.exists(samples.file)){
load(samples.file)
ensemble.runs <- runs.samples$ensemble
} else {
stop(samples.file, "not found required by read.ensemble.output")
}
}
ensemble.output <- list()
for(row in rownames(ensemble.runs)) {
run.id <- ensemble.runs[row, 'id']
print(run.id)
ensemble.output[[row]] <- sapply(read.output(run.id, file.path(outdir, run.id), model,
start.year, end.year, variables),
mean,na.rm=TRUE)
}
return(ensemble.output)
}
#==================================================================================================#
##' Get parameter values used in ensemble
##'
##' Returns a matrix of trait values sampled quasi-randomly based on the Halton sequence
##' to be assigned to traits over several model runs.
##' given the number of model runs and a list of sample distributions for traits
##' The model run is indexed first by model run, then by trait
##'
##' @title Get Ensemble Samples
##' @name get.ensemble.samples
##' @param ensemble.size number of runs in model ensemble
##' @param pft.samples random samples from parameter distribution, e.g. from a MCMC chain or a
##' @param env.samples env samples
##' @param method the method used to generate the ensemble samples. default = halton
##' @return matrix of quasi-random (overdispersed) samples from trait distributions
##' @export
##' @import randtoolbox
##' @references Halton, J. (1964), Algorithm 247: Radical-inverse quasi-random point sequence, ACM, p. 701, doi:10.1145/355588.365104.
##' @author David LeBauer
get.ensemble.samples <- function(ensemble.size, pft.samples,env.samples,method="halton") {
##force as numeric for compatibility with Fortran code in halton()
ensemble.size <- as.numeric(ensemble.size)
if(ensemble.size <= 0){
ans <- NULL
} else if (ensemble.size == 1) {
ans <- get.sa.sample.list(pft.samples,env.samples,0.50)
} else {
pft.samples[[length(pft.samples)+1]] = env.samples
names(pft.samples)[length(pft.samples)] <- 'env'
pft2col <- NULL
for(i in 1:length(pft.samples)){
pft2col <- c(pft2col,rep(i,length(pft.samples[[i]])))
}
halton.samples <- NULL
if(method == "halton"){
halton.samples <- halton(n = ensemble.size, dim=length(pft2col))
##force as a matrix in case length(samples)=1
halton.samples <- as.matrix(halton.samples)
} else {
#uniform random
halton.samples <- matrix(runif(ensemble.size*length(pft2col))
,ensemble.size,length(pft2col))
}
total.sample.num <- sum(sapply(pft.samples, length))
halton.samples <- NULL
if(method == "halton"){
halton.samples <- halton(n = ensemble.size, dim=total.sample.num)
##force as a matrix in case length(samples)=1
halton.samples <- as.matrix(halton.samples)
} else {
#uniform random
halton.samples <- matrix(runif(ensemble.size*total.sample.num),
ensemble.size, total.sample.num)
}
ensemble.samples <- list()
col.i <- 0
for(pft.i in seq(pft.samples)){
ensemble.samples[[pft.i]] <-
matrix(nrow=ensemble.size,ncol=length(pft.samples[[pft.i]]))
for(trait.i in seq(pft.samples[[pft.i]])) {
col.i<-col.i+1
ensemble.samples[[pft.i]][, trait.i] <-
quantile(pft.samples[[pft.i]][[trait.i]],
halton.samples[, col.i])
} # end trait
ensemble.samples[[pft.i]] <- as.data.frame(ensemble.samples[[pft.i]])
colnames(ensemble.samples[[pft.i]]) <- names(pft.samples[[pft.i]])
} #end pft
names(ensemble.samples) <- names(pft.samples)
ans <- ensemble.samples
}
return(ans)
} ### End of function: get.ensemble.samples
#==================================================================================================#
##' Write ensemble config files
##'
##' Writes config files for use in meta-analysis and returns a list of run ids.
##' Given a pft.xml object, a list of lists as supplied by get.sa.samples,
##' a name to distinguish the output files, and the directory to place the files.
##' @title Write ensemble configs
##' @param defaults pft
##' @param ensemble.samples list of lists supplied by \link{get.ensemble.samples}
##' @param settings list of PEcAn settings
##' @param model name of the model; used to dispatch the model-specific write.config function, e.g. \link{write.config.ED}
##' @param clean remove old output first?
##' @param write.to.db logical: should the run and ensemble information be written to the database?
##' @return data frame of runids, writes ensemble configuration files as a side effect
##' @export
##' @author David LeBauer, Carl Davidson
write.ensemble.configs <- function(defaults, ensemble.samples, settings,
model, clean=FALSE, write.to.db = TRUE){
my.write.config <- paste("write.config.",model,sep="")
if(!exists(my.write.config)){
print(paste(my.write.config,"does not exist"))
print(paste("please make sure that the PEcAn interface is loaded for",model))
stop()
}
# TODO RK : fix this since names have changed
if(clean){
## Remove old files
if(settings$run$host$name == 'localhost') {
if("ENS" %in% dir(settings$run$host$rundir)){
file.remove(paste(settings$run$host$rundir, '*',
get.run.id('ENS', '', pft.name=pft.name), '*"', sep=''))
}
} else {
ssh(settings$run$host$name, 'rm -f ', settings$run$host$rundir, '*',
get.run.id('ENS', '', pft.name=pft.name, '*'))
}
}
if(is.null(ensemble.samples)) return(NULL)
# Open connection to database so we can store all run/ensemble information
if(write.to.db){
con <- try(db.open(settings$database), silent=TRUE)
if(is.character(con)){
con <- NULL
}
} else {
con <- NULL
}
# Get the workflow id
if ("workflow" %in% names(settings)) {
workflow.id <- settings$workflow$id
} else {
workflow.id <- -1
}
# create an ensemble id
if (!is.null(con)) {
# write ensemble first
query.base(paste("INSERT INTO ensembles (created_at, runtype, workflow_id) values (NOW(), 'ensemble', ", workflow.id, ")", sep=''), con)
ensemble.id <- query.base(paste("SELECT LAST_INSERT_ID() AS ID"), con)[['ID']]
} else {
ensemble.id <- "NA"
}
# write configuration for each run of the ensemble
runs <- data.frame()
for(counter in 1:settings$ensemble$size) {
if (!is.null(con)) {
paramlist <- paste("ensemble=", counter, sep='')
query.base(paste("INSERT INTO runs (model_id, site_id, start_time, finish_time, outdir, created_at, ensemble_id, parameter_list) values ('", settings$model$id, "', '", settings$run$site$id, "', '", settings$run$start.date, "', '", settings$run$end.date, "', '",settings$run$outdir , "', NOW(), ", ensemble.id, ", '", paramlist, "')", sep=''), con)
run.id <- query.base(paste("SELECT LAST_INSERT_ID() AS ID"), con)[['ID']]
} else {
run.id <- get.run.id('ENS', left.pad.zeros(counter, 5))
}
runs[counter, 'id'] <- run.id
# create folders (cleaning up old ones if needed)
if(clean) {
unlink(file.path(settings$rundir, run.id))
unlink(file.path(settings$modeloutdir, run.id))
}
dir.create(file.path(settings$rundir, run.id), recursive=TRUE)
dir.create(file.path(settings$modeloutdir, run.id), recursive=TRUE)
# write run information to disk
cat("runtype : ensemble\n",
"workflow id : ", workflow.id, "\n",
"ensemble id : ", ensemble.id, "\n",
"run : ", counter, "/", settings$ensemble$size, "\n",
"run id : ", run.id, "\n",
"pft names : ", as.character(lapply(settings$pfts, function(x) x[['name']])), "\n",
"model : ", model, "\n",
"model id : ", settings$model$id, "\n",
"site : ", settings$run$site$name, "\n",
"site id : ", settings$run$site$id, "\n",
"met data : ", settings$run$site$met, "\n",
"start date : ", settings$run$start.date, "\n",
"end date : ", settings$run$end.date, "\n",
"hostname : ", settings$run$host$name, "\n",
"rundir : ", file.path(settings$run$host$rundir, run.id), "\n",
"outdir : ", file.path(settings$run$host$outdir, run.id), "\n",
file=file.path(settings$rundir, run.id, "README.txt"), sep='')
do.call(my.write.config,args=list(defaults,
lapply(ensemble.samples,function(x,n){x[n,]},n=counter),
settings, run.id))
cat(run.id, file=file.path(settings$rundir, "runs.txt"), sep="\n", append=TRUE)
}
if (!is.null(con)) {
db.close(con)
}
invisible(runs)
} ### End of function: write.ensemble.configs
#==================================================================================================#
|
BFS <- function (A, r=nrow(A)) {
if (!is.matrix(A) || nrow(A) != ncol(A)) stop("A must be a square matrix")
if (nrow(A) <= 1) return(matrix(0,nrow(A),ncol(A)))
if (nrow(A) < r) stop("r must be a natural number <= number of rows of A")
out <- 0*A
disc <- rep(FALSE, nrow(A))
disc[r] <- TRUE
queue <- r
while (length(queue) > 0) {
new_nb <- which(A[queue[1], ] > 0 & !disc)
out[queue[1], new_nb] = 1
queue <- c(queue[-1], new_nb)
disc[new_nb] = TRUE
}
out
}
##' Get list of paths from each vertex to a root
##'
treeTop <- function(A, r) {
if (!is.matrix(A) || nrow(A) != ncol(A)) stop("A must be a square matrix")
if (nrow(A) <= 1) return(matrix(0,nrow(A),ncol(A)))
if (nrow(A) < r) stop("r must be a natural number <= number of rows of A")
## add in check for cycles
out <- vector(length=nrow(A), mode="list")
## set up root node path
disc <- rep(FALSE, nrow(A))
disc[r] <- TRUE
curr_nb <- r
out[[r]] = r
while (!all(disc)) {
new_nb <- integer(0)
for (i in curr_nb) {
tmp <- c(setdiff(which(A[,i] > 0), which(disc)))
for (j in tmp) out[[j]] = c(out[[i]], j)
new_nb <- c(new_nb, tmp)
}
disc[new_nb] = TRUE
if (length(new_nb) == 0) break
curr_nb <- new_nb
}
out
}
bitwise <- function (x) {
out <- matrix(0, nrow=length(x), ncol=53)
for (i in seq_len(53)) {
wh <- which(x >= 2^-i)
out[wh,i] <- 1
x[wh] <- x[wh] - 2^-i
}
if (!is.null(dim(x))) dim(out) <- c(dim(x), 53)
out
}
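## Usage sketch (illustrative only; the adjacency matrix below is made up). Wrapped
## in `if (FALSE)` so that sourcing the package file does not run it.
if (FALSE) {
  A <- matrix(0, 4, 4)
  A[cbind(1:3, 2:4)] <- 1
  A <- A + t(A)        # undirected path graph 1-2-3-4
  BFS(A, r = 1)        # BFS tree rooted at 1: edges 1->2, 2->3, 3->4
  treeTop(A, r = 1)    # per-vertex paths from the root, e.g. [[4]] is c(1, 2, 3, 4)
  bitwise(0.625)       # binary expansion of 0.625: bits 1, 0, 1, then zeros
}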
|
/R/utility.R
|
no_license
|
rje42/dependence
|
R
| false | false | 1,512 |
r
|
BFS <- function (A, r=nrow(A)) {
if (!is.matrix(A) || nrow(A) != ncol(A)) stop("A must be a square matrix")
if (nrow(A) <= 1) return(matrix(0,nrow(A),ncol(A)))
if (nrow(A) < r) stop("r must be a natural number <= number of rows of A")
out <- 0*A
disc <- rep(FALSE, nrow(A))
disc[r] <- TRUE
queue <- r
while (length(queue) > 0) {
new_nb <- which(A[queue[1], ] > 0 & !disc)
out[queue[1], new_nb] = 1
queue <- c(queue[-1], new_nb)
disc[new_nb] = TRUE
}
out
}
##' Get list of paths from each vertex to a root
##'
treeTop <- function(A, r) {
if (!is.matrix(A) || nrow(A) != ncol(A)) stop("A must be a square matrix")
if (nrow(A) <= 1) return(matrix(0,nrow(A),ncol(A)))
if (nrow(A) < r) stop("r must be a natural number <= number of rows of A")
## add in check for cycles
out <- vector(length=nrow(A), mode="list")
## set up root node path
disc <- rep(FALSE, nrow(A))
disc[r] <- TRUE
curr_nb <- r
out[[r]] = r
while (!all(disc)) {
new_nb <- integer(0)
for (i in curr_nb) {
tmp <- c(setdiff(which(A[,i] > 0), which(disc)))
for (j in tmp) out[[j]] = c(out[[i]], j)
new_nb <- c(new_nb, tmp)
}
disc[new_nb] = TRUE
if (length(new_nb) == 0) break
curr_nb <- new_nb
}
out
}
bitwise <- function (x) {
out <- matrix(0, nrow=length(x), ncol=53)
for (i in seq_len(53)) {
wh <- which(x >= 2^-i)
out[wh,i] <- 1
x[wh] <- x[wh] - 2^-i
}
if (!is.null(dim(x))) dim(out) <- c(dim(x), 53)
out
}
|
options(stringsAsFactors=F)
library(GenomicRanges)
base = "70PRS/03PRSinput/GWAS/iPSYCHPGC_HG38_update_model1QC_EUR_only_noMHC_MAF005"
prs_base = read.table(base,header=T,sep="\t")
prs_base_GR = GRanges(prs_base$CHR,IRanges(prs_base$BP,prs_base$BP))
mcols(prs_base_GR) = prs_base[,c(3,4,6:9)]
prs_snp = read.table("70PRS/04PRS_WKDIR/prsice/rawPRS_CC_QC/ASD_SPARK_EUR_SPARK.snp",sep="\t",header=T)
prs_snp_GR = GRanges(paste0("chr",prs_snp$CHR),IRanges(prs_snp$BP,prs_snp$BP))
olap = findOverlaps(prs_base_GR,prs_snp_GR)
prs_base_GR = prs_base_GR[queryHits(olap)]
save(prs_base_GR,file="70PRS/03PRSinput/GWAS/iPSYCHPGC_HG38_update_model1QC_EUR_only_noMHC_MAF005.Rdata")
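# Note (assumption about the GWAS file layout): findOverlaps() only matches ranges
# whose seqnames agree exactly, so prs_base$CHR must already use the same "chr"-prefixed
# style that is added to prs_snp_GR above. A quick sanity check:
# intersect(seqlevels(prs_base_GR), seqlevels(prs_snp_GR))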
|
/PRS/7B_4.extractPRSSNP_fromBase.R
|
no_license
|
thewonlab/GWAS_ASD_SPARK
|
R
| false | false | 674 |
r
|
options(stringsAsFactors=F)
library(GenomicRanges)
base = "70PRS/03PRSinput/GWAS/iPSYCHPGC_HG38_update_model1QC_EUR_only_noMHC_MAF005"
prs_base = read.table(base,header=T,sep="\t")
prs_base_GR = GRanges(prs_base$CHR,IRanges(prs_base$BP,prs_base$BP))
mcols(prs_base_GR) = prs_base[,c(3,4,6:9)]
prs_snp = read.table("70PRS/04PRS_WKDIR/prsice/rawPRS_CC_QC/ASD_SPARK_EUR_SPARK.snp",sep="\t",header=T)
prs_snp_GR = GRanges(paste0("chr",prs_snp$CHR),IRanges(prs_snp$BP,prs_snp$BP))
olap = findOverlaps(prs_base_GR,prs_snp_GR)
prs_base_GR = prs_base_GR[queryHits(olap)]
save(prs_base_GR,file="70PRS/03PRSinput/GWAS/iPSYCHPGC_HG38_update_model1QC_EUR_only_noMHC_MAF005.Rdata")
|
# Author: Ing. Adrian Huerta
rm(list = ls())
`%>%` = magrittr::`%>%`
path = "C:/Fernando Pastor/Adrian_scripts/02_entregable/02_gridded/scripts/"
setwd(path)
## Functions
source('functions.R')
path2 = "C:/Fernando Pastor/Adrian_scripts/02_entregable/02_gridded/dataset/OBS/TO_send/"
setwd(path2)
## Observed dekadal minimum temperature data
datos_dek <- read.csv("TN_dekadal_spatiats.csv")
## Convert to spatial data
datos_obs <- to_spdf(datos = datos_dek, fecha = "X2019.06.1")
path3="C:/Fernando Pastor/Adrian_scripts/02_entregable/02_gridded/results/Dekadal_Normals/"
setwd(path3)
## Select the climatology according to variable and dekad
cov <- cov_clim(direccion = path3,
variable = "TN",
dekada = "d_06.1")
## IDW interpolation of anomalies and derivation of temperature
temp_dk <- temp_dek(Obj_sp = datos_obs,
Cov = cov)
## plot
sp::spplot(temp_dk)
## save
# raster::writeRaster(x = temp_dk,
# file = "temp_dk.tif",
# format = "GTiff")
### report figures
pG <- raster::extract(temp_dk, datos_obs) %>%
round(2)
plot(datos_obs@data$X2019.06.1, pG,
ylab = "punto de grilla",
xlab = "punto de estación",
cex = 1.5, pch = 21)
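# Quick agreement summary between station and grid values (illustrative addition;
# assumes both vectors are numeric and aligned as in the plot above):
# sqrt(mean((pG - datos_obs@data$X2019.06.1)^2, na.rm = TRUE)) # RMSE
# cor(pG, datos_obs@data$X2019.06.1, use = "complete.obs") # correlation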
path4 = "C:/Fernando Pastor/Adrian_scripts/PER_adm/"
setwd(path4)
per_shp0 <- raster::shapefile("PER_adm1.shp")
per_shp <- per_shp0 %>% broom::tidy()
lk_shp0 <- raster::shapefile("Lagos_Principales.shp")
lk_shp <- lk_shp0 %>% broom::tidy()
raster::as.data.frame(temp_dk, xy = TRUE) %>%
ggplot2::ggplot() +
ggplot2::geom_tile(ggplot2::aes(x = x, y = y, fill = res)) +
ggplot2::scale_fill_gradientn("T°C", colours = c(topo.colors(10)),
na.value = "gray20",
limits = c(-20, 20)) +
ggplot2::geom_polygon(data = per_shp, ggplot2::aes(x = long, y = lat, group = group),
fill = NA, colour = "gray20", size = 0.5) +
ggplot2::geom_polygon(data = lk_shp, ggplot2::aes(x = long, y = lat, group = group),
fill = "gray20", colour = "gray20", size = .5) +
ggplot2::theme_classic(base_size = 17) +
ggplot2::coord_quickmap(xlim = c(-72.92, -68),
ylim = c(-18.03, -13.31), expand = FALSE) +
ggplot2::labs(x = "", y = "")
path5="C:/Fernando Pastor/Adrian_scripts/02_entregable/02_gridded/results/Figures/"
ggplot2::ggsave(paste0(path5,"example.png"),
dpi = 100, scale = 0.75, height = 9, width = 9)
|
/example.R
|
no_license
|
jonathan123pastor/Temperatures_by_Adrian_Huerta
|
R
| false | false | 2,526 |
r
|
# Author: Ing. Adrian Huerta
rm(list = ls())
`%>%` = magrittr::`%>%`
path = "C:/Fernando Pastor/Adrian_scripts/02_entregable/02_gridded/scripts/"
setwd(path)
## Functions
source('functions.R')
path2 = "C:/Fernando Pastor/Adrian_scripts/02_entregable/02_gridded/dataset/OBS/TO_send/"
setwd(path2)
## Observed dekadal minimum temperature data
datos_dek <- read.csv("TN_dekadal_spatiats.csv")
## Convert to spatial data
datos_obs <- to_spdf(datos = datos_dek, fecha = "X2019.06.1")
path3="C:/Fernando Pastor/Adrian_scripts/02_entregable/02_gridded/results/Dekadal_Normals/"
setwd(path3)
## Select the climatology according to variable and dekad
cov <- cov_clim(direccion = path3,
variable = "TN",
dekada = "d_06.1")
## IDW interpolation of anomalies and derivation of temperature
temp_dk <- temp_dek(Obj_sp = datos_obs,
Cov = cov)
## plot
sp::spplot(temp_dk)
## save
# raster::writeRaster(x = temp_dk,
# file = "temp_dk.tif",
# format = "GTiff")
### report figures
pG <- raster::extract(temp_dk, datos_obs) %>%
round(2)
plot(datos_obs@data$X2019.06.1, pG,
ylab = "punto de grilla",
xlab = "punto de estación",
cex = 1.5, pch = 21)
path4 = "C:/Fernando Pastor/Adrian_scripts/PER_adm/"
setwd(path4)
per_shp0 <- raster::shapefile("PER_adm1.shp")
per_shp <- per_shp0 %>% broom::tidy()
lk_shp0 <- raster::shapefile("Lagos_Principales.shp")
lk_shp <- lk_shp0 %>% broom::tidy()
raster::as.data.frame(temp_dk, xy = TRUE) %>%
ggplot2::ggplot() +
ggplot2::geom_tile(ggplot2::aes(x = x, y = y, fill = res)) +
ggplot2::scale_fill_gradientn("T°C", colours = c(topo.colors(10)),
na.value = "gray20",
limits = c(-20, 20)) +
ggplot2::geom_polygon(data = per_shp, ggplot2::aes(x = long, y = lat, group = group),
fill = NA, colour = "gray20", size = 0.5) +
ggplot2::geom_polygon(data = lk_shp, ggplot2::aes(x = long, y = lat, group = group),
fill = "gray20", colour = "gray20", size = .5) +
ggplot2::theme_classic(base_size = 17) +
ggplot2::coord_quickmap(xlim = c(-72.92, -68),
ylim = c(-18.03, -13.31), expand = FALSE) +
ggplot2::labs(x = "", y = "")
path5="C:/Fernando Pastor/Adrian_scripts/02_entregable/02_gridded/results/Figures/"
ggplot2::ggsave(paste0(path5,"example.png"),
dpi = 100, scale = 0.75, height = 9, width = 9)
|
x <- read.csv("household_power_consumption.txt", sep = ";")
simple <-
subset(x,
Date == "1/2/2007" |
Date == "2/2/2007",
select = c(Global_active_power))
n <- simple$Global_active_power
png("plot1.png", width = 480, height = 480)
hist(
x = as.numeric(levels(n))[n],
col = "Red",
main = "Global Active Power",
xlab = "Global Active Power (kilowatts)"
)
dev.off()
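# Note on the as.numeric(levels(n))[n] idiom above: Global_active_power comes in as a
# factor (character data plus "?" missing-value markers under the old stringsAsFactors
# default), and the idiom converts the factor labels back to numbers. An equivalent
# sketch (assuming the same file layout) that avoids the factor step:
# x <- read.csv("household_power_consumption.txt", sep = ";",
#               na.strings = "?", stringsAsFactors = FALSE)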
|
/plot_1.R
|
no_license
|
kvnch/ExData_Plotting1
|
R
| false | false | 405 |
r
|
x <- read.csv("household_power_consumption.txt", sep = ";")
simple <-
subset(x,
Date == "1/2/2007" |
Date == "2/2/2007",
select = c(Global_active_power))
n <- simple$Global_active_power
png("plot1.png", width = 480, height = 480)
hist(
x = as.numeric(levels(n))[n],
col = "Red",
main = "Global Active Power",
xlab = "Global Active Power (kilowatts)"
)
dev.off()
|
#' Identify phosphorylation regulation influence downstream
#'
#' This function identifies the downstream regulation influence
#' of phosphoprotein regulation for further downstream analysis steps.
#'
#' @param data_omics_plus output list of readPWdata function; first element
#' contains an OmicsData object, second element the genelist data corresponding
#' to the selected pathway database.
#' @return OmicsData object: list of 4 elements (OmicsD, PathwayD, TFtargetsD,
#' Status); OmicsD containing omics data set + results (after analysis);
#' PathwayD containing selected pathway databases + biopax model;
#' TFtargetsD containing selected TF target gene databases + TF target gene data.
#' @keywords manip
#' @export
#' @examples
#' \dontrun{
#' data(OmicsExampleData)
#' data_omics = readOmics(tp_prots = c(0.25, 1, 4, 8, 13, 18, 24),
#' tp_genes = c(1, 4, 8, 13, 18, 24), OmicsExampleData,
#' PWdatabase = c("biocarta", "kegg", "nci", "reactome"),
#' TFtargetdatabase = c("userspec"))
#' data_omics = readPhosphodata(data_omics,
#' phosphoreg = system.file("extdata", "phospho_reg_table.txt",
#' package = "pwOmics"))
#' data_omics = readTFdata(data_omics,
#' TF_target_path = system.file("extdata", "TF_targets.txt",
#' package = "pwOmics"))
#' data_omics_plus = readPWdata(data_omics,
#' loadgenelists = system.file("extdata/Genelists", package = "pwOmics"))
#' data_omics_plus = identifyPR(data_omics_plus)
#' }
identifyPR <- function(data_omics_plus){
updown = NULL
for(s in 1:length(data_omics_plus[[1]][[1]][[1]][[1]][[1]]))
{
updown = data_omics_plus[[1]][[5]][match(as.character(data_omics_plus[[1]][[1]][[2]][[1]][[s]][,1]), as.character(data_omics_plus[[1]][[5]][,1])),2]
data_omics_plus[[1]][[1]][[2]][[1]][[s]] = cbind(data_omics_plus[[1]][[1]][[2]][[1]][[s]], updown)
}
message("Phosphoprotein downstream regulation information is
considered in downstream analysis. \n")
return(data_omics_plus)
}
#' Identify pathway IDs and pathway names of differentially abundant proteins
#'
#' This function identifies the pathways of the differentially abundant
#' phosphoproteins dependent on the chosen database.
#' Requires rBiopaxParser package. Takes a
#' lot of time for a high number of proteins and/or if all databases are chosen.
#' First, chosen databases are loaded, then new internal pathway IDs are
#' generated.
#' Afterwards the genelists of the different databases are loaded or generated,
#' depending on the loadgenelists option. After pathway identification for the
#' reference time point, also pathway identification for different time points
#' is performed. Pathway ID mapping takes some time, especially for such big
#' databases as reactome, so use savegenelists and loadgenelists for easier and
#' faster usage...
#'
#' @param data_omics_plus output list of readPWdata function; first element
#' contains an OmicsData object, secons element the genelist data corresponding
#' to the selected pathway database.
#' @return OmicsData object: list of 4 elements (OmicsD, PathwayD, TFtargetsD,
#' Status); OmicsD containing omics data set + results (after analysis);
#' PathwayD containing selected pathway databases + biopax model;
#' TFtargetsD containing selected TF target gene databases + TF target gene data.
#' @keywords manip
#' @export
#' @examples
#' \dontrun{
#' data(OmicsExampleData)
#' data_omics = readOmics(tp_prots = c(0.25, 1, 4, 8, 13, 18, 24),
#' tp_genes = c(1, 4, 8, 13, 18, 24), OmicsExampleData,
#' PWdatabase = c("biocarta", "kegg", "nci", "reactome"),
#' TFtargetdatabase = c("userspec"))
#' data_omics = readPhosphodata(data_omics,
#' phosphoreg = system.file("extdata", "phospho_reg_table.txt",
#' package = "pwOmics"))
#' data_omics = readTFdata(data_omics,
#' TF_target_path = system.file("extdata", "TF_targets.txt",
#' package = "pwOmics"))
#' data_omics_plus = readPWdata(data_omics,
#' loadgenelists = system.file("extdata/Genelists", package = "pwOmics"))
#'
#' data_omics_plus = identifyPR(data_omics_plus)
#' setwd(system.file("extdata/Genelists", package = "pwOmics"))
#' data_omics = identifyPWs(data_omics_plus)
#' }
identifyPWs <- function(data_omics_plus){
if(data_omics_plus[[1]][[4]] == 1)
{stop("Please read in the omics data set and both pathway
database information and TF-target gene information first with
readOmics, readTFdata and readPWdata functions.")}
if(class(data_omics_plus[[1]]) != "OmicsData")
{stop("'data_omics_plus[[1]]' is not an OmicsData
object.")}
if(length(data_omics_plus[[2]]) < length(data_omics_plus[[1]][[2]][[1]]))
{stop("'data_omics_plus[[2]]' does not contain all genelists of the
selected pathway databases. Please check if all genelists are
present in the working directory and if necessary run readPWdata
again with loadgenelists set to FALSE.")}
data_omics = PWidentallprots(data_omics_plus[[1]], data_omics_plus[[2]])
message("Pathways are identified for all proteins measured. \n")
data_omics = PWidenttps(data_omics)
message("Pathways are identified for the different timepoints. \n")
if(length(data_omics[[1]][[3]][[1]])!= 0 &
length(data_omics[[1]][[3]][[2]])!= 0)
{data_omics[[4]] = data_omics[[4]] +1
}else{
data_omics[[4]] = data_omics[[4]]
}
return(data_omics)
}
#' Identify TFs in pathways and their target genes - downstream
#' analysis.
#'
#' This function identifies the transcription factors being part of the
#' pathways of downstream analysis. Subsequently it finds the target genes of
#' these transcription factors from the selected TF-target gene database.
#'
#' @param data_omics OmicsData object.
#' @param updown boolean value; TRUE in case up- and downregulation should be
#' checked individually for intersection; FALSE = default, if only
#' deregulation should be checked for.
#' @return OmicsData object: list of 4 elements (OmicsD, PathwayD, TFtargetsD,
#' Status); OmicsD containing omics data set + results (after analysis);
#' PathwayD containing selected pathway databases + biopax model;
#' TFtargetsD containing selected TF target gene databases + TF target gene data.
#' @keywords manip
#' @export
#' @examples
#' \dontrun{
#' data(OmicsExampleData)
#' data_omics = readOmics(tp_prots = c(0.25, 1, 4, 8, 13, 18, 24),
#' tp_genes = c(1, 4, 8, 13, 18, 24), OmicsExampleData,
#' PWdatabase = c("biocarta", "kegg", "nci", "reactome"),
#' TFtargetdatabase = c("userspec"))
#' data_omics = readPhosphodata(data_omics,
#' phosphoreg = system.file("extdata", "phospho_reg_table.txt",
#' package = "pwOmics"))
#' data_omics = readTFdata(data_omics,
#' TF_target_path = system.file("extdata", "TF_targets.txt",
#' package = "pwOmics"))
#' data_omics_plus = readPWdata(data_omics,
#' loadgenelists = system.file("extdata/Genelists", package = "pwOmics"))
#' data_omics_plus = identifyPR(data_omics_plus)
#' setwd(system.file("extdata/Genelists", package = "pwOmics"))
#'
#' data_omics = identifyPWs(data_omics_plus)
#' data_omics = identifyTFs(data_omics)
#' data_omics = identifyPWTFTGs(data_omics)
#' }
identifyPWTFTGs <- function(data_omics, updown = FALSE) {
if(class(data_omics) != "OmicsData")
{stop("Parameter 'data_omics' is not an OmicsData object.")}
genelists = loadGenelists()
if(length(genelists) < length(data_omics[[2]][[1]]))
{stop("Current working directory does not contain all genelists of
the selected pathway databases. Please check if all genelists are
present in the working directory and if necessary run readPWdata
again with loadgenelists set to FALSE.")}
message("Genelists of databases are loaded/generated. \n\n")
pathwayIDs = pathwayNames = NULL
for(plen in 1: length(data_omics[[1]][[1]][[1]][[1]]))
{
PWinfo = preparePWinfo(data_omics, plen)
data_omics = PWinfo[[1]]
tps_PWs = unique(PWinfo[[2]])
PWofinterest = PWinfo[[3]]
if(length(tps_PWs) != 0)
{genelist_n = apply(rbindlist(genelists),2,as.character)
genes_PW = list()
for(k in 1: dim(tps_PWs)[1])
{ ind_genesPW = vector()
ind_genesPW = which(as.character(genelist_n[,2]) ==
as.character(tps_PWs[,pathwayIDs])[k])
genes_PW[[k]] = unique(genelist_n[ind_genesPW,1])
genes_PW[[k]] = data.frame(genes_PW = genes_PW[[k]], "upreg" = rep(tps_PWs[,upreg][k], times = length(genes_PW[[k]])),
"phosphoeffect" = rep(tps_PWs[,phosphoeffect][k], times = length(genes_PW[[k]]))) #new
}
names(genes_PW) = tps_PWs[,pathwayNames]
genes_PW_ov = genes_PW
message("Gene sets of pathways are identified: ",
names(data_omics[[1]][[2]][[1]][plen]), "\n")
temp_genelist = do.call("rbind", genes_PW) #new
temp_lists = identTFTGsinPWs(data_omics, temp_genelist)
temp_Genelist = temp_lists[[1]]
temp_targetlist = apply(temp_lists[[2]], 2, as.character)
colnames(temp_Genelist) = c("genes_PW","upreg", "phosphoeffect", "TF_PW")
data_omics[[1]][[3]][[1]][[plen+1]][[length(data_omics[[1]][[3]][[1]][[plen+1]])+1]] =
temp_Genelist
names( data_omics[[1]][[3]][[1]][[plen+1]][length(data_omics[[1]][[3]][[1]][[plen+1]])]) =
"Genelist_PW"
data_omics[[1]][[3]][[1]][[plen+1]][[length(data_omics[[1]][[3]][[1]][[plen+1]])+1]] =
temp_targetlist
names( data_omics[[1]][[3]][[1]][[plen+1]][length(data_omics[[1]][[3]][[1]][[plen+1]])]) =
"Target_Genelist_PW"
message("Transcription factors in pathways of downstream analysis are identified: ",
names(data_omics[[1]][[2]][[1]][plen]), "\n")
message("Target genes of TFs in downstream analysis are identified: ",
names(data_omics[[1]][[2]][[1]][plen]), "\n\n")
}else{
message(paste("No downstream signaling could be found for time point ",
data_omics[[1]][[1]][[1]][[1]][[plen]],"\n", sep = ""))
}
}
return(data_omics)
}
|
/R/pwOmics_downstream_analysis.R
|
no_license
|
MarenS2/pwOmics
|
R
| false | false | 10,359 |
r
|
#' Identify phosphorylation regulation influence downstream
#'
#' This function identifies the downstream regulation influence
#' of phosphoprotein regulation for further downstream analysis steps.
#'
#' @param data_omics_plus output list of readPWdata function; first element
#' contains an OmicsData object, second element the genelist data corresponding
#' to the selected pathway database.
#' @return OmicsData object: list of 4 elements (OmicsD, PathwayD, TFtargetsD,
#' Status); OmicsD containing omics data set + results (after analysis);
#' PathwayD containing selected pathway databases + biopax model;
#' TFtargetsD containing selected TF target gene databases + TF target gene data.
#' @keywords manip
#' @export
#' @examples
#' \dontrun{
#' data(OmicsExampleData)
#' data_omics = readOmics(tp_prots = c(0.25, 1, 4, 8, 13, 18, 24),
#' tp_genes = c(1, 4, 8, 13, 18, 24), OmicsExampleData,
#' PWdatabase = c("biocarta", "kegg", "nci", "reactome"),
#' TFtargetdatabase = c("userspec"))
#' data_omics = readPhosphodata(data_omics,
#' phosphoreg = system.file("extdata", "phospho_reg_table.txt",
#' package = "pwOmics"))
#' data_omics = readTFdata(data_omics,
#' TF_target_path = system.file("extdata", "TF_targets.txt",
#' package = "pwOmics"))
#' data_omics_plus = readPWdata(data_omics,
#' loadgenelists = system.file("extdata/Genelists", package = "pwOmics"))
#' data_omics_plus = identifyPR(data_omics_plus)
#' }
identifyPR <- function(data_omics_plus){
updown = NULL
for(s in 1:length(data_omics_plus[[1]][[1]][[1]][[1]][[1]]))
{
updown = data_omics_plus[[1]][[5]][match(as.character(data_omics_plus[[1]][[1]][[2]][[1]][[s]][,1]), as.character(data_omics_plus[[1]][[5]][,1])),2]
data_omics_plus[[1]][[1]][[2]][[1]][[s]] = cbind(data_omics_plus[[1]][[1]][[2]][[1]][[s]], updown)
}
message("Phosphoprotein downstream regulation information is
considered in downstream analysis. \n")
return(data_omics_plus)
}
#' Identify pathway IDs and pathway names of differentially abundant proteins
#'
#' This function identifies the pathways of the differentially abundant
#' phosphoproteins dependent on the chosen database.
#' Requires rBiopaxParser package. Takes a
#' lot of time for a high number of proteins and/or if all databases are chosen.
#' First, chosen databases are loaded, then new internal pathway IDs are
#' generated.
#' Afterwards the genelists of the different databases are loaded or generated,
#' depending on the loadgenelists option. After pathway identification for the
#' reference time point, also pathway identification for different time points
#' is performed. Pathway ID mapping takes some time, especially for such big
#' databases as reactome, so use savegenelists and loadgenelists for easier and
#' faster usage...
#'
#' @param data_omics_plus output list of readPWdata function; first element
#' contains an OmicsData object, secons element the genelist data corresponding
#' to the selected pathway database.
#' @return OmicsData object: list of 4 elements (OmicsD, PathwayD, TFtargetsD,
#' Status); OmicsD containing omics data set + results (after analysis);
#' PathwayD containing selected pathway databases + biopax model;
#' TFtargetsD containing selected TF target gene databases + TF target gene data.
#' @keywords manip
#' @export
#' @examples
#' \dontrun{
#' data(OmicsExampleData)
#' data_omics = readOmics(tp_prots = c(0.25, 1, 4, 8, 13, 18, 24),
#' tp_genes = c(1, 4, 8, 13, 18, 24), OmicsExampleData,
#' PWdatabase = c("biocarta", "kegg", "nci", "reactome"),
#' TFtargetdatabase = c("userspec"))
#' data_omics = readPhosphodata(data_omics,
#' phosphoreg = system.file("extdata", "phospho_reg_table.txt",
#' package = "pwOmics"))
#' data_omics = readTFdata(data_omics,
#' TF_target_path = system.file("extdata", "TF_targets.txt",
#' package = "pwOmics"))
#' data_omics_plus = readPWdata(data_omics,
#' loadgenelists = system.file("extdata/Genelists", package = "pwOmics"))
#'
#' data_omics_plus = identifyPR(data_omics_plus)
#' setwd(system.file("extdata/Genelists", package = "pwOmics"))
#' data_omics = identifyPWs(data_omics_plus)
#' }
identifyPWs <- function(data_omics_plus){
if(data_omics_plus[[1]][[4]] == 1)
{stop("Please read in the omics data set and both pathway
database information and TF-target gene information first with
readOmics, readTFdata and readPWdata functions.")}
if(class(data_omics_plus[[1]]) != "OmicsData")
{stop("'data_omics_plus[[1]]' is not an OmicsData
object.")}
if(length(data_omics_plus[[2]]) < length(data_omics_plus[[1]][[2]][[1]]))
{stop("'data_omics_plus[[2]]' does not contain all genelists of the
selected pathway databases. Please check if all genelists are
present in the working directory and if necessary run readPWdata
again with loadgenelists set to FALSE.")}
data_omics = PWidentallprots(data_omics_plus[[1]], data_omics_plus[[2]])
message("Pathways are identified for all proteins measured. \n")
data_omics = PWidenttps(data_omics)
message("Pathways are identified for the different timepoints. \n")
if(length(data_omics[[1]][[3]][[1]])!= 0 &
length(data_omics[[1]][[3]][[2]])!= 0)
{data_omics[[4]] = data_omics[[4]] +1
}else{
data_omics[[4]] = data_omics[[4]]
}
return(data_omics)
}
#' Identify TFs in pathways and their target genes - downstream
#' analysis.
#'
#' This function identifies the transcription factors being part of the
#' pathways of downstream analysis. Subsequently it finds the target genes of
#' these transcription factors from the selected TF-target gene database.
#'
#' @param data_omics OmicsData object.
#' @param updown boolean value; TRUE in case up- and downregulation should be
#' checked individually for intersection; FALSE = default, if only
#' deregulation should be checked for.
#' @return OmicsData object: list of 4 elements (OmicsD, PathwayD, TFtargetsD,
#' Status); OmicsD containing omics data set + results (after analysis);
#' PathwayD containing selected pathway databases + biopax model;
#' TFtargetsD containing selected TF target gene databases + TF target gene data.
#' @keywords manip
#' @export
#' @examples
#' \dontrun{
#' data(OmicsExampleData)
#' data_omics = readOmics(tp_prots = c(0.25, 1, 4, 8, 13, 18, 24),
#' tp_genes = c(1, 4, 8, 13, 18, 24), OmicsExampleData,
#' PWdatabase = c("biocarta", "kegg", "nci", "reactome"),
#' TFtargetdatabase = c("userspec"))
#' data_omics = readPhosphodata(data_omics,
#' phosphoreg = system.file("extdata", "phospho_reg_table.txt",
#' package = "pwOmics"))
#' data_omics = readTFdata(data_omics,
#' TF_target_path = system.file("extdata", "TF_targets.txt",
#' package = "pwOmics"))
#' data_omics_plus = readPWdata(data_omics,
#' loadgenelists = system.file("extdata/Genelists", package = "pwOmics"))
#' data_omics_plus = identifyPR(data_omics_plus)
#' setwd(system.file("extdata/Genelists", package = "pwOmics"))
#'
#' data_omics = identifyPWs(data_omics_plus)
#' data_omics = identifyTFs(data_omics)
#' data_omics = identifyPWTFTGs(data_omics)
#' }
identifyPWTFTGs <- function(data_omics, updown = FALSE) {
if(class(data_omics) != "OmicsData")
{stop("Parameter 'data_omics' is not an OmicsData object.")}
genelists = loadGenelists()
if(length(genelists) < length(data_omics[[2]][[1]]))
{stop("Current working directory does not contain all genelists of
the selected pathway databases. Please check if all genelists are
present in the working directory and if necessary run readPWdata
again with loadgenelists set to FALSE.")}
message("Genelists of databases are loaded/generated. \n\n")
pathwayIDs = pathwayNames = NULL
for(plen in 1: length(data_omics[[1]][[1]][[1]][[1]]))
{
PWinfo = preparePWinfo(data_omics, plen)
data_omics = PWinfo[[1]]
tps_PWs = unique(PWinfo[[2]])
PWofinterest = PWinfo[[3]]
if(length(tps_PWs) != 0)
{genelist_n = apply(rbindlist(genelists),2,as.character)
genes_PW = list()
for(k in 1: dim(tps_PWs)[1])
{ ind_genesPW = vector()
ind_genesPW = which(as.character(genelist_n[,2]) ==
as.character(tps_PWs[,pathwayIDs])[k])
genes_PW[[k]] = unique(genelist_n[ind_genesPW,1])
genes_PW[[k]] = data.frame(genes_PW = genes_PW[[k]], "upreg" = rep(tps_PWs[,upreg][k], times = length(genes_PW[[k]])),
"phosphoeffect" = rep(tps_PWs[,phosphoeffect][k], times = length(genes_PW[[k]]))) #new
}
names(genes_PW) = tps_PWs[,pathwayNames]
genes_PW_ov = genes_PW
message("Gene sets of pathways are identified: ",
names(data_omics[[1]][[2]][[1]][plen]), "\n")
temp_genelist = do.call("rbind", genes_PW) #new
temp_lists = identTFTGsinPWs(data_omics, temp_genelist)
temp_Genelist = temp_lists[[1]]
temp_targetlist = apply(temp_lists[[2]], 2, as.character)
colnames(temp_Genelist) = c("genes_PW","upreg", "phosphoeffect", "TF_PW")
data_omics[[1]][[3]][[1]][[plen+1]][[length(data_omics[[1]][[3]][[1]][[plen+1]])+1]] =
temp_Genelist
names( data_omics[[1]][[3]][[1]][[plen+1]][length(data_omics[[1]][[3]][[1]][[plen+1]])]) =
"Genelist_PW"
data_omics[[1]][[3]][[1]][[plen+1]][[length(data_omics[[1]][[3]][[1]][[plen+1]])+1]] =
temp_targetlist
names( data_omics[[1]][[3]][[1]][[plen+1]][length(data_omics[[1]][[3]][[1]][[plen+1]])]) =
"Target_Genelist_PW"
message("Transcription factors in pathways of downstream analysis are identified: ",
names(data_omics[[1]][[2]][[1]][plen]), "\n")
message("Target genes of TFs in downstream analysis are identified: ",
names(data_omics[[1]][[2]][[1]][plen]), "\n\n")
}else{
message(paste("No downstream signaling could be found for time point ",
data_omics[[1]][[1]][[1]][[1]][[plen]],"\n", sep = ""))
}
}
return(data_omics)
}
|
##
### ---------------
###
### Create: Jianming Zeng
### Date: 2019-07-24 15:03:19
### Email: jmzeng1314@163.com
### Blog: http://www.bio-info-trainee.com/
### Forum: http://www.biotrainee.com/thread-1376-1-1.html
### CAFS/SUSTC/Eli Lilly/University of Macau
### Update Log: 2019-07-24 First version
###
### ---------------
# Check the expression level of individual genes
rm(list = ls()) # clear the environment
#load all the necessary libraries
options(warn=-1) # turn off warning message globally
suppressMessages(library(Seurat))
# First load the cell clustering results produced earlier with the Seurat package
start_time <- Sys.time()
load('~/Documents/10x/patient1.tumor.output.Rdata')
end_time <- Sys.time()
end_time - start_time
# ~21 seconds
TSNEPlot(tumor, group.by = 'cellTypes', colors.use = c('#EF8A62', '#67A9CF'))
count_matrix=tumor@data
count_matrix[1:4,1:4]
cluster=tumor@meta.data$cellTypes
table(cluster)
allGenes = row.names(tumor@raw.data)
allGenes[grep('HLA',allGenes)]
FeaturePlot(object = tumor,
features.plot ='HLA-A',
cols.use = c("grey", "blue"),
reduction.use = "tsne")
table(count_matrix['HLA-A',]>0, cluster)
# HLA-A shows no clear difference before vs. after immunotherapy
FeaturePlot(object = tumor,
features.plot ='HLA-B',
cols.use = c("grey", "blue"),
reduction.use = "tsne")
table(count_matrix['HLA-B',]>0, cluster)
# HLA-B differs markedly before vs. after immunotherapy.
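# A quick way to put numbers on that difference (illustrative; column order follows
# the levels of `cluster`):
tab <- table(count_matrix['HLA-B',] > 0, cluster)
prop.table(tab, margin = 2) # fraction of cells expressing HLA-B per group
chisq.test(tab)             # formal test of the pre/post difference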
|
/step3-HLA-in-tumor-of-patient1.R
|
no_license
|
zhaohongqiangsoliva/scRNA_10X
|
R
| false | false | 1,462 |
r
|
##
### ---------------
###
### Create: Jianming Zeng
### Date: 2019-07-24 15:03:19
### Email: jmzeng1314@163.com
### Blog: http://www.bio-info-trainee.com/
### Forum: http://www.biotrainee.com/thread-1376-1-1.html
### CAFS/SUSTC/Eli Lilly/University of Macau
### Update Log: 2019-07-24 First version
###
### ---------------
# Check the expression level of individual genes
rm(list = ls()) # clear the environment
#load all the necessary libraries
options(warn=-1) # turn off warning message globally
suppressMessages(library(Seurat))
# First load the cell clustering results produced earlier with the Seurat package
start_time <- Sys.time()
load('~/Documents/10x/patient1.tumor.output.Rdata')
end_time <- Sys.time()
end_time - start_time
# ~21 seconds
TSNEPlot(tumor, group.by = 'cellTypes', colors.use = c('#EF8A62', '#67A9CF'))
count_matrix=tumor@data
count_matrix[1:4,1:4]
cluster=tumor@meta.data$cellTypes
table(cluster)
allGenes = row.names(tumor@raw.data)
allGenes[grep('HLA',allGenes)]
FeaturePlot(object = tumor,
features.plot ='HLA-A',
cols.use = c("grey", "blue"),
reduction.use = "tsne")
table(count_matrix['HLA-A',]>0, cluster)
# HLA-A shows no clear difference before vs. after immunotherapy
FeaturePlot(object = tumor,
features.plot ='HLA-B',
cols.use = c("grey", "blue"),
reduction.use = "tsne")
table(count_matrix['HLA-B',]>0, cluster)
# HLA-B differs markedly before vs. after immunotherapy.
|
library(SenSrivastava)
### Name: E9.11
### Title: Data on Transit Privatization
### Aliases: E9.11
### Keywords: datasets
### ** Examples
data(E9.11)
summary(E9.11)
plot(E9.11)
|
/data/genthat_extracted_code/SenSrivastava/examples/E9.11.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 184 |
r
|
library(SenSrivastava)
### Name: E9.11
### Title: Data on Transit Privatization
### Aliases: E9.11
### Keywords: datasets
### ** Examples
data(E9.11)
summary(E9.11)
plot(E9.11)
|
#' Check Levels
#'
#' Checks the levels in a factor including the order and
#' whether other levels are permitted.
#'
#' @param x The object to check.
#' @param levels A character vector of the levels.
#' @param exclusive A flag indicating whether other levels are not permitted.
#' @param order A flag indicating whether the object levels have to occur in the same order as names. To check whether x is an ordered factor use
#' \code{check_vector(x, ordered(1))}.
#' @param x_name A string of the name of the object.
#' @param error A flag indicating whether to throw an informative error or immediately generate an informative message if the check fails.
#' @return An invisible copy of x (if it doesn't throw an error).
#' @seealso \code{\link{check_nlevels}} and \code{\link{check_vector}}
#' @export
#'
#' @examples
#' check_levels(1, c("x", "y"), error = FALSE)
#' check_levels(factor(1), c("x", "y"), error = FALSE)
check_levels <- function(x, levels, exclusive = TRUE, order = TRUE,
x_name = substitute(x),
error = TRUE) {
x_name <- chk_deparse(x_name)
check_vector(levels, "", length = c(1L, chk_max_int()), unique = FALSE, named = FALSE)
check_flag_internal(exclusive)
check_flag_internal(order)
check_flag_internal(error)
x_levels <- levels(x)
if(is.null(x_levels)) chk_fail(x_name, " must have a levels attribute", error = error)
if (exclusive) {
if (order) {
if (!identical(x_levels, levels))
chk_fail(x_name, " levels must be identical to ", cc_and(levels), error = error)
} else {
if (!identical(sort(x_levels), sort(levels)))
chk_fail(x_name, " levels must include and only include ", cc_and(levels), error = error)
}
} else {
x_levels <- x_levels[x_levels %in% levels]
if (order) {
if (!identical(x_levels, levels))
chk_fail(x_name, " levels must include ", cc_and(levels), " in that order", error = error)
} else {
if (!identical(sort(x_levels), sort(levels)))
chk_fail(x_name, " levels must include ", cc_and(levels), error = error)
}
}
invisible(x)
}
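# Illustrative usage (a sketch, not part of the package source); the objects
# below are made up for demonstration only.
# x <- factor("a", levels = c("a", "b", "c"))
# check_levels(x, c("a", "b", "c"), error = FALSE)                 # passes: levels identical
# check_levels(x, c("a", "b"), exclusive = FALSE, error = FALSE)   # passes: extra level "c" permitted
# check_levels(x, c("a", "b"), error = FALSE)                      # fails: level "c" is not permitted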
|
/R/levels.R
|
permissive
|
cran/checkr
|
R
| false | false | 2,137 |
r
|
#' Returns the relevant featureIds for a given geneset.
#'
#' @description
#' Gene sets are defined by the unique compound key consisting of their
#' `collection` and `name`. To fetch the featureIds associated with
#' a specific geneset, you must provide values for `i` and `j`. If
#' these are missing, then a character vector of all the unique feature ids
#' within `x` are returned.
#'
#' If the GeneSetDb `x` has been conformed to an expression object this
#' will default to return only the feature_id's that are matched to the target
#' expression object, and they will be returned using the same identifiers that
#' the target expression object uses. To change this behavior, tweak the values
#' for the `active.only` and `value` parameters, respectively.
#'
#' `x` can be either a `GeneSetDb` or a `SparrowResult`. If it's the latter,
#' then this call simply delegates to the internal `GeneSetDb`.
#'
#' @rdname featureIds
#' @exportMethod featureIds
#'
#' @param x Object to retrieve the gene set from, either a `GeneSetDb` or a
#' `SparrowResult`.
#' @param i,j The collection,name compound key identifier of the gene set
#' @param value What form do you want the id's in?
#' * `"feature_id"`: the IDs used in the original geneset definitions
#' * `"x.id"`: the ids of the features as they are used in the expression
#' object.
#' * `"x.idx"`: The integer index into the expresion object `x` that the
#' `GeneSetDb`` has been conformed to.
#' @param active.only only look for gene sets that are "active"? Defaults to
#' `TRUE` if `x` is conformed to a target expression object, else `FALSE`.
#'   See [conform()] for further details.
#' @param ... pass through arguments
#' @return A vector of identifiers (or indexes into an expression object,
#' depending on the `value` argument) for the features in the specified
#' geneset. `NA` is returned if the geneset is not "active" (ie. listed in
#' [geneSets()])
#'
#' @examples
#' gdb <- exampleGeneSetDb()
#' fids.gs <- featureIds(gdb, 'c2', 'BIOCARTA_AGPCR_PATHWAY')
#' fids.c2 <- featureIds(gdb, 'c2')
#' fids.all <- featureIds(gdb)
#'
#' vm <- exampleExpressionSet(do.voom=TRUE)
#' gdb <- conform(gdb, vm)
#' ## fewer than before
#' fids.gs2 <- featureIds(gdb, 'c2', 'BIOCARTA_AGPCR_PATHWAY')
#' ## same as before
#' fids.gs3 <- featureIds(gdb, 'c2', 'BIOCARTA_AGPCR_PATHWAY', active.only=FALSE)
#' ## returned as row indices into vm
#' fids.idxs <- featureIds(gdb, 'c2', value='x.idx')
setGeneric("featureIds", signature="x",
function(x, i, j, value=c('feature_id', 'x.id', 'x.idx'),
active.only=is.conformed(x), ...)
standardGeneric("featureIds"))
#' Fetch the featureIdMap for a `GeneSetDb`
#'
#' The GeneSetDb has an internal data structure that is used to cross reference
#' the feature_id's used in the database construction to the features in the
#' expression object that is used to run GSEA methods against.
#'
#' @exportMethod featureIdMap
#' @param x the object to retrieve the featureIdMap from
#' @param ... pass through arguments
#' @return a data.frame of input feature_id's to conformed id's/rows/etc
#' @examples
#' gdb <- exampleGeneSetDb()
#' vm <- exampleExpressionSet()
#' gdb <- conform(gdb, vm)
#' fmap <- featureIdMap(gdb)
setGeneric("featureIdMap", function(x, ...) standardGeneric("featureIdMap"))
setGeneric("featureIdMap<-", function(x, value)
standardGeneric("featureIdMap<-")
)
#' Gene Set Collection Metadata
#'
#' Associates key:value metadata to a gene set collection of a [GeneSetDb()].
#'
#' The design of the GeneSetDb is such that we assume that groups of gene sets
#' are usually defined together and will therefore share similar metadata.
#' These groups of gene sets will fall into the same "collection", and,
#' therefore, metadata for particular gene sets are tracked at the collection
#' level.
#'
#' Types of metadata being referred to could be things like the organism
#' that a batch of gene sets were defined in, the type of feature identifiers
#' that a collection of gene sets are using (ie. [GSEABase::EntrezIdentifier()])
#' or a URL pattern that combines the collection,name compound key that one
#' can browse to in order to find out more information about the gene set.
#'
#' There are explicit helper functions that set and get these aforementioned
#' metadata, namely `featureIdType()`, `geneSetCollectionURLfunction()`, and
#' `geneSetURL()`. Arbitrary metadata can be stored at the collection level
#' using the [addCollectionMetadata()] function. More details are provided
#' below.
#'
#' @exportMethod collectionMetadata
#' @rdname collectionMetadata
#'
#' @param x Object to extract the collectionMetadata from
#' @param collection The geneset collection to query
#' @param name The name of the metadata variable to get the value for
#' @template asdt-param
#' @param ... not used yet
#'
#' @examples
#' gdb <- exampleGeneSetDb()
#'
#' # Gene Set URLs
#' geneSetURL(gdb, 'c2', 'BIOCARTA_AGPCR_PATHWAY')
#' geneSetURL(gdb, c('c2', 'c7'),
#' c('BIOCARTA_AGPCR_PATHWAY', 'GSE14308_TH2_VS_TH1_UP'))
#'
#' # feature id types
#' featureIdType(gdb, "c2") <- GSEABase::EntrezIdentifier()
#' featureIdType(gdb, "c2")
#'
#' ## Arbitrary metadata
#' gdb <- addCollectionMetadata(gdb, 'c2', 'foo', 'bar')
#' cmh <- collectionMetadata(gdb, 'c2', as.dt = TRUE) ## print this to see
setGeneric(
"collectionMetadata",
signature=c("x", "collection", "name"),
function(x, collection, name, ...) standardGeneric("collectionMetadata"))
#' @section Gene Set URLs:
#'
#' A URL function can be defined per collection that takes the collection,name
#' compound key and generates a URL for the gene set that the user can browse
#' to for further information. For instance, the
#' `geneSetCollectionURLfunction()` for the MSigDB collections are defined
#' like so:
#'
#' ```
#' url.fn <- function(collection, name) {
#' url <- 'http://www.broadinstitute.org/gsea/msigdb/cards/%s.html'
#' sprintf(url, name)
#' }
#' gdb <- getMSigGeneSetDb('H')
#' geneSetCollectionURLfunction(gdb, 'H') <- url.fn
#' ```
#'
#' In this way, a call to `geneSetURL(gdb, 'H', 'HALLMARK_ANGIOGENESIS')`
#' will return
#' http://www.broadinstitute.org/gsea/msigdb/cards/HALLMARK_ANGIOGENESIS.html.
#'
#' This function is vectorized over `i` and `j`
#'
#' @exportMethod geneSetURL
#' @inheritParams featureIds
#' @rdname collectionMetadata
#'
#' @return A character vector of URLs for each of the genesets identified by
#' `i, j`. `NA` is returned for genesets `i,j` that are not found in `x`.
setGeneric("geneSetURL", signature="x", function(x, i, j, ...)
standardGeneric("geneSetURL"))
# For some reason @describeIn doesn't work with the
# `geneSetCollectionURLfunction` roxygen docs
#' Get/set the gene set collection url function for a geneset collection
#'
#' Reference [collectionMetadata()] for more info.
#' @rdname geneSetCollectionURLfunction
#' @exportMethod geneSetCollectionURLfunction
setGeneric("geneSetCollectionURLfunction", signature="x", function(x, i, ...)
standardGeneric("geneSetCollectionURLfunction"))
#' @export
#' @rdname geneSetCollectionURLfunction
setGeneric("geneSetCollectionURLfunction<-",
signature="x",
function(x, i, value)
standardGeneric("geneSetCollectionURLfunction<-"))
#' @section Feature ID Types:
#'
#' When defining a set of gene sets in a collection, the identifiers used must
#' be of the same type. Most often you'll probably be working with Entrez
#' identifiers, simply because that's what most of the annotations work with.
#'
#' As such, you'd define that your collection uses geneset identifiers like
#' so:
#'
#' ```
#' gdb <- getMSigGeneSetDb('H')
#' featureIdType(gdb, 'H') <- GSEABase::ENSEMBLIdentifier()
#' ## or, equivalently (but you don't want to use this)
#' gdb <- addCollectionMetadata(gdb, 'H', 'id_type', GSEABase::ENSEMBLIdentifier())
#' ```
#'
#' @exportMethod featureIdType
#' @rdname collectionMetadata
#' @inheritParams featureIds
setGeneric("featureIdType", signature="x", function(x, i, ...)
standardGeneric("featureIdType"))
#' @export
#' @rdname collectionMetadata
setGeneric("featureIdType<-", signature="x", function(x, i, value)
standardGeneric("featureIdType<-"))
#' Fetches information for a gene set
#'
#' @description
#' Gene sets inside a [GeneSetDb()] are indexed by their collection,name
#' compound key. There is no special class to represent an individual gene set.
#' Instead, gene sets are returned as a data.frame, the rows of which enumerate
#' the features that belong to them.
#'
#' When `x` is a [SparrowResult()], this function will append
#' the differential expression statistics for the individual features generated
#' across the contrast that defined `x`.
#'
#' @exportMethod geneSet
#' @rdname geneSet
#'
#' @inheritParams featureIds
#' @param with.feature.map If `TRUE`, then details of the feature mapping
#' from the original feature_id space to the target feature space are included
#' (default: `FALSE`).
#' @param ... passed down to inner functions
#' @template asdt-param
#' @return a `data.(frame|table)` of gene set information. If `x` is a
#' `SparrowResult` object, then differential expression statistics
#' are added as columns to this result.
setGeneric("geneSet", signature="x", function(x, i, j, ...)
standardGeneric("geneSet"))
#' Fetch the active (or all) gene sets from a GeneSetDb or SparrowResult
#'
#' @export
#' @inheritParams featureIds
#' @template asdt-param
#' @return a data.table with geneset information.
#'
#' @rdname geneSets
#' @exportMethod geneSets
#'
#' @examples
#' gdb <- exampleGeneSetDb()
#' gs <- geneSets(gdb)
setGeneric("geneSets", function(x, ...) standardGeneric('geneSets'))
#' Summarize geneset:feature relationships for specified set of features
#'
#' This function creates a geneset by feature table with geneset membership
#' information for the `features` specified by the user. Only the gene sets that
#' have any of the `features` are included in the table returned.
#'
#' @rdname geneSetSummaryByGenes
#' @exportMethod geneSetSummaryByGenes
#'
#' @param x `GeneSetDb` or `SparrowResult`
#' @param features a character vector of featureIds
#' @param with.features Include columns for `features`? If `x`
#'   is a `GeneSetDb`, these columns are `TRUE`/`FALSE`. If
#' `x` is a `SparrowResult` object, the values are the logFC of
#'   the feature if present in the gene set, otherwise it is `NA`.
#' @param feature.rename if `NULL`, the feature columns are prefixed with
#' `featureId_`, if `FALSE`, no renaming is done. If `x` is
#' a `SparrowResult`, then this can be the column name found in
#' `logFC(x)`, in which case the value for the feature from the given
#'   column name would be used; setting this to `"symbol"` would be a
#' common thing to do, for instance.
#' @param ... pass through arguments
#' @template asdt-param
#' @return a data.frame of geneset <-> feature incidence/feature matrix.
#'
#' @examples
#' vm <- exampleExpressionSet(do.voom=TRUE)
#' gdb <- conform(exampleGeneSetDb(), vm)
#' mg <- seas(vm, gdb, design = vm$design, contrast = 'tumor')
#' features <- c("55839", "8522", "29087")
#' gsm.hit <- geneSetSummaryByGenes(gdb, features)
#' gsm.fid <- geneSetSummaryByGenes(mg, features, feature.rename=NULL)
#' gsm.sym <- geneSetSummaryByGenes(mg, features, feature.rename='symbol')
setGeneric("geneSetSummaryByGenes", signature=c("x"),
function(x, features, with.features = TRUE, feature.rename = NULL,
..., as.dt = FALSE)
standardGeneric("geneSetSummaryByGenes"))
#' @rdname conform
#' @exportMethod conform
setGeneric("conform", function(x, ...) standardGeneric("conform"))
#' Subset a GeneSetDb to only include geneSets with specified features.
#'
#' @exportMethod subsetByFeatures
#' @rdname subsetByFeatures
#'
#' @param x `GeneSetDb`
#' @param features Character vector of featureIds
#' @param value are you feature id's entered as themselves (`feature_id`), which
#' is the default, or are you querying by their index into a target expression
#' object? This is only relevant if you are working with a `conform`-ed
#'   GeneSetDb; further, you as a user are unlikely to invoke this argument,
#'   but it is used internally.
#' @param ... pass through arguments
#' @return A subset of `x` which contains only the geneSets that contain
#' features found in `featureIds`
#'
#' @examples
#' gdb <- exampleGeneSetDb()
#' features <- c("55839", "8522", "29087")
#' (gdb.sub <- subsetByFeatures(gdb, features))
setGeneric("subsetByFeatures", signature="x",
function(x, features, value=c('feature_id', 'x.id', 'x.idx'), ...)
standardGeneric("subsetByFeatures"))
#' @exportMethod unconform
#' @rdname conform
setGeneric("unconform", function(x, ...) standardGeneric("unconform"))
|
/R/AllGenerics.R
|
permissive
|
gladkia/sparrow
|
R
| false | false | 12,849 |
r
|
#' Calculate Weighted Standard Deviation
#'
#' Function to calculate weighted standard deviation.
#' @param x The observations to calculate the standard deviations from
#' @param w The weights associated with each observation.
#' @param na.rm If \code{TRUE}, then NA values will be removed.
weighted.sd <- function(x, w, na.rm = FALSE){
sum.w <- sum(w, na.rm = na.rm)
sum.w2 <- sum(w^2, na.rm = na.rm)
mean.w <- sum(x * w,na.rm = na.rm) / sum(w, na.rm = na.rm)
x.sd.w <- sqrt((sum.w / (sum.w^2 - sum.w2)) * sum(w * (x - mean.w)^2))
return(x.sd.w)
}
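# Illustrative sanity check (not part of the package source): with equal
# weights the weighted standard deviation reduces to the ordinary sample
# standard deviation.
# x <- c(2, 4, 4, 4, 5, 5, 7, 9)
# weighted.sd(x, rep(1, length(x)))  # identical to sd(x)
# sd(x)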
#' Wilcox Location Parameter
#'
#' Modified function to calculate Wilcox's location parameter
wilcox.loc <- function(vec, na.rm = FALSE){
n <- length(vec)
  # If the number of observations is 2 or fewer, just return the mean as the location estimate
if(n <= 2){
return(mean(vec, na.rm = na.rm))
}
  # Calculating the pairwise averages
pairAvg <- sort(c(vec, combn(vec, 2, function(x)mean(x, na.rm = na.rm))))
return(median(pairAvg, na.rm = na.rm))
}
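# Illustrative check (not part of the package source): the estimator is the
# median of the observations and all pairwise averages (a Hodges-Lehmann style
# pseudomedian), so a single outlier moves it far less than the mean.
# v <- c(1, 2, 3, 4, 100)
# wilcox.loc(v)  # 3, close to the bulk of the data
# mean(v)        # 22, pulled up by the outlier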
#' Cohen's d
#'
#' Function to calculate Cohen's D value when testing effect size
cohens_d <- function(x, y, na.rm = TRUE) {
if(na.rm){
x <- x[!is.na(x)]
y <- y[!is.na(y)]
}
n.x <- length(x)- 1
n.y <- length(y)- 1
mean.diff <- abs(mean(x) - mean(y))
if(n.x == 0 & n.y > 0) {
common.sd <- sqrt(n.y * var(y)/n.y)
} else if (n.x > 0 & n.y == 0){
common.sd <- sqrt(n.x * var(x)/n.x)
} else if (n.x > 0 & n.y > 0) {
common.sd <- sqrt((n.x * var(x) + n.y * var(y))/(n.x + n.y))
} else {
common.sd <- sd(c(x, y)) / 2
}
return(mean.diff/common.sd)
}
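# Illustrative usage (not part of the package source): Cohen's d is the
# absolute mean difference divided by the pooled standard deviation.
# cohens_d(c(10, 12, 14), c(15, 17, 19))  # 2.5, a large effect
# cohens_d(c(10, 12, 14), c(11, 13, 15))  # 0.5, a medium effect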
#' Default Weights for Projection Sources
#'
#' These are the weights that are used for each source when calculating weighted
#' averages and standard deviations if no weights are specified.
#' \code{c(CBS = 0.344, Yahoo = 0.400, ESPN = 0.329, NFL = 0.329,
#' FFToday = 0.379, NumberFire = 0.322, FantasyPros = 0.000,
#' FantasySharks = 0.327, FantasyFootballNerd = 0.000,
#' Walterfootball = 0.281, RTSports = 0.330,
#' FantasyData = 0.428, Fleaflicker = 0.428)}
default_weights <- c(CBS = 0.344, Yahoo = 0.400, ESPN = 0.329, NFL = 0.329,
FFToday = 0.379, NumberFire = 0.322, FantasyPros = 0.000,
FantasySharks= 0.327, FantasyFootballNerd = 0.000,
Walterfootball = 0.281, RTSports= 0.330,
FantasyData= 0.428, Fleaflicker = 0.428)
# Helper functions to calculate the quantiles and standard deviations for the
# source points. Used in the points_sd and confidence interval functions
quant_funcs <- list(average = quantile, robust = quantile,
weighted = purrr::possibly(Hmisc::wtd.quantile, c(`5%` = NaN, `95%` = NaN)))
quant_args <- list(list(probs = c(0.05, 0.95)), list(probs = c(0.05, 0.95)),
list(probs = c(0.05, 0.95), type = "i/n"))
get_quant <- function(pts, wt)invoke_map(quant_funcs, quant_args, x = pts, na.rm = TRUE, weights = wt)
sd_funcs <- list(average = function(x, w, na.rm)sd(x, na.rm = na.rm),
robust = function(x, w, na.rm)mad(x, na.rm = na.rm),
weighted = weighted.sd)
sd_args <- list(list(na.rm = TRUE), list(na.rm = TRUE), list(na.rm = TRUE))
get_sd <- function(pts, wt)invoke_map(sd_funcs, sd_args, x = pts, w = wt)
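# Illustrative sketch (not part of the package source): each helper returns one
# value per averaging method (average, robust, weighted).
# pts <- c(12, 15, 18); wts <- c(0.344, 0.400, 0.329)
# get_sd(pts, wts)     # list(average = sd, robust = mad, weighted = weighted.sd)
# get_quant(pts, wts)  # 5% / 95% quantiles for each method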
#' Calculate Source Points
#'
#' Function to calculate the projected points for each source.
#' @param data_result An output from the \link{scrape_data} function.
#' @param scoring_rules The scoring rules to be used.
source_points <- function(data_result, scoring_rules){
scoring_tbl <- make_scoring_tbl(scoring_rules)
long_result <- data_result %>%
stats_by_category() %>%
map(gather, "data_col", "stat_value", -c(id, data_src, pos)) %>%
bind_rows()
dst_pt_allow <- NULL
if("dst" %in% names(scoring_rules))
dst_pt_allow <- scoring_rules[[c("dst", "dst_pts_allowed")]]
dst_bracket <- is.null(dst_pt_allow) & !is.null(scoring_rules$pts_bracket)
dst_src <- long_result %>% slice(0) %>% add_column(points = 0)
if(dst_bracket){
dst_src <- long_result %>% filter(data_col == "dst_pts_allowed") %>%
      mutate(points = ffanalytics:::dst_points(stat_value, scoring_rules$pts_bracket))
}
long_result %>%
inner_join(scoring_tbl, by = c("pos", "data_col")) %>%
mutate(points = stat_value * points) %>%
bind_rows(dst_src) %>%
group_by(pos, data_src, id) %>%
summarise(points = sum(points, na.rm = TRUE)) %>% ungroup()
}
# Generate weights from a source points table if no weights are given
weights_from_src <- function(src_pts, weights = NULL){
if(is.null(weights)){
weights <- default_weights[unique(src_pts$data_src)]
}
weights %>% as.tibble() %>%
`names<-`("weight") %>% rownames_to_column('data_src')
}
#' Calculate Standard Deviations for Projected Points
#'
#' This function calculates the standard deviation for projected points from
#' different sources
#' @param src_pts An output from the \link{source_points} function
#' @param weights A named vector with the weights from each source.
#' See \link{default_weights}
points_sd <- function(src_pts, weights = NULL){
weight_tbl <- weights_from_src(src_pts, weights)
src_pts %>% inner_join(weight_tbl, by = "data_src") %>%
group_by(id) %>%
mutate(n_obs = n(),
weight = if_else(n_obs == 1 & weight == 0, 1, weight)) %>%
ungroup() %>% select(-n_obs) %>%
split(src_pts$pos) %>% map(~ split(.x, .x$id)) %>%
modify_depth(2, ~ get_sd(.x$points, .x$weight)) %>% modify_depth(2, as.tibble) %>%
modify_depth(1, bind_rows, .id = "id") %>% bind_rows(.id = "pos") %>%
gather("avg_type", "sd_pts", -id, -pos)
}
#' Calculate the Upper and Lower Limits for Projected Points
#'
#' This function calculates the ceiling and floor for projected points from
#' different sources based on quantiles
#' @param src_pts An output from the \link{source_points} function
#' @param weights A named vector with the weights from each source.
#' See \link{default_weights}
confidence_interval <- function(src_pts, weights = NULL){
weight_tbl <- weights_from_src(src_pts, weights)
src_pts %>% inner_join(weight_tbl, by = "data_src") %>%
group_by(id) %>%
mutate(n_obs = n(),
weight = if_else(n_obs == 1 & weight == 0, 1, weight)) %>%
ungroup() %>% select(-n_obs) %>%
split(src_pts$pos) %>% map(~ split(.x, .x$id)) %>%
modify_depth(2, ~ get_quant(.x$points, .x$weight)) %>% modify_depth(3, t) %>%
modify_depth(3, as.tibble) %>% modify_depth(2, bind_rows, .id = "avg_type") %>%
modify_depth(1, bind_rows, .id = "id") %>% bind_rows(.id = "pos") %>%
mutate(`5%` = ifelse(is.na(`5%`),` 5%`, `5%`)) %>% select(-` 5%`) %>%
rename(floor = "5%", ceiling = "95%")
}
#' Aggregate Projected Stats
#'
#' This function aggregates the projected stats collected from each source with
#' the \link{scrape_data} function.
#' @param data_result An output from the \link{scrape_data} function.
#' @param src_weights A named vector with the weights from each source.
#' See \link{default_weights}
#' @export
aggregate_stats <- function(data_result, src_weights = NULL){
if(is.null(src_weights)){
data_src <- data_result %>% map(`[[`, "data_src") %>% reduce(union)
src_weights <- default_weights[data_src]
}
weight_tbl <- src_weights %>% as.tibble() %>%
`names<-`("weight") %>% rownames_to_column('data_src')
data_result %>% stats_by_category() %>%
map(inner_join, weight_tbl, by = "data_src") %>%
map(gather, "data_col", "stat_value",
-c(id, data_src, pos, weight)) %>%
bind_rows() %>% group_by(pos, id, data_col) %>%
summarise(robust = wilcox.loc(stat_value, na.rm = TRUE),
average = mean(stat_value, na.rm = TRUE ),
weighted = weighted.mean(stat_value, w = weight, na.rm = TRUE)) %>%
gather("avg_type", "stat_value", -c(id, pos, data_col))
}
#' Calculate Projected Points
#'
#' This function calculates the projected points for each player based on the
#' aggregated stats from the \link{aggregate_stats} function. The resulting table
#' contains the projected points, the position rank and the points drop-off for
#' each player.
#' @param agg_stats An output from the \link{aggregate_stats} function
#' @param scoring_rules The scoring rules to be used.
projected_points <- function(agg_stats, scoring_rules){
scoring_tbl <- make_scoring_tbl(scoring_rules)
dst_pt_allow <- NULL
if("dst" %in% names(scoring_rules))
dst_pt_allow <- scoring_rules[[c("dst", "dst_pts_allowed")]]
dst_bracket <- is.null(dst_pt_allow) & !is.null(scoring_rules$pts_bracket)
dst_src <- agg_stats %>% slice(0) %>% add_column(points = 0)
if(dst_bracket){
dst_src <- agg_stats %>% filter(data_col == "dst_pts_allowed") %>%
mutate(points = ffanalytics:::dst_points(stat_value, scoring_rules$pts_bracket))
}
dst_agg <- dst_src %>% slice(0)
if(dst_bracket){
dst_agg <- agg_stats %>% filter(data_col == "dst_pts_allowed") %>%
mutate(points = ffanalytics:::dst_points(stat_value, scoring_rules$pts_bracket))
}
agg_stats %>%
inner_join(scoring_tbl, by = c("pos", "data_col")) %>%
mutate(points = stat_value * points) %>%
bind_rows(dst_agg) %>%
group_by(pos, avg_type, id) %>%
summarise(points = if_else(all(is.na(points)), NA_real_, sum(points, na.rm = TRUE))) %>%
mutate(pos_rank = dense_rank(-points),
drop_off = points - (lead(points, order_by = pos_rank) +
lead(points, 2, order_by = pos_rank)) /2 ) %>%
ungroup()
}
#' Default VOR Baseline
#'
#' This is the default baseline that is used if not otherwise specified when
#' calculating VOR:
#' \code{c(QB = 13, RB = 35, WR = 36, TE = 13, K = 8, DST = 3, DL = 10, LB = 10, DB = 10)}
default_baseline <- c(QB = 13, RB = 35, WR = 36, TE = 13, K = 8, DST = 3, DL = 10, LB = 10, DB = 10)
#' Calculate VOR
#'
#' This function calculates the VOR based on an output from the \link{projected_points}
#' function, with floor and ceiling added from the \link{confidence_interval}
#' function if floor or ceiling VOR is requested.
#' @param points_table An output from the \link{projected_points} function, merged
#' with output from the \link{confidence_interval} function if floor or ceiling
#' VOR is requested
#' @param vor_baseline The VOR Baseline to be used. If omitted then the
#' \link{default_baseline} will be used
#' @param vor_var One of \code{c("points", "floor", "ceiling")} indicating which
#' basis is used for the vor calculation
set_vor <- function(points_table, vor_baseline = NULL, vor_var = c("points", "floor", "ceiling")){
if(is.null(vor_baseline))
vor_baseline <- default_baseline
vor_var <- match.arg(vor_var)
vor_tbl <- select(points_table, "id", "pos", vor_var) %>%
rename(vor_var = !!vor_var) %>% group_by(pos) %>%
mutate(vor_rank = dense_rank(-vor_var), vor_base = vor_baseline[pos]) %>%
filter(vor_rank >= vor_base - 1 & vor_rank <= vor_base + 1) %>%
summarise(vor_base = mean(vor_var)) %>% ungroup() %>%
select(pos, vor_base) %>% inner_join(points_table, by = c("pos")) %>%
rename(vor_var = !!vor_var) %>%
mutate(vor = vor_var - vor_base,
rank = dense_rank(-vor), !!vor_var := vor_var) %>%
select(id, pos, vor, rank) %>% rename_if(is.numeric, funs(paste(vor_var, ., sep = "_"))) %>%
ungroup()
return(vor_tbl)
}
#' Calculate VOR for Points, Ceiling and Floor
#'
#' This function calculates VOR for projected points as well as the floor and
#' ceiling values.
#' @param tbl The output from the \link{projected_points} function that has
#' been merged with the output from the \link{confidence_interval} function
#' @param vor_baseline The VOR baseline values to be used. If omitted then the
#' \link{default_baseline} will be used
add_vor <- function(tbl, vor_baseline = NULL){
accumulate(c("points", "floor", "ceiling"),
~ inner_join(.x, set_vor(.x, vor_baseline, vor_var = .y),
by = c("id", "pos")),
.init = tbl)[[4]]
}
#' Default Threshold Values for Tiers
#'
#' These are the default threshold values used when applying Cohen's D values
#' to determine tiers:
#' \code{c(QB = 1, RB = 1, WR = 1, TE = 1, K = 1, DST = 0.1, DL = 1, DB = 1, LB = 1)}
default_threshold <- c(QB = 1, RB = 1, WR = 1, TE = 1, K = 1, DST = 0.1, DL = 1, DB = 1, LB = 1)
#' Determine Tiers by Position
#'
#' This function determines tiers for each position by applying Cohen's D effect
#' size
#' @param data_tbl An output from the \link{projected_points} function
#' @param d_threshold The thresholds to use when applying Cohen's d to
#' determine the tiers. If omitted then the \link{default_threshold} will be used.
#' @param src_points An output from the \link{source_points} function
set_tiers <- function(data_tbl, d_threshold = NULL, src_points){
if(is.null(d_threshold))
d_threshold <- default_threshold
tier_tbl <- data_tbl %>% filter(pos %in% names(d_threshold)) %>%
mutate(dthres = d_threshold[pos], tier = ifelse(pos_rank == 1, 1L, NA))
repeat{
before_na <- sum(is.na(tier_tbl$tier))
tier_tbl <-
tier_tbl %>% group_by(pos) %>% filter(tier == tier[which.max(tier)]) %>%
summarise(tier_id = first(id, order_by = -points),
cur_tier = as.integer(max(tier, na.rm = TRUE)),
dthres= max(dthres, na.rm = TRUE)) %>%
inner_join(tier_tbl %>% group_by(pos) %>% filter(is.na(tier)) %>%
summarise(max_id = first(id, order_by = -points)), by = "pos") %>%
group_by(pos) %>%
mutate(d_val = cohens_d(src_points[src_points$id == tier_id,]$points,
src_points[src_points$id == max_id,]$points),
tier = ifelse(d_val > dthres, cur_tier + 1L, cur_tier)) %>%
select(pos, id = max_id, new_tier = tier) %>% right_join(tier_tbl, by = c("pos", "id")) %>%
mutate(tier = ifelse(is.na(tier) & !is.na(new_tier), new_tier, tier)) %>%
select(-new_tier)
after_na <- sum(is.na(tier_tbl$tier))
if(before_na == after_na | after_na == 0)
break
}
tier_tbl %>% select(-dthres) %>% ungroup()
}
#' Create a Projections Table
#'
#' This function creates the projections table based on the scraped data from the
#' \link{scrape_data} function. The output is a table containing the projected
#' points, confidence intervals, standard deviation for points and, for seasonal
#' data, also the VOR values
#' @param data_result An output from the \link{scrape_data} function
#' @param scoring_rules The scoring rules to be used for calculations. See
#' \code{vignette("scoring_settings")} on how to define custom scoring settings.
#' If omitted then default \link{scoring} settings will be used.
#' @param src_weights A named vector defining the weights for each source to be
#' used in calculations. If omitted then \link{default_weights} will be used.
#' @param vor_baseline A named vector defining the baseline to use for VOR
#' calculations. If omitted then the \link{default_baseline} will be used.
#' @param tier_thresholds The threshold values to be used when determining tiers.
#' If omitted then the \link{default_threshold} will be used.
#' @export
projections_table <- function(data_result, scoring_rules = NULL, src_weights = NULL,
vor_baseline = NULL, tier_thresholds = NULL){
season <- attr(data_result, "season")
week <- attr(data_result, "week")
data_result <- keep(data_result, ~ nrow(.) > 0) %>%
`attr<-`(which = "season", season) %>%
`attr<-`(which = "week", week)
if(is.null(scoring_rules))
scoring_rules <- scoring
if(scoring_rules$rec$all_pos){
lg_type <- scoring_rules$rec$rec %>% rep(length(data_result)) %>%
`names<-`(names(data_result)) %>%
map_chr(~ case_when(.x > 0.5 ~ "PPR", .x > 0 ~ "Half", TRUE ~ "Std"))
} else {
lg_type <- map(scoring_rules$rec[-which(names(scoring_rules$rec) == "all_pos")], `[[`, "rec") %>%
keep(~ !is.null(.x)) %>%
map_chr(~ case_when(.x > 0.5 ~ "PPR", .x > 0 ~ "Half", TRUE ~ "Std"))
    lg_type[setdiff(names(data_result), names(lg_type))] <- "Std"
}
data_list <- invoke_map(list(src_pts = source_points, agg_stats = aggregate_stats),
list(list(data_result = data_result, scoring_rules = scoring_rules),
list(data_result = data_result, src_weights = src_weights)))
pts_uncertainty <- invoke_map(list(points_sd, confidence_interval),
src_pts = data_list$src_pts, weights = src_weights) %>%
reduce(inner_join, by = c("pos", "id","avg_type"))
out_df<- data_list$agg_stats %>%
projected_points(scoring_rules) %>%
inner_join(pts_uncertainty, by = c("pos", "id","avg_type")) %>%
group_by(avg_type) %>%
set_tiers(tier_thresholds, data_list$src_pts ) %>%
ungroup()
if(attr(data_result, "week") == 0){
out_df <- out_df %>% split(.$avg_type) %>%
map(add_vor, vor_baseline = vor_baseline) %>% bind_rows() %>%
rename(rank = points_rank)
}
out_df %>%
`attr<-`(which = "season", attr(data_result, "season")) %>%
`attr<-`(which = "week", attr(data_result, "week")) %>%
`attr<-`(which = "lg_type", lg_type)
}
#' Add ECR to the Projection Table
#'
#' This function will add the ECR values to the projections table generated from
#' the \link{projections_table} function. It will add the positional ECR, the
#' standard deviation for the positional ECR, and, for seasonal data, also the
#' overall ECR value
#' @param projection_table An output from the \link{projections_table} function.
#' @export
add_ecr <- function(projection_table){
lg_type <- attr(projection_table, "lg_type")
season <- attr(projection_table, "season")
week <- attr(projection_table, "week")
ecr_pos <- lg_type %>%
imap(~ scrape_ecr(rank_period = ifelse(week == 0, "draft", "week"),
position = .y, rank_type = .x)) %>%
map(select, id, pos_ecr = avg, sd_ecr = std_dev) %>% bind_rows()
projection_table <- left_join(projection_table, ecr_pos, by = "id")
if(week == 0){
lg_ov <- ifelse(any(lg_type == "PPR"), "PPR", ifelse(any(lg_type == "Half"), "Half", "Std"))
ecr_overall <- scrape_ecr(rank_period = "draft", rank_type = lg_ov) %>%
select(id, ecr = avg)
projection_table <- left_join(projection_table, ecr_overall, by = "id")
}
projection_table %>%
`attr<-`(which = "season", season) %>%
`attr<-`(which = "week", week) %>%
`attr<-`(which = "lg_type", lg_type)
}
#' Add ADP to the Projections Table
#'
#' This function will add the ADP data to the projections table from the
#' \link{projections_table} function. It will add the average ADP from the sources
#' specified, and the difference between the overall rank and ADP
#' @param projection_table An output from the \link{projections_table} function
#' @param sources Which ADP sources should be added. should be one or more of
#' \code{c("RTS", "CBS", "ESPN", "Yahoo", "NFL", "FFC")}
#' @export
add_adp <- function(projection_table,
sources = c("RTS", "CBS", "ESPN", "Yahoo", "NFL", "FFC")){
sources <- match.arg(sources, several.ok = TRUE)
lg_type <- attr(projection_table, "lg_type")
season <- attr(projection_table, "season")
week <- attr(projection_table, "week")
if (week != 0){
warning("ADP data is not available for weekly data", call. = FALSE)
return(projection_table)
}
adp_tbl <- get_adp(sources, type = "ADP") %>% select(1, length(.)) %>%
    rename_at(length(.), function(x) "adp")
projection_table <- left_join(projection_table, adp_tbl, by = "id") %>%
mutate(adp_diff = rank - adp)
projection_table %>%
`attr<-`(which = "season", season) %>%
`attr<-`(which = "week", week) %>%
`attr<-`(which = "lg_type", lg_type)
}
#' Add AAV to the Projections Table
#'
#' This function will add the AAV data to the projections table from the
#' \link{projections_table} function.
#' @param projection_table An output from the \link{projections_table} function
#' @param sources Which AAV sources should be added. should be one or more of
#' \code{c("RTS", "ESPN", "Yahoo", "NFL")}
#' @export
add_aav <- function(projection_table,
sources = c("RTS", "ESPN", "Yahoo", "NFL")){
sources = match.arg(sources, several.ok = TRUE)
lg_type <- attr(projection_table, "lg_type")
season <- attr(projection_table, "season")
week <- attr(projection_table, "week")
if (week != 0){
warning("AAV data is not available for weekly data", call. = FALSE)
return(projection_table)
}
adp_tbl <- get_adp(sources, type = "AAV") %>% select(1, length(.)) %>%
rename_at(length(.), ~ function(x)return("aav"))
projection_table %>%
`attr<-`(which = "season", season) %>%
`attr<-`(which = "week", week) %>%
`attr<-`(which = "lg_type", lg_type)
}
#' Risk calculation based on two variables
#'
#' Calculation of risk is done by scaling the standard deviation variables
#' passed and averaging them before returning a measure with mean 5 and standard
#' deviation of 2
calculate_risk <- function(var1, var2){
var1 <- as.numeric(var1)
var2 <- as.numeric(var2)
Z_var1 <- scale(var1)
Z_var2 <- scale(var2)
Z_var1[is.na(Z_var1)] <- Z_var2[is.na(Z_var1)]
Z_var2[is.na(Z_var2)] <- Z_var1[is.na(Z_var2)]
risk_value <- 2 * scale(rowMeans(data.frame(Z_var1, Z_var2), na.rm=TRUE)) + 5
return(risk_value)
}
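# Illustrative check (not part of the package source): the returned risk values
# are centred at 5 with a standard deviation of 2 regardless of the input scale.
# sd_pts <- runif(20, 0, 30); sd_ecr <- runif(20, 0, 60)
# r <- calculate_risk(sd_pts, sd_ecr)
# round(c(mean(r), sd(r)), 2)  # 5 and 2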
#' Add calculated risk to the table
#'
#' Calculation of risk is done by scaling the standard deviation variables
#' passed and averaging them before returning a measure with mean 5 and standard
#' deviation of 2
#' @export
add_risk <- function(projection_table){
lg_type <- attr(projection_table, "lg_type")
season <- attr(projection_table, "season")
week <- attr(projection_table, "week")
projection_table %>%
group_by(pos) %>%
# Calculate Risk values
mutate(risk = calculate_risk(sd_pts, sd_ecr)) %>%
ungroup() %>%
`attr<-`(which = "season", season) %>%
`attr<-`(which = "week", week) %>%
`attr<-`(which = "lg_type", lg_type)
}
#' Add player information to the table
#'
#' Adds player information to the projections table
#' @export
add_player_info <- function(projection_table){
lg_type <- attr(projection_table, "lg_type")
season <- attr(projection_table, "season")
week <- attr(projection_table, "week")
select(player_table, id, first_name, last_name, team, position, age, exp) %>%
inner_join(projection_table, by = "id") %>%
`attr<-`(which = "season", season) %>%
`attr<-`(which = "week", week) %>%
`attr<-`(which = "lg_type", lg_type)
}
|
/R/calc_projections.R
|
no_license
|
SirChancelot222/ffanalytics
|
R
| false | false | 22,747 |
r
|
#' Calculate Weighted Standard Deviation
#'
#' Function to calculate weighted standard deviation.
#' @param x The observations to calculate the standard deviations from
#' @param w The weights associated with each observation.
#' @param na.rm If \code{TRUE}, then NA values will be removed.
weighted.sd <- function(x, w, na.rm = FALSE){
sum.w <- sum(w, na.rm = na.rm)
sum.w2 <- sum(w^2, na.rm = na.rm)
mean.w <- sum(x * w,na.rm = na.rm) / sum(w, na.rm = na.rm)
x.sd.w <- sqrt((sum.w / (sum.w^2 - sum.w2)) * sum(w * (x - mean.w)^2))
return(x.sd.w)
}
#' Wilcox Location Parameter
#'
#' Modified function to calculate Wilcox' Location paramenter
wilcox.loc <- function(vec, na.rm = FALSE){
n <- length(vec)
# If number of observations is less than 2 then we just return mean as location estimate
if(n <= 2){
return(mean(vec, na.rm = na.rm))
}
# Calculating the paired avagerages
pairAvg <- sort(c(vec, combn(vec, 2, function(x)mean(x, na.rm = na.rm))))
return(median(pairAvg, na.rm = na.rm))
}
#' Cohen's d
#'
#' Function to calculate Cohen's D value when testing effect size
cohens_d <- function(x, y, na.rm = TRUE) {
if(na.rm){
x <- x[!is.na(x)]
y <- y[!is.na(y)]
}
n.x <- length(x)- 1
n.y <- length(y)- 1
mean.diff <- abs(mean(x) - mean(y))
if(n.x == 0 & n.y > 0) {
common.sd <- sqrt(n.y * var(y)/n.y)
} else if (n.x > 0 & n.y == 0){
common.sd <- sqrt(n.x * var(x)/n.x)
} else if (n.x > 0 & n.y > 0) {
common.sd <- sqrt((n.x * var(x) + n.y * var(y))/(n.x + n.y))
} else {
common.sd <- sd(c(x, y)) / 2
}
return(mean.diff/common.sd)
}
#' Default Weights for Projection Sources
#'
#' These are the weights that are used for each source when calculation weighted
#' averages and standard deviations if no weights are specified.
#' \code{c(CBS = 0.344, Yahoo = 0.400, ESPN = 0.329, NFL = 0.329,
#' FFToday = 0.379, NumberFire = 0.322, FantasyPros = 0.000,
#' FantasySharks = 0.327, FantasyFootballNerd = 0.000,
#' Walterfootball = 0.281, RTSports = 0.330,
#' FantasyData = 0.428, Fleaflicker = 0.428)}
default_weights <- c(CBS = 0.344, Yahoo = 0.400, ESPN = 0.329, NFL = 0.329,
FFToday = 0.379, NumberFire = 0.322, FantasyPros = 0.000,
FantasySharks= 0.327, FantasyFootballNerd = 0.000,
Walterfootball = 0.281, RTSports= 0.330,
FantasyData= 0.428, Fleaflicker = 0.428)
# Helper functions to calculate the quantiles and standard deviations for the
# source points. Used in the points_sd and confidence interval functions
quant_funcs <- list(average = quantile, robust = quantile,
weighted = purrr::possibly(Hmisc::wtd.quantile, c(`5%` = NaN, `95%` = NaN)))
quant_args <- list(list(probs = c(0.05, 0.95)), list(probs = c(0.05, 0.95)),
list(probs = c(0.05, 0.95), type = "i/n"))
get_quant <- function(pts, wt)invoke_map(quant_funcs, quant_args, x = pts, na.rm = TRUE, weights = wt)
sd_funcs <- list(average = function(x, w, na.rm)sd(x, na.rm = na.rm),
robust = function(x, w, na.rm)mad(x, na.rm = na.rm),
weighted = weighted.sd)
sd_args <- list(list(na.rm = TRUE), list(na.rm = TRUE), list(na.rm = TRUE))
get_sd <- function(pts, wt)invoke_map(sd_funcs, sd_args, x = pts, w = wt)
#' Calculate Source Points
#'
#' Function to calculate the projected points for each source.
#' @param data_result An output from the \link{scrape_data} function.
#' @param scoring_rules The scoring rules to be used.
source_points <- function(data_result, scoring_rules){
scoring_tbl <- make_scoring_tbl(scoring_rules)
long_result <- data_result %>%
stats_by_category() %>%
map(gather, "data_col", "stat_value", -c(id, data_src, pos)) %>%
bind_rows()
dst_pt_allow <- NULL
if("dst" %in% names(scoring_rules))
dst_pt_allow <- scoring_rules[[c("dst", "dst_pts_allowed")]]
dst_bracket <- is.null(dst_pt_allow) & !is.null(scoring_rules$pts_bracket)
dst_src <- long_result %>% slice(0) %>% add_column(points = 0)
if(dst_bracket){
dst_src <- long_result %>% filter(data_col == "dst_pts_allowed") %>%
mutate(points = ffanalytics:::dst_points(stat_value, scoring$pts_bracket))
}
long_result %>%
inner_join(scoring_tbl, by = c("pos", "data_col")) %>%
mutate(points = stat_value * points) %>%
bind_rows(dst_src) %>%
group_by(pos, data_src, id) %>%
summarise(points = sum(points, na.rm = TRUE)) %>% ungroup()
}
# Generate weights from a source points table if no weights are given
weights_from_src <- function(src_pts, weights = NULL){
if(is.null(weights)){
weights <- default_weights[unique(src_pts$data_src)]
}
weights %>% as.tibble() %>%
`names<-`("weight") %>% rownames_to_column('data_src')
}
#' Calculate Standard Deviations for Projected Points
#'
#' This function calculates the standard deviaion for projected points from
#' different sources
#' @param src_pts An output from the \link{source_points} function
#' @param weights A named vector with the weights from each source.
#' See \link{default_weights}
points_sd <- function(src_pts, weights = NULL){
weight_tbl <- weights_from_src(src_pts, weights)
src_pts %>% inner_join(weight_tbl, by = "data_src") %>%
group_by(id) %>%
mutate(n_obs = n(),
weight = if_else(n_obs == 1 & weight == 0, 1, weight)) %>%
ungroup() %>% select(-n_obs) %>%
split(src_pts$pos) %>% map(~ split(.x, .x$id)) %>%
modify_depth(2, ~ get_sd(.x$points, .x$weight)) %>% modify_depth(2, as.tibble) %>%
modify_depth(1, bind_rows, .id = "id") %>% bind_rows(.id = "pos") %>%
gather("avg_type", "sd_pts", -id, -pos)
}
#' Calculate the Upper and Lower Limits for Projected Points
#'
#' This function calculates the ceiling and floor for projected points from
#' different sources based on quantiles
#' @param src_pts An output from the \link{source_points} function
#' @param weights A named vector with the weights from each source.
#' See \link{default_weights}
confidence_interval <- function(src_pts, weights = NULL){
weight_tbl <- weights_from_src(src_pts, weights)
src_pts %>% inner_join(weight_tbl, by = "data_src") %>%
group_by(id) %>%
mutate(n_obs = n(),
weight = if_else(n_obs == 1 & weight == 0, 1, weight)) %>%
ungroup() %>% select(-n_obs) %>%
split(src_pts$pos) %>% map(~ split(.x, .x$id)) %>%
modify_depth(2, ~ get_quant(.x$points, .x$weight)) %>% modify_depth(3, t) %>%
modify_depth(3, as.tibble) %>% modify_depth(2, bind_rows, .id = "avg_type") %>%
modify_depth(1, bind_rows, .id = "id") %>% bind_rows(.id = "pos") %>%
mutate(`5%` = ifelse(is.na(`5%`),` 5%`, `5%`)) %>% select(-` 5%`) %>%
rename(floor = "5%", ceiling = "95%")
}
#' Aggregate Projected Stats
#'
#' This function aggregates the projected stats collected from each source with
#' the \link{scrape_data} function.
#' @param data_result An output from the \link{scrape_data} function.
#' @param src_weights A named vector with the weights from each source.
#' See \link{default_weights}
#' @export
aggregate_stats <- function(data_result, src_weights = NULL){
if(is.null(src_weights)){
data_src <- data_result %>% map(`[[`, "data_src") %>% reduce(union)
src_weights <- default_weights[data_src]
}
weight_tbl <- src_weights %>% as.tibble() %>%
`names<-`("weight") %>% rownames_to_column('data_src')
data_result %>% stats_by_category() %>%
map(inner_join, weight_tbl, by = "data_src") %>%
map(gather, "data_col", "stat_value",
-c(id, data_src, pos, weight)) %>%
bind_rows() %>% group_by(pos, id, data_col) %>%
summarise(robust = wilcox.loc(stat_value, na.rm = TRUE),
average = mean(stat_value, na.rm = TRUE ),
weighted = weighted.mean(stat_value, w = weight, na.rm = TRUE)) %>%
gather("avg_type", "stat_value", -c(id, pos, data_col))
}
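# Worked illustration of the three averaging types (hypothetical numbers,
# commented out): suppose three sources project 18, 22 and 25 rushing attempts
# with weights 0.2, 0.3 and 0.5. "average" is the plain mean, "weighted" the
# weighted mean, and "robust" the wilcox.loc() location estimate used in the
# summarise() above.
# mean(c(18, 22, 25))                                # approx. 21.67
# weighted.mean(c(18, 22, 25), w = c(0.2, 0.3, 0.5)) # 22.7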
#' Calculate Projected Points
#'
#' This function calculates the projected points for each player based on the
#' aggregated stats from the \link{aggregate_stats} function. The resulting table
#' contains the projected points, the position rank and the points drop-off for
#' each player.
#' @param agg_stats An output from the \link{aggregate_stats} function
#' @param scoring_rules The scoring rules to be used.
projected_points <- function(agg_stats, scoring_rules){
scoring_tbl <- make_scoring_tbl(scoring_rules)
dst_pt_allow <- NULL
if("dst" %in% names(scoring_rules))
dst_pt_allow <- scoring_rules[[c("dst", "dst_pts_allowed")]]
dst_bracket <- is.null(dst_pt_allow) & !is.null(scoring_rules$pts_bracket)
  # Empty placeholder with a points column, so the bind_rows() below is a no-op
  # when DST points-allowed brackets are not in use
  dst_agg <- agg_stats %>% slice(0) %>% add_column(points = 0)
  if(dst_bracket){
    dst_agg <- agg_stats %>% filter(data_col == "dst_pts_allowed") %>%
      mutate(points = ffanalytics:::dst_points(stat_value, scoring_rules$pts_bracket))
  }
agg_stats %>%
inner_join(scoring_tbl, by = c("pos", "data_col")) %>%
mutate(points = stat_value * points) %>%
bind_rows(dst_agg) %>%
group_by(pos, avg_type, id) %>%
summarise(points = if_else(all(is.na(points)), NA_real_, sum(points, na.rm = TRUE))) %>%
mutate(pos_rank = dense_rank(-points),
drop_off = points - (lead(points, order_by = pos_rank) +
lead(points, 2, order_by = pos_rank)) /2 ) %>%
ungroup()
}
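# Worked sketch of the drop-off calculation (toy numbers): if a position's
# projected points are 300, 280, 250 and 240, the top player's drop_off is
# 300 - (280 + 250) / 2 = 35, i.e. the gap to the average of the next two
# players by positional rank.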
#' Default VOR Baseline
#'
#' This is the default baseline that is used if not otherwise specified when
#' calculating VOR:
#' \code{c(QB = 13, RB = 35, WR = 36, TE = 13, K = 8, DST = 3, DL = 10, LB = 10, DB = 10)}
default_baseline <- c(QB = 13, RB = 35, WR = 36, TE = 13, K = 8, DST = 3, DL = 10, LB = 10, DB = 10)
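# Sketch of overriding the baseline (hypothetical values, commented out):
# a shallower league might lower the RB/WR baselines, e.g.
# my_baseline <- c(QB = 10, RB = 24, WR = 24, TE = 10, K = 5, DST = 3,
#                  DL = 10, LB = 10, DB = 10)
# projections_table(my_scrape, vor_baseline = my_baseline)
# where `my_scrape` stands in for an output of scrape_data().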
#' Calculate VOR
#'
#' This function calculates the VOR based on an output from the \link{projected_points}
#' function, with floor and ceiling columns added from the
#' \link{confidence_interval} function when floor or ceiling VOR is requested
#' @param points_table An output from the \link{projected_points} function,
#' merged with the output from the \link{confidence_interval} function if a
#' floor or ceiling VOR is requested
#' @param vor_baseline The VOR Baseline to be used. If omitted then the
#' \link{default_baseline} will be used
#' @param vor_var One of \code{c("points", "floor", "ceiling")} indicating which
#' basis is used for the VOR calculation
set_vor <- function(points_table, vor_baseline = NULL, vor_var = c("points", "floor", "ceiling")){
if(is.null(vor_baseline))
vor_baseline <- default_baseline
vor_var <- match.arg(vor_var)
vor_tbl <- select(points_table, "id", "pos", vor_var) %>%
rename(vor_var = !!vor_var) %>% group_by(pos) %>%
mutate(vor_rank = dense_rank(-vor_var), vor_base = vor_baseline[pos]) %>%
filter(vor_rank >= vor_base - 1 & vor_rank <= vor_base + 1) %>%
summarise(vor_base = mean(vor_var)) %>% ungroup() %>%
select(pos, vor_base) %>% inner_join(points_table, by = c("pos")) %>%
rename(vor_var = !!vor_var) %>%
mutate(vor = vor_var - vor_base,
rank = dense_rank(-vor), !!vor_var := vor_var) %>%
select(id, pos, vor, rank) %>% rename_if(is.numeric, funs(paste(vor_var, ., sep = "_"))) %>%
ungroup()
return(vor_tbl)
}
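# Worked sketch of the baseline step (toy numbers): with a QB baseline of 13,
# vor_base is the mean of the points of the players ranked 12, 13 and 14 at QB.
# If those players project 255, 250 and 245 points, vor_base = 250, and a QB
# projected for 320 points gets vor = 320 - 250 = 70.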
#' Calculate VOR for Points, Ceiling and Floor
#'
#' This function calculates VOR for projected points as well as the floor and
#' ceiling values.
#' @param tbl The output from the \link{projected_points} function that has
#' been merged with the output from the \link{confidence_interval} function
#' @param vor_baseline The VOR baseline values to be used. If omitted then the
#' \link{default_baseline} will be used
add_vor <- function(tbl, vor_baseline = NULL){
accumulate(c("points", "floor", "ceiling"),
~ inner_join(.x, set_vor(.x, vor_baseline, vor_var = .y),
by = c("id", "pos")),
.init = tbl)[[4]]
}
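# Note on the accumulate() call above: it starts from `tbl` (.init) and joins
# the VOR columns for "points", then "floor", then "ceiling", producing a list
# of four tables; `[[4]]` keeps the final, fully joined one.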
#' Default Threshold Values for Tiers
#'
#' These are the default threshold values used when applying Cohen's D values
#' to determine tiers:
#' \code{c(QB = 1, RB = 1, WR = 1, TE = 1, K = 1, DST = 0.1, DL = 1, DB = 1, LB = 1)}
default_threshold <- c(QB = 1, RB = 1, WR = 1, TE = 1, K = 1, DST = 0.1, DL = 1, DB = 1, LB = 1)
#' Determine Tiers by Position
#'
#' This function determines tiers for each position by applying Cohen's D effect
#' size
#' @param data_tbl An output from the \link{projected_points} function
#' @param d_threshold The thresholds to use when applying Cohen's D to
#' determine the tiers. If omitted then the \link{default_threshold} will be used.
#' @param src_points An output from the \link{source_points} function
set_tiers <- function(data_tbl, d_threshold = NULL, src_points){
if(is.null(d_threshold))
d_threshold <- default_threshold
tier_tbl <- data_tbl %>% filter(pos %in% names(d_threshold)) %>%
mutate(dthres = d_threshold[pos], tier = ifelse(pos_rank == 1, 1L, NA))
repeat{
before_na <- sum(is.na(tier_tbl$tier))
tier_tbl <-
tier_tbl %>% group_by(pos) %>% filter(tier == tier[which.max(tier)]) %>%
summarise(tier_id = first(id, order_by = -points),
cur_tier = as.integer(max(tier, na.rm = TRUE)),
dthres= max(dthres, na.rm = TRUE)) %>%
inner_join(tier_tbl %>% group_by(pos) %>% filter(is.na(tier)) %>%
summarise(max_id = first(id, order_by = -points)), by = "pos") %>%
group_by(pos) %>%
mutate(d_val = cohens_d(src_points[src_points$id == tier_id,]$points,
src_points[src_points$id == max_id,]$points),
tier = ifelse(d_val > dthres, cur_tier + 1L, cur_tier)) %>%
select(pos, id = max_id, new_tier = tier) %>% right_join(tier_tbl, by = c("pos", "id")) %>%
mutate(tier = ifelse(is.na(tier) & !is.na(new_tier), new_tier, tier)) %>%
select(-new_tier)
after_na <- sum(is.na(tier_tbl$tier))
if(before_na == after_na | after_na == 0)
break
}
tier_tbl %>% select(-dthres) %>% ungroup()
}
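# Worked sketch of the tier break test (toy numbers; the internal cohens_d()
# helper is assumed here to compute the standard Cohen's D effect size): if the
# current tier anchor's source points are c(20, 22, 24) and the next-ranked
# player's are c(14, 16, 18), both have a standard deviation of 2, so
# d = (22 - 16) / 2 = 3, which exceeds the default threshold of 1 and starts
# a new tier at the next-ranked player.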
#' Create a Projections Table
#'
#' This function creates the projections table based on the scraped data from the
#' \link{scrape_data} function. The output is a table containing the projected
#' points, confidence intervals, standard deviation for points, and, for
#' seasonal data, the VOR values
#' @param data_result An output from the \link{scrape_data} function
#' @param scoring_rules The scoring rules to be used for calculations. See
#' \code{vignette("scoring_settings")} on how to define custom scoring settings.
#' If omitted then default \link{scoring} settings will be used.
#' @param src_weights A named vector defining the weights for each source to be
#' used in calculations. If omitted then \link{default_weights} will be used.
#' @param vor_baseline A named vector defining the baseline to use for VOR
#' calculations. If omitted then the \link{default_baseline} will be used.
#' @param tier_thresholds The threshold values to be used when determining tiers.
#' If omitted then the \link{default_threshold} will be used.
#' @export
projections_table <- function(data_result, scoring_rules = NULL, src_weights = NULL,
vor_baseline = NULL, tier_thresholds = NULL){
season <- attr(data_result, "season")
week <- attr(data_result, "week")
data_result <- keep(data_result, ~ nrow(.) > 0) %>%
`attr<-`(which = "season", season) %>%
`attr<-`(which = "week", week)
if(is.null(scoring_rules))
scoring_rules <- scoring
if(scoring_rules$rec$all_pos){
lg_type <- scoring_rules$rec$rec %>% rep(length(data_result)) %>%
`names<-`(names(data_result)) %>%
map_chr(~ case_when(.x > 0.5 ~ "PPR", .x > 0 ~ "Half", TRUE ~ "Std"))
} else {
lg_type <- map(scoring_rules$rec[-which(names(scoring_rules$rec) == "all_pos")], `[[`, "rec") %>%
keep(~ !is.null(.x)) %>%
map_chr(~ case_when(.x > 0.5 ~ "PPR", .x > 0 ~ "Half", TRUE ~ "Std"))
    # Positions without an explicit reception rule default to standard scoring
    lg_type[setdiff(names(data_result), names(lg_type))] <- "Std"
}
data_list <- invoke_map(list(src_pts = source_points, agg_stats = aggregate_stats),
list(list(data_result = data_result, scoring_rules = scoring_rules),
list(data_result = data_result, src_weights = src_weights)))
pts_uncertainty <- invoke_map(list(points_sd, confidence_interval),
src_pts = data_list$src_pts, weights = src_weights) %>%
reduce(inner_join, by = c("pos", "id","avg_type"))
  out_df <- data_list$agg_stats %>%
projected_points(scoring_rules) %>%
inner_join(pts_uncertainty, by = c("pos", "id","avg_type")) %>%
group_by(avg_type) %>%
set_tiers(tier_thresholds, data_list$src_pts ) %>%
ungroup()
if(attr(data_result, "week") == 0){
out_df <- out_df %>% split(.$avg_type) %>%
map(add_vor, vor_baseline = vor_baseline) %>% bind_rows() %>%
rename(rank = points_rank)
}
out_df %>%
`attr<-`(which = "season", attr(data_result, "season")) %>%
`attr<-`(which = "week", attr(data_result, "week")) %>%
`attr<-`(which = "lg_type", lg_type)
}
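# Typical usage sketch (commented out; the exact scrape_data() arguments are
# illustrative and depend on the sources and positions you want):
# my_scrape <- scrape_data(src = c("CBS", "NFL"), pos = c("QB", "RB", "WR", "TE"),
#                          season = 2019, week = 0)
# my_projections <- projections_table(my_scrape)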
#' Add ECR to the Projection Table
#'
#' This function will add the ECR values to the projections table generated from
#' the \link{projections_table} function. It will add the positional ECR, the
#' standard deviation for the positional ECR, and, for seasonal data, the
#' overall ECR value
#' @param projection_table An output from the \link{projections_table} function.
#' @export
add_ecr <- function(projection_table){
lg_type <- attr(projection_table, "lg_type")
season <- attr(projection_table, "season")
week <- attr(projection_table, "week")
ecr_pos <- lg_type %>%
imap(~ scrape_ecr(rank_period = ifelse(week == 0, "draft", "week"),
position = .y, rank_type = .x)) %>%
map(select, id, pos_ecr = avg, sd_ecr = std_dev) %>% bind_rows()
projection_table <- left_join(projection_table, ecr_pos, by = "id")
if(week == 0){
lg_ov <- ifelse(any(lg_type == "PPR"), "PPR", ifelse(any(lg_type == "Half"), "Half", "Std"))
ecr_overall <- scrape_ecr(rank_period = "draft", rank_type = lg_ov) %>%
select(id, ecr = avg)
projection_table <- left_join(projection_table, ecr_overall, by = "id")
}
projection_table %>%
`attr<-`(which = "season", season) %>%
`attr<-`(which = "week", week) %>%
`attr<-`(which = "lg_type", lg_type)
}
#' Add ADP to the Projections Table
#'
#' This function will add the ADP data to the projections table from the
#' \link{projections_table} function. It will add the average ADP from the sources
#' specified, and the difference between the overall rank and ADP
#' @param projection_table An output from the \link{projections_table} function
#' @param sources Which ADP sources should be added. Should be one or more of
#' \code{c("RTS", "CBS", "ESPN", "Yahoo", "NFL", "FFC")}
#' @export
add_adp <- function(projection_table,
sources = c("RTS", "CBS", "ESPN", "Yahoo", "NFL", "FFC")){
sources <- match.arg(sources, several.ok = TRUE)
lg_type <- attr(projection_table, "lg_type")
season <- attr(projection_table, "season")
week <- attr(projection_table, "week")
if (week != 0){
warning("ADP data is not available for weekly data", call. = FALSE)
return(projection_table)
}
adp_tbl <- get_adp(sources, type = "ADP") %>% select(1, length(.)) %>%
    rename_at(length(.), function(x) "adp")
projection_table <- left_join(projection_table, adp_tbl, by = "id") %>%
mutate(adp_diff = rank - adp)
projection_table %>%
`attr<-`(which = "season", season) %>%
`attr<-`(which = "week", week) %>%
`attr<-`(which = "lg_type", lg_type)
}
#' Add AAV to the Projections Table
#'
#' This function will add the AAV data to the projections table from the
#' \link{projections_table} function.
#' @param projection_table An output from the \link{projections_table} function
#' @param sources Which AAV sources should be added. Should be one or more of
#' \code{c("RTS", "ESPN", "Yahoo", "NFL")}
#' @export
add_aav <- function(projection_table,
sources = c("RTS", "ESPN", "Yahoo", "NFL")){
  sources <- match.arg(sources, several.ok = TRUE)
lg_type <- attr(projection_table, "lg_type")
season <- attr(projection_table, "season")
week <- attr(projection_table, "week")
if (week != 0){
warning("AAV data is not available for weekly data", call. = FALSE)
return(projection_table)
}
  aav_tbl <- get_adp(sources, type = "AAV") %>% select(1, length(.)) %>%
    rename_at(length(.), function(x) "aav")
  # Join the AAV column onto the projections table; without this step the
  # function would return the table unchanged.
  projection_table <- left_join(projection_table, aav_tbl, by = "id")
  projection_table %>%
`attr<-`(which = "season", season) %>%
`attr<-`(which = "week", week) %>%
`attr<-`(which = "lg_type", lg_type)
}
#' Risk calculation based on two variables
#'
#' Calculation of risk is done by scaling the standard deviation variables
#' passed and averaging them before returning a measure with mean 5 and standard
#' deviation of 2
calculate_risk <- function(var1, var2){
var1 <- as.numeric(var1)
var2 <- as.numeric(var2)
  # Standardize both uncertainty measures so they are on a comparable scale
  Z_var1 <- scale(var1)
  Z_var2 <- scale(var2)
  # Where one measure is missing, fall back on the other so a player is not
  # dropped for lacking a single source of uncertainty
  Z_var1[is.na(Z_var1)] <- Z_var2[is.na(Z_var1)]
  Z_var2[is.na(Z_var2)] <- Z_var1[is.na(Z_var2)]
  # Average the z-scores, then rescale to mean 5 and standard deviation 2
  risk_value <- 2 * scale(rowMeans(data.frame(Z_var1, Z_var2), na.rm = TRUE)) + 5
return(risk_value)
}
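# Quick sanity check (commented out): by construction the result has mean 5 and
# standard deviation 2 for any inputs, e.g.
# r <- calculate_risk(c(2.1, 4.8, 1.3, 6.0, 3.2), c(1.0, 3.5, NA, 5.5, 2.8))
# mean(r); sd(r)   # 5 and 2, up to floating point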
#' Add calculated risk to the table
#'
#' Calculation of risk is done by scaling the standard deviation of projected
#' points and of the positional ECR, averaging them, and returning a measure
#' with mean 5 and standard deviation of 2.
#' @param projection_table An output from the \link{projections_table} function
#' that has had ECR added via \link{add_ecr}, so that the \code{sd_ecr} column
#' is available.
#' @export
add_risk <- function(projection_table){
lg_type <- attr(projection_table, "lg_type")
season <- attr(projection_table, "season")
week <- attr(projection_table, "week")
projection_table %>%
group_by(pos) %>%
# Calculate Risk values
mutate(risk = calculate_risk(sd_pts, sd_ecr)) %>%
ungroup() %>%
`attr<-`(which = "season", season) %>%
`attr<-`(which = "week", week) %>%
`attr<-`(which = "lg_type", lg_type)
}
#' Add player information to the table
#'
#' Adds player name, team, position, age, and experience from the package's
#' player table to the projections table.
#' @param projection_table An output from the \link{projections_table} function.
#' @export
add_player_info <- function(projection_table){
lg_type <- attr(projection_table, "lg_type")
season <- attr(projection_table, "season")
week <- attr(projection_table, "week")
select(player_table, id, first_name, last_name, team, position, age, exp) %>%
inner_join(projection_table, by = "id") %>%
`attr<-`(which = "season", season) %>%
`attr<-`(which = "week", week) %>%
`attr<-`(which = "lg_type", lg_type)
}
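# End-to-end sketch of how these helpers are meant to chain together
# (commented out; assumes `my_projections` came from projections_table() on a
# seasonal scrape):
# my_projections <- my_projections %>%
#   add_ecr() %>%
#   add_adp() %>%
#   add_aav() %>%
#   add_risk() %>%
#   add_player_info()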
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.R
\name{detective}
\alias{detective}
\title{Amazon Detective}
\usage{
detective(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.
\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
\item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
\item{\strong{region}:} {The AWS Region used in instantiating the client.}
\item{\strong{close_connection}:} {Immediately close all HTTP connections.}
\item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
\item{\strong{s3_force_path_style}:} {Set this to \code{true} to force the request to use path-style addressing, i.e., \verb{http://s3.amazonaws.com/BUCKET/KEY}.}
}}
}
\value{
A client for the service. You can call the service's operations using
syntax like \code{svc$operation(...)}, where \code{svc} is the name you've assigned
to the client. The available operations are listed in the
Operations section.
}
\description{
Detective uses machine learning and purpose-built visualizations to help
you to analyze and investigate security issues across your Amazon Web
Services (Amazon Web Services) workloads. Detective automatically
extracts time-based events such as login attempts, API calls, and
network traffic from CloudTrail and Amazon Virtual Private Cloud (Amazon
VPC) flow logs. It also extracts findings detected by Amazon GuardDuty.
The Detective API primarily supports the creation and management of
behavior graphs. A behavior graph contains the extracted data from a set
of member accounts, and is created and managed by an administrator
account.
To add a member account to the behavior graph, the administrator account
sends an invitation to the account. When the account accepts the
invitation, it becomes a member account in the behavior graph.
Detective is also integrated with Organizations. The organization
management account designates the Detective administrator account for
the organization. That account becomes the administrator account for the
organization behavior graph. The Detective administrator account is also
the delegated administrator account for Detective in Organizations.
The Detective administrator account can enable any organization account
as a member account in the organization behavior graph. The organization
accounts do not receive invitations. The Detective administrator account
can also invite other accounts to the organization behavior graph.
Every behavior graph is specific to a Region. You can only use the API
to manage behavior graphs that belong to the Region that is associated
with the currently selected endpoint.
The administrator account for a behavior graph can use the Detective API
to do the following:
\itemize{
\item Enable and disable Detective. Enabling Detective creates a new
behavior graph.
\item View the list of member accounts in a behavior graph.
\item Add member accounts to a behavior graph.
\item Remove member accounts from a behavior graph.
\item Apply tags to a behavior graph.
}
The organization management account can use the Detective API to select
the delegated administrator for Detective.
The Detective administrator account for an organization can use the
Detective API to do the following:
\itemize{
\item Perform all of the functions of an administrator account.
\item Determine whether to automatically enable new organization accounts
as member accounts in the organization behavior graph.
}
An invited member account can use the Detective API to do the following:
\itemize{
\item View the list of behavior graphs that they are invited to.
\item Accept an invitation to contribute to a behavior graph.
\item Decline an invitation to contribute to a behavior graph.
\item Remove their account from a behavior graph.
}
All API actions are logged as CloudTrail events. See \href{https://docs.aws.amazon.com/detective/latest/adminguide/logging-using-cloudtrail.html}{Logging Detective API Calls with CloudTrail}.
We replaced the term "master account" with the term "administrator
account." An administrator account is used to centrally manage multiple
accounts. In the case of Detective, the administrator account manages
the accounts in their behavior graph.
}
\section{Service syntax}{
\if{html}{\out{<div class="sourceCode">}}\preformatted{svc <- detective(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string",
close_connection = "logical",
timeout = "numeric",
s3_force_path_style = "logical"
)
)
}\if{html}{\out{</div>}}
}
\section{Operations}{
\tabular{ll}{
\link[paws.security.identity:detective_accept_invitation]{accept_invitation} \tab Accepts an invitation for the member account to contribute data to a behavior graph\cr
\link[paws.security.identity:detective_batch_get_graph_member_datasources]{batch_get_graph_member_datasources} \tab Gets data source package information for the behavior graph\cr
\link[paws.security.identity:detective_batch_get_membership_datasources]{batch_get_membership_datasources} \tab Gets information on the data source package history for an account\cr
\link[paws.security.identity:detective_create_graph]{create_graph} \tab Creates a new behavior graph for the calling account, and sets that account as the administrator account\cr
\link[paws.security.identity:detective_create_members]{create_members} \tab CreateMembers is used to send invitations to accounts\cr
\link[paws.security.identity:detective_delete_graph]{delete_graph} \tab Disables the specified behavior graph and queues it to be deleted\cr
\link[paws.security.identity:detective_delete_members]{delete_members} \tab Removes the specified member accounts from the behavior graph\cr
\link[paws.security.identity:detective_describe_organization_configuration]{describe_organization_configuration} \tab Returns information about the configuration for the organization behavior graph\cr
\link[paws.security.identity:detective_disable_organization_admin_account]{disable_organization_admin_account} \tab Removes the Detective administrator account in the current Region\cr
\link[paws.security.identity:detective_disassociate_membership]{disassociate_membership} \tab Removes the member account from the specified behavior graph\cr
\link[paws.security.identity:detective_enable_organization_admin_account]{enable_organization_admin_account} \tab Designates the Detective administrator account for the organization in the current Region\cr
\link[paws.security.identity:detective_get_members]{get_members} \tab Returns the membership details for specified member accounts for a behavior graph\cr
\link[paws.security.identity:detective_list_datasource_packages]{list_datasource_packages} \tab Lists data source packages in the behavior graph\cr
\link[paws.security.identity:detective_list_graphs]{list_graphs} \tab Returns the list of behavior graphs that the calling account is an administrator account of\cr
\link[paws.security.identity:detective_list_invitations]{list_invitations} \tab Retrieves the list of open and accepted behavior graph invitations for the member account\cr
\link[paws.security.identity:detective_list_members]{list_members} \tab Retrieves the list of member accounts for a behavior graph\cr
\link[paws.security.identity:detective_list_organization_admin_accounts]{list_organization_admin_accounts} \tab Returns information about the Detective administrator account for an organization\cr
\link[paws.security.identity:detective_list_tags_for_resource]{list_tags_for_resource} \tab Returns the tag values that are assigned to a behavior graph\cr
\link[paws.security.identity:detective_reject_invitation]{reject_invitation} \tab Rejects an invitation to contribute the account data to a behavior graph\cr
\link[paws.security.identity:detective_start_monitoring_member]{start_monitoring_member} \tab Sends a request to enable data ingest for a member account that has a status of ACCEPTED_BUT_DISABLED\cr
\link[paws.security.identity:detective_tag_resource]{tag_resource} \tab Applies tag values to a behavior graph\cr
\link[paws.security.identity:detective_untag_resource]{untag_resource} \tab Removes tags from a behavior graph\cr
\link[paws.security.identity:detective_update_datasource_packages]{update_datasource_packages} \tab Starts a data source package for the behavior graph\cr
\link[paws.security.identity:detective_update_organization_configuration]{update_organization_configuration} \tab Updates the configuration for the Organizations integration in the current Region
}
}
\examples{
\dontrun{
svc <- detective()
svc$accept_invitation(
Foo = 123
)
}
}
|
/man/detective.Rd
|
no_license
|
cran/paws
|
R
| false | true | 9,236 |
rd
|
\name{TES}
\alias{TES}
\title{
Calculate the total environ storage.
}
\description{
Calculates the total storage in each n input and output environs. This
function calculates the storage for both the unit input (output) and the
realized input (output) environs. Realized uses the observed inputs
(outputs) rather than an assumed unit input (output) to each node.
}
\usage{
TES(x,balance.override=FALSE)
}
\arguments{
\item{x}{
A network object.
}
\item{balance.override}{
LOGICAL: should balancing be ignored.
}
}
\value{
\item{realized.input}{input oriented, realized storage in each environ.}
\item{realized.output}{output oriented, realized storage in each environ.}
\item{unit.input}{input oriented, unit storage in each environ.}
\item{unit.output}{output oriented, unit storage in each environ.}
}
\references{
Matis, J.H. and Patten, B.C. 1981. Environ analysis of linear
compartmental systems: the static, time invariant case. Bulletin of the
International Statistical Institute. 48, 527--565.
}
\author{
Matthew K. Lau (mkl48@nau.edu)
Stuart R. Borrett (borretts@uncw.edu)
David E. Hines (deh9951@uncw.edu)
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
\code{\link{enaStorage},\link{enaEnviron}}
}
\examples{
data(troModels)
TES(troModels[[6]])
}
|
/vignettes/enaR.Rcheck/00_pkg_src/enaR/man/TES.Rd
|
no_license
|
STecchio/enaR
|
R
| false | false | 1,380 |
rd
|