| content: large_string (lengths 0-6.46M) | path: large_string (lengths 3-331) | license_type: large_string (2 classes) | repo_name: large_string (lengths 5-125) | language: large_string (1 class) | is_vendor: bool (2 classes) | is_generated: bool (2 classes) | length_bytes: int64 (4-6.46M) | extension: large_string (75 classes) | text: string (lengths 0-6.46M) |
library(zoo)
library(dplyr)
library(dplR)
library(lubridate)
library(ggplot2)
library(scales)
# Source data had to be reformatted to tab-delimited; as originally downloaded from IMR it comes as fixed width.
# make list of file names
fnames = list.files('Ingoy Station (feb18)/')
# read in and paste all data together
data <- do.call("rbind", lapply(fnames, function(x) read.table(paste('Ingoy Station (feb18)/', x, sep=''),
skip=1, sep='\t', na.strings = " ")))
# change variable names
names(data) = c('Stasjon','Dato','Dyp','Temperatur','Salt','Kal_temperatur','Kal_salt')
# convert the date column from its d.m.Y string representation to Date
data$Dato = as.Date(as.character(data$Dato), format = '%d.%m.%Y')
class(data$Dato)  # should now be "Date"
data %>%
filter(Kal_temperatur > 1.5) %>%
ggplot(aes(x=Dato, y=Kal_temperatur, group=Dyp, color=Dyp)) + geom_line() +
scale_x_date(date_breaks = "3 month",
labels=date_format("%b.%y"),
limits = as.Date(c('2000-03-01','2018-01-01')))
# the following code works, except we still need to require a certain % of data points per year
data %>%
filter(Kal_temperatur > 1.5) %>%
mutate(year=year(Dato)) %>%
group_by(year, Dyp) %>%
summarize(ann_mean = mean(Kal_temperatur)) %>%
ggplot(aes(x=year, y=ann_mean, group=Dyp, color=Dyp)) + geom_line()
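# A minimal sketch of the missing per-year coverage filter flagged above,
# assuming roughly monthly sampling; the 12-observations-per-year expectation
# and the 50% threshold are illustrative choices, not values from the source.
min_frac <- 0.5
data %>%
  filter(Kal_temperatur > 1.5) %>%
  mutate(year = year(Dato)) %>%
  group_by(year, Dyp) %>%
  summarize(n_obs = n(), ann_mean = mean(Kal_temperatur)) %>%
  filter(n_obs >= min_frac * 12) %>%
  ggplot(aes(x = year, y = ann_mean, group = Dyp, color = Dyp)) + geom_line()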
| path: /read_data.R | license_type: no_license | repo_name: yasureyoubetchya/IngoyStation | language: R | is_vendor: false | is_generated: false | length_bytes: 1,298 | extension: r |
obsolete <- function()
{
library(pheatmap)
pheatmap(rxc,
color = col,
legend = TRUE,
main = "",
angle_col = "315",
filename = "INF1_pQTL_immune_qtl.png",
width = 17,
height = 11,
treeheight_row = 100,
treeheight_col = 100,
cellheight = 20,
cellwidth = 20,
fontsize_row = 14,
fontsize = 13)
library(highcharter)
fntltp <- JS("function(){
return this.series.xAxis.categories[this.point.x] + ' ' +
this.series.yAxis.categories[this.point.y] + ':<br>' +
Highcharts.numberFormat(this.point.value, 2);
}")
hc <- data.frame()
i <- 1
for(cn in colnames(tbl)) for(rn in rownames(tbl)) {
hc[i,c("f1","f2","v")] <- c(cn,rn,tbl[rn,cn])
i <- i + 1
}
n <- 4
stops <- data.frame(
q = 0:n/n,
c = c("#4287f5","grey","#ffffff","grey","#e32222"),
stringsAsFactors = FALSE
)
hc$f1 <- as.factor(hc$f1)
hc$f2 <- as.factor(hc$f2)
f1 <- levels(hc$f1)
highchart() %>%
hc_title(text = "pQTLs and Immune-related Diseases",align="center")%>%
hc_xAxis(categories = f1) %>%
hc_yAxis(categories = hc$f2, reversed = TRUE)%>%
hc_colorAxis(min = -1, max=1, stops=list_parse2(stops)) %>%
hc_legend(align = "right",layout = "vertical",
margin = 0,verticalAlign = "top",
y = 30,symbolHeight = 200) %>%
hc_tooltip(formatter = fntltp) %>%
hc_add_series(data = hc, type = "heatmap",
hcaes(x = f1,y = f2,value = v),
dataLabels = list(enabled = FALSE))
library(gap)
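# inv_chr_pos_a1_a2() from gap decomposes MarkerName (chr:pos_a1_a2) back into its components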
aux <- with(with(mat, cbind(inv_chr_pos_a1_a2(MarkerName)[c("chr","pos")],rsid,Allele1,Allele2,prots,HLA,cistrans,efoTraits,qtl_direction)), {
flag <- (HLA==1)
# a bit too specific here, as these markers involve many proteins; nevertheless each combination shares the same effect allele
Allele1[8:13] <- "T"
Allele2[8:13] <- "C"
Allele1[25] <- "C"
Allele2[25] <- "G"
colId <- paste0(substr(chr,4,5),":",pos,"(",Allele1,"/",Allele2,")")
colId[flag] <- paste0(colId[flag],"*")
colLabel <- paste0(colId," (",prots,")")
col <- rep("blue",nrow(mat))
col[cistrans=="cis"] <- "red"
data.frame(colLabel,col,efoTraits,qtl_direction)
})
Col <- unique(aux[c("colLabel","col")])
rownames(Col) <- with(Col,colLabel)
RXC <- with(aux,table(efoTraits,colLabel))
indices <- aux[c("efoTraits","colLabel","qtl_direction")]
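# overwrite the 0/1 presence counts in RXC with the signed qtl_direction wherever one is recorded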
for(cn in colnames(RXC)) for(rn in rownames(RXC)) {
s <- subset(indices,efoTraits==rn & colLabel==cn)
qd <- s[["qtl_direction"]]
if(length(qd)>1) stop("duplicates")
class(qd) <- "numeric"
if(nrow(s)>0 && !is.na(qd[1])) RXC[rn,cn] <- qd[1]
}
library(gplots)
png("INF1_pQTL_immune_gplots.png",height=35,width=40,units="cm",res=300)
heatmap.2(RXC, scale = "none", keysize=0.8, col = colorpanel(5, "blue", "white", "red"), margin=c(20,20), trace = "none",
colCol=Col[colnames(RXC),"col"], dendrogram="none", density.info = "none", srtCol=45)
dev.off()
efo_list_immune <- subset(read.csv("work/efo_list_annotated.csv",as.is=TRUE),immune_mediated==1)
isd1 <- merge(aggr,subset(ps,efo%in%with(efo_list_immune,EFO)),by="hg19_coordinates")
write.table(isd1,file="isd1.tsv",row.names=FALSE,quote=FALSE,sep="\t")
load(file.path(INF,"files","efo.rda"))
efo_0000540 <- gsub(":","_",as.data.frame(isd)[["efo_0000540"]])
isd2 <- merge(aggr,subset(ps,efo%in%efo_0000540),by="hg19_coordinates")
write.table(isd2,file="isd2.tsv",row.names=FALSE,quote=FALSE,sep="\t")
fang_efo <- gsub(":","_",with(read.delim("doc/fang.efos.txt",as.is=TRUE),id))
isd3 <- merge(aggr,subset(ps,efo%in%fang_efo),by="hg19_coordinates")
write.table(isd3,file="isd3.tsv",row.names=FALSE,quote=FALSE,sep="\t")
# A test of colorRampPalette
YlOrBr <- c("#4287f5","grey","#ffffff","grey","#e32222")
filled.contour(volcano,color.palette = colorRampPalette(YlOrBr, space = "Lab"), asp = 1)
# Colouring for the dendrogram
library(dendextend)
Rowv <- rxc %>% scale %>% dist %>% hclust %>% as.dendrogram %>%
set("branches_k_color", k = 3) %>% set("branches_lwd", 1.2) %>%
ladderize
Colv <- rxc %>% scale %>% t %>% dist %>% hclust %>% as.dendrogram %>%
set("branches_k_color", k = 2, value = c("orange", "blue")) %>%
set("branches_lwd", 1.2) %>%
ladderize
# stats
heatmap(scale(rxc), scale = "none")
heatmap(scale(rxc), Rowv = Rowv, Colv = Colv, scale = "none")
# gplots
heatmap.2(scale(rxc), scale = "none", col = bluered(100), Rowv = Rowv, Colv = Colv, trace = "none", density.info = "none")
options(width=200)
IL_12Bcis <- ieugwasr::phewas("rs10076557")
data.frame(IL_12Bcis)
}
| path: /rsid/pqtlGWAS-defunct.R | license_type: no_license | repo_name: jinghuazhao/INF | language: R | is_vendor: false | is_generated: false | length_bytes: 4,832 | extension: r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kinesis_operations.R
\name{kinesis_describe_limits}
\alias{kinesis_describe_limits}
\title{Describes the shard limits and usage for the account}
\usage{
kinesis_describe_limits()
}
\description{
Describes the shard limits and usage for the account.
}
\details{
If you update your account limits, the old limits might be returned for
a few minutes.
This operation has a limit of one transaction per second per account.
}
\section{Request syntax}{
\preformatted{svc$describe_limits()
}
}
\keyword{internal}
| path: /paws/man/kinesis_describe_limits.Rd | license_type: permissive | repo_name: johnnytommy/paws | language: R | is_vendor: false | is_generated: true | length_bytes: 585 | extension: rd |
context("kinesisvideo")
svc <- paws::kinesisvideo()
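# expect_error(expr, NA) asserts that expr runs without throwing an error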
test_that("describe_stream", {
expect_error(svc$describe_stream(), NA)
})
test_that("list_streams", {
expect_error(svc$list_streams(), NA)
})
test_that("list_streams", {
expect_error(svc$list_streams(MaxResults = 20), NA)
})
test_that("list_tags_for_stream", {
expect_error(svc$list_tags_for_stream(), NA)
})
| path: /paws/tests/testthat/test_kinesisvideo.R | license_type: permissive | repo_name: peoplecure/paws | language: R | is_vendor: false | is_generated: false | length_bytes: 374 | extension: r |
context("kinesisvideo")
svc <- paws::kinesisvideo()
test_that("describe_stream", {
expect_error(svc$describe_stream(), NA)
})
test_that("list_streams", {
expect_error(svc$list_streams(), NA)
})
test_that("list_streams", {
expect_error(svc$list_streams(MaxResults = 20), NA)
})
test_that("list_tags_for_stream", {
expect_error(svc$list_tags_for_stream(), NA)
})
|
#############################################################################
#
# XLConnect
# Copyright (C) 2010-2017 Mirai Solutions GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#############################################################################
#
# Tests around getting the bounding box coordinates from an Excel worksheet
#
# Author: Nicola Lambiase, Mirai Solutions GmbH
#
#############################################################################
test.workbook.getBoundingBox <- function() {
# Load existing test workbooks
wb.xls <- loadWorkbook(rsrc("resources/testWorkbookReadWorksheet.xls"), create = FALSE)
wb.xlsx <- loadWorkbook(rsrc("resources/testWorkbookReadWorksheet.xlsx"), create = FALSE)
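# expected bounding boxes: each column is c(top-left row, top-left column, bottom-right row, bottom-right column) for one sheet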
dim1 <- matrix(c(17, 6, 25, 9), dimnames = list(c(), c("Test5")))
dim2 <- matrix(c(17, 7, 24, 9), dimnames = list(c(), c("Test5")))
dim3 <- matrix(c(17, 7, 25, 9), dimnames = list(c(), c("Test5")))
dim4 <- matrix(c(17, 6, 24, 9), dimnames = list(c(), c("Test5")))
dim5 <- matrix(c(11, 6, 16, 9, 8, 4, 16, 7, 17, 6, 25, 9), ncol=3, dimnames = list(c(), c("Test1","Test4","Test5")))
# Checking the dimensions of a bounding box when no row/column is specified (*.xls)
res <- getBoundingBox(wb.xls, sheet = "Test5")
checkEquals(res, dim1)
# Checking the dimensions of a bounding box when no row/column is specified (*.xlsx)
res <- getBoundingBox(wb.xlsx, sheet = "Test5")
checkEquals(res, dim1)
# Checking the dimensions of a bounding box when start and end cells are specified (*.xls)
res <- getBoundingBox(wb.xls, sheet = "Test5", startRow=17, startCol=7, endRow=24, endCol=9)
checkEquals(res, dim2)
# Checking the dimensions of a bounding box when start and end cells are specified (*.xlsx)
res <- getBoundingBox(wb.xlsx, sheet = "Test5", startRow=17, startCol=7, endRow=24, endCol=9)
checkEquals(res, dim2)
# Checking the dimensions of a bounding box when start and end columns are specified (*.xls)
res <- getBoundingBox(wb.xls, sheet = "Test5", startCol=7, endCol=9)
checkEquals(res, dim3)
# Checking the dimensions of a bounding box when start and end columns are specified (*.xlsx)
res <- getBoundingBox(wb.xlsx, sheet = "Test5", startCol=7, endCol=9)
checkEquals(res, dim3)
# Checking the dimensions of a bounding box when only the end row is specified (*.xls)
res <- getBoundingBox(wb.xls, sheet = "Test5", endRow=24)
checkEquals(res, dim4)
# Checking the dimensions of a bounding box when only the end row is specified (*.xlsx)
res <- getBoundingBox(wb.xlsx, sheet = "Test5", endRow=24)
checkEquals(res, dim4)
# Checking the dimensions of bounding boxes when multiple sheets are specified (*.xls)
res <- getBoundingBox(wb.xls, sheet = c("Test1","Test4","Test5"))
checkEquals(res, dim5)
# Checking the dimensions of bounding boxes when multiple sheets are specified (*.xlsx)
res <- getBoundingBox(wb.xlsx, sheet = c("Test1","Test4","Test5"))
checkEquals(res, dim5)
}
| path: /inst/unitTests/runit.workbook.getBoundingBox.R | license_type: no_license | repo_name: GSuvorov/xlconnect | language: R | is_vendor: false | is_generated: false | length_bytes: 3,698 | extension: r |
library(FSelector)
library(e1071)  # provides svm, tune.svm and classAgreement used below
## generate prediction model (adapted from the old pathipred, 2015-10)
get.prediction.model <- function(prettyways.matrix, exp.design, type, k, filter_paths=F) {
## pre-process data
cat("Removing paths without variability...\n")
m <- t(remove.novar.paths(prettyways.matrix))
ed <- sapply(rownames(m), function(x) add.exp.info(x, exp.design, 1, 2, type))
m <- data.frame(m, ed)
## filter features with Correlation-based Feature Selection (CFS)
if(filter_paths) {
cat("Correlation-based Feature Selection...\n")
m.filtered <- cfs(ed ~ ., data=m)
m <- m[, c(m.filtered, "ed")]
}
## get prediction model
cat("Generating best model...\n")
if (type == "categorical") {
svm.type = "C-classification"
} else if (type == "continuous") {
svm.type = "eps-regression"
}
bestmod <- get.best.model(m, svm.type, k)
return(list("model"=bestmod, "input"=m))
}
## remove paths without variability -> equal value across samples
remove.novar.paths <- function(m) {
equalPaths <- apply(m, 1, function(x) { all(x == x[1]) })
varPaths <- m[!equalPaths, ]
return(varPaths)
}
## add experimental design info to activation values matrix
add.exp.info <- function(id, df, sample.indx, class.indx, type) {
idx <- df[, sample.indx] == id
res <- df[idx, ]
if (type == "categorical") {
return(as.character(res[class.indx]))
} else if (type == "continuous") {
return(as.numeric(res[class.indx]))
}
}
## generate best.model
get.best.model <- function(m, svm.type, k){
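# grid-search cost over 10^(1:2) and gamma over 10^(-6:-3), scored by k-fold cross-validation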
tune.svm.model <- tune.svm(ed ~ ., data=m, scale=T, type=svm.type, kernel="radial", cost=10^(1:2), gamma=10^(-6:-3), cross=k)
bestGamma <- tune.svm.model$best.parameters[[1]]
bestC <- tune.svm.model$best.parameters[[2]]
best.model <- svm(ed ~ ., data=m, scale=T, type=svm.type, kernel="radial", cost=bestC, gamma=bestGamma, cross=k)
return(best.model)
}
## Compute confusion matrix
get.confusion.mat <- function(svm.pred, best.model) {
acc <- table(pred = svm.pred, true = best.model$input[,"ed"])
acc.export <- as.data.frame.matrix(acc)
acc.export <- cbind(c("", rownames(acc.export)), c(colnames(acc.export)[1],acc.export[,1]), c(colnames(acc.export)[2],acc.export[,2]))
diag <- classAgreement(acc)$diag
kappa <- classAgreement(acc)$kappa
rand <- classAgreement(acc)$rand
crand <- classAgreement(acc)$crand
return(list("acc"=acc, "diag"=diag, "kappa"=kappa, "rand"=rand, "crand"=crand))
}
get.stats <- function(svm.pred, best.model, edtype){
conf.mat <- get.confusion.mat(svm.pred, best.model)
if (edtype == "categorical") {
sensitivity <- conf.mat[["acc"]][1,1]/sum(conf.mat[["acc"]][,1]) ## True Positives / Positive
specificity <- conf.mat[["acc"]][2,2]/sum(conf.mat[["acc"]][,2]) ## True Negatives / Negatives
PPV <- conf.mat[["acc"]][1,1]/sum(conf.mat[["acc"]][1,]) ## True Positives / Test Outcome Positive
NPV <- conf.mat[["acc"]][2,2]/sum(conf.mat[["acc"]][2,]) ## True Negatives / Test Outcome Negatives
FPR <- 1 - specificity
FNR <- 1 - sensitivity
LRP <- sensitivity/(1-specificity)
LRN <- (1 - sensitivity)/specificity
all.stats <- as.data.frame(list(statistic=c("Sensitivity", "Specificity", "Positive Predictive Value",
"Negative Predictive Value", "False Positive Rate", "False Negative Rate", "Likelihood Ratio Positive",
"Likelihood Ratio Negative", "Percentage of data points in the main diagonal",
"Percentage of data points in the main diagonal corrected for agreement by chance", "Rand index",
"Rand index corrected for agreement by chance", "Total Accuracy"),
value=c(sensitivity, specificity, PPV, NPV, FPR, FNR, LRP, LRN, conf.mat[["diag"]], conf.mat[["kappa"]], conf.mat[["rand"]], conf.mat[["crand"]],
best.model$model$tot.accuracy)))
} else if (edtype == "continuous") {
all.stats <- as.data.frame(list(statistic=c("Percentage of data points in the main diagonal",
"Percentage of data points in the main diagonal corrected for agreement by chance",
"Rand index", "Rand index corrected for agreement by chance", "Total Mean Squared Error"),
value=c(conf.mat[["diag"]], conf.mat[["kappa"]], conf.mat[["rand"]], conf.mat[["crand"]], best.model$model$tot.MSE)))
}
return(all.stats)
}
## best.model statistics
get.predmod.stats <- function(best.model, edtype) {
svm.pred <- predict(best.model[["model"]], best.model[["input"]], decision.values=F, probability=F, na.action=na.omit)
conf.mat <- get.confusion.mat(svm.pred, best.model)
stats <- get.stats(svm.pred, best.model, edtype)
return(stats)
}
predict.newdataset <- function(model, m, exp.design, type) {
ed <- sapply(rownames(m), function(x) add.exp.info(x, exp.design, 1, 2, type))
m <- data.frame(m, ed)
svm.pred <- predict(model, m, decision.values=FALSE, probability=FALSE, na.action=na.omit)
pred.res <- as.data.frame(cbind(names(svm.pred), as.character(svm.pred)))
names(pred.res) <- c("sample", "prediction")
return(list("svm_pred"=svm.pred, "pred_res"=pred.res))
}
save.pred.res <- function(model,model_stats,pretty_results,path_vals,pathigraphs,output.folder,filter.paths, effector, conf=0.05){
if(!file.exists(output.folder)){
dir.create(output.folder)
}
# pathipred
write.table(path_vals,file=paste0(output.folder,"/paths_vals.txt"),col.names=T,row.names=T,quote=F,sep="\t")
save(model, file=paste0(output.folder,"/model.RData"))
if(!is.null(model_stats)) write.table(model_stats,file=paste0(output.folder,"/model_stats.txt"),col.names=T,row.names=F,quote=F,sep="\t")
# CFS + cellmaps
if (filter.paths) {
features <- colnames(model[["input"]])[-length(colnames(model[["input"]]))]
write.table(features,file=paste0(output.folder,"/filtered_features_cfs.txt"),col.names=F,row.names=F,quote=F,sep="\t")
# CELLMAPS
if(!file.exists(paste0(output.folder, "/sifs4CellMaps/")))
dir.create(paste0(output.folder, "/sifs4CellMaps/"))
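# build a synthetic test-statistics table: CFS-selected features are flagged UP with tiny p-values
# so they stand out in the CellMaps export; everything else is DOWN and non-significant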
colors <- c(rep("UP", times=length(features)), rep("DOWN", times=length(rownames(path_vals))-length(features)))
pvals <- as.numeric(c(rep(0.0001, times=length(features)), rep(0.6, times=length(rownames(path_vals))-length(features))))
mywt <- data.frame(as.numeric(rep(0.3, times=length(rownames(path_vals)))), as.character(colors), as.numeric(rep(0.3, times=length(rownames(path_vals)))), pvals,stringsAsFactors=F)
colnames(mywt) <- c("p.value", "UP/DOWN", "statistic", "FDRp.value")
touse <- rownames(path_vals) %in% features
rownames(mywt) <- c(features, rownames(path_vals)[!touse])
comp_names <- sapply(strsplit(rownames(mywt),"__"),"[[",1)
for(pathway in names(pretty_results$by.path)){
this_comp <- mywt[which(comp_names==pathway),]
print(pathway)
write.attributes(this_comp, pathway, pathigraphs, paste0(output.folder, "/sifs4CellMaps/", pathway), moreatts=NULL, effector=effector,conf=conf)
}
dir.create(paste0(output.folder,"/report/"))
create.html.report2(pathigraphs,mywt,pretty_home,output.folder,effector=(decompose==F),template_name = "report_template.html",output_name = "network.html")
}
}
save.prednewdataset.res <- function(prediction,prediction_stats,pretty_results,path_vals,pathigraphs,output.folder){
if(!file.exists(output.folder)){
dir.create(output.folder)
}
write.table(path_vals,file=paste0(output.folder,"/path_vals.txt"),col.names=T,row.names=T,quote=F,sep="\t")
write.table(prediction,file=paste0(output.folder,"/prediction_results.txt"),col.names=T,row.names=F,quote=F,sep="\t")
write.table(prediction_stats,file=paste0(output.folder,"/prediction_stats.txt"),col.names=T,row.names=F,quote=F,sep="\t")
}
| path: /code_develop/predictor/predict.r | license_type: no_license | repo_name: martahidalgo/hpConstructor | language: R | is_vendor: false | is_generated: false | length_bytes: 8,087 | extension: r |
\name{sdfPLA}
\alias{sdfPLA}
\title{
Spectral density function for PLA
}
\description{
Computes the spectral density function for the PLA model with parameter a
at the Fourier frequencies, 2*Pi*j/n, j=1,...,[n/2],
where n is the length of the time series.
The evaluation is very fast. Bivariate interpolation and asymptotic approximation
are used.
}
\usage{
sdfPLA(a, n)
}
\arguments{
\item{a}{
PLA parameter
}
\item{n}{
length of time series
}
}
\details{
The details of the implementation are discussed in the accompanying vignette.
The parameter a should be in the interval (0,2).
series n should be greater than 2.
}
\value{
a vector of length [n/2] of the spectral density values.
}
\author{
A. I. McLeod and J. Veenstra
}
\seealso{
\code{\link{sdfFD}}
}
\examples{
sdfPLA(0.2, 100)
}
\keyword{ ts }
| path: /man/sdfPLA.Rd | license_type: no_license | repo_name: cran/FGN | language: R | is_vendor: false | is_generated: false | length_bytes: 864 | extension: rd |
context("Test all plots")
pmxClassHelpers <- test_pmxClass_helpers()
test_that("We can call all pmx_plot_xx with success", {
ctr <- pmxClassHelpers$ctr
pmx_plots <- ctr %>% plot_names()
pmx_function_plots <- sprintf("pmx_plot_%s", pmx_plots)
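# call each pmx_plot_* wrapper that exists in the package; pmx_plot_indiv has no
# generic wrapper, so fall back to pmx_plot_individual below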
res <- lapply(
pmx_function_plots,
function(fun) {
is_function <- exists(fun, where = "package:ggPMX", mode = "function")
if (is_function) {
do.call(fun, list(ctr = ctr))
} else {
if (fun == "pmx_plot_indiv") {
ctr %>% pmx_plot_individual(1)
}
}
}
)
expect_true(all(vapply(res, function(x) inherits(x, "gg") || is.null(x), TRUE)))
})
| path: /tests/testthat/test-pmx-all-plots.R | license_type: no_license | repo_name: csetraynor/ggPMX | language: R | is_vendor: false | is_generated: false | length_bytes: 660 | extension: r |
context("Test all plots")
pmxClassHelpers <- test_pmxClass_helpers()
test_that("We can call all pmx_plot_xx with success", {
ctr <- pmxClassHelpers$ctr
pmx_plots <- ctr %>% plot_names()
pmx_function_plots <- sprintf("pmx_plot_%s", pmx_plots)
res <- lapply(
pmx_function_plots,
function(fun) {
is_function <- exists(fun, where = "package:ggPMX", mode = "function")
if (is_function) {
do.call(fun, list(ctr = ctr))
} else {
if (fun == "pmx_plot_indiv") {
ctr %>% pmx_plot_individual(1)
}
}
}
)
expect_true(all(vapply(res, function(x) inherits(x, "gg") || is.null(x), TRUE)))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visTree.R
\name{visTree}
\alias{visTree}
\title{Visualize Recursive Partitioning and Regression Trees (rpart object)}
\usage{
visTree(object, main = "", submain = "", footer = "", direction = "UD",
fallenLeaves = FALSE, rules = TRUE, simplifyRules = TRUE,
shapeVar = "dot", shapeY = "square", colorVar = NULL, colorY = NULL,
colorEdges = "#8181F7", nodesFontSize = 16, edgesFontSize = 14,
edgesFontAlign = "horizontal", legend = TRUE, legendNodesSize = 22,
legendFontSize = 16, legendWidth = 0.1, legendNcol = 1,
legendPosition = "left", nodesPopSize = FALSE, minNodeSize = 15,
maxNodeSize = 30, highlightNearest = list(enabled = TRUE, degree =
list(from = 50000, to = 0), hover = FALSE, algorithm = "hierarchical"),
collapse = list(enabled = TRUE, fit = TRUE, resetHighlight = TRUE,
clusterOptions = list(fixed = TRUE, physics = FALSE)), updateShape = TRUE,
tooltipDelay = 500, digits = 3, height = "600px", width = "100\%",
export = TRUE)
}
\arguments{
\item{object}{\code{rpart}, rpart object}
\item{main}{For add a title. See \link{visNetwork}}
\item{submain}{For add a subtitle. See \link{visNetwork}}
\item{footer}{For add a footer. See \link{visNetwork}}
\item{direction}{\code{character}, The direction of the hierarchical layout.
The available options are: UD, DU, LR, RL. To simplify:
up-down, down-up, left-right, right-left. Default UD. See \link{visHierarchicalLayout}}
\item{fallenLeaves}{\code{boolean} leaf nodes at the bottom of the graph ? Default to FALSE}
\item{rules}{\code{boolean}, add rules in tooltips ? Default to TRUE}
\item{simplifyRules}{\code{boolean}, simplify rules writing}
\item{shapeVar}{\code{character}, shape for variables nodes See \link{visNodes}}
\item{shapeY}{\code{character}, shape for terminal nodes See \link{visNodes}}
\item{colorVar}{\code{character} colors to use, or a \code{data.frame} to set the color of variables. 2 columns :
\itemize{
\item{"variable"}{ : names of variables}
\item{"color"}{ : colors (in hexa). See examples}
}}
\item{colorY}{if classification tree : \code{character} colors to use or \code{data.frame} 2 columns :
\itemize{
\item{"modality"}{ : levels of Y}
\item{"color"}{ : colors (in hexa)}
}
if regression tree : \code{character}, 2 colors (min and max, in hexa)}
\item{colorEdges}{\code{character} color of edges, in hexa. Default to #8181F7}
\item{nodesFontSize}{\code{numeric}, size of labels of nodes. Default to 16}
\item{edgesFontSize}{\code{numeric}, size of labels of edges Default to 14}
\item{edgesFontAlign}{\code{character}, for edges only. Default to 'horizontal'. Possible options: 'horizontal' (Default),'top','middle','bottom'. See \link{visEdges}}
\item{legend}{\code{boolean}, add legend ? Default TRUE. \link{visLegend}}
\item{legendNodesSize}{\code{numeric}, size of nodes in legend. Default to 22}
\item{legendFontSize}{\code{numeric}, size of labels of nodes in legend. Default to 16}
\item{legendWidth}{\code{numeric}, legend width, between 0 and 1. Default 0.1}
\item{legendNcol}{\code{numeric}, number of columns in legend. Default 1}
\item{legendPosition}{\code{character}, one of "left" (Default) or "right"}
\item{nodesPopSize}{\code{boolean}, nodes sizes depends on population ? Default to FALSE}
\item{minNodeSize}{\code{numeric}, in case of \code{nodesPopSize}, minimum size of a node. Default to 15. Else, nodes size is minNodeSize + maxNodeSize / 2}
\item{maxNodeSize}{\code{numeric}, in case of \code{nodesPopSize}, maximum size of a node. Default to 30. Else, nodes size is minNodeSize + maxNodeSize / 2}
\item{highlightNearest}{\code{list}, Highlight nearest nodes. See \link{visOptions}}
\item{collapse}{\code{list}, collapse or not using double click on a node ? See \link{visOptions}}
\item{updateShape}{\code{boolean}, in case of collapse, update cluster node shape as terminal node ? Default to TRUE}
\item{tooltipDelay}{\code{numeric}, delay for tooltips in millisecond. Default 500}
\item{digits}{\code{numeric}, number of digits. Default to 3}
\item{height}{\code{character}, default to "600px"}
\item{width}{\code{character}, default to "100\%"}
\item{export}{\code{boolean}, add export button. Default to TRUE}
}
\value{
a visNetwork object
}
\description{
Visualize Recursive Partitioning and Regression Trees \code{rpart}. Have a look at \link{visTreeEditor} to edit and get back the network, or at \link{visTreeModuleServer} to use a custom tree module in R
}
\examples{
\dontrun{
library(rpart)
# Basic classification tree
res <- rpart(Species~., data=iris)
visTree(res, main = "Iris classification Tree")
# Basic regression tree
res <- rpart(Petal.Length~., data=iris)
visTree(res, edgesFontSize = 14, nodesFontSize = 16)
# Complex tree
data("solder")
res <- rpart(Opening~., data = solder, control = rpart.control(cp = 0.00005))
visTree(res, height = "800px", nodesPopSize = TRUE, minNodeSize = 10, maxNodeSize = 30)
# ----- Options
res <- rpart(Opening~., data = solder, control = rpart.control(cp = 0.005))
# fallen leaves + align edges label & size
visTree(res, fallenLeaves = TRUE, height = "500px",
edgesFontAlign = "middle", edgesFontSize = 20)
# disable rules in tooltip, and render tooltip faster
# disable hover highlight
visTree(res, rules = FALSE, tooltipDelay = 0,
highlightNearest = list(enabled = TRUE, degree = list(from = 50000, to = 0),
hover = FALSE, algorithm = "hierarchical"))
# Change color with data.frame
colorVar <- data.frame(variable = names(solder),
color = c("#339933", "#b30000","#4747d1","#88cc00", "#9900ff","#247856"))
colorY <- data.frame(modality = unique(solder$Opening),
color = c("#AA00AA", "#CDAD15", "#213478"))
visTree(res, colorEdges = "#000099", colorVar = colorVar, colorY = colorY)
# Change color with vector
visTree(res, colorEdges = "#000099",
colorVar = substring(rainbow(6), 1, 7),
colorY = c("blue", "green", "orange"))
}
}
\references{
See online documentation \url{http://datastorm-open.github.io/visNetwork/}
}
\seealso{
\link{visTreeEditor}, \link{visTreeModuleServer}, \link{visNetworkEditor}
}
| path: /man/visTree.Rd | license_type: no_license | repo_name: vanradd/visNetwork | language: R | is_vendor: false | is_generated: true | length_bytes: 6,142 | extension: rd |
## Performing k-fold cross-validation
library(randomForest)
library(rcdk)
library(ROCR)
##Reading the mutagen dataset
dat1<-read.csv("mutagen.txt",sep="\t",header=F)
smi <-lapply(as.character(dat1$V1),parse.smiles)
cmp.fp<-vector("list",nrow(dat1))
## generate fingerprints
for (i in 1:nrow(dat1)){
cmp.fp[i]<-lapply(smi[[i]][1],get.fingerprint,type="maccs")
}
##Convert fingerprints to matrix form
fpmac<-fp.to.matrix(cmp.fp)
cmp.finger<-as.data.frame(fpmac)
#Adding Outcome column and ID columns
train<-cbind(cmp.finger,dat1$V3,dat1$V2)
colnames(train)[168]<-"IDs"
colnames(train)[167]<-"Outcome"
k=10 #number of folds
n= floor(nrow(train)/k)
err.vect = rep(NA,k) # vector to hold the per-fold AUC scores
for (i in 1:k){
s1 <- ((i-1)* n+1)
s2 <- (i*n)
subset <- s1:s2
cv.train <- train[-subset,]
cv.test <- train [subset,]
#Change the model accordingly with your preference and see which model performs better
fit <- randomForest(x=cv.train[1:166],y= cv.train$Outcome,ntree = 500)
prediction <- predict(fit, newdata = cv.test[1:166], type = "prob")
pred.rf<-prediction(prediction[,2],cv.test$Outcome)
err.vect[i]<-performance(pred.rf,"auc")@y.values[[1]]
print(paste("AUC for the fold",i,":",err.vect[i]))
}
##Printing the mean AUC
print(paste("Average AUC:",mean(err.vect)))
| path: /crossvalidation.R | license_type: no_license | repo_name: nkkchem/CheminfoR | language: R | is_vendor: false | is_generated: false | length_bytes: 1,327 | extension: r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hr_.R
\name{hr_}
\alias{hr_}
\title{Create a horizontal rule}
\usage{
hr_(..., id = NULL, class = NULL, global = NULL)
}
\arguments{
\item{...}{a collection of HTML objects, textual
elements (as bare text or Markdown-styled text),
arbitrary attributes (given as named variables).}
\item{id}{a global attribute for specifying a
unique ID for an HTML element (the value must
be unique within the HTML document, though, this
is not currently enforced here). The primary use
of the \code{id} attribute is to point to a style
in a style sheet. The \code{id} is also used by
JavaScript (via the HTML DOM) to manipulate the
element with the specific \code{id}.
When constructing an \code{id}, there are a few
other things to note: (1) it must contain at
least one character, (2) it must not contain any
space characters, and (3) in HTML, all values are
case-insensitive.}
\item{class}{a global attribute for specifying
one or more classnames for an element. The
\code{class} attribute is primarily used in
pointing to a class in a style sheet. However,
this attribute can also be used by a JavaScript
(via the HTML DOM) to make changes to HTML
elements with a specified class. If providing
several classes, one can either use
\code{c("[classname1]", "[classname2]", ...)} or
a single string with space-separated class names.
When constructing class names, there are two
things to keep in mind: (1) they begin with a
letter (\code{A-Z} or \code{a-z}), and (2)
subsequent characters can be letters
(\code{A-Za-z}), digits (\code{0-9}), hyphens,
and underscores.}
\item{global}{provides an opportunity to supply
global attributes other than the \code{id} and
\code{class} attributes (which have their own
arguments). This is most easily accomplished via
the namesake \code{global()} function since it
allows for inline help and validation for each
of the global attributes. For example, setting
the global attributes \code{title} and
\code{lang} can be done by using this:
\code{global = global(lang = "en", title = "my_title")}.}
}
\description{
Allows for the creation of a horizontal rule,
which is used to signify a shift of topic or to
separate content within an HTML page.
}
|
/man/hr_.Rd
|
permissive
|
rich-iannone/hyper
|
R
| false | true | 2,254 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hr_.R
\name{hr_}
\alias{hr_}
\title{Create a horizontal rule}
\usage{
hr_(..., id = NULL, class = NULL, global = NULL)
}
\arguments{
\item{...}{a collection of HTML objects, textual
elements (as bare text or Markdown-styled text),
arbitrary attributes (given as named variables).}
\item{id}{a global attribute for specifying a
unique ID for an HTML element (the value must
be unique within the HTML document, though, this
is not currently enforced here). The primary use
of the \code{id} attribute is to point to a style
in a style sheet. The \code{id} is also used by
JavaScript (via the HTML DOM) to manipulate the
element with the specific \code{id}.
When constructing an \code{id}, there are a few
other things to note: (1) it must contain at
least one character, (2) it must not contain any
space characters, and (3) in HTML, all values are
case-insensitive.}
\item{class}{a global attribute for specifying
one or more classnames for an element. The
\code{class} attribute is primarily used in
pointing to a class in a style sheet. However,
this attribute can also be used by a JavaScript
(via the HTML DOM) to make changes to HTML
elements with a specified class. If providing
several classes, one can either use
\code{c("[classname1]", "[classname2]", ...)} or
a single string with space-separated class names.
When constructing class names, there are two
things to keep in mind: (1) they begin with a
letter (\code{A-Z} or \code{a-z}), and (2)
subsequent characters can be letters
(\code{A-Za-z}), digits (\code{0-9}), hyphens,
and underscores.}
\item{global}{provides an opportunity to supply
global attributes other than the \code{id} and
\code{class} attributes (which have their own
arguments). This is most easily accomplished via
the namesake \code{global()} function since it
allows for inline help and validation for each
of the global attributes. For example, setting
the global attributes \code{title} and
\code{lang} can be done by using this:
\code{global = global(lang = "en", title = "my_title")}.}
}
\description{
Allows for the creation of a horizontal rule,
which is used to signify a shift of topic or to
separate content within an HTML page.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_region_from_coordinates.R
\name{get_region_from_coordinates}
\alias{get_region_from_coordinates}
\title{Return the geographic region of a set of coordinates according to administrative boundaries.}
\usage{
get_region_from_coordinates(obj, shapefile = "NUTS0",
path_to_shapefile = NULL)
}
\arguments{
\item{obj}{A data frame of coordinates stored using lat/lon coordinates. See Details section for further details.}
\item{shapefile}{The shapefile to be used to aggregate the grid points.}
\item{path_to_shapefile}{Optional path to a user-supplied shapefile, for cases not covered by the pre-defined options.}
}
\value{
A vector containing the \code{shapefile_id_field} of the region each input coordinate falls in.
}
\description{
The function determines in which region each coordinate of a set (stored in a \code{data.frame}) lies.
The administrative boundaries can be chosen from a set of pre-defined shapefiles.
}
\details{
\code{obj} must be a data frame containing two variables:
1. Latitude: a double in the range -90, 90 named as lat, Lat, latitude, latit, etc.
2. Longitude: a double in the range -180, 180 named as lon, Lon, longitude, long, etc.
The shapefiles available are the following:
\itemize{
\item \code{NUTS0-2}: Data from EUROSTAT NUTS (small islands removed) https://ec.europa.eu/eurostat/web/gisco/geodata/reference-data/administrative-units-statistical-units/nutscountries_EU
\item \code{eh2050}: cluster as defined in the FP7 e-Highway2050 project
\item \code{hybas05}: HydroBASINS Level 5 (http://www.hydrosheds.org/page/hydrobasins)
\item \code{hybas06}: HydroBASINS Level 6 (http://www.hydrosheds.org/page/hydrobasins)
\item \code{WAPP}: WAPP catchments from the JRC LISFLOOD hydrological model
}
}
\author{
Matteo De Felice
}
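% Illustrative example added in editing; the coordinates are hypothetical.
\examples{
coords <- data.frame(lat = c(45.1, 52.5), lon = c(7.7, 13.4))
get_region_from_coordinates(coords, shapefile = "NUTS0")
}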
|
/man/get_region_from_coordinates.Rd
|
no_license
|
matteodefelice/panas
|
R
| false | true | 1,683 |
rd
|
library(data.table)
library(xgboost)
library(Metrics)
library(Matrix)
library(mice)
library(plyr)
library(dplyr)
library(tidyr)
library(ggplot2)
#set working directory
setwd('/users/thesmithfamily/desktop/kaggle/ames')
#define submission file for later
SUBMISSION = "/users/thesmithfamily/desktop/kaggle/ames/sample_submission.csv"
#load data
train <- read.csv("train.csv")
test <- read.csv("test.csv")
#Row binding train & test set for feature engineering
train_test = bind_rows(train, test)
#optionally remove houses with more than 4000 square feet, as recommended by the dataset creator (left commented out below), https://ww2.amstat.org/publications/jse/v19n3/decock.pdf
#train_test <- train_test[which(train_test$GrLivArea < 4000),]
#train <- train[which(train$GrLivArea < 4000),]
#test <- test[which(test$GrLivArea < 4000),]
#set number of rows in training set
ntrain = nrow(train)
#set variable to be predicted
y_train <- train$SalePrice
#plot Sale Prices to get a feel for the data
hist(y_train, breaks = 100, xlim = c(30000, 800000), xlab = "SalePrice")
#taking the log of SalePrice appears to fix the skewness of the data
hist(log(y_train), breaks = 100, xlab = "log(SalePrice)")
#recode y_train to log(y_train)
y_train <- log(y_train)
#Remove Id since of no use
train$Id = NULL
train$SalePrice = NULL
test$Id = NULL
#graph distributions of continuous variables to check for normality
hist(train_test$MasVnrArea, breaks = 100)
hist(log(train_test$MasVnrArea), breaks = 100)
hist(train_test$BsmtFinSF1, breaks = 100)
hist(log(train_test$BsmtFinSF1), breaks = 100)
hist(train_test$BsmtFinSF2, breaks = 100)
hist(log(train_test$BsmtFinSF2), breaks = 100)
hist(train_test$BsmtUnfSF, breaks = 100)
hist(log(train_test$BsmtUnfSF), breaks = 100)
hist(train_test$TotalBsmtSF, breaks = 100)
hist(log(train_test$TotalBsmtSF), breaks = 100)
#hist(train_test$X1stFlrSF, breaks = 100) - looks good, no transformation needed
hist(train_test$X2ndFlrSF, breaks = 100)
hist(log(train_test$X2ndFlrSF), breaks = 100)
hist(train_test$GrLivArea, breaks = 100)
hist((train_test$GrLivArea)**(1/3), breaks = 100)
hist(train_test$GarageArea, breaks = 100)
hist(log(train_test$GarageArea), breaks = 100)
hist(train_test$WoodDeckSF, breaks = 100)
hist(log(train_test$WoodDeckSF), breaks = 100)
hist(train_test$OpenPorchSF, breaks = 100)
hist(log(train_test$OpenPorchSF), breaks = 100)
hist(train_test$EnclosedPorch, breaks = 100)
hist(log(train_test$EnclosedPorch), breaks = 100)
hist(train_test$ScreenPorch, breaks = 100)
hist(log(train_test$ScreenPorch), breaks = 100)
hist(train_test$LotFrontage, breaks = 100)
hist(log(train_test$LotFrontage), breaks = 100)
hist(train_test$LotArea, breaks = 100)
hist(log(train_test$LotArea), breaks = 100)
#create new variables
#create a total SF variable from other Square Footage variables
train_test$TotalSF = rowSums(cbind(train_test$TotalBsmtSF, train_test$X1stFlrSF, train_test$X2ndFlrSF))
attach(train_test) #note: attach() snapshots train_test here; bare column names below read from this snapshot
#create a dummy variable for the three most expensive neighborhoods
train_test$NeighborhoodDummy <- ifelse(Neighborhood %in% c("NoRidge", "NridgHt", "Somerst"), 1, 0)
#recode MSSubClass from integer codes to string labels (e.g. 20 -> "SC20");
#the resulting character column is label-encoded further below
train_test$MSSubClass <- paste0("SC", train_test$MSSubClass)
#recode NA for Alley to "None"
AlleyLevels <- levels(train_test$Alley)
AlleyLevels[length(AlleyLevels) + 1] <- "None"
train_test$Alley <- factor(train_test$Alley, levels = AlleyLevels)
train_test$Alley[is.na(train_test$Alley)] <- "None"
#recode NA for Fence to "None"
FenceLevels <- levels(train_test$Fence)
FenceLevels[length(FenceLevels) + 1] <- "None"
train_test$Fence <- factor(train_test$Fence, levels = FenceLevels)
train_test$Fence[is.na(train_test$Fence)] <- "None"
#recode NA for BsmtCond to "None"
BsmtCondLevels <- levels(train_test$BsmtCond)
BsmtCondLevels[length(BsmtCondLevels) + 1] <- "None"
train_test$BsmtCond <- factor(train_test$BsmtCond, levels = BsmtCondLevels)
train_test$BsmtCond[is.na(train_test$BsmtCond)] <- "None"
#recode NA for BsmtExposure to "None"
BsmtExposureLevels <- levels(train_test$BsmtExposure)
BsmtExposureLevels[length(BsmtExposureLevels) + 1] <- "None"
train_test$BsmtExposure <- factor(train_test$BsmtExposure, levels = BsmtExposureLevels)
train_test$BsmtExposure[is.na(train_test$BsmtExposure)] <- "None"
#recode NA for BsmtFinType1 to "None"
BsmtFinType1Levels <- levels(train_test$BsmtFinType1)
BsmtFinType1Levels[length(BsmtFinType1Levels) + 1] <- "None"
train_test$BsmtFinType1 <- factor(train_test$BsmtFinType1, levels = BsmtFinType1Levels)
train_test$BsmtFinType1[is.na(train_test$BsmtFinType1)] <- "None"
#recode NA for BsmtFinType2 to "None"
BsmtFinType2Levels <- levels(train_test$BsmtFinType2)
BsmtFinType2Levels[length(BsmtFinType2Levels) + 1] <- "None"
train_test$BsmtFinType2 <- factor(train_test$BsmtFinType2, levels = BsmtFinType2Levels)
train_test$BsmtFinType2[is.na(train_test$BsmtFinType2)] <- "None"
#recode NA for FireplaceQu to "None"
FireplaceQuLevels <- levels(train_test$FireplaceQu)
FireplaceQuLevels[length(FireplaceQuLevels) + 1] <- "None"
train_test$FireplaceQu <- factor(train_test$FireplaceQu, levels = FireplaceQuLevels)
train_test$FireplaceQu[is.na(train_test$FireplaceQu)] <- "None"
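#the six recode blocks above repeat one pattern; the helper below is a sketch
#(not part of the original script) expressing the same NA -> "None" recode once
recode_na_to_none <- function(x) {
  x <- factor(x, levels = c(levels(x), "None"))
  x[is.na(x)] <- "None"
  x
}
#e.g. train_test$Alley <- recode_na_to_none(train_test$Alley)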
#replace NA with 0 where it makes sense
train_test$MasVnrArea[is.na(train_test$MasVnrArea)] <- 0
#create transformation variables
train_test$MasVnrArea <- log(train_test$MasVnrArea)
train_test$BsmtFinSF1 <- log(train_test$BsmtFinSF1)
train_test$BsmtFinSF2 <- log(train_test$BsmtFinSF2)
train_test$BsmtUnfSF <- log(train_test$BsmtUnfSF)
train_test$TotalBsmtSF <- log(train_test$TotalBsmtSF)
train_test$X2ndFlrSF <- log(train_test$X2ndFlrSF)
train_test$GrLivArea <- log(train_test$GrLivArea)
train_test$GarageArea <- log(train_test$GarageArea)
train_test$WoodDeckSF <- log(train_test$WoodDeckSF)
train_test$OpenPorchSF <- log(train_test$OpenPorchSF)
train_test$EnclosedPorch <- log(train_test$EnclosedPorch)
train_test$ScreenPorch <- log(train_test$ScreenPorch)
train_test$LotFrontage <- log(train_test$LotFrontage)
train_test$LotArea <- log(train_test$LotArea)
#HeatingQC - dummy for ExAndGd/Not
train_test$HeatingQCDummy <- ifelse(HeatingQC %in% c("Ex", "Gd"), 1, 0)
#SaleCondition - dummy for Normal/Not
train_test$SaleConditionDummy <- ifelse(SaleCondition == "Normal", 1, 0)
#Condition1 - dummy for Norm/NotNorm
train_test$Condition1Dummy <- ifelse(Condition1 == "Norm", 1, 0)
#Foundation - dummy for PConc/NotPConc
train_test$FoundationDummy <- ifelse(Foundation == "PConc", 1, 0)
#ExterCond - dummy for ExAndGd/NotExAndGd
train_test$ExterCondDummy <- ifelse(ExterCond %in% c("Ex", "Gd"), 1, 0)
#LandContour - dummy variable for Lvl/notLvl
train_test$LandContourDummy <- ifelse(LandContour == "Lvl", 1, 0)
#YearRemodAdd - dummy variable for remodel within the past year of selling
train_test$YearRemodAddDummy <- ifelse(YearRemodAdd == YrSold, 1, 0)
#NewHouseDummy - dummy variable for whether a house was built in the year it sold
train_test$NewHouseDummy <- ifelse(YearBuilt == YrSold, 1, 0)
#HouseAge - variable representing the age of the house when it sold
train_test$HouseAge <- train_test$YrSold - train_test$YearBuilt
#GarageAge - variable representing the age of the garage when the house was sold
train_test$GarageAge <- train_test$YrSold - train_test$GarageYrBlt
#TimeRemod - variable representing number of years since last remodel when the house sold
train_test$TimeRemod <- train_test$YrSold - train_test$YearRemodAdd
#IsRemod - dummy variable representing whether there has been a remodel on the house
train_test$IsRemod <- ifelse(train_test$YearBuilt == train_test$YearRemodAdd, 0, 1)
#NumBath - variable representing the total number of bathrooms
train_test$NumBath <- (0.5 * train_test$HalfBath) + (0.5 * train_test$BsmtHalfBath) +
train_test$FullBath + train_test$BsmtFullBath
#NumRooms - variable representing the total number of rooms + bathrooms
train_test$NumRooms <- train_test$TotRmsAbvGrd + train_test$FullBath + train_test$HalfBath
#polynomials of top continuous features according to gain on importance model
train_test$TotalSF2 <- train_test$TotalSF**2
train_test$TotalSF3 <- train_test$TotalSF**3
train_test$TotalSFsqrt <- sqrt(train_test$TotalSF)
train_test$X2ndFlrSF2 <- train_test$X2ndFlrSF**2
train_test$X2ndFlrSF3 <- train_test$X2ndFlrSF**3
train_test$X2ndFlrSFsqrt <- sqrt(train_test$X2ndFlrSF)
train_test$GarageArea2 <- train_test$GarageArea**2
train_test$GarageArea3 <- train_test$GarageArea**3
train_test$GarageAreasqrt <- sqrt(train_test$GarageArea)
train_test$X1stFlrSF2 <- train_test$X1stFlrSF**2
train_test$X1stFlrSF3 <- train_test$X1stFlrSF**3
train_test$X1stFlrSFsqrt <- sqrt(train_test$X1stFlrSF)
train_test$LotFrontage2 <- train_test$LotFrontage**2
train_test$LotFrontage3 <- train_test$LotFrontage**3
train_test$LotFrontagesqrt <- sqrt(train_test$LotFrontage)
#get names of all variables in full data set
features=names(train_test)
#convert character into integer
for(f in features){
if(class(train_test[[f]]) == "character"){
levels = sort(unique(train_test[[f]]))
train_test[[f]] = as.integer(factor(train_test[[f]],levels = levels))
}
}
#features to exclude, Utilities, Electrical, PoolQC, and MiscFeature excluded due to low variance
#SalePrice excluded to allow XGBoost to work properly
features_to_drop <- c("Utilities","Electrical","PoolQC","MiscFeature",
"SalePrice")
#splitting whole data back again minus the dropped features
train_x = train_test[1:ntrain, !(features %in% features_to_drop)]
test_x = train_test[(ntrain+1):nrow(train_test), !(features %in% features_to_drop)]
#convert into numeric for XGBoost implementation
train_x[] <- lapply(train_x, as.numeric)
test_x[] <- lapply(test_x, as.numeric)
#replace -Inf values (produced by log(0)) with 0
train_x <- do.call(data.frame,lapply(train_x, function(x) replace(x, is.infinite(x), 0)))
test_x <- do.call(data.frame,lapply(test_x, function(x) replace(x, is.infinite(x), 0)))
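#alternative sketch (not applied here): log1p() maps 0 to 0 and would avoid
#producing -Inf for zero-valued areas in the first place, e.g.
#train_test$MasVnrArea <- log1p(train_test$MasVnrArea)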
#missing values imputation with mice
set.seed(256)
to_impute <- as.data.frame(test_x)
impute <- to_impute[c("MSZoning","Exterior1st","Exterior2nd","BsmtFinSF1",
"BsmtFinSF2","BsmtUnfSF","TotalBsmtSF","BsmtFullBath","BsmtHalfBath",
"KitchenQual","Functional","GarageCars","GarageArea","SaleType","TotalSF",
"GarageFinish","BsmtQual","GarageCond","GarageQual","GarageYrBlt",
"GarageType","LotFrontage","NumBath")]
#specify package complete is in to avoid confusion with tidyr
imputed <- mice::complete(mice(impute,m=5))
to_impute$MSZoning=imputed$MSZoning
to_impute$Exterior1st=imputed$Exterior1st
to_impute$Exterior2nd=imputed$Exterior2nd
to_impute$BsmtFinSF1=imputed$BsmtFinSF1
to_impute$BsmtFinSF2=imputed$BsmtFinSF2
to_impute$BsmtUnfSF=imputed$BsmtUnfSF
to_impute$TotalBsmtSF=imputed$TotalBsmtSF
to_impute$BsmtHalfBath=imputed$BsmtHalfBath
to_impute$BsmtFullBath=imputed$BsmtFullBath
to_impute$KitchenQual=imputed$KitchenQual
to_impute$Functional=imputed$Functional
to_impute$GarageCars=imputed$GarageCars
to_impute$GarageArea=imputed$GarageArea
to_impute$SaleType=imputed$SaleType
to_impute$TotalSF=imputed$TotalSF
to_impute$GarageFinish=imputed$GarageFinish
to_impute$BsmtQual=imputed$BsmtQual
to_impute$GarageCond=imputed$GarageCond
to_impute$GarageQual=imputed$GarageQual
to_impute$GarageYrBlt=imputed$GarageYrBlt
to_impute$GarageType=imputed$GarageType
#GarageAge was not in the test impute list; recompute it from the imputed GarageYrBlt
to_impute$GarageAge=to_impute$YrSold-imputed$GarageYrBlt
to_impute$LotFrontage=imputed$LotFrontage
to_impute$NumBath=imputed$NumBath
test_x = as.data.table(to_impute)
to_impute <- as.data.frame(train_x)
impute <- to_impute[c("MSZoning","Exterior1st","Exterior2nd","BsmtFinSF1",
"BsmtFinSF2","BsmtUnfSF","TotalBsmtSF","BsmtFullBath","BsmtHalfBath",
"KitchenQual","Functional","GarageCars","GarageArea","SaleType","TotalSF",
"GarageFinish","BsmtQual","GarageCond","GarageQual","GarageYrBlt",
"GarageType","LotFrontage","NumBath","GarageAge","MasVnrType")]
#specify package complete is in to avoid confusion with tidyr
imputed <- mice::complete(mice(impute,m=5))
to_impute$MSZoning=imputed$MSZoning
to_impute$Exterior1st=imputed$Exterior1st
to_impute$Exterior2nd=imputed$Exterior2nd
to_impute$BsmtFinSF1=imputed$BsmtFinSF1
to_impute$BsmtFinSF2=imputed$BsmtFinSF2
to_impute$BsmtUnfSF=imputed$BsmtUnfSF
to_impute$TotalBsmtSF=imputed$TotalBsmtSF
to_impute$BsmtHalfBath=imputed$BsmtHalfBath
to_impute$BsmtFullBath=imputed$BsmtFullBath
to_impute$KitchenQual=imputed$KitchenQual
to_impute$Functional=imputed$Functional
to_impute$GarageCars=imputed$GarageCars
to_impute$GarageArea=imputed$GarageArea
#the polynomial terms were not imputed directly; recompute them from the imputed base columns
to_impute$GarageArea2=imputed$GarageArea**2
to_impute$GarageArea3=imputed$GarageArea**3
to_impute$SaleType=imputed$SaleType
to_impute$TotalSF=imputed$TotalSF
to_impute$TotalSF2=imputed$TotalSF**2
to_impute$TotalSF3=imputed$TotalSF**3
to_impute$TotalSFsqrt=sqrt(imputed$TotalSF)
to_impute$GarageFinish=imputed$GarageFinish
to_impute$BsmtQual=imputed$BsmtQual
to_impute$GarageCond=imputed$GarageCond
to_impute$GarageQual=imputed$GarageQual
to_impute$GarageYrBlt=imputed$GarageYrBlt
to_impute$GarageType=imputed$GarageType
to_impute$GarageAge=imputed$GarageAge
to_impute$LotFrontage=imputed$LotFrontage
to_impute$LotFrontage2=imputed$LotFrontage**2
to_impute$LotFrontage3=imputed$LotFrontage**3
to_impute$LotFrontagesqrt=sqrt(imputed$LotFrontage)
to_impute$NumBath=imputed$NumBath
to_impute$MasVnrType=imputed$MasVnrType
train_x = as.data.table(to_impute)
train_x <- scale(train_x)
test_x <- scale(test_x)
#create xgb.DMatrix objects
dtrain = xgb.DMatrix(as.matrix(train_x), label = y_train, missing = NaN)
dtest = xgb.DMatrix(as.matrix(test_x), missing = NaN)
#custom grid search of parameter tuning
searchGridSubCol <- expand.grid(subsample = c(0.5, 0.75, 1),
colsample_bytree = c(0.4, 0.6, 0.8, 1),
max_depth = c(4, 6, 8, 10))
ntrees <- 1500
#alias the already-built training DMatrix for use inside the grid search
DMMatrixTrain <- dtrain
rmseErrorsHyperparameters <- apply(searchGridSubCol, 1, function(parameterList){
#browser() here for debugging
#Extract Parameters to test
currentSubsampleRate <- parameterList[["subsample"]]
currentColsampleRate <- parameterList[["colsample_bytree"]]
currentMax_depth <- parameterList[["max_depth"]]
xgboostModelCV <- xgb.cv(data = DMMatrixTrain, nrounds = ntrees, nfold = 5, showsd = TRUE,
metrics = "rmse", verbose = TRUE, eval_metric = "rmse",
objective = "reg:linear", max_depth = currentMax_depth, eta = 0.02,
subsample = currentSubsampleRate, colsample_bytree = currentColsampleRate)
#Save rmse of the last iteration
rmse <- tail(xgboostModelCV$evaluation_log$test_rmse_mean, 1)
return(c(rmse, currentSubsampleRate, currentColsampleRate, currentMax_depth))
})
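#convenience step (a sketch, not in the original script): pick the best
#hyperparameter combination programmatically instead of reading it off the plot
best <- which.min(rmseErrorsHyperparameters[1, ])
rmseErrorsHyperparameters[, best] #rmse, subsample, colsample_bytree, max_depth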
# create line graph of tested parameters
x <- c(1:48)
y <- rmseErrorsHyperparameters[1,]
plot(x, y, type="b", xlab = "Iteration", ylab = "Mean Test RMSE")
#iteration 11 had the lowest mean test RMSE,
#so subsample = 0.75, colsample_bytree = 1, max_depth = 4
# set best tested parameters for xgboost
xgb_params_1 = list(
objective = "reg:linear",
eta = 0.02, # learning rate
max_depth = 4, # max tree depth
eval_metric = "rmse", # evaluation/loss metric
subsample = 0.75, # best tested value
colsample_bytree = 1 # best tested value
)
# fit the model with the parameters specified above
xgb_1 = xgboost(data = dtrain,
params = xgb_params_1,
nrounds = 5000, # max number of trees to build
verbose = TRUE, # will print performance information
print_every_n = 1, # will print all messages
early_stopping_rounds = 10 # stop if no improvement within 10 trees
)
# cross-validate xgboost to get an accurate measure of error (RMSE)
xgb_cv_1 = xgb.cv(params = xgb_params_1,
data = dtrain,
nrounds = 5000,
nfold = 5, # randomly partition original dataset into 5 equal size subsamples
prediction = TRUE, # return the prediction using the final model
showsd = TRUE, # standard deviation of loss across folds
stratified = TRUE, # sample is unbalanced; use stratified sampling
verbose = TRUE,
print_every_n = 1,
early_stopping_rounds = 10
)
# plot the rmse for the training and testing samples
xgb_cv_1$evaluation_log %>%
select(-contains("std")) %>%
select(-contains("iter")) %>%
mutate(IterationNum = 1:n()) %>%
gather(TestOrTrain, rmse, -IterationNum) %>%
ggplot(aes(x = IterationNum, y = rmse, group = TestOrTrain, color = TestOrTrain),
ylim = c(0,12)) +
geom_line() +
theme_bw()
#predict on the test set and write the submission to a CSV file
submission = fread(SUBMISSION, colClasses = c("integer","numeric"))
submission$SalePrice = predict(xgb_1, dtest)
submission$SalePrice = exp(submission$SalePrice)
write.csv(submission,"xgb_1.csv", row.names = FALSE)
#read in prediction for graphing training SalePrice vs predicted SalePrice
xgbModel_1 <- read.csv("xgb_1.csv")
range(xgbModel_1$SalePrice)
plot(log(xgbModel_1$SalePrice), ylim = c(10.5,13.5))
plot(y_train, ylim = c(10.5,13.5))
y_train_df <- data.frame(y_train)
y_pred_df <- data.frame(log(xgbModel_1$SalePrice))
names(y_pred_df)[names(y_pred_df) == "log.xgbModel_1.SalePrice."] <- "y_train"
combined <- rbind(y_train_df, y_pred_df)
plot.ts(combined$y_train, main = "xgbModel_1")
#find importance of variables
model <- xgb.dump(xgb_1, with_stats = T)
#get the feature names
names <- dimnames(data.matrix(train_x[,-1]))[[2]]
#compute the feature importance matrix
importance_matrix <- xgb.importance(names, model = xgb_1)
#graph the importance
xgb.plot.importance(importance_matrix[1:nrow(importance_matrix),])
# Model ensembling to try to improve accuracy
#model ensembling (xgboost attempt 14, avg1-3, xgb50, random forest solution)
xgb50 <- read.csv("xgb50_0_23787.csv")
xgb14 <- read.csv("xgb14_0_12601.csv")
xgbavg1 <- read.csv("xgbavg_0_12570.csv")
xgbavg2 <- read.csv("xgbavg2_0_13060.csv")
xgbavg3 <- read.csv("xgbavg3_0_12781.csv")
rf_solution <- read.csv("rf_Solution3_0_18823.csv")
#store data frames in temporary frame, then average values
temp <- cbind(xgb50, xgb14, xgb14, xgbavg1, xgbavg1, xgbavg2, xgbavg3, rf_solution)
xgbavgNew <- sapply(unique(colnames(temp)), function(x) rowMeans(temp[, colnames(temp) == x, drop=FALSE]))
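#equivalent explicit-weight form (a sketch, not used for the submission file):
#duplicating xgb14 and xgbavg1 in the cbind() above is an implicit 2x weighting
preds <- cbind(xgb50$SalePrice, xgb14$SalePrice, xgbavg1$SalePrice,
               xgbavg2$SalePrice, xgbavg3$SalePrice, rf_solution$SalePrice)
w <- c(1, 2, 2, 1, 1, 1)
blended <- as.vector(preds %*% w / sum(w)) #matches xgbavgNew[, "SalePrice"]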
#write the CSV file
write.csv(xgbavgNew,"xgbavgNew1.csv",row.names = FALSE)
#plot the final predictions
xgbavgNewPlot <- read.csv("xgbavgNew1.csv")
y_pred_dfNew <- data.frame(log(xgbavgNewPlot$SalePrice))
names(y_pred_dfNew)[names(y_pred_dfNew) == "log.xgbavgNewPlot.SalePrice."] <- "y_train"
combinedNew <- rbind(y_train_df, y_pred_dfNew)
plot.ts(combinedNew$y_train, main = "xgbavgNew w/out 51-53, xgbavg1 twice, xgb14 twice, rf_solution")
|
/Kaggle Housing XGBoost.R
|
no_license
|
smithjph/House-Prices-Machine-Learning
|
R
| false | false | 20,611 |
r
|
#' Read PLINK binary data
#'
#' Read PLINK binary data and save in gData format. This is a wrapper around
#' \code{\link[snpStats]{read.plink}} in the Bioconductor package
#' \code{snpStats}. This package needs to be installed for the function to
#' work.
#'
#' @param bed The name of the file containing the packed binary SNP genotype
#' data. It should have the extension .bed; If it doesn't, then this extension
#' will be appended.
#' @param bim The file containing the SNP descriptions. If not specified
#' \code{bed} is used with its file extension replaced by bim.
#' @param fam The file containing subject (and, possibly, family) identifiers.
#' This is basically a tab-delimited "pedfile". If not specified
#' \code{bed} is used with its file extension replaced by fam.
#' @param ... Further arguments passed to \code{\link[snpStats]{read.plink}}.
#'
#' @return An object of class \code{gData}.
#'
#' @importFrom methods as
#' @export
readPLINK <- function(bed,
bim,
fam,
...) {
if (!requireNamespace("snpStats", quietly = TRUE)) {
stop("Package snpStats needed for reading PLINK files.")
}
genoPLINK <- snpStats::read.plink(bed = bed, bim = bim, fam = fam, ...)
## Get markers.
markers <- as(genoPLINK$genotypes, "numeric")
## Get map.
map <- genoPLINK$map
names(map) <- c("chr", "snp.name", "cM", "pos", "allele.1", "allele.2")
# Create a gData object containing map and marker information.
res <- createGData(geno = markers, map = map)
return(res)
}
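# Illustrative call (a sketch; the .bed/.bim/.fam paths are placeholders):
# gd <- readPLINK(bed = "mydata.bed", bim = "mydata.bim", fam = "mydata.fam")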
|
/R/readPlink.R
|
no_license
|
cran/statgenGWAS
|
R
| false | false | 1,606 |
r
|
#
# lohboot.R
#
# $Revision: 1.13 $ $Date: 2014/10/08 10:25:26 $
#
# Loh's bootstrap CI's for local pcf, local K etc
#
lohboot <-
function(X,
fun=c("pcf", "Kest", "Lest", "pcfinhom", "Kinhom", "Linhom"),
..., nsim=200, confidence=0.95, global=FALSE, type=7) {
stopifnot(is.ppp(X))
fun.name <- short.deparse(substitute(fun))
if(is.character(fun)) {
fun <- match.arg(fun)
} else if(is.function(fun)) {
flist <- list(pcf=pcf, Kest=Kest, Lest=Lest,
pcfinhom=pcfinhom, Kinhom=Kinhom, Linhom=Linhom)
id <- match(list(fun), flist)
if(is.na(id))
stop(paste("Loh's bootstrap is not supported for the function",
sQuote(fun.name)))
fun <- names(flist)[id]
} else stop("Unrecognised format for argument fun")
# validate confidence level
stopifnot(confidence > 0.5 && confidence < 1)
alpha <- 1 - confidence
if(!global) {
probs <- c(alpha/2, 1-alpha/2)
rank <- nsim * probs[2]
} else {
probs <- 1-alpha
rank <- nsim * probs
}
if(abs(rank - round(rank)) > 0.001)
warning(paste("confidence level", confidence,
"corresponds to a non-integer rank", paren(rank),
"so quantiles will be interpolated"))
n <- npoints(X)
# compute local functions
localfun <- switch(fun,
pcf=localpcf,
Kest=localK,
Lest=localL,
pcfinhom=localpcfinhom,
Kinhom=localKinhom,
Linhom=localLinhom)
f <- localfun(X, ...)
theo <- f$theo
# parse edge correction info
correction <- attr(f, "correction")
switch(correction,
none = { ctag <- "un"; cadj <- "uncorrected" },
border = { ctag <- "bord"; cadj <- "border-corrected" },
translate = { ctag <- "trans"; cadj <- "translation-corrected" },
isotropic = { ctag <- "iso"; cadj <- "Ripley isotropic corrected" })
# first n columns are the local pcfs (etc) for the n points of X
y <- as.matrix(as.data.frame(f))[, 1:n]
nr <- nrow(y)
# average them
ymean <- .rowMeans(y, na.rm=TRUE, nr, n)
# resample
ystar <- matrix(, nrow=nr, ncol=nsim)
for(i in 1:nsim) {
# resample n points with replacement
ind <- sample(n, replace=TRUE)
# average their local pcfs
ystar[,i] <- .rowMeans(y[,ind], nr, n, na.rm=TRUE)
}
# compute quantiles
if(!global) {
# pointwise quantiles
hilo <- apply(ystar, 1, quantile,
probs=probs, na.rm=TRUE, type=type)
} else {
# quantiles of deviation
ydif <- sweep(ystar, 1, ymean)
ydev <- apply(abs(ydif), 2, max, na.rm=TRUE)
crit <- quantile(ydev, probs=probs, na.rm=TRUE, type=type)
hilo <- rbind(ymean - crit, ymean + crit)
}
# create fv object
df <- data.frame(r=f$r,
theo=theo,
ymean,
lo=hilo[1,],
hi=hilo[2,])
colnames(df)[3] <- ctag
CIlevel <- paste(100 * confidence, "%% confidence", sep="")
desc <- c("distance argument r",
"theoretical Poisson %s",
paste(cadj, "estimate of %s"),
paste("lower", CIlevel, "limit for %s"),
paste("upper", CIlevel, "limit for %s"))
clabl <- paste("hat(%s)[", ctag, "](r)", sep="")
labl <- c("r", "%s[pois](r)", clabl, "%s[loCI](r)", "%s[hiCI](r)")
switch(fun,
pcf={ fname <- "g" ; ylab <- quote(g(r)) },
Kest={ fname <- "K" ; ylab <- quote(K(r)) },
Lest={ fname <- "L" ; ylab <- quote(L(r)) },
pcfinhom={ fname <- "g[inhom]" ; ylab <- quote(g[inhom](r)) },
Kinhom={ fname <- "K[inhom]" ; ylab <- quote(K[inhom](r)) },
Linhom={ fname <- "L[inhom]" ; ylab <- quote(L[inhom](r)) })
g <- fv(df, "r", ylab, ctag, , c(0, max(f$r)), labl, desc, fname=fname)
formula(g) <- . ~ r
fvnames(g, ".") <- c(ctag, "theo", "hi", "lo")
fvnames(g, ".s") <- c("hi", "lo")
unitname(g) <- unitname(X)
g
}
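# Example (a sketch): pointwise 95% bootstrap CI for the pair correlation
# function of a point pattern, e.g. using spatstat's 'cells' dataset:
# g <- lohboot(cells, fun = "pcf")
# plot(g)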
|
/R/lohboot.R
|
no_license
|
mirca/spatstat
|
R
| false | false | 4,014 |
r
|
#!
pointspat<-function(x=NULL,y=NULL,yvar=NULL,yvar1=NULL,ydqc=NULL,xvar=NULL,brk=NULL,col=NULL,
namefileout=NULL,mtxt=NULL,mtxt1=NULL,orog=NULL,pos="bottomright",
xl=NULL,yl=NULL,bnd=NULL,cx=NULL,colext=NULL,
yvartext=NULL,yvartextcex=NULL,xvar.orog=NULL,
colpoints=NULL,legcex=NULL) {
# ydqc==0 good; ydqc==-1 missing; ydqc==1 erroneous
#----------------------------------------------------------------------------------
if (is.null(legcex)) legcex<-0.8
if (is.null(colext)) {
col1<-c("#FFFFFF",col,"#000000")
} else {
col1<-c(colext[1],col,colext[2])
}
if (is.null(xvar)){
png(file=namefileout,width=1200,height=1200)
image(xvar.orog,breaks=seq(0,2500,length=10),col=gray.colors(9),main=mtxt,
xlab="",ylab="",xlim=xl,ylim=yl,cex.main=1.6)
contour(xvar.orog,levels=c(500,1500),drawlabels=F,col="black",lwd=1,add=T)
plot(bnd,add=T)
legtxt<-vector()
# good
for (b in 1:length(col1)) {
cond<-NULL
if (b==1) cond<-which(yvar<brk[b])
if (b==length(col1)) cond<-which(yvar>=brk[b-1])
if (b>1 & b<length(col1)) cond<-which( (yvar>=brk[b-1]) & (yvar<brk[b]))
if (b==1) legtxt[b]<-paste("<",brk[b],sep="")
if (b==length(col1)) legtxt[b]<-paste(">=",brk[b-1],sep="")
if (b>1 & b<length(col1)) legtxt[b]<-paste(brk[b-1]," ",brk[b],sep="")
points(x[cond],y[cond],pch=19,cex=cx[b],col=col1[b])
points(x[cond],y[cond],cex=(cx[b]+0.1),col=colpoints)
}
legend(x=pos,legend=legtxt,cex=legcex,fill=col1)
if (length(mtxt1)>0) mtext(mtxt1,side=3,cex=1.6)
if (!is.null(xvar.orog)) contour(xvar.orog,levels=c(0,500,1500),drawlabels=F,col="black",lwd=0.8,add=T)
dev.off()
return()
}
if (is.null(x)){
png(file=namefileout,width=1200,height=1200)
image(xvar,breaks=brk,col=col,main=mtxt,
xlab="",ylab="",xlim=xl,ylim=yl,cex.main=1.6)
contour(xvar,levels=c(0,-10,-20),drawlabels=F,col=c("red","darkgreen","darkblue"),lwd=2,add=T)
plot(bnd,add=T)
if (length(colext)==2) {
mx<-cellStats(xvar,max)
mn<-cellStats(xvar,min)
image(xvar,breaks=c(brk[length(brk)],mx),add=T,col=colext[2])
image(xvar,breaks=c(mn,brk[1]),add=T,col=colext[1])
}
# points(x,y)
legtxt<-vector()
# good
for (b in 1:length(col1)) {
if (b==1) legtxt[b]<-paste("<",brk[b],sep="")
if (b==length(col1)) legtxt[b]<-paste(">=",brk[b-1],sep="")
if (b>1 & b<length(col1)) legtxt[b]<-paste(brk[b-1]," ",brk[b],sep="")
}
if (!is.null(xvar.orog)) contour(xvar.orog,levels=c(0,100,250,500,1500),drawlabels=F,col="black",lwd=0.8,add=T)
legend(x=pos,legend=legtxt,cex=legcex,fill=col1)
if (length(mtxt1)>0) mtext(mtxt1,side=3,cex=1.6)
dev.off()
return()
}
if (length(ydqc)==0) ydqc<-rep(0,length=length(yvar))
if (length(colpoints)==0) colpoints<-"black"
png(file=namefileout,width=1200,height=1200)
plot(x[ydqc==0],y[ydqc==0],main=mtxt,xlab="",ylab="",xlim=xl,ylim=yl,cex.main=1.6)
plot(bnd,add=T)
# if (!is.null(orog)) contour(orog,levels=c(1,200,700,1500),add=T,col="black",lwd=1)
image(xvar,breaks=brk,add=T,col=col)
if (!is.null(xvar.orog)) contour(xvar.orog,levels=c(0,500,1500),drawlabels=F,col="black",lwd=0.8,add=T)
contour(xvar,levels=c(0,-10,-20),drawlabels=F,col=c("red","darkgreen","darkblue"),lwd=2,add=T)
if (length(colext)==2) {
mx<-cellStats(xvar,max)
mn<-cellStats(xvar,min)
image(xvar,breaks=c((brk[length(brk)]+0.001),mx),add=T,col=colext[2])
image(xvar,breaks=c(mn,(brk[1]-0.001)),add=T,col=colext[1])
}
# points(x,y)
legtxt<-vector()
# good
for (b in 1:length(col1)) {
cond<-NULL
if (b==1) cond<-which(yvar<brk[b] & ydqc>=0)
if (b==length(col1)) cond<-which(yvar>=brk[b-1] & ydqc>=0)
if (b>1 & b<length(col1)) cond<-which( (yvar>=brk[b-1]) & (yvar<brk[b]) & ydqc>=0)
if (b==1) legtxt[b]<-paste("<",brk[b],sep="")
if (b==length(col1)) legtxt[b]<-paste(">=",brk[b-1],sep="")
if (b>1 & b<length(col1)) legtxt[b]<-paste(brk[b-1]," ",brk[b],sep="")
points(x[cond],y[cond],pch=19,cex=cx[b],col=col1[b])
points(x[cond],y[cond],cex=(cx[b]+0.1),col=colpoints)
}
# erroneous
points(x[ydqc==1],y[ydqc==1],col="black",pch=4,lwd=3)
# missing
if (length(yvar1)>0) {
for (b in 1:length(col1)) {
cond<-NULL
if (b==1) cond<-which(yvar1<brk[b] & ydqc==-1)
if (b==length(col1)) cond<-which(yvar1>=brk[b-1] & ydqc==-1)
if (b>1 & b<length(col1)) cond<-which( (yvar1>=brk[b-1]) & (yvar1<brk[b]) & ydqc==-1)
points(x[cond],y[cond],pch=19,cex=0.5,col=col1[b])
points(x[cond],y[cond],cex=0.6,col=colpoints)
}
}
#
if (!is.null(yvartext)) {
text(x,y,labels=yvartext,cex=yvartextcex)
}
#
legend(x=pos,legend=legtxt,cex=legcex,fill=col1)
if (length(mtxt1)>0) mtext(mtxt1,side=3,cex=1.6)
if (!is.null(xvar.orog)) contour(xvar.orog,levels=c(0,500,1500),drawlabels=F,col="black",lwd=0.8,add=T)
dev.off()
return()
}
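# Smoke-test sketch for pointspat(), guarded by if (FALSE) so nothing runs on
# source(). It assumes the 'raster' and 'sp' packages used throughout this
# file are attached; all inputs below are synthetic stand-ins, not real
# seNorge2 data.
if (FALSE) {
library(raster); library(sp)
orog<-raster(matrix(runif(100,0,2500),10,10),xmn=0,xmx=10,ymn=0,ymx=10) # fake orography
bnd<-as(extent(orog),"SpatialPolygons") # fake boundary polygon
pointspat(x=runif(20,0,10),y=runif(20,0,10), # station coordinates
          yvar=rnorm(20,5,3),                # station values
          brk=c(0,2,4,6,8),col=terrain.colors(4), # 5 breaks -> 6 classes incl. extremes
          cx=rep(1,6),                       # one symbol size per class
          namefileout="pointspat_test.png",mtxt="synthetic example",
          xl=c(0,10),yl=c(0,10),bnd=bnd,xvar.orog=orog)
}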
#!
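# rainspatplot: precipitation variant of pointspat(). Grid cells with 0 mm
# are masked to NA so the gray-shaded orography shows through, and the
# legend gains an explicit "(No rain)" class.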
rainspatplot<-function(x=NULL,y=NULL,yvar=NULL,yvar1=NULL,ydqc=NULL,xvar=NULL,brk=NULL,col=NULL,
namefileout=NULL,mtxt=NULL,mtxt1=NULL,pos="bottomright",
xl=NULL,yl=NULL,bnd=NULL,cx=NULL,colext=NULL,
yvartext=NULL,yvartextcex=NULL,xvar.orog=NULL,
colpoints=NULL,legcex=NULL) {
# ydqc==0 good; ydqc==-1 missing; ydqc==1 erroneous
#----------------------------------------------------------------------------------
if (is.null(legcex)) legcex<-0.8
col1<-c(colext[1],col,colext[2])
if (length(ydqc)==0) ydqc<-rep(0,length=length(yvar))
if (length(colpoints)==0) colpoints<-"black"
y.miss<-which(ydqc==-1)
png(file=namefileout,width=1200,height=1200)
plot(x[ydqc==0],y[ydqc==0],main=mtxt,xlab="",ylab="",xlim=xl,ylim=yl,cex.main=1.6)
plot(bnd,add=T)
# if (!is.null(orog)) contour(orog,levels=c(1,200,700,1500),add=T,col="black",lwd=1)
image(xvar.orog,breaks=c(0,500,1000,1500,2000,2500),col=gray(seq(0.7,1,length=5)),add=T)
xvar1<-xvar
xvar1[xvar==0]<-NA
image(xvar1,breaks=brk,add=T,col=col)
# contour(xvar,levels=c(0,-10,-20),drawlabels=F,col=c("red","darkgreen","darkblue"),lwd=2,add=T)
if (length(colext)==2) {
mx<-cellStats(xvar,max)
mn<-cellStats(xvar,min)
image(xvar1,breaks=c((brk[length(brk)]+0.001),mx),add=T,col=colext[2])
image(xvar1,breaks=c(mn,(brk[1]-0.001)),add=T,col=colext[1])
}
# points(x,y)
legtxt<-vector()
# good
n.col1<-length(col1)
legtxt[1]<-"(No rain)"
for (b in 1:n.col1) {
cond<-NULL
if (b==1) {
cond<-which(yvar<brk[b] & ydqc>=0)
}
if (b==n.col1) {
cond<-which(yvar>=brk[b-1] & ydqc>=0)
legtxt[n.col1-b+2]<-paste("[",brk[b-1],", >mm",sep="")
}
if (b>1 & b<n.col1) {
cond<-which( (yvar>=brk[b-1]) & (yvar<brk[b]) & ydqc>=0)
legtxt[n.col1-b+2]<-paste("[",brk[b-1],", ",brk[b],"> mm",sep="")
}
points(x[cond],y[cond],pch=19,cex=cx[b],col=col1[b])
points(x[cond],y[cond],cex=(cx[b]+0.1),col=colpoints)
}
# erroneous
points(x[ydqc>0],y[ydqc>0],col="black",pch=4,lwd=3)
# missing
if (length(y.miss)>0) {
for (b in 1:length(col1)) {
cond<-NULL
if (b==1) cond<-which(yvar[ydqc==-1]<brk[b])
if (b==length(col1)) cond<-which(yvar[ydqc==-1]>=brk[b-1])
if (b>1 & b<length(col1)) cond<-which( (yvar[ydqc==-1]>=brk[b-1]) & (yvar[ydqc==-1]<brk[b]))
if (length(cond)>0) points(x[ydqc==-1][cond],y[ydqc==-1][cond],pch=19,cex=cx[b],col=col1[b])
if (length(cond)>0) points(x[ydqc==-1][cond],y[ydqc==-1][cond],cex=(cx[b]+0.1),col=colpoints)
}
}
#
if (!is.null(yvartext)) {
text(x,y,labels=yvartext,cex=yvartextcex)
}
#
legend(x=pos,legend=legtxt,cex=legcex,fill=c(col1[1],col1[n.col1:2]))
if (length(mtxt1)>0) mtext(mtxt1,side=3,cex=1.6)
contour(xvar.orog,levels=c(0,100,250,500,1500),drawlabels=F,col="black",lwd=0.8,add=T)
dev.off()
return()
}
#!
PRECplot<-function(namefileout=NULL,
y.data=NULL,
r.data=NULL,
# scale=NULL,
# col.scale=NULL,
orog=NULL,
bound=NULL,
par=NULL) {
# NA observation is NA
# -1 missing DQC info
# 0 good observation
# 100 bad observation: KDVH flag > 2 | observation not good in external DQC |
# observed value not plausible | station in blacklist/s
# 200 bad observation: dry-station surrounded only by wet-stations (close enough)
# 300 bad observation: wet-stations surrounded only by dry-stations (close enough)
# 400 bad observation: dry observation is (1) not included in a dry area
# (2) is in Norway
# 500 bad observation: wet observation is (1) not included in an event (2) in Norway
#x=NULL,y=NULL,yvar=NULL,yvar1=NULL,ydqc=NULL,xvar=NULL,brk=NULL,col=NULL,
# namefileout=NULL,mtxt=NULL,mtxt1=NULL,pos="bottomright",
# xl=NULL,yl=NULL,bnd=NULL,cx=NULL,colext=NULL,
# yvartext=NULL,yvartextcex=NULL,xvar.orog=NULL,
# colpoints=NULL,legcex=NULL) {
## ydqc==0 good; ydqc==-1 missing; ydqc==1 erroneous
#----------------------------------------------------------------------------------
y.data$x<-as.numeric(y.data$x)
y.data$y<-as.numeric(y.data$y)
leg.str<-"no rain"
n.col<-length(par$col.scale)
#
png(file=namefileout,width=1200,height=1200)
plot(y.data$x[!is.na(y.data$yo)],y.data$y[!is.na(y.data$yo)],
main=par$main,xlab=par$xlab,ylab=par$ylab,xlim=par$xl,ylim=par$yl,cex.main=1.6,col="white")
image(orog,breaks=c(0,500,1000,1500,2000,2500),col=gray(seq(0.7,1,length=5)),add=T)
image(r.data,col=par$col.scale,breaks=par$scale,add=T)
# dry observations
dry<-!is.na(y.data$yo) & (y.data$yo<0.1)
aux<-which(dry & y.data$dqcflag<=0)
if (length(aux)>0) points(y.data$x[aux],y.data$y[aux],col="black",bg="gray",pch=21,cex=1.2)
aux<-which(dry & y.data$dqcflag==100)
if (length(aux)>0) points(y.data$x[aux],y.data$y[aux],col="black",bg="gray",pch=24,cex=1.2)
aux<-which(dry & y.data$dqcflag>100)
if (length(aux)>0) points(y.data$x[aux],y.data$y[aux],col="black",bg="gray",pch=25,cex=1.2)
# wet observations
for (c in 1:n.col) {
in.break<-(!is.na(y.data$yo)) & (y.data$yo>=par$scale[c]) & (y.data$yo<par$scale[c+1])
aux<-which(in.break & y.data$dqcflag<=0)
if (length(aux)>0)
points(y.data$x[aux],y.data$y[aux],col="black",bg=par$col.scale[c],pch=21,cex=1.2)
if (c==1) {
leg.str<-c(leg.str,paste("[",formatC(par$scale[c],format="f",digits=1),", ",
formatC(par$scale[c+1],format="f",digits=1),") mm",sep=""))
} else if (c==2) {
leg.str<-c(leg.str,paste("[",formatC(par$scale[c],format="f",digits=1),", ",
formatC(par$scale[c+1],format="f",digits=0),") mm",sep=""))
} else if (c<n.col) {
leg.str<-c(leg.str,paste("[",formatC(par$scale[c],format="f",digits=0),", ",
formatC(par$scale[c+1],format="f",digits=0),") mm",sep=""))
} else if (c==n.col) {
leg.str<-c(leg.str,paste(">",formatC(par$scale[c],format="f",digits=0),"mm",sep=""))
}
aux<-which(in.break & y.data$dqcflag==100)
if (length(aux)>0) points(y.data$x[aux],y.data$y[aux],bg=par$col.scale[c],col="black",pch=24,cex=1.2)
aux<-which(in.break & y.data$dqcflag>100)
if (length(aux)>0) points(y.data$x[aux],y.data$y[aux],bg=par$col.scale[c],col="black",pch=25,cex=1.2)
}
plot(bound,add=T)
legend(x="bottomright",fill=rev(c("gray",par$col.scale)),legend=rev(leg.str),cex=1.5)
# contour(orog,levels=c(0,100,250,500,1500),drawlabels=F,col="black",lwd=0.8,add=T)
# contour(r.data,levels=c(5,10,30,70,100),drawlabels=F,col="black",lwd=0.8,add=T)
dev.off()
return()
}
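# Hypothetical example call for PRECplot(), guarded by if (FALSE) so it is
# parsed but never executed on source(). The station data frame and the
# parameter list are synthetic assumptions, not real seNorge2 inputs; note
# that length(par$scale) must be length(par$col.scale)+1.
if (FALSE) {
library(raster); library(sp)
r.prec<-raster(matrix(rexp(100,0.1),10,10),xmn=0,xmx=10,ymn=0,ymx=10)
orog<-raster(matrix(runif(100,0,2500),10,10),xmn=0,xmx=10,ymn=0,ymx=10)
bound<-as(extent(r.prec),"SpatialPolygons")
stations<-data.frame(x=runif(30,0,10),y=runif(30,0,10),
                     yo=rexp(30,0.2),dqcflag=0)
plot.par<-list(main="synthetic precipitation",xlab="",ylab="",
               xl=c(0,10),yl=c(0,10),
               scale=c(0.1,0.5,1,2,5,10,20,50,1000),
               col.scale=rev(rainbow(8)))
PRECplot(namefileout="prec_test.png",y.data=stations,r.data=r.prec,
         orog=orog,bound=bound,par=plot.par)
}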
#!
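# TEMPplot: map a temperature field with station observations overlaid;
# circles (pch=21) mark observations that passed DQC (dqcflag<=0),
# triangles (pch=24) mark flagged ones (dqcflag>0).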
TEMPplot<-function(namefileout=NULL,
y.data=NULL,
r.data=NULL,
orog=NULL,
bound=NULL,
par=NULL) {
## ydqc==0 good; ydqc==-1 missing; ydqc==1 erroneous
#----------------------------------------------------------------------------------
y.data$x<-as.numeric(y.data$x)
y.data$y<-as.numeric(y.data$y)
leg.str<-vector()
n.col<-length(par$col.scale)
#
png(file=namefileout,width=1200,height=1200)
plot(y.data$x[!is.na(y.data$yo)],y.data$y[!is.na(y.data$yo)],
main=par$main,xlab=par$xlab,ylab=par$ylab,xlim=par$xl,ylim=par$yl,cex.main=1.6,col="white")
image(orog,breaks=c(0,500,1000,1500,2000,2500),col=gray(seq(0.7,1,length=5)),add=T)
image(r.data,col=par$col.scale,breaks=par$scale,add=T)
# observations
for (c in 1:n.col) {
in.break<-(!is.na(y.data$yo)) & (y.data$yo>=par$scale[c]) & (y.data$yo<par$scale[c+1])
aux<-which(in.break & y.data$dqcflag<=0)
if (length(aux)>0)
points(y.data$x[aux],y.data$y[aux],col="black",bg=par$col.scale[c],pch=21,cex=1.2)
if (c==1) {
leg.str[1]<-paste("<",formatC(par$scale[c+1],format="f",digits=0),sep="")
} else if (c<n.col) {
leg.str<-c(leg.str,paste("[",formatC(par$scale[c],format="f",digits=0),", ",
formatC(par$scale[c+1],format="f",digits=0),")",sep=""))
} else if (c==n.col) {
leg.str<-c(leg.str,paste(">",formatC(par$scale[c],format="f",digits=0),sep=""))
}
aux<-which(in.break & y.data$dqcflag>0)
if (length(aux)>0) points(y.data$x[aux],y.data$y[aux],bg=par$col.scale[c],col="black",pch=24,cex=1.2)
}
plot(bound,add=T)
legend(x="bottomright",fill=rev(c(par$col.scale,"white")),legend=rev(c(leg.str,expression(paste("Temp,",degree,"C")))),cex=1.5)
dev.off()
return()
}
# Function to plot color bar
color.bar <- function(col, scale, nticks=11, title='') {
usr<-as.numeric(par("usr"))
min.y<-usr[3]
max.y<-usr[4]
scale.y <- length(col)/(max.y-min.y)
i.ind<-vector(length=10)
for (i in 1:10) i.ind[i]<-min(which(scale>(10*i)))-1
print(usr)
print(scale.y)
print(i.ind)
# dev.new(width=1.75, height=5)
# par(new=T)
dx<-(usr[2]-usr[1])/40
x1<-usr[1]+dx*39
print(paste(dx,x1,min.y,max.y))
# plot(c(x1,x1+dx), c(min.y,max.y), type='n', bty='n', xaxt='n', xlab='', yaxt='n', ylab='', main=title)
# axis(2, ticks, las=1)
for (i in 1:length(col)) {
y = (i-1)/scale.y + min.y
rect(x1,y,x1+dx,y+1/scale.y, col=col[i], border=NA)
}
text(x=x1+dx/2,y=((i.ind-1)/scale.y + min.y),labels=round(scale[i.ind],0),cex=1.8,
col="black")
}
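# Usage sketch for color.bar(), guarded by if (FALSE). It assumes an open
# graphics device (par("usr") is read from it), one colour per interval
# (length(scale) = length(col)+1), and max(scale) > 100 so the tick lookup
# "scale > 10*i" succeeds for all ten ticks.
if (FALSE) {
plot(0:100,0:100,type="n")         # any open plot provides par("usr")
color.bar(col=heat.colors(21),
          scale=seq(0,105,by=5))   # 22 breaks for 21 colours
}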
#!
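# TEMPplot.IDI: map of the station IDI values (rescaled to percent via
# yidi*100) on top of the analysis field. The "mm" legend strings built
# below are leftovers from the precipitation version and are effectively
# unused, since the legend() call is commented out in favour of color.bar().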
TEMPplot.IDI<-function(namefileout=NULL,
y.data=NULL,
r.data=NULL,
orog=NULL,
bound=NULL,
par=NULL) {
## ydqc==0 good; ydqc==-1 missing; ydqc==1 erroneous
#----------------------------------------------------------------------------------
y.data$x<-as.numeric(y.data$x)
y.data$y<-as.numeric(y.data$y)
leg.str<-""
n.col<-length(par$col.scale)
yidi<-suppressWarnings(as.numeric(y.data$yidi)*100)
#
png(file=namefileout,width=1200,height=1200)
plot(y.data$x[!is.na(y.data$yo)],y.data$y[!is.na(y.data$yo)],
main=par$main,xlab=par$xlab,ylab=par$ylab,xlim=par$xl,ylim=par$yl,cex.main=1.6,col="white")
image(orog,breaks=c(0,500,1000,1500,2000,2500),col=gray(seq(0.7,1,length=5)),add=T)
image(r.data,col=par$col.scale,breaks=par$scale,add=T)
# observations
for (c in 1:n.col) {
in.break<-(!is.na(y.data$yo)) & (yidi>=par$scale[c]) & (yidi<par$scale[c+1])
aux<-which(in.break & y.data$dqcflag<=0)
if (length(aux)>0)
points(y.data$x[aux],y.data$y[aux],col="black",bg=par$col.scale[c],pch=21,cex=1.2)
if (c==1) {
leg.str<-c(leg.str,paste("[",formatC(par$scale[c],format="f",digits=1),", ",
formatC(par$scale[c+1],format="f",digits=1),") mm",sep=""))
} else if (c==2) {
leg.str<-c(leg.str,paste("[",formatC(par$scale[c],format="f",digits=1),", ",
formatC(par$scale[c+1],format="f",digits=0),") mm",sep=""))
} else if (c<n.col) {
leg.str<-c(leg.str,paste("[",formatC(par$scale[c],format="f",digits=0),", ",
formatC(par$scale[c+1],format="f",digits=0),") mm",sep=""))
} else if (c==n.col) {
leg.str<-c(leg.str,paste(">",formatC(par$scale[c],format="f",digits=0),"mm",sep=""))
}
aux<-which(in.break & y.data$dqcflag>0)
# if (length(aux)>0) points(y.data$x[aux],y.data$y[aux],bg=par$col.scale[c],col="black",pch=24,cex=2.2)
if (length(aux)>0) {
points(y.data$x[aux],y.data$y[aux],bg="black",col="black",pch=24,cex=1.2)
}
}
plot(bound,add=T)
# legend(x="bottomright",fill=rev(par$col.scale),legend=rev(leg.str),cex=1)
print(paste(length(par$col.scale),length(par$scale)))
color.bar(par$col.scale,par$scale)
dev.off()
return()
}
#!
PRECplot.IDI<-function(namefileout=NULL,
y.data=NULL,
r.data=NULL,
orog=NULL,
bound=NULL,
par=NULL) {
# NA observation is NA
# -1 missing DQC info
# 0 good observation
# 100 bad observation: KDVH flag > 2 | observation not good in external DQC |
# observed value not plausible | station in blacklist/s
# 200 bad observation: dry-station surrounded only by wet-stations (close enough)
# 300 bad observation: wet-stations surrounded only by dry-stations (close enough)
# 400 bad observation: dry observation is (1) not included in a dry area
# (2) is in Norway
# 500 bad observation: wet observation is (1) not included in an event (2) in Norway
#----------------------------------------------------------------------------------
y.data$x<-as.numeric(y.data$x)
y.data$y<-as.numeric(y.data$y)
leg.str<-""
n.col<-length(par$col.scale)
yidi<-suppressWarnings(as.numeric(y.data$yidi))*100
#
png(file=namefileout,width=1200,height=1200)
plot(y.data$x[!is.na(y.data$yo)],y.data$y[!is.na(y.data$yo)],
main=par$main,xlab=par$xlab,ylab=par$ylab,xlim=par$xl,ylim=par$yl,cex.main=1.6,col="white")
image(orog,breaks=c(0,500,1000,1500,2000,2500),col=gray(seq(0.7,1,length=5)),add=T)
image(r.data,col=par$col.scale,breaks=par$scale,add=T)
# dry observations
dry<-!is.na(y.data$yo) & (y.data$yo<0.1)
aux<-which(dry & y.data$dqcflag<=0)
if (length(aux)>0) points(y.data$x[aux],y.data$y[aux],col="black",bg="gray",pch=21,cex=1.2)
aux<-which(dry & y.data$dqcflag==100)
if (length(aux)>0) points(y.data$x[aux],y.data$y[aux],col="black",bg="black",pch=24,cex=1.2)
aux<-which(dry & y.data$dqcflag>100)
if (length(aux)>0) points(y.data$x[aux],y.data$y[aux],col="black",bg="black",pch=25,cex=1.2)
# wet observations
for (c in 1:n.col) {
in.break<-(!is.na(y.data$yo)) & (yidi>=par$scale[c]) & (yidi<par$scale[c+1])
aux<-which(in.break & y.data$dqcflag<=0)
if (length(aux)>0)
points(y.data$x[aux],y.data$y[aux],col="black",bg=par$col.scale[c],pch=21,cex=1.2)
if (c==1) {
leg.str<-c(leg.str,paste("[",formatC(par$scale[c],format="f",digits=1),", ",
formatC(par$scale[c+1],format="f",digits=1),") mm",sep=""))
} else if (c==2) {
leg.str<-c(leg.str,paste("[",formatC(par$scale[c],format="f",digits=1),", ",
formatC(par$scale[c+1],format="f",digits=0),") mm",sep=""))
} else if (c<n.col) {
leg.str<-c(leg.str,paste("[",formatC(par$scale[c],format="f",digits=0),", ",
formatC(par$scale[c+1],format="f",digits=0),") mm",sep=""))
} else if (c==n.col) {
leg.str<-c(leg.str,paste(">",formatC(par$scale[c],format="f",digits=0),"mm",sep=""))
}
}
aux<-which(y.data$dqcflag==100)
if (length(aux)>0) points(y.data$x[aux],y.data$y[aux],bg="black",col="red",pch=24,cex=1.2)
aux<-which(y.data$dqcflag>100)
if (length(aux)>0) points(y.data$x[aux],y.data$y[aux],bg="black",col="red",pch=25,cex=1.2)
plot(bound,add=T)
# legend(x="bottomright",fill=rev(par$col.scale),legend=rev(leg.str),cex=1)
print(paste(length(par$col.scale),length(par$scale)))
color.bar(par$col.scale,par$scale)
dev.off()
return()
}
#!
rainspatplot.cra<-function(x=NULL,y=NULL,yvar=NULL,yvar1=NULL,ydqc=NULL,xvar=NULL,brk=NULL,col=NULL,
namefileout=NULL,mtxt=NULL,mtxt1=NULL,legpos="bottomright",
xl=NULL,yl=NULL,bnd=NULL,cx=NULL,colext=NULL,
yvartext=NULL,yvartextcex=NULL,xvar.orog=NULL,
cra.lab=NULL,cra.x=NULL,cra.y=NULL,
n.cra=NULL,
cra.stn.dens=NULL,
mean=NULL,
max.x=NULL,
max.yo=NULL,
max.ya=NULL,
max.yav=NULL,
cv.rms.rel=NULL,
cv.bias.sq=NULL,
cv.rmse.sq=NULL,
cv.made.sq=NULL,
colpoints=NULL,legcex=NULL) {
# ydqc==0 good; ydqc==-1 missing; ydqc==1 erroneous
#----------------------------------------------------------------------------------
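# The table printed along the top of the map has one row per CRA; the
# column headers map to the arguments as follows: #=cra.lab, n=n.cra,
# sdns=cra.stn.dens (as sqrt(dens)/pi), avg=mean, mx=max.x, mo=max.yo,
# ma=max.ya, mav=max.yav, cvrel=cv.rms.rel (in %), bia=cv.bias.sq,
# rms=cv.rmse.sq, made=cv.made.sq.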
if (is.null(legcex)) legcex<-0.8
col1<-c(colext[1],col,colext[2])
if (length(ydqc)==0) ydqc<-rep(0,length=length(yvar))
if (length(colpoints)==0) colpoints<-"black"
png(file=namefileout,width=1200,height=1200)
plot(x[ydqc==0],y[ydqc==0],main=mtxt,xlab="",ylab="",xlim=xl,ylim=yl,cex.main=1.6)
plot(bnd,add=T)
# if (!is.null(orog)) contour(orog,levels=c(1,200,700,1500),add=T,col="black",lwd=1)
image(xvar.orog,breaks=c(0,500,1000,1500,2000,2500),col=gray(seq(0.7,1,length=5)),add=T)
xvar1<-xvar
xvar1[xvar==0]<-NA
image(xvar1,breaks=brk,add=T,col=col)
contour(xvar.orog,levels=c(0,500,1500),drawlabels=F,col="black",lwd=0.8,add=T)
# contour(xvar,levels=c(0,-10,-20),drawlabels=F,col=c("red","darkgreen","darkblue"),lwd=2,add=T)
if (length(colext)==2) {
mx<-cellStats(xvar,max)
mn<-cellStats(xvar,min)
image(xvar1,breaks=c((brk[length(brk)]+0.001),mx),add=T,col=colext[2])
image(xvar1,breaks=c(mn,(brk[1]-0.001)),add=T,col=colext[1])
}
# points(x,y)
legtxt<-vector()
# good
n.col1<-length(col1)
legtxt[1]<-"(No rain)"
for (b in 1:n.col1) {
cond<-NULL
if (b==1) {
cond<-which(yvar<brk[b] & ydqc>=0)
}
if (b==n.col1) {
cond<-which(yvar>=brk[b-1] & ydqc>=0)
legtxt[n.col1-b+2]<-paste("[",brk[b-1],",",sep="")
}
if (b>1 & b<n.col1) {
cond<-which( (yvar>=brk[b-1]) & (yvar<brk[b]) & ydqc>=0)
legtxt[n.col1-b+2]<-paste("[",brk[b-1],", ",brk[b],sep="")
}
points(x[cond],y[cond],pch=19,cex=cx[b],col=col1[b])
points(x[cond],y[cond],cex=(cx[b]+0.1),col=colpoints)
}
text(cra.x,cra.y,cra.lab,cex=2.5)
Lx<-xl[2]-xl[1]
Lx.3<-Lx/3
Lx.3.46<-Lx.3/46
Ly<-yl[2]-yl[1]
Ly.55<-Ly/55
seqx<-xl[1]+c(1,3,6,10,14,18,22,26,30,34,38,42)*Lx.3.46
text(seqx,yl[2],adj=0,cex=1,c("#","n","sdns","avg","mx","mo","ma","mav","cvrel","bia","rms","made"))
for (i in 1:length(cra.x)) {
text(seqx,(yl[2]-Ly.55*i),adj=0,cex=1.,
c(cra.lab[i],
round(n.cra[i],0),
round(sqrt(cra.stn.dens[i])/pi,0),
round(mean[i],1),
round(max.x[i],1),
round(max.yo[i],1),
round(max.ya[i],1),
round(max.yav[i],1),
round(cv.rms.rel[i]*100,0),
round(cv.bias.sq[i],1),
round(cv.rmse.sq[i],1),
round(cv.made.sq[i],1)))
}
# erroneous
points(x[ydqc==1],y[ydqc==1],col="black",pch=4,lwd=3)
# missing
if (length(yvar1)>0) {
for (b in 1:length(col1)) {
cond<-NULL
if (b==1) cond<-which(yvar1<brk[b] & ydqc==-1)
if (b==length(col1)) cond<-which(yvar1>=brk[b-1] & ydqc==-1)
if (b>1 & b<length(col1)) cond<-which( (yvar1>=brk[b-1]) & (yvar1<brk[b]) & ydqc==-1)
points(x[cond],y[cond],pch=19,cex=0.5,col=col1[b])
points(x[cond],y[cond],cex=0.6,col=colpoints)
}
}
#
if (!is.null(yvartext)) {
text(x,y,labels=yvartext,cex=yvartextcex)
}
#
legend(x=legpos,legend=legtxt,cex=legcex,fill=c(col1[1],col1[n.col1:2]))
if (length(mtxt1)>0) mtext(mtxt1,side=3,cex=1.6)
dev.off()
return()
}
|
/lib/Bspat_plot.R
|
no_license
|
congca/seNorge2
|
R
| false | false | 24,322 |
r
|
#==============================================================================
# Author: Zachary M. Smith
# Created: 3/13/2017
# Updated: 06/13/2017
# Maintained: Zachary M. Smith
# Purpose: Import and prepare watershed characteristic data.
#==============================================================================
# Set working directory
#setwd("//Pike/data/Projects/ImperviousCover_802/Phase2/Watershed_Characteristics")
#------------------------------------------------------------------------------
# Source functions from impervious_functions.R
source("functions/impervious_functions.R")
#------------------------------------------------------------------------------
# Load package for importing individual excel sheets.
library(readxl)
# Load package for data manipulation
library(dplyr)
#------------------------------------------------------------------------------
# J. Palmer created two xlsx files containing watershed characteristics:
# 1) Baseline/Current Scenario
# file name = Phase2_Master_Watershed_Characteristics_values_032417.xlsx
# date = 03-24-17
# 2) Baseline/Impervious Scenario
# file name = Phase2_Watershed_Characteristics_Imprvous_values_061217.xlsx
# date = 06-12-17
# These files were merged into one xlsx file by Z. Smith on 06-13-17.
#------------------------------------------------------------------------------
#file.path <- "//Pike/data/Projects/ImperviousCover_802/Phase2/Watershed_Characteristics"
file.path <- "data/final_watershed_characteristics_061617.xlsx"
# Import the Baseline/Current Scenario sheet.
base.cur <- read_excel(file.path, sheet = "Base_Current")
# Import the Baseline/Impervious Scenario sheet.
base.imp <- read_excel(file.path, sheet = "Base_Impervious")
# Import the Ecoregion sheet.
ecoregion <- read_excel(file.path, sheet = "Ecoregions")
#------------------------------------------------------------------------------
# Prepare ecoregion data.
ecoregion.df <- ecoregion %>%
rename(WATERSHED = Watershed,
ECOREGION4 = US_L4CODE) %>%
mutate(ECOREGION3 = substr(ECOREGION4, 1, 2),
#Ecoregion_C; C for custom.
ECOREGION_C = if_else(ECOREGION4 %in% c("45e", "64b",
"64c", "64d", "65n"), "PIEDMONT_UP",
if_else(ECOREGION4 %in% c("64a"), "PIEDMONT_LOW",
if_else(ECOREGION4 %in% c("66a", "66b"), "BLUE",
if_else(ECOREGION4 %in% c("67a", "67b"), "VALLEY",
if_else(ECOREGION4 %in% c("67c", "67d"), "RIDGE",
if_else(ECOREGION4 %in% c("69a", "69b"), "CA", "ERROR")))))),
ECOREGION_C = if_else(ECOREGION_C %in% c("PIEDMONT_UP", "PIEDMONT_LOW", "VALLEY"), "VALLEY", ECOREGION_C),
ECOREGION4 = as.factor(ECOREGION4),
ECOREGION3 = as.factor(ECOREGION3),
ECOREGION_C = as.factor(ECOREGION_C))
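#------------------------------------------------------------------------------
# Side note: the nested if_else() chain above can be written more readably
# with dplyr::case_when(). A non-executed sketch (guarded by if (FALSE));
# equivalent output is assumed, not verified here.
if (FALSE) {
  ecoregion %>%
    rename(WATERSHED = Watershed, ECOREGION4 = US_L4CODE) %>%
    mutate(ECOREGION_C = case_when(
      ECOREGION4 %in% c("45e", "64b", "64c", "64d", "65n") ~ "PIEDMONT_UP",
      ECOREGION4 == "64a"                                  ~ "PIEDMONT_LOW",
      ECOREGION4 %in% c("66a", "66b")                      ~ "BLUE",
      ECOREGION4 %in% c("67a", "67b")                      ~ "VALLEY",
      ECOREGION4 %in% c("67c", "67d")                      ~ "RIDGE",
      ECOREGION4 %in% c("69a", "69b")                      ~ "CA",
      TRUE                                                 ~ "ERROR"
    ))
}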
# Prepare the Baseline/Current Scenario.
base.cur <- prep_watershed_char(base.cur)
base.cur <- right_join(ecoregion.df, base.cur, by = "WATERSHED") %>%
mutate(FCODE = as.numeric(as.factor(PHYSIO)))
# Prepare the Baseline/Impervious Scenario.
base.imp <- prep_watershed_char(base.imp)
base.imp <- right_join(ecoregion.df, base.imp, by = "WATERSHED") %>%
mutate(FCODE = as.numeric(as.factor(PHYSIO)))
#------------------------------------------------------------------------------
# Remove functions and intermediate objects that are no longer necessary.
rm(clean_up, prep_watershed_char, ecoregion.df)
|
/prep/prep_metrics.R
|
no_license
|
zsmith27/Impervious_Cover
|
R
| false | false | 3,563 |
r
|
test_that("When 'bulkDownloading' set to FALSE, 'DownloadDEE2Data' function
could get the same data for all given SRR accessions as when downloaded
for each accessions seperately using DEE2 legacy download", {
SRRvec = c("SRR1783836", "SRR1783837", "SRR1783838", "SRR1999221",
"SRR2153338", "SRR2153409", "SRR2153289")
## Disable bulk download option
se <- DownloadDEE2Data('hsapiens', SRRvec, bulkDownloading = FALSE)
## Download the DEE2 data for three of the samples individually using the
## DEE2 legacy download.
listLegacy_SRR1783836 <- getDEE2(species = "hsapiens",
SRRvec = c("SRR1783836"),
legacy = TRUE)
listLegacy_SRR1783837 <- getDEE2(species = "hsapiens",
SRRvec = c("SRR1783837"),
legacy = TRUE)
listLegacy_SRR2153289 <- getDEE2(species = "hsapiens",
SRRvec = c("SRR2153289"),
legacy = TRUE)
## Check if counts equal for the first given SRR accession
expect_equal(sum(listLegacy_SRR1783836$GeneCounts[, c('SRR1783836')]),
sum(assay(se)[, c('SRR1783836')]))
## Check if counts equal for the second given SRR accession
expect_equal(sum(listLegacy_SRR1783837$GeneCounts[, c('SRR1783837')]),
sum(assay(se)[, c('SRR1783837')]))
## Check if counts equal for the last given SRR accession
expect_equal(sum(listLegacy_SRR2153289$GeneCounts[, c('SRR2153289')]),
sum(assay(se)[, c('SRR2153289')]))
})
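## Note: these tests assume that the packages providing DownloadDEE2Data(),
## getDEE2() and assay() (the latter from SummarizedExperiment) are attached
## by the test setup, and that network access to the DEE2 server is
## available when they run.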
|
/tests/testthat/test.dataDownload.R
|
no_license
|
uilnauyis/homosapien-DEE2-data
|
R
| false | false | 1,963 |
r
|
test_that("When 'bulkDownloading' set to FALSE, 'DownloadDEE2Data' function
could get the same data for all given SRR accessions as when downloaded
for each accessions seperately using DEE2 legacy download", {
SRRvec = c("SRR1783836", "SRR1783837", "SRR1783838", "SRR1999221",
"SRR2153338", "SRR2153409", "SRR2153289")
## Disable bulk download option
se <- DownloadDEE2Data('hsapiens', SRRvec, bulkDownloading = FALSE)
## Download the DEE2 data of sample 'SRR2153289' using DEE2 legacy
## download.
listLegacy_SRR1783836 <- getDEE2(species = "hsapiens",
SRRvec = c("SRR1783836"),
legacy = TRUE)
listLegacy_SRR1783837 <- getDEE2(species = "hsapiens",
SRRvec = c("SRR1783837"),
legacy = TRUE)
listLegacy_SRR2153289 <- getDEE2(species = "hsapiens",
SRRvec = c("SRR2153289"),
legacy = TRUE)
## Check if counts equal for the first given SRR accession
expect_equal(sum(listLegacy_SRR1783836$GeneCounts[, c('SRR1783836')]),
sum(assay(se)[, c('SRR1783836')]))
## Check if counts equal for the second given SRR accession
expect_equal(sum(listLegacy_SRR1783837$GeneCounts[, c('SRR1783837')]),
sum(assay(se)[, c('SRR1783837')]))
## Check if counts equal for the last given SRR accession
expect_equal(sum(listLegacy_SRR2153289$GeneCounts[, c('SRR2153289')]),
sum(assay(se)[, c('SRR2153289')]))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_parms.r
\name{write_parms}
\alias{write_parms}
\title{Write parameter file}
\usage{
write_parms(parm_list, file_name, file_dir = "./")
}
\arguments{
\item{parm_list}{(required) named list of parameters to record}
\item{file_name}{(required) path to file where parameters should be written}
\item{file_dir}{directory where file should be saved, if not included in
\code{file_name}. Defaults to current directory.}
}
\value{
nothing
}
\description{
Write simulation parameters to a file
}
\details{
This function writes parameter assignments from a named list of parameters
to a given file. Each element in the list is written as a line that
can be read into R (using \code{\link{source}}) such that the name of the list
element becomes the object name. The function only works for vectors, lists
with one level, and functions. It then appends two lines defining
niche parameter lists for hosts and symbionts.
}
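\examples{
## A hypothetical call; the parameter list below is invented for
## illustration and write_parms() itself comes from the CAMM package.
\dontrun{
parms <- list(n_sites = 10, mu = c(0.1, 0.5), label = "run1")
write_parms(parms, "parms.R", file_dir = tempdir())
}
}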
|
/CAMM/man/write_parms.Rd
|
no_license
|
jescoyle/CAMM
|
R
| false | true | 994 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EM.R
\name{EM.M.W}
\alias{EM.M.W}
\title{Maximization step for W}
\usage{
EM.M.W(Xc, sigSq, V, Vvar, K)
}
\description{
Maximization step for W
}
|
/man/EM.M.W.Rd
|
permissive
|
mattdneal/stpca
|
R
| false | true | 224 |
rd
|
#######################################################################
############################## PRACTICE 1 #############################
#######################################################################
#################### Variable types (atomic) ####################
# 1. Character (character)
v_char <- "hola"
# 2. Numeric (numeric)
v_num <- 5.56
# 3. Integer (integer)
v_inter <- 10L # is different from:
v_inter <- 10 # Without the "L", the value is stored as a numeric variable, i.e., as 10.0 (or with more decimals)
# 4. Logical (logical)
v.log <- T
################### What type of data is an object? ###################
# The class() function tells you what type an object is.
class(v_inter)
######################### Vector function: c() #########################
# Vector: an object that stores a series of elements of the same type.
# 1. Character vector:
vector_char <- c("a", "b", "c", "d", "e")
# 2. Numeric vector:
vector_num <- c(1, 2, 4, 6, 7.68)
# 3. Integer vector:
vector_inter <- c(10L, 3L, 100L, 45L, 5L)
# 4. Logical vector:
vector_log <- c(T, F, T, F, F)
# Square brackets [] are used to access a specific element of a vector. Inside the brackets you write the position of the element you want to retrieve:
vector_num[4]
# The length() function returns the length of a vector or object, i.e., how many elements it stores.
length(vector_num)
# Note 1: a vector always stores a single type of data (not numbers mixed with letters, etc.)
# Note 2: when mixed data types are stored (numbers + letters + other), c() coerces the data to the dominant type: character > numeric > logical. For example:
vector_de_vectores <- c(vector_char, vector_inter, vector_log, vector_num)
class(vector_de_vectores)
######################## List function: list() ########################
# list() lets you store different kinds of vectors in a single object without changing the original data type. Because the data type stored in each vector is preserved, it is possible to keep plots, data sets, etc. in the same object.
vector_de_vectores <- list(vector_char, vector_inter, vector_log, vector_num)
# Double brackets [[]] are used to access a specific vector contained in a list:
vector_de_vectores[[1]]
# To access a specific element within a given vector, use double brackets [[]] followed by single brackets [] indicating the position of the element:
vector_de_vectores[[1]][5]
#######################################################################
#######################################################################
#######################################################################
|
/practica_1.R
|
no_license
|
MemLem/R-CURSO
|
R
| false | false | 2,866 |
r
|
#######################################################################
############################## PRÁCTICA 1 #############################
#######################################################################
#################### Tipos de variables (atómicas) ####################
# 1. Caracter (character)
v_char <- "hola"
# 2. Númerica (numeric)
v_num <- 5.56
# 3. Entero (integer)
v_inter <- 10L #es diferente a:
v_inter <- 10 # Sin la "L" se guarda el valor como una v. tipo númerica, es decir, como un 10.0 o más decimales
# 4. Lógicas (logic)
v.log <- T
################### ¿Qué tipo de dato es un objeto? ###################
# La función class() permite saber de quñe tipo es un objeto.
class(v_inter)
######################### Función Vector: c() #########################
# Vector: objeto que almacena una serie de elementos del mismo tipo.
# 1. Vector caracter:
vector_char <- c("a", "b", "c", "d", "e")
# 2. Vector númerico:
vector_num <- c(1, 2, 4, 6, 7.68)
# 3. Vector de enteros:
vector_inter <- c(10L, 3L, 100L, 45L, 5L)
# 4. Vector de lógicos:
vector_log <- c(T, F, T, F, F)
# Se utilizan los corchetes [] para llamar algún elemento en especifico del vector. Al interior de los corchetes se escribe la posición del elemento que se quiere llamar:
vector_num[4]
# La función length() se utiliza para saber la longitus de un vector u objeto, es decir, para conocer cuántos elementos tiene almacenados.
length(vector_num)
# Nota 1: en vectores siempre se guardan el mismo tipo de datos (no números y letras, etc.)
# Nota 2: en caso de almacenar tipos de datos mixtos (números + letras + other) la función c() convierte los datos al tipo dominante character > numeric > logica. Por ejemplo:
vector_de_vectores <- c(vector_char, vector_inter, vector_log, vector_num)
class(vector_de_vectores)
######################## Función Lista: list() ########################
# list() permite guardar diferentes tipos de vectores en un único objeto sin cambiar el tipo de datos original. Al respetar el tipo de datos almacenado en cada vector es posible guardar gráficas, bases de datos, etc. en el mismo objeto.
vector_de_vectores <- list(vector_char, vector_inter, vector_log, vector_num)
# Para llamar los valores de vector en especifico cnetnido en una lista se utiliza doble corchete [[]]:
vector_de_vectores[[1]]
# Para llamar una observación en especifica incluida en un vector determinado se utiliza doble corchetes [[]] seguidos de un par de corchetes [] donde se señala la ubicación del elemento a llamar:
vector_de_vectores[[1]][5]
#######################################################################
#######################################################################
#######################################################################
|
## Programming Assignment 2: these functions use the lexical scoping of R
## to cache the potentially costly inverse of a matrix.
## makeCacheMatrix: creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setsolve <- function(solve) m <<- solve
getsolve <- function() m
list(set = set, get = get,
setsolve = setsolve,
getsolve = getsolve)
}
## cacheSolve: computes the inverse of the special "matrix" returned by
## makeCacheMatrix, returning the cached inverse if it has already been computed.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getsolve()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setsolve(m)
m
}
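## Usage sketch (synthetic matrix): the second call returns the cached inverse.
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(m)   # computes, caches, and returns the inverse
## cacheSolve(m)   # prints "getting cached data" and returns the cached value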
|
/cachematrix.R
|
no_license
|
jugipa/ProgrammingAssignment2
|
R
| false | false | 698 |
r
|
## Programming Assignment 2: This function use Lexical Scoping of R
## makeCacheMateix function
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setsolve <- function(solve) m <<- solve
getsolve <- function() m
list(set = set, get = get,
setsolve = setsolve,
getsolve = getsolve)
}
## cacheSolve function
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getsolve()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setsolve(m)
m
}
|
library(bsts)
### Name: shorten
### Title: Shorten long names
### Aliases: Shorten
### Keywords: character
### ** Examples
Shorten(c("/usr/common/foo.tex", "/usr/common/barbarian.tex"))
# returns c("foo", "barbarian")
Shorten(c("hello", "hellobye"))
# returns c("", "bye")
Shorten(c("hello", "hello"))
# returns c("hello", "hello")
Shorten(c("", "x", "xx"))
# returns c("", "x", "xx")
Shorten("abcde")
# returns "abcde"
|
/data/genthat_extracted_code/bsts/examples/shorten.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 451 |
r
|
library(MASS)
library(tidyverse)
library(ggplot2)
Data=iris
traindata=Data
testdata=Data
Test=testdata[,c(1,2,3,4)]
newdf <- data.frame(Sepal.Length=Test[,1],Sepal.Width=Test[,2],Petal.Length=Test[,3],Petal.Width=Test[,4])
r1=lda(Species ~ Sepal.Length + Sepal.Width + Petal.Length + Petal.Width ,data=traindata)
r2=predict(object=r1,newdf)
# confusion matrix (avoid naming it T, which shadows the built-in TRUE)
conf_tab = table(testdata[,5], r2$class)
conf_tab
misclaserror = 1 - sum(diag(conf_tab)) / sum(conf_tab)
misclaserror
datPred<-data.frame(Species=testdata$Species,r2$x) #create data.frame
r3 <- lda(datPred[,2:3], datPred[,1])
x <- seq(min(datPred[,2]), max(datPred[,2]), length.out=150)
y <- seq(min(datPred[,3]), max(datPred[,3]), length.out=150)
Xcon <- matrix(c(rep(x,length(y)),
rep(y, rep(length(x), length(y)))),,2)
r3.pr1 <- predict(r3, Xcon)$post[, c("setosa","versicolor")] %*% c(1,1)
r3.pr2 <- predict(r3, Xcon)$post[, c("virginica","setosa")] %*% c(1,1)
pr3<-data.frame(x=rep(x, length(y)), y=rep(y, each=length(x)),
z1=as.vector(r3.pr1), z2=as.vector(r3.pr2))
ggplot(datPred, aes(x=LD1, y=LD2) ) +
geom_point(size = 2, aes(pch = Species, col=Species)) +
geom_contour(data=pr3, aes(x=x, y=y, z=z1), breaks=c(0,.5)) +
geom_contour(data=pr3, aes(x=x, y=y, z=z2), breaks=c(0,.5))+
labs(title="Classification Based on X1, X2,X3 and X4")
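# Posterior class probabilities returned by predict.lda (a sketch):
head(round(r2$posterior, 3))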
|
/lda four variables.R
|
no_license
|
Bodhoditya/Iris-R-Codes
|
R
| false | false | 1,293 |
r
|
library(BuenColors)
dt <- read.table("../data/three_comparison/TSS_scores_3channels.tsv", fill = 0, header = TRUE)
dt$pos <- 1:dim(dt)[1]
tb <- pretty_plot(fontsize = 8) + L_border() +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
p1 <- ggplot(dt, aes(x = pos, y = ASAP_A)) +
geom_line() + tb + labs(y = "") +
scale_y_continuous(limits = c(0,12), expand = c(0,0), breaks = c(0, 5, 10))
p2 <- ggplot(dt, aes(x = pos, y = ASAP_B)) +
geom_line() + tb + labs(y = "") +
scale_y_continuous(limits = c(0,12), expand = c(0,0), breaks = c(0, 5, 10))
p3 <- ggplot(dt, aes(x = pos, y = ASAP_C)) +
geom_line() + tb + labs(y = "") +
scale_y_continuous(limits = c(0,12), expand = c(0,0), breaks = c(0, 5, 10))
ggsave(cowplot::plot_grid(p1, p2, p3, ncol = 1),
file = "../plots/TSS3.pdf", width = 1.3, height = 2.5)
|
/pbmc_TBNK_comparisons_asapseq/code/02_TSS_scores.R
|
no_license
|
liuxiaoping2020/asap_reproducibility
|
R
| false | false | 900 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/discretize_xgb.R
\name{step_discretize_xgb}
\alias{step_discretize_xgb}
\alias{tidy.step_discretize_xgb}
\title{Discretize numeric variables with XgBoost}
\usage{
step_discretize_xgb(
recipe,
...,
role = NA,
trained = FALSE,
outcome = NULL,
prop = 0.8,
learn_rate = 0.3,
num_breaks = 10,
tree_depth = 1,
rules = NULL,
skip = FALSE,
id = rand_id("discretize_xgb")
)
\method{tidy}{step_discretize_xgb}(x, ...)
}
\arguments{
\item{recipe}{A recipe object. The step will be added to the sequence of
operations for this recipe.}
\item{...}{One or more selector functions to choose which variables are
affected by the step. See \code{\link[=selections]{selections()}} for more details.}
\item{role}{Defaults to \code{"predictor"}.}
\item{trained}{A logical to indicate if the quantities for preprocessing
have been estimated.}
\item{outcome}{A call to \code{vars} to specify which variable is used as the
outcome to train XgBoost models in order to discretize explanatory
variables.}
\item{prop}{The share of data used for training the splits (the rest is used
as a validation set for early stopping). Defaults to 0.80.}
\item{learn_rate}{The rate at which the boosting algorithm adapts from
iteration-to-iteration. Corresponds to \code{eta} in the \pkg{xgboost} package.
Defaults to 0.3.}
\item{num_breaks}{The \emph{maximum} number of discrete bins to bucket continuous
features. Corresponds to \code{max_bin} in the \pkg{xgboost} package. Defaults to
10.}
\item{tree_depth}{The maximum depth of the tree (i.e. number of splits).
Corresponds to \code{max_depth} in the \pkg{xgboost} package. Defaults to 1.}
\item{rules}{The splitting rules of the best XgBoost tree to retain for
each variable.}
\item{skip}{A logical. Should the step be skipped when the
recipe is baked by \code{\link[recipes:bake.recipe]{recipes::bake.recipe()}}? While all operations are baked
when \code{\link[recipes:prep.recipe]{recipes::prep.recipe()}} is run, some operations may not be able to be
conducted on new data (e.g. processing the outcome variable(s)).
Care should be taken when using \code{skip = TRUE} as it may affect
the computations for subsequent operations}
\item{id}{A character string that is unique to this step to identify it.}
\item{x}{A \code{step_discretize_xgb} object.}
}
\value{
An updated version of \code{recipe} with the new step added to the
sequence of existing steps (if any).
}
\description{
\code{step_discretize_xgb} creates a \emph{specification} of a recipe step that will
discretize numeric data (e.g. integers or doubles) into bins in a
supervised way using an XgBoost model.
}
\details{
\code{step_discretize_xgb()} creates non-uniform bins from numerical
variables by utilizing the information about the outcome variable and
applying the xgboost model. It is advised to impute missing values before
this step. This step is intended to be used particularly with linear models,
because the non-uniform bins make it easier to capture non-linear
patterns in the data.
The best set of buckets for each variable is chosen using
an internal early stopping scheme implemented in the \pkg{xgboost}
package, which makes this discretization method resistant to overfitting.
The pre-defined values of the underlying xgboost learner should give good
and reasonably complex results. However, if one wishes to tune them the
recommended path would be to first start with changing the value of
\code{num_breaks} to e.g.: 20 or 30. If that doesn't give satisfactory results
one could experiment with increasing the \code{tree_depth} parameter.
This step requires the \pkg{xgboost} package. If not installed, the
step will stop with a note about installing the package.
Note that the original data will be replaced with the new bins.
}
\examples{
library(modeldata)
data(credit_data)
library(rsample)
split <- initial_split(credit_data, strata = "Status")
credit_data_tr <- training(split)
credit_data_te <- testing(split)
xgb_rec <-
recipe(Status ~ ., data = credit_data_tr) \%>\%
step_medianimpute(all_numeric()) \%>\%
step_discretize_xgb(all_numeric(), outcome = "Status")
xgb_rec <- prep(xgb_rec, training = credit_data_tr)
xgb_test_bins <- bake(xgb_rec, credit_data_te)
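# The tidy() method documented above returns the learned split rules, e.g.
# (a sketch -- the step number depends on the step's position in the recipe):
# tidy(xgb_rec, number = 2)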
}
\seealso{
\code{\link[recipes:recipe]{recipes::recipe()}} \code{\link[recipes:prep.recipe]{recipes::prep.recipe()}} \code{\link[recipes:bake.recipe]{recipes::bake.recipe()}}
}
\concept{discretization}
\concept{factors}
\concept{preprocessing}
\concept{xgboost}
\keyword{binning}
|
/man/step_discretize_xgb.Rd
|
no_license
|
topepo/embed-1
|
R
| false | true | 4,601 |
rd
|
rm(list=ls())
setwd("/data2/kiemele/WGCNA/HXBbrain_forPhenogen/20141218")
library(qtl)
##Load in data
MEs <- read.cross("csv", "", "forAnalysis_MEs.csv", genotypes = c("H", "B"))
summary(MEs)
#########################
# QTL marker regression #
#########################
nphe=(nphe(MEs)-2)
out_MEs.mr <- scanone(MEs, pheno.col=c(1:nphe), method="mr")
max(out_MEs.mr)
dim(out_MEs.mr)
out_MEs.mr[1:5, 1:5]
head(out_MEs.mr)
maxLODs <- apply(out_MEs.mr[,3:ncol(out_MEs.mr)],2,function(a) out_MEs.mr[which.max(a),1:2])
results <- data.frame(probeset_id = names(maxLODs),snp_id = unlist(lapply(maxLODs,function(a) rownames(a))),
chr = unlist(lapply(maxLODs,function(a) a[,1])),pos = unlist(lapply(maxLODs,function(a) a[,2])),maxLOD = apply(out_MEs.mr[,3:ncol(out_MEs.mr)],2,max))
################
# get p-values #
################
set.seed(12)
operms_corrMEs <- scanone(MEs, pheno.col=c(1:nphe), method="mr", n.perm=1000)
dim(operms_corrMEs)
summary(operms_corrMEs)
pvals <- c()
for(i in 1:nrow(results)){
pvalue <- sum(results[i, "maxLOD"] < operms_corrMEs[,i])/nrow(operms_corrMEs)
pvals <- c(pvals, pvalue)
}
results$pvals <- pvals
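# An equivalent vectorized form (a sketch; the mean of a logical vector
# gives the same proportion the loop above computes):
# results$pvals <- sapply(seq_len(nrow(results)),
#                         function(i) mean(results$maxLOD[i] < operms_corrMEs[, i]))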
save(out_MEs.mr, operms_corrMEs, results, file="EQTLs.Rdata")
write.csv(results, file="HXB.RNAseqTC.EQTLs.20141218results.csv", row.names=FALSE)
|
/programs/orig/EQTLanalysis.R
|
no_license
|
TabakoffLab/WGCNA
|
R
| false | false | 1,330 |
r
|
#' Download Playbyplay data for NBA game
#'
#' Download and process NBA.com play-by-play data for a given game and season.
#' @param game_id Game's ID in NBA.com DB
#' @param verbose Default TRUE - prints additional information
#'
#' @return Dataset containing data from Playbyplay pages. Play-by-play provides information about every play from an NBA game, one action per row.
#'
#' @author Patrick Chodowski, \email{Chodowski.Patrick@@gmail.com}
#' @keywords NBAr, play-by-play, game
#'
#' @examples
#' get_playbyplay(21400001)
#'
#' @importFrom lubridate second minute ms
#' @import dplyr
#' @import tidyr
#' @import httr
#' @import zoo
#' @importFrom purrr set_names
#' @import tibble
#' @importFrom glue glue
#' @importFrom magrittr %>%
#' @importFrom jsonlite fromJSON
#' @export get_playbyplay
get_playbyplay <- function(game_id, verbose=TRUE){
tryCatch({
link <- glue("https://stats.nba.com/stats/playbyplay?GameID=00{game_id}&StartPeriod=0&EndPeriod=14")
verbose_print(verbose, link)
result_sets_df <- rawToChar(GET(link, add_headers(.headers = c('Referer' = 'http://google.com', 'User-Agent' = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
'connection' = 'keep-alive',
'Accept' = 'application/json',
'Host' = 'data.nba.com',
'x-nba-stats-origin'= 'stats')))$content) %>% fromJSON()
index <- which(result_sets_df$resultSets$name == "PlayByPlay")
dataset <- result_sets_df$resultSets$rowSet[index][1] %>%
as.data.frame(stringsAsFactors=F) %>%
as_tibble() %>%
set_names(tolower(unlist(result_sets_df$resultSets$headers[index]))) %>%
mutate(mins = as.numeric(minute(ms(pctimestring))),
secs = as.numeric(second(ms(pctimestring)))) %>%
mutate(scoremargin = ifelse(scoremargin == 'TIE',0, scoremargin)) %>%
mutate_if(check_if_numeric, as.numeric)
dataset[1,c("score","scoremargin")] <- c("0 - 0", 0)
dataset[,c("score","scoremargin")] <- apply(dataset[,c("score","scoremargin")], 2, na.locf)
verbose_dataset(verbose, dataset)
return(dataset)}, error=function(e) print(e$message))
}
|
/R/get_playbyplay.R
|
no_license
|
PatrickChodowski/NBAr
|
R
| false | false | 2,409 |
r
|
#####################################################
# Some R code to get you going for the
# 2018 Melbourne Datathon
#####################################################
#install and load required libraries
#install.packages('data.table')
library(data.table)
#tell R where it can find the data
ScanOnFolderMaster <- '/Users/wangmumu/Documents/MelbDatathon2018/Samp_x/ScanOnTransaction'
ScanOffFolderMaster <- '/Users/wangmumu/Documents/MelbDatathon2018/Samp_x/ScanOffTransaction'
mySamp <- 0
ScanOnFolder <- sub("x",mySamp,ScanOnFolderMaster)
ScanOffFolder <- sub("x",mySamp,ScanOffFolderMaster)
#list the files
onFiles <- list.files(ScanOnFolder,recursive = TRUE,full.names = TRUE)
offFiles <- list.files(ScanOffFolder,recursive = TRUE,full.names = TRUE)
#how many
allFiles <- union(onFiles,offFiles)
cat("\nthere are", length(allFiles),'files')
#------------------------------------
#read in a file and take a look
#------------------------------------
myFile <- onFiles[1]
cmd <- paste0("gzip -dc ", myFile)
dt <- fread(cmd,nrow=10000)
#these are the column names
colnames(dt) <- c('Mode','BusinessDate','DateTime','CardID','CardType','VehicleID','ParentRoute','RouteID','StopID')
#take a look
dt
#summary stats
summary(dt)
#lets plot some distributions of the numerics
hist(dt$Mode,col='blue')
hist(dt$CardID,col='blue')
hist(dt$CardType,col='blue',breaks=max(dt$CardType))
hist(dt$VehicleID,col='blue')
hist(dt$RouteID,col='blue')
hist(dt$StopID,col='blue')
#one field is clearly a date time
dt$DateTime <- as.POSIXct(dt$DateTime)
plot(dt$DateTime)
hist(dt$DateTime,breaks=1000)
#----------------------------------------
# scan through the files and extract
# a sample of specific rows and columns
#----------------------------------------
first <- TRUE
count <- 0
#we won't look at all the files
#myFiles <- onFiles
myFiles <- onFiles[1:10]
for (myOn in myFiles){
cmd <- paste0("gzip -dc ", myOn)
#grab only 3 columns
dt <- fread(cmd,select = c(3,4,9))
#create a sample based on column 4 values
#note the samples are already sampled!
dt <- subset(dt,V4 %% 100 == mySamp)
#stack the records together
if (first == TRUE){
allON <- dt
first <- FALSE
} else {
l = list(dt,allON)
allON <- rbindlist(l)
}
count <- count + 1
cat('\n',count,' of ',length(myFiles))
}
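# A more idiomatic alternative (a sketch; builds the table in one pass
# instead of growing it inside the loop):
# allON <- rbindlist(lapply(myFiles, function(f) {
#   d <- fread(paste0("gzip -dc ", f), select = c(3, 4, 9))
#   d[V4 %% 100 == mySamp]
# }))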
cat('\n there are ', format(nrow(allON),big.mark = ","),'rows')
colnames(allON) <- c('DateTime','CardID','StopID')
allON
summary(allON)
hist(allON$CardID,col='blue')
#one field is clearly a date time
allON[,DateTime := as.POSIXct(DateTime,tz='Australia/Sydney')]
allON[,unixTime := as.numeric(DateTime)]
hist(allON$unixTime,breaks=1000)
#write the file
fwrite(allON,'/Users/wangmumu/Documents/MelbDatathon2018/allon.txt')
#----------------------------------
# now add the locations
#----------------------------------
sl <- fread('/Users/wangmumu/Documents/MelbDatathon2018/stop_locations.txt')
#take a look
sl
#just keep the columns that look useful
sl <- subset(sl,select=c('V1','V10','V11'))
colnames(sl) <- c('StopID','lat','long')
#merge the location onto the tap on data
setkey(sl,StopID)
setkey(allON,StopID)
dt <- merge(allON,sl)
#find the cards that have the most tap ons
busy_travellers <- as.numeric(names(sort(table(dt$CardID),decreasing=TRUE)[1:50]))
#take a subset of just these busy travellers
bt <- subset(dt,CardID %in% busy_travellers )
#finally a map
plot(bt$lat,bt$long,col=factor(bt$CardID),pch=19,main="busy travellers")
# install.packages('ggplot2')
# library(ggplot2)
# update.packages()
# warnings()
|
/code_to_get_started.R
|
no_license
|
lynnhio/MeblourneDatathon2018
|
R
| false | false | 3,598 |
r
|
# ***************************************************************************************
# Library   : modeltime
# Function  : combine_modeltime_tables
# Created on: 2021/8/6
# URL       : https://business-science.github.io/modeltime/reference/combine_modeltime_tables.html
# ***************************************************************************************
# <Overview>
# - Combine multiple model tables into a single table
# <Syntax>
# combine_modeltime_tables(...)
# <Examples>
# 0 Setup
# 1 Model building
# 2 Combining model tables
# 0 Setup ---------------------------------------------------------------------------------------
# Libraries
library(modeltime)
library(tidymodels)
library(tidyverse)
library(timetk)
library(lubridate)
# Prepare data
m750 <- m4_monthly %>% filter(id == "M750")
# Split data
splits <- m750 %>% time_series_split(assess = "3 years", cumulative = TRUE)
# 1 Model building ---------------------------------------------------------------------------
# Define & fit model
# --- Auto ARIMA
model_fit_arima <-
  arima_reg() %>%
  set_engine("auto_arima") %>%
  fit(value ~ date, training(splits))
# Define & fit model
# --- Prophet
model_fit_prophet <-
  prophet_reg() %>%
  set_engine("prophet") %>%
  fit(value ~ date, training(splits))
# 2 Combining model tables --------------------------------------------------------------------------
# Register models in model tables
# --- one table per model
model_tbl_1 <- modeltime_table(model_fit_arima)
model_tbl_2 <- modeltime_table(model_fit_prophet)
# Combine the model tables
combine_modeltime_tables(model_tbl_1, model_tbl_2)
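# A possible next step (a sketch): calibrate the combined table on the test
# split and compare out-of-sample accuracy.
# combine_modeltime_tables(model_tbl_1, model_tbl_2) %>%
#   modeltime_calibrate(new_data = testing(splits)) %>%
#   modeltime_accuracy()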
|
/library/modeltime/function/workflow/combine_modeltime_tables.R
|
no_license
|
jimyanau/r-timeseries
|
R
| false | false | 1,709 |
r
|
#' Release package to CRAN.
#'
#' Run automated and manual tests, then ftp to CRAN.
#'
#' The package release process will:
#'
#' \itemize{
#'
#' \item Confirm that the package passes \code{R CMD check}
#' \item Ask if you've checked your code on win-builder
#' \item Confirm that news is up-to-date
#' \item Confirm that DESCRIPTION is ok
#' \item Ask if you've checked packages that depend on your package
#' \item Build the package
#' \item Submit the package to CRAN, using comments in "cran-comments.md"
#' }
#'
#' You can also add arbitrary extra questions by defining an (un-exported)
#' function called \code{release_questions()} that returns a character vector
#' of additional questions to ask.
#'
#' You also need to read the CRAN repository policy at
#' \url{http://cran.r-project.org/web/packages/policies.html} and make
#' sure you're in line with the policies. \code{release} tries to automate as
#' many of polices as possible, but it's impossible to be completely
#' comprehensive, and they do change in between releases of devtools.
#'
#' @section Guarantee:
#'
#' If a devtools bug causes one of the CRAN maintainers to treat you
#' impolitely, I will personally send you a handwritten apology note.
#' Please forward me the email and your address, and I'll get a card in
#' the mail.
#'
#' @param pkg package description, can be path or package name. See
#' \code{\link{as.package}} for more information
#' @param check if \code{TRUE}, run checking, otherwise omit it. This
#' is useful if you've just checked your package and you're ready to
#' release it.
#' @export
release <- function(pkg = ".", check = TRUE) {
dr_d <- dr_devtools()
if (!dr_d)
print(dr_d)
pkg <- as.package(pkg)
# Figure out if this is a new package
cran_version <- cran_pkg_version(pkg$package)
new_pkg <- is.null(cran_version)
if (uses_git(pkg$path)) {
if (git_uncommitted(pkg$path))
warning("Uncommited changes in git.", immediate. = TRUE, call. = FALSE)
if (git_sync_status(pkg$path))
warning("Git not synched with remote.", immediate. = TRUE, call. = FALSE)
}
if (check) {
check(pkg, cran = TRUE, check_version = TRUE, manual = TRUE)
release_checks(pkg)
if (yesno("Was package check successful?"))
return(invisible())
} else {
release_checks(pkg)
if (yesno("Does R CMD check pass with no ERRORs or WARNINGs?"))
return(invisible())
# Even if we don't run the full checks, at least check that the package
# version is sufficient for submission to CRAN.
if (new_pkg) {
message("Package ", pkg$package, " not found on CRAN. This is a new package.")
} else if (as.package_version(pkg$version) > cran_version) {
message("Local package ", pkg$package, " ", pkg$version,
" is greater than CRAN version ", cran_version, ".")
} else {
stop("Local package ", pkg$package, " ", pkg$version,
" must be greater than CRAN version ", cran_version, ".")
}
}
rule("cran-comments.md ")
cat(cran_comments(pkg), "\n\n")
if (yesno("Are the CRAN submission comments correct?"))
return(invisible())
if (yesno("Have you checked on win-builder (with build_win())?"))
return(invisible())
if (!new_pkg) {
cran_url <- paste0("http://cran.rstudio.com/web/checks/check_results_",
pkg$package, ".html")
if (yesno("Have you fixed all existing problems at \n", cran_url, " ?"))
return(invisible())
}
if (file.exists("NEWS")) {
try(print(show_news(pkg)))
if (yesno("Is package news up-to-date?"))
return(invisible())
}
rule("DESCRIPTION")
cat(readLines(file.path(pkg$path, "DESCRIPTION")), sep = "\n")
cat("\n")
if (yesno("Is DESCRIPTION up-to-date?"))
return(invisible())
deps <- if (new_pkg) 0 else length(revdep(pkg$package))
if (deps > 0) {
msg <- paste0("Have you checked the ", deps ," packages that depend on ",
"this package (with check_cran())?")
if (yesno(msg))
return(invisible())
}
release_questions <- pkg_env(pkg)$release_questions
if (!is.null(release_questions)) {
questions <- release_questions()
for (question in questions) {
if (yesno(question)) return(invisible())
}
}
if (yesno("Is your email address ", maintainer(pkg)$email, "?"))
return(invisible())
built_path <- build_cran(pkg)
if (yesno("Ready to submit?"))
return(invisible())
upload_cran(pkg, built_path)
if (file.exists(file.path(pkg$path, ".git"))) {
message("Don't forget to tag the release when the package is accepted!")
}
invisible(TRUE)
}
release_email <- function(name, new_pkg) {
paste(
"Dear CRAN maintainers,\n",
"\n",
if (new_pkg) {
paste("I have uploaded a new package, ", name, ", to CRAN. ",
"I have read and agree to the CRAN policies.\n", sep = "")
} else {
paste("I have just uploaded a new version of ", name, " to CRAN.\n",
sep = "")
},
"\n",
"Thanks!\n",
"\n",
getOption("devtools.name"), "\n",
sep = "")
}
yesno <- function(...) {
yeses <- c("Yes", "Definitely", "For sure", "Yup", "Yeah")
nos <- c("No way", "Not yet", "I forget", "No", "Nope", "Uhhhh... Maybe?")
cat(paste0(..., collapse = ""))
qs <- c(sample(yeses, 1), sample(nos, 2))
rand <- sample(length(qs))
menu(qs[rand]) != which(rand == 1)
}
# http://tools.ietf.org/html/rfc2368
email <- function(address, subject, body) {
url <- paste(
"mailto:",
URLencode(address),
"?subject=", URLencode(subject),
"&body=", URLencode(body),
sep = ""
)
tryCatch({
browseURL(url, browser = email_browser())},
error = function(e) {
message("Sending failed with error: ", e$message)
cat("To: ", address, "\n", sep = "")
cat("Subject: ", subject, "\n", sep = "")
cat("\n")
cat(body, "\n", sep = "")
}
)
invisible(TRUE)
}
email_browser <- function() {
if (!identical(.Platform$GUI, "RStudio"))
return (getOption("browser"))
# Use default browser, even if RStudio running
if (.Platform$OS.type == "windows")
return (NULL)
browser <- Sys.which(c("xdg-open", "open"))
browser[nchar(browser) > 0][[1]]
}
maintainer <- function(pkg = ".") {
pkg <- as.package(pkg)
authors <- pkg$`authors@r`
if (!is.null(authors)) {
people <- eval(parse(text = authors))
if (is.character(people)) {
maintainer <- as.person(people)
} else {
maintainer <- Find(function(x) "cre" %in% x$role, people)
}
} else {
maintainer <- pkg$maintainer
if (is.null(maintainer)) {
stop("No maintainer defined in package.", call. = FALSE)
}
maintainer <- as.person(maintainer)
}
list(
name = paste(maintainer$given, maintainer$family),
email = maintainer$email
)
}
cran_comments <- function(pkg = ".") {
pkg <- as.package(pkg)
path <- file.path(pkg$path, "cran-comments.md")
if (!file.exists(path)) {
stop("Can't find cran-comments.md in ", pkg$package, ".\n",
"This file gives CRAN volunteers comments about the submission,\n",
"and it must exist. Please create it using this guide:\n",
"http://r-pkgs.had.co.nz/release.html#release-check",
"Then run use_build_ignore('cran-comments.md')",
call. = FALSE)
}
paste0(readLines(path, warn = FALSE), collapse = "\n")
}
cran_submission_url <- "http://xmpalantir.wu.ac.at/cransubmit/index2.php"
#' Submit a package to CRAN.
#'
#' This uses the new CRAN web-form submission process. After submission, you
#' will receive an email asking you to confirm submission - this is used
#' to check that the package is submitted by the maintainer.
#'
#' It's recommended that you use \code{\link{release}()} rather than this
#' function as it performs more checks prior to submission.
#'
#' @param pkg package description, can be path or package name. See
#' \code{\link{as.package}} for more information
#' @export
#' @keywords internal
submit_cran <- function(pkg = ".") {
built_path <- build_cran(pkg)
upload_cran(pkg, built_path)
}
build_cran <- function(pkg) {
message("Building")
built_path <- build(pkg, tempdir(), manual = TRUE)
message("File size: ",
format(as.object_size(file.info(built_path)$size), units = "auto"))
built_path
}
upload_cran <- function(pkg, built_path) {
pkg <- as.package(pkg)
maint <- maintainer(pkg)
comments <- cran_comments(pkg)
# Initial upload ---------
message("Uploading package & comments")
body <- list(
pkg_id = "",
name = maint$name,
email = maint$email,
uploaded_file = httr::upload_file(built_path, "application/x-gzip"),
comment = comments,
upload = "Upload package"
)
r <- httr::POST(cran_submission_url, body = body)
httr::stop_for_status(r)
new_url <- httr::parse_url(r$url)
  new_url$query$strErr  # any error message CRAN returned (currently unused)
# Confirmation -----------
message("Confirming submission")
body <- list(
pkg_id = new_url$query$pkg_id,
name = maint$name,
email = maint$email,
policy_check = "1/",
submit = "Submit package"
)
r <- httr::POST(cran_submission_url, body = body)
httr::stop_for_status(r)
new_url <- httr::parse_url(r$url)
if (new_url$query$submit == "1") {
message("Package submission successful.\n",
"Check your email for confirmation link.")
} else {
stop("Package failed to upload.", call. = FALSE)
}
invisible(TRUE)
}
as.object_size <- function(x) structure(x, class = "object_size")
|
/R/release.r
|
no_license
|
melissasullivan/devtools
|
R
| false | false | 9,480 |
r
|
install.packages("WDI")
require(WDI)
# gross domestic product
gdp <- WDI(country = c("US", "CA", "GB", "DE", "CN", "JP", "SG", "IL"), indicator = c("NY.GDP.PCAP.CD", "NY.GDP.MKTP.CD"), start = 1960, end = 2011)
gdp
head(gdp)
names(gdp) <- c("iso2c", "Country", "Year", "PerCapGDP", "GDP")
us1 <- gdp$PerCapGDP[gdp$Country == "Canada"]
#convert it to a time series
us1
us1 <- ts(us1, start = min(gdp$Year), end = max(gdp$Year))
plot(us1, ylab = "Per capita GDP", xlab = "Year")
acf(us1)
pacf(us1)
install.packages("reshape2")
library(reshape2)
# dcast (from reshape2) reshapes long data to wide; it takes a formula, not quoted column names
gdpCast <- dcast(Year ~ Country,
data = gdp[, c("Country", "Year", "PerCapGDP")],
value.var = "PerCapGDP")
head(gdpCast)
gdpTS <- ts(data = gdpCast[, -1], start = min(gdpCast$Year), end = max(gdpCast$Year))
plot(gdpTS, plot.type = "single", col = 1:8)
legend("topleft", legend = colnames(gdpTS), ncol = 2, lty = 1, col = 1:8, cex = .9)
gdpTS <- gdpTS[, which(colnames(gdpTS) != "Germany")]
library(quantmod)
# load("data/att.rdata")  # alternative: load a previously saved local copy
att <- getSymbols("T", auto.assign = FALSE)
library(xts)
# show data
head(att)
plot(att)
attClose <- att$T.Close
class(attClose)
head(attClose)
library(rugarch)
attSpec <- ugarchspec(variance.model = list(model = "sGARCH", garchOrder = c(1, 1)),
                      mean.model = list(armaOrder = c(1, 1)),
                      distribution.model = "std")
attGarch <- ugarchfit(spec = attSpec, data = attClose)
attGarch
attLog <- diff(log(attClose))[-1]
# build the specification
attLogSpec <- ugarchspec(variance.model = list(model = "sGARCH",
garchOrder = c(1, 1)),
mean.model = list(armaOrder = c(1, 1)),
distribution.model = "std")
# fit the model
attLogGarch <- ugarchfit(spec = attLogSpec, data = attLog)
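# A possible follow-up (a sketch): inspect the fit and forecast 10 steps ahead
# attLogGarch
# ugarchforecast(attLogGarch, n.ahead = 10)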
|
/Statistics/GarchVar_TS.R
|
no_license
|
piyushkumar102/R-Programs
|
R
| false | false | 1,918 |
r
|
setwd("T:\\R\\thuc_hanh_16_4_21")
A = read.csv("Temperature.csv", header = TRUE)
# mean temperature by month
trung_binh = tapply(A$Temperature, A$Month, mean, na.rm = TRUE)
trung_binh
# mean temperature by year and month
trung_binh2 = tapply(A$Temperature, list(A$Year, A$Month), mean, na.rm = TRUE)
trung_binh2
# standard deviation of temperature by month
trung_binh3 = tapply(A$Temperature, A$Month, sd, na.rm = TRUE)
trung_binh3
# frequency tables: observations per year, and per station by year
help(table)
table(A$Year)
table(A$Station, A$Year)
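# Equivalent monthly means using aggregate() (a sketch):
# aggregate(Temperature ~ Month, data = A, FUN = mean, na.rm = TRUE)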
|
/bt_vat/thuc_hanh_16_4_21/hello r.r
|
no_license
|
Klatane/bt_minhtriho_all
|
R
| false | false | 384 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ellipse.lm.R
\name{ellipse.lm}
\alias{ellipse.lm}
\title{Outline a pairwise confidence region for a linear model fit.}
\usage{
\method{ellipse}{lm}(x, which = c(1, 2), level = 0.95, t = sqrt(2 *
qf(level, 2, x$df.residual)), ...)
}
\arguments{
\item{x}{The first argument should be an \code{lm} object, usually resulting
from a call to \code{lm()}.}
\item{which}{Which selects the pair of parameters to be plotted. The default
is the first two.}
\item{level}{The confidence level of the region. Default 95\%.}
\item{t}{The t statistic on the boundary of the ellipse.}
\item{...}{Other \code{ellipse.default} parameters may also be used.}
}
\value{
A matrix with columns \code{x} and \code{y} to outline the confidence region.
}
\description{
This function produces the ellipsoidal outline of a pairwise confidence
region for a linear model fit.
}
\details{
The summary function is used to obtain the covariance matrix of the fitted parameters.
}
\examples{
# Plot the estimate and joint 90\% confidence region for the displacement and cylinder
# count linear coefficients in the mtcars dataset
data(mtcars)
fit <- lm(mpg ~ disp + cyl , mtcars)
s <- summary(fit)
plot(ellipse(fit, which = c('disp', 'cyl'), level = 0.90), type = 'l')
points(fit$coefficients['disp'], fit$coefficients['cyl'])
}
\seealso{
\code{\link[=ellipse.default]{ellipse.default()}}, \code{\link[stats:summary.lm]{stats::summary.lm()}}
}
\keyword{dplot}
\keyword{regression}
|
/man/ellipse.lm.Rd
|
no_license
|
kongdd/ellipse
|
R
| false | true | 1,533 |
rd
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "phoneme")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "Class")
lrn = makeLearner("classif.svm", par.vals = list(kernel = "polynomial", degree = 1, type = "nu-classification"), predict.type = "prob")
#:# hash
#:# 1f57a24cda2fde4e99949eebd0aa7096
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
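# --- added sketch: per-fold results can be read off the ResampleResult produced
# --- above; measures.test holds one row per CV iteration
print(r$measures.test)
print(r$aggr) # aggregated measures over the 5 folds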
|
/models/openml_phoneme/classification_Class/1f57a24cda2fde4e99949eebd0aa7096/code.R
|
no_license
|
lukaszbrzozowski/CaseStudies2019S
|
R
| false | false | 739 |
r
|
|
salary <- c(15000, 20000, 15000, 15000, 18000, 20000)
moneyUsed <- c(16000, 15000, 10000, 20000, 10000, 15000)
names(salary) <- c("January", "February", "March", "April", "May", "June")
names(moneyUsed) <- c("January", "February", "March", "April", "May", "June")
salary
moneyUsed
totalPerMonth <- salary - moneyUsed
totalPerMonth
totalSalary6Months <- sum(salary)
totalUsed6Months <- sum(moneyUsed)
totalSalary6Months
totalUsed6Months
totalSalary6Months > totalUsed6Months
salaryJune <- salary[6]
salaryJune
salaryFirst3Months <- salary[c(1, 2, 3)]
salaryFirst3Months
salaryLast3Months <- salary[c("April", "May", "June")]
salaryLast3Months
mean(salaryLast3Months)
bonusSalaryMonths <- salary > 15000
bonusSalaryMonths
moreMoneyMonths <- salary[bonusSalaryMonths]
moreMoneyMonths
lessMoneyUsedMonths <- moneyUsed < 15000
lessMoneyUsedMonths
savedMoneyMonths <- moneyUsed[lessMoneyUsedMonths]
savedMoneyMonths
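# --- added sketch: cumulative savings over the six months, built with cumsum()
# --- from the monthly balance computed above
cumulativeSavings <- cumsum(totalPerMonth)
cumulativeSavings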
|
/Introduction to R/Personalized/3. Personalized.r
|
no_license
|
TopicosSelectos/tutoriales-2019-2-mimi1698
|
R
| false | false | 961 |
r
|
|
context("Linear models")
test_that("Simple lm models work", {
model_simple <- lm(mpg ~ cyl + disp, data = mtcars)
mtcars$gear <- as.factor(mtcars$gear)
model_indicators <- lm(mpg ~ cyl + gear, data = mtcars)
tex <- extract_eq(model_simple)
actual <- "\\text{mpg} = \\alpha + \\beta_{1}(\\text{cyl}) + \\beta_{2}(\\text{disp}) + \\epsilon"
expect_equal(tex, equation_class(actual),
label = "basic equation builds correctly")
tex <- extract_eq(model_simple, use_coefs = TRUE)
actual <- "\\text{mpg} = 34.66 - 1.59(\\text{cyl}) - 0.02(\\text{disp}) + \\epsilon"
expect_equal(tex, equation_class(actual),
label = "basic equation + coefs builds correctly")
tex <- extract_eq(model_indicators)
actual <- "\\text{mpg} = \\alpha + \\beta_{1}(\\text{cyl}) + \\beta_{2}(\\text{gear}_{\\text{4}}) + \\beta_{3}(\\text{gear}_{\\text{5}}) + \\epsilon"
expect_equal(tex, equation_class(actual),
label = "categorical subscripts work")
})
test_that("Interactions work", {
simple_int <- lm(Sepal.Length ~ Sepal.Width*Species, iris)
tex <- extract_eq(simple_int)
actual <- "\\text{Sepal.Length} = \\alpha + \\beta_{1}(\\text{Sepal.Width}) + \\beta_{2}(\\text{Species}_{\\text{versicolor}}) + \\beta_{3}(\\text{Species}_{\\text{virginica}}) + \\beta_{4}(\\text{Sepal.Width} \\times \\text{Species}_{\\text{versicolor}}) + \\beta_{5}(\\text{Sepal.Width} \\times \\text{Species}_{\\text{virginica}}) + \\epsilon"
expect_equal(tex, equation_class(actual),
label = "Basic interaction with subscripts")
simple_int2 <- lm(mpg ~ hp*wt, mtcars)
tex2 <- extract_eq(simple_int2)
actual2 <- "\\text{mpg} = \\alpha + \\beta_{1}(\\text{hp}) + \\beta_{2}(\\text{wt}) + \\beta_{3}(\\text{hp} \\times \\text{wt}) + \\epsilon"
expect_equal(tex2, equation_class(actual2),
label = "Basic interaction with no subscripts")
})
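# --- added sketch (hypothetical snippet, not an original test): when equatiomatic's
# --- rendering changes, printing the equation helps refresh the `actual` strings
# cat(extract_eq(lm(mpg ~ hp * wt, mtcars)))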
|
/tests/testthat/test-lm.R
|
permissive
|
cRistiancec/equatiomatic
|
R
| false | false | 1,913 |
r
|
context("Linear models")
test_that("Simple lm models work", {
model_simple <- lm(mpg ~ cyl + disp, data = mtcars)
mtcars$gear <- as.factor(mtcars$gear)
model_indicators <- lm(mpg ~ cyl + gear, data = mtcars)
tex <- extract_eq(model_simple)
actual <- "\\text{mpg} = \\alpha + \\beta_{1}(\\text{cyl}) + \\beta_{2}(\\text{disp}) + \\epsilon"
expect_equal(tex, equation_class(actual),
label = "basic equation builds correctly")
tex <- extract_eq(model_simple, use_coefs = TRUE)
actual <- "\\text{mpg} = 34.66 - 1.59(\\text{cyl}) - 0.02(\\text{disp}) + \\epsilon"
expect_equal(tex, equation_class(actual),
label = "basic equation + coefs builds correctly")
tex <- extract_eq(model_indicators)
actual <- "\\text{mpg} = \\alpha + \\beta_{1}(\\text{cyl}) + \\beta_{2}(\\text{gear}_{\\text{4}}) + \\beta_{3}(\\text{gear}_{\\text{5}}) + \\epsilon"
expect_equal(tex, equation_class(actual),
label = "categorical subscripts work")
})
test_that("Interactions work", {
simple_int <- lm(Sepal.Length ~ Sepal.Width*Species, iris)
tex <- extract_eq(simple_int)
actual <- "\\text{Sepal.Length} = \\alpha + \\beta_{1}(\\text{Sepal.Width}) + \\beta_{2}(\\text{Species}_{\\text{versicolor}}) + \\beta_{3}(\\text{Species}_{\\text{virginica}}) + \\beta_{4}(\\text{Sepal.Width} \\times \\text{Species}_{\\text{versicolor}}) + \\beta_{5}(\\text{Sepal.Width} \\times \\text{Species}_{\\text{virginica}}) + \\epsilon"
expect_equal(tex, equation_class(actual),
label = "Basic interaction with subscripts")
simple_int2 <- lm(mpg ~ hp*wt, mtcars)
tex2 <- extract_eq(simple_int2)
actual2 <- "\\text{mpg} = \\alpha + \\beta_{1}(\\text{hp}) + \\beta_{2}(\\text{wt}) + \\beta_{3}(\\text{hp} \\times \\text{wt}) + \\epsilon"
expect_equal(tex2, equation_class(actual2),
label = "Basic interaction with no subscripts")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions.R
\name{ContFeaturesPlot}
\alias{ContFeaturesPlot}
\title{Plot of continuous features}
\usage{
ContFeaturesPlot(leadCpds, data, nrclusters = NULL, orderLab = NULL,
colorLab = NULL, cols = NULL, ylab = "features", addLegend = TRUE,
margins = c(5.5, 3.5, 0.5, 8.7), plottype = "new", location = NULL)
}
\arguments{
\item{leadCpds}{A character vector containing the objects one wants to
separate from the others.}
\item{data}{The data matrix.}
\item{nrclusters}{Optional. The number of clusters to consider if colorLab
is specified. Default is NULL.}
\item{orderLab}{Optional. The specific order in which the objects are to be
set, as given by a specific method. Default is NULL.}
\item{colorLab}{The clustering result that determines the color of the
labels of the objects in the plot. If NULL, the labels are black. Default is NULL.}
\item{cols}{The colors for the labels of the objects. Default is NULL.}
\item{ylab}{The label of the y-axis. Default is "features".}
\item{addLegend}{Logical. Indicates whether a legend should be added to the
plot. Default is TRUE.}
\item{margins}{Optional. Margins to be used for the plot. Default is c(5.5,3.5,0.5,8.7).}
\item{plottype}{Should be one of "pdf","new" or "sweave". If "pdf", a
location should be provided in "location" and the figure is saved there. If
"new" a new graphic device is opened and if "sweave", the figure is made
compatible to appear in a sweave or knitr document, i.e. no new device is
opened and the plot appears in the current device or document. Default is "new".}
\item{location}{If plottype is "pdf", a location should be provided in
"location" and the figure is saved there. Default is NULL.}
}
\value{
A plot in which the values of the features of the leadCpds are
separated from the others.
}
\description{
The function \code{ContFeaturesPlot} plots the values of continuous features.
It is possible to separate between objects of interest and the
other objects.
}
\examples{
\dontrun{
data(Colors1)
Comps=c("Cpd1", "Cpd2", "Cpd3", "Cpd4", "Cpd5")
Data=matrix(sample(15, size = 50*5, replace = TRUE), nrow = 50, ncol = 5)
colnames(Data)=colnames(Data, do.NULL = FALSE, prefix = "col")
rownames(Data)=rownames(Data, do.NULL = FALSE, prefix = "row")
for(i in 1:50){
rownames(Data)[i]=paste("Cpd",i,sep="")
}
ContFeaturesPlot(leadCpds=Comps,orderLab=rownames(Data),colorLab=NULL,data=Data,
nrclusters=7,cols=Colors1,ylab="features",addLegend=TRUE,margins=c(5.5,3.5,0.5,8.7),
plottype="new",location=NULL)
}
}
|
/man/ContFeaturesPlot.Rd
|
no_license
|
cran/IntClust
|
R
| false | true | 2,634 |
rd
|
|
# start!
## Data deletion
setwd('c:/Users/USER/Desktop/문화관광 공모전/data')
library(dplyr)
library(magrittr)
d <- haven::read_sav('국민여가활동조사.sav')
forcluster <- data.frame(data.table::fread('rawdata.csv'))
forcluster%<>%mutate(본인소득=d$Q45A1)
forcluster %<>%mutate(가구소득 = forcluster$Q45A2)
for(i in 1:10498) {
if(forcluster$가구소득[i]==1){
forcluster$가구소득[i]<-0
} else if(forcluster$가구소득[i]==2) {
forcluster$가구소득[i]<-500000
} else if(forcluster$가구소득[i]==3) {
forcluster$가구소득[i]<-1500000
} else if(forcluster$가구소득[i]==4) {
forcluster$가구소득[i]<-2500000
} else if(forcluster$가구소득[i]==5) {
forcluster$가구소득[i]<-3500000
} else if(forcluster$가구소득[i]==6) {
forcluster$가구소득[i]<-4500000
} else if(forcluster$가구소득[i]==7) {
forcluster$가구소득[i]<-5500000
} else if(forcluster$가구소득[i]==8) {
forcluster$가구소득[i]<-6500000
} else if(forcluster$가구소득[i]==9) {
forcluster$가구소득[i]<-7500000
} else if(forcluster$가구소득[i]==10) {
forcluster$가구소득[i]<-8500000
} else if(forcluster$가구소득[i]==11) {
forcluster$가구소득[i]<-9500000
} else {
forcluster$가구소득[i]<-10000000
}
}
for(i in 1:10498) {
if(forcluster$본인소득[i]==1){
forcluster$본인소득[i]<-0
} else if(forcluster$본인소득[i]==2) {
forcluster$본인소득[i]<-500000
} else if(forcluster$본인소득[i]==3) {
forcluster$본인소득[i]<-1500000
} else if(forcluster$본인소득[i]==4) {
forcluster$본인소득[i]<-2500000
} else if(forcluster$본인소득[i]==5) {
forcluster$본인소득[i]<-3500000
} else if(forcluster$본인소득[i]==6) {
forcluster$본인소득[i]<-4500000
} else if(forcluster$본인소득[i]==7) {
forcluster$본인소득[i]<-5500000
} else if(forcluster$본인소득[i]==8) {
forcluster$본인소득[i]<-6500000
} else if(forcluster$본인소득[i]==9) {
forcluster$본인소득[i]<-7500000
} else if(forcluster$본인소득[i]==10) {
forcluster$본인소득[i]<-8500000
} else if(forcluster$본인소득[i]==11) {
forcluster$본인소득[i]<-9500000
} else {
forcluster$본인소득[i]<-10000000
}
}
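# --- added sketch: the two if/else ladders above can be replaced by a single
# --- lookup vector (codes 1..12 map to income midpoints); kept commented out
# --- because the columns have already been recoded by the loops at this point
income_map <- c(0, 500000, 1500000, 2500000, 3500000, 4500000,
                5500000, 6500000, 7500000, 8500000, 9500000, 10000000)
# forcluster$가구소득 <- income_map[forcluster$가구소득]
# forcluster$본인소득 <- income_map[forcluster$본인소득]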
forcluster%<>%mutate(한달평균지출액수=forcluster$Q9,만나이 = forcluster$Q36,하루평균소요시간 = (forcluster$Q13A1A1*5 + forcluster$Q13A1A2*2)/7) # weekday hours (Q13A1A1) x5 + weekend hours (Q13A1A2) x2; the original used Q13A1A1 twice, assumed a typo
forcluster%<>% filter(한달평균지출액수<500000,만나이<75,가구소득<6500000,하루평균소요시간<8)
data <- forcluster
## Clustering
library(cluster)
library(purrr)
library(ggplot2)
library(clValid)
scdata <- scale(data%>%select(만나이, 가구소득, 한달평균지출액수, 하루평균소요시간), center=T,scale=T)
vadata <- clValid(scdata, 2:10, clMethods = 'kmeans', validation = 'internal', maxitems = nrow(scdata))
summary(vadata)
clusterdata <- data%>%select(만나이, 가구소득, 한달평균지출액수, 하루평균소요시간)
attach(clusterdata) # the original attached before clusterdata was defined
######## Clustering works well!! Never throw this code away!
set.seed(10000)
kmodel <- kmeans(scale(data%>%select(만나이, 가구소득, 한달평균지출액수, 하루평균소요시간)), centers = 4)
cldata <- data%>%select(만나이, 가구소득, 한달평균지출액수, 하루평균소요시간)
results<- cldata%>%
mutate(group = kmodel$cluster)%>%
group_by(group) %>%
do(the_summary = summary(.))
results$the_summary
cldata%<>%
mutate(cluster = kmodel$cluster)%>%
mutate(본인소득=forcluster$본인소득)
table(cldata$cluster)
hist(as.matrix(cldata%>%filter(cluster==1)%>%select(본인소득)))
hist(as.matrix(cldata%>%filter(cluster==2)%>%select(본인소득)))
hist(as.matrix(cldata%>%filter(cluster==3)%>%select(본인소득)))
hist(as.matrix(cldata%>%filter(cluster==4)%>%select(본인소득)))
data$Q30 %>% table()
data$Q30 <- cut(data$Q30,breaks=c(-Inf,4.1,Inf),labels=F)
data%<>%mutate(Q35 = ifelse(data$Q35==2, 2, 1))
data%<>%mutate(group= kmodel$cluster)
logdata<- data%>%select(Q2A2, Q5, Q9, Q13A1A1, Q13A1A2, Q14, Q19, Q24A1, Q24A2, Q35, group, Q30)
colnames(logdata) <- c('동반자유형', '지속활동여부', '한달지출', '평일시간', '휴일시간',
'휴가여부', '생활권시설이용여부', '스마트평일', '스마트주말',
'결혼여부', '그룹', '만족도')
for(k in c(1,2,6,7,10,11,12)){
logdata[,k] <- as.factor(logdata[,k])
}
str(logdata)
group1 <- logdata%>%filter(그룹==1)
group2 <- logdata%>%filter(그룹==2)
group3 <- logdata%>%filter(그룹==3)
group4 <- logdata%>%filter(그룹==4)
group1%<>%select(-그룹)
group2%<>%select(-그룹)
group3%<>%select(-그룹)
group4%<>%select(-그룹) # preprocessing for the logistic models is done!
library(rgl)
colors <- c("#BEF781","#F78181","#585858",'#81BEF7')
colors <- colors[as.numeric(cldata$cluster)]
plot3d(cldata$만나이,cldata$가구소득,cldata$한달평균지출액수, col=colors)
ggplot(aes(x = 만나이, y = 가구소득), data = cldata) +
geom_point(aes(color = cluster))
#####################################################################
## binary logistic ##
group1 <- read.csv('log1.csv')
group2 <- read.csv('log2.csv')
group3 <- read.csv('log3.csv')
group4 <- read.csv('log4.csv')
for (k in c(2,6,7,10)){group1[,k] <- ifelse(group1[,k]==2, 0, 1)} ; group1[,11] <- ifelse(group1[,11]==1,0,1)
for (k in c(2,6,7,10)){group2[,k] <- ifelse(group2[,k]==2, 0, 1)} ; group2[,11] <- ifelse(group2[,11]==1,0,1)
for (k in c(2,6,7,10)){group3[,k] <- ifelse(group3[,k]==2, 0, 1)} ; group3[,11] <- ifelse(group3[,11]==1,0,1)
for (k in c(2,6,7,10)){group4[,k] <- ifelse(group4[,k]==2, 0, 1)} ; group4[,11] <- ifelse(group4[,11]==1,0,1)
for(k in c(1,2,6,7,10,11)){group1[,k] <- as.factor(group1[,k])}
for(k in c(1,2,6,7,10,11)){group2[,k] <- as.factor(group2[,k])}
for(k in c(1,2,6,7,10,11)){group3[,k] <- as.factor(group3[,k])}
for(k in c(1,2,6,7,10,11)){group4[,k] <- as.factor(group4[,k])}
library(lmtest)
fit1 <- glm(만족도~.,data=group1, family='binomial')
fit2 <- glm(만족도~.,data=group2, family='binomial')
fit3 <- glm(만족도~.,data=group3, family='binomial')
fit4 <- glm(만족도~.,data=group4, family='binomial')
summary(fit1)
summary(fit2)
summary(fit3)
summary(fit4)
null1 <- glm(만족도~1, data=group1, family= 'binomial')
full1 <- glm(만족도~., data=group1, family= 'binomial')
stepfit1<- step(null1, scope=list(lower=null1,upper=full1), direction = 'both', trace = F)
summary(stepfit1)
finalfit1 <- stepfit1
lrtest(null1, finalfit1)
lrtest(finalfit1,full1)
summary(finalfit1)
exp(coef(finalfit1))
car::vif(finalfit1)
null2 <- glm(만족도~1, data=group2, family= 'binomial')
full2 <- glm(만족도~., data=group2, family= 'binomial')
stepfit2<- step(null2, scope=list(lower=null2,upper=full2), direction = 'both')
summary(stepfit2)
finalfit2 <- stepfit2
finalfit2 <- glm(만족도~동반자유형+지속활동여부+휴가여부, data=group2,family='binomial')
summary(finalfit2)
lrtest(null2, finalfit2)
lrtest(finalfit2,full2)
exp(coef(finalfit2))
car::vif(finalfit2)
null3 <- glm(만족도~1, data=group3, family= 'binomial')
full3 <- glm(만족도~., data=group3, family= 'binomial')
stepfit3<- step(null3, scope=list(lower=null3,upper=full3), direction = 'both')
summary(stepfit3)
finalfit3 <- glm(만족도~평일시간+생활권시설이용여부+한달지출+결혼여부+스마트주말+동반자유형, data=group3, family='binomial')
lrtest(null3, finalfit3)
lrtest(finalfit3,full3)
summary(finalfit3)
exp(coef(finalfit3))
car::vif(finalfit3)
null4 <- glm(만족도~1, data=group4, family= 'binomial')
full4 <- glm(만족도~., data=group4, family= 'binomial')
stepfit4 <- step(null4, scope=list(lower=null4,upper=full4), direction = 'both')
summary(stepfit4)
finalfit4 <- glm(만족도~평일시간+생활권시설이용여부+휴가여부+한달지출+지속활동여부+동반자유형, data=group4, family='binomial')
lrtest(null4, finalfit4)
lrtest(finalfit4,full4)
summary(finalfit4)
exp(coef(finalfit4))
car::vif(finalfit4)
## anova
setwd('c:/Users/USER/Desktop/data')
library(dplyr)
library(magrittr)
data <- data.table::fread('aovdata.csv')
data%<>%mutate(group=as.factor(group))
aovmodel1 <- aov(만나이~group, data=data) # one-way anova
aovmodel2 <- aov(한달평균지출액수~group, data=data)
aovmodel3 <- aov(가구소득~group, data=data)
summary(aovmodel1) # the original called summary(aovmodel), an undefined object
summary(aovmodel2)
summary(aovmodel3)
oneway.test(만나이~group,data=data,var.equal=F) # welch's anova
t.test(data%>%filter(group==1)%>%select(가구소득), data%>%filter(group==4)%>%select(가구소득),paired =FALSE, var.equal = TRUE, conf.level = 0.95)
t.test(data%>%filter(group==1)%>%select(한달평균지출액수), data%>%filter(group==4)%>%select(한달평균지출액수),paired =FALSE, var.equal = TRUE, conf.level = 0.95)
library(multcomp)
pairwise.t.test(data$만나이,data$group, p.adj='bonf')
pairwise.t.test(data$가구소득,data$group, p.adj='bonf')
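# --- added sketch: TukeyHSD() on the fitted aov objects is a common alternative
# --- to the Bonferroni-adjusted pairwise t-tests above
TukeyHSD(aovmodel1)
TukeyHSD(aovmodel3)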
##
group1 <- data.table::fread('group1_all.csv')
group4 <- data.table::fread('group4_all.csv')
hist(group1$만나이)
hist(group4$만나이)
|
/Clustering, logistic, anova.R
|
no_license
|
JinsaGalbi/Consumer-oriented-national-leisure-policy
|
R
| false | false | 9,397 |
r
|
|
## The following code is part of the example scripts included
## in the "Soil Organic Carbon Mapping Cookbook"
## @knitr 3C-SupportVectorMachines
# load data
dat <- read.csv("data/MKD_RegMatrix.csv")
dat$LCEE10 <- as.factor(dat$LCEE10)
# dat$soilmap <- as.factor(dat$soilmap)
# explore the data structure
str(dat)
library(sp)
library(raster) # assumed missing import: stack(), writeRaster(), and raster predict() below need it
# Promote to spatialPointsDataFrame
coordinates(dat) <- ~ X + Y
class(dat)
dat@proj4string <- CRS(projargs = "+init=epsg:4326")
dat@proj4string
load(file = "covariates.RData")
names(covs)
# plot the names of the covariates
names(dat@data)
# variable selection using correlation analysis
selectedCovs <- cor(x = as.matrix(dat@data[,5]),
y = as.matrix(dat@data[,-c(1:7,13,21)]))
# print correlation results
selectedCovs
library(reshape)
x <- subset(melt(selectedCovs), value != 1 & !is.na(value)) # drop self-correlations and NAs; the original 'value != 1 | value != NA' only worked via NA propagation
x <- x[with(x, order(-abs(x$value))),]
idx <- as.character(x$X2[1:5])
dat2 <- dat[c('OCSKGM', idx)]
names(dat2)
COV <- covs[[idx]]
# Selected covariates
names(COV)
# Categorical variables in svm models
dummyRaster <- function(rast){
rast <- as.factor(rast)
result <- list()
for(i in 1:length(levels(rast)[[1]][[1]])){
result[[i]] <- rast == levels(rast)[[1]][[1]][i]
names(result[[i]]) <- paste0(names(rast),
levels(rast)[[1]][[1]][i])
}
return(stack(result))
}
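# --- added sketch (toy check, not in the original script): dummyRaster() on a
# --- small 3-category raster should yield one 0/1 layer per category
# toy <- raster(nrows = 10, ncols = 10)
# values(toy) <- sample(1:3, ncell(toy), replace = TRUE)
# plot(dummyRaster(toy)) # expect layers layer1, layer2, layer3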
# convert soilmap from factor to dummy
# soilmap_dummy <- dummyRaster(covs$soilmap)
# convert LCEE10 from factor to dummy
LCEE10_dummy <- dummyRaster(covs$LCEE10)
# Stack the 5 COV layers with the 2 dummies
COV <- stack(COV, LCEE10_dummy)
# COV <- stack(COV, soilmap_dummy, LCEE10_dummy)
# print the final layer names
names(COV)
# convert soilmap column to dummy, the result is a matrix
# to have one column per category we had to add -1 to the formula
# dat_soilmap_dummy <- model.matrix(~soilmap -1, data = dat@data)
# convert the matrix to a data.frame
# dat_soilmap_dummy <- as.data.frame(dat_soilmap_dummy)
# convert LCEE10 column to dummy, the result is a matrix
# to have one column per category we had to add -1 to the formula
dat_LCEE10_dummy <- model.matrix(~LCEE10 -1, data = dat@data)
# convert the matrix to a data.frame
dat_LCEE10_dummy <- as.data.frame(dat_LCEE10_dummy)
dat@data <- cbind(dat@data, dat_LCEE10_dummy)
# dat@data <- cbind(dat@data, dat_LCEE10_dummy, dat_soilmap_dummy)
names(dat@data)
# Fitting a svm model and parameter tuning
library(e1071)
library(caret)
# Test different values of epsilon and cost
tuneResult <- tune(svm, OCSKGM ~., data = dat@data[,c("OCSKGM",
names(COV))],
ranges = list(epsilon = seq(0.1,0.2,0.02),
cost = c(5,7,15,20)))
plot(tuneResult)
# Choose the model with the best combination of epsilon and cost
tunedModel <- tuneResult$best.model
print(tunedModel)
# Use the model to predict the SOC in the covariates space
OCSsvm <- predict(COV, tunedModel)
# Save the result
writeRaster(OCSsvm, filename = "results/MKD_OCSKGM_svm.tif",
overwrite=TRUE)
plot(OCSsvm)
# Variable importance in svm. Code by:
# stackoverflow.com/questions/34781495
w <- t(tunedModel$coefs) %*% tunedModel$SV # weight vectors
w <- apply(w, 2, function(v){sqrt(sum(v^2))}) # weight
w <- sort(w, decreasing = T)
print(w)
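# --- added sketch: the full tuning grid and its cross-validated error live in
# --- tuneResult$performances, which complements plot(tuneResult)
print(tuneResult$performances)
print(tuneResult$best.performance) # CV error (MSE) of the selected model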
|
/code/3C-SupportVectorMachines.R
|
no_license
|
anhnguyendepocen/SOC-Mapping-Cookbook
|
R
| false | false | 3,364 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{ungrid}
\alias{ungrid}
\title{Extract (x, y, z) from (x, y, grid)}
\usage{
ungrid(x, y, grid)
}
\arguments{
\item{x}{a vector holding the x coordinates of grid.}
\item{y}{a vector holding the y coordinates of grid.}
\item{grid}{a matrix holding the grid.}
}
\value{
A list containing three vectors: \code{x}, the grid x values,
\code{y}, the grid y values, and \code{grid}, the grid values.
}
\description{
Extract the grid points from a grid, returning columns.
This is useful for e.g. gridding large datasets, in which the first step
might be to use \code{\link{binMean2D}}, followed by
\code{\link{interpBarnes}}.
}
\examples{
library(oce)
data(wind)
u <- interpBarnes(wind$x, wind$y, wind$z)
contour(u$xg, u$yg, u$zg)
U <- ungrid(u$xg, u$yg, u$zg)
points(U$x, U$y, col=oce.colorsJet(100)[rescale(U$grid, rlow=1, rhigh=100)], pch=20)
}
\author{
Dan Kelley
}
|
/pkgs/oce/man/ungrid.Rd
|
no_license
|
vaguiar/EDAV_Project_2017
|
R
| false | true | 955 |
rd
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tree.R
\name{ggDiagnose.tree}
\alias{ggDiagnose.tree}
\title{Diagnostic plot for \code{tree} object (\code{ggplot2} based)}
\usage{
ggDiagnose.tree(x, type = c("proportional", "uniform"),
split.labels = TRUE, leaf.labels = FALSE, text.size = 3, ...,
show.plot = TRUE, return = FALSE)
}
\arguments{
\item{x}{an object of class \code{tree} from the \pkg{tree} package.}
\item{type}{character string. If "uniform", the branches are of uniform
length (only shows depth). Otherwise they are proportional to the decrease
in impurity.}
\item{split.labels}{if TRUE (the default), non-leaf nodes are labeled with
splitting rule}
\item{leaf.labels}{if TRUE, leaves are labeled with predicted value
(the default is FALSE).}
\item{text.size}{integer size for \code{\link[ggplot2]{geom_text}} labels.}
\item{...}{extra attributes (currently not used)}
\item{show.plot}{logical; whether to display the graphic (a group of graphics
in this case)}
\item{return}{logical; whether to return a list with the graphic and the data
frame used to make the majority of the graphics}
}
\value{
depending on \code{show.plot} and \code{return} it
will return the visualization of the graphic and/or a list
of both the list of data frames used the make the graphic and the
individual graphic object.
}
\description{
Utilizes work from Andrie de Vries's function
\code{\link[ggdendro]{dendro_data.tree}} from the \pkg{ggdendro} package,
which is used in \code{dfCompile} to create a list of data frames.
}
\examples{
library(tree)
tree.object <- tree(Species ~., data = iris)
plot(tree.object)
ggDiagnose.tree(tree.object, split.labels = FALSE)
}
|
/man/ggDiagnose.tree.Rd
|
permissive
|
benjaminleroy/ggDiagnose
|
R
| false | true | 1,669 |
rd
|
|
with(afb8926f1d70741859b34cf202d7de61a, {ROOT <- 'D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/80bb2a25-ac5d-47d0-abfc-b3f3811f0936';source("D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/R/Recommendations/advanced_federation_blend.r");a2Hrpdwy3col1<- as.character(FRAME878836$location);linkapoQGV <- data.table("col1"=c("null"), "col2"=c("null")); linkapoQGV <- unique(linkapoQGV);aR8pJAHRV<- curate(a2Hrpdwy3col1,linkapoQGV);aR8pJAHRV <- as.data.table(aR8pJAHRV);names(aR8pJAHRV)<-"afbNDZAYw";FRAME878836 <- cbind(FRAME878836,aR8pJAHRV);FRAME878836 <- FRAME878836[,-c("location")];colnames(FRAME878836)[colnames(FRAME878836)=="afbNDZAYw"] <- "location";rm(aR8pJAHRV,linkapoQGV,a2Hrpdwy3col1,a2Hrpdwy3, best_match, best_match_nonzero, best_match_zero, blend, curate, self_match );});
|
/80bb2a25-ac5d-47d0-abfc-b3f3811f0936/R/Temp/amN97LN4trRUF.R
|
no_license
|
ayanmanna8/test
|
R
| false | false | 850 |
r
|
|
# ------------------------------------------------------------------------
# Strongbridge scoring summaries
# ------------------------------------------------------------------------
# assumed missing imports: read_csv/read_rds, str_pad, and the dplyr verbs used below
library(tidyverse)
results_dir <- "F:/Projects/Strongbridge/results/scoring/"
model_dir <- "F:/Projects/Strongbridge/results/modelling/XGBOOST_advanced/02_XGB_optimal_HP/"
# ------------------------------------------------------------------------
# Sum all counts from all chunks
# ------------------------------------------------------------------------
num_chunks = c(1:128)
counts <- vector("list")
counts[[1]]<- read_csv(paste0(results_dir, "C", str_pad(1, 3, pad = "0"), "_score_sample_counts.csv"))
all_counts <- data.frame(
counts[[1]][,1:3],
count_pats = counts[[1]]$counts_sample_scoring_cohort,
count_nobrain = counts[[1]]$patients_with_HYPP_and_CAIs
)
for (i in num_chunks[-1]) {
counts[[i]]<- read_csv(paste0(results_dir, "C", str_pad(i, 3, pad = "0"), "_score_sample_counts.csv"))
all_counts$count_pats = counts[[i]]$counts_sample_scoring_cohort + all_counts$count_pats
all_counts$count_nobrain = counts[[i]]$patients_with_HYPP_and_CAIs + all_counts$count_nobrain
}
write.csv(all_counts,
paste0(results_dir, "all_counts_128_minus_under_24_month_lookback_ppp_patients.csv")
)
# ------------------------------------------------------------------------
# Total count of patients
# ------------------------------------------------------------------------
results_dir <- "F:/Projects/Strongbridge/results/scoring/"
total <- list()
num_chunks <- 1:128
for (i in num_chunks) {
chunk <- paste0("C", str_pad(i, 3, pad = "0"))
pred <- read_rds(paste0(results_dir, chunk, "_score_sample_pred.rds"))
total[[i]] <- length(pred$prob.1)
}
total_vec <- unlist(total)
total_sum <- sum(total_vec)
# ------------------------------------------------------------------------
# Count of dirty/clean ppp
# ------------------------------------------------------------------------
score_dir <- "F:/Projects/Strongbridge/data/scoring_cohort_chunks/"
pr_curve <- read_csv(paste0(model_dir, "PR_curve_opt_HP_unmatched.csv"))
counts_ppp <- vector("list")
num_chunks = 128 # note: only the final chunk (128) is processed below, matching counts_ppp[[128]] later on
# Count per chunk
for (i in num_chunks) {
chunk <- paste0("C", str_pad(i, 3, pad = "0"))
pred_merge <- merge(
read_rds(paste0(results_dir, "C", str_pad(i, 3, pad = "0"), "_score_sample_pred.rds")),
read_csv(paste0(score_dir, "Scoring_Final_Sample_", chunk, ".csv"),
col_types = (cols(PATIENT_ID = col_character(), .default = col_guess()))) %>%
select(PATIENT_ID, clean_ppp_clm_cnt, dirty_ppp_clm_cnt),
by = "PATIENT_ID")
pred_merge$clean_ppp_clm_cnt[is.na(pred_merge$clean_ppp_clm_cnt)] <- 0
pred_merge$dirty_ppp_clm_cnt[is.na(pred_merge$dirty_ppp_clm_cnt)] <- 0
clean_count <- sapply( pr_curve$thresh, function(x) {
length(pred_merge$prob.1[pred_merge$prob.1 >= x & pred_merge$clean_ppp_clm_cnt > 0])
})
dirty_count <- sapply( pr_curve$thresh, function(x) {
length(pred_merge$prob.1[pred_merge$prob.1 >= x & pred_merge$dirty_ppp_clm_cnt > 0])
})
counts_ppp[[i]] <- data.frame(pr_curve,
clean_counts= clean_count,
dirty_counts = dirty_count
)
}
# # Sum all counts together
#
# ppp_all_counts <- data.frame(
# counts_ppp[[1]][,1:3],
# clean_counts = counts_ppp[[1]]$clean_counts,
# dirty_counts = counts_ppp[[1]]$dirty_counts
# )
#
# for (i in num_chunks[-1]) {
#
# ppp_all_counts$clean_counts = counts_ppp[[i]]$clean_counts + ppp_all_counts$clean_counts
# ppp_all_counts$dirty_counts = counts_ppp[[i]]$dirty_counts + ppp_all_counts$dirty_counts
#
# }
ppp_counts <- as.data.frame(counts_ppp[[128]])
write_csv(ppp_counts, paste0(results_dir, "clean_dirty_ppp_counts_minus_under_24_month_lookback.csv"))
# ------------------------------------------------------------------------
# Patient profiles
# ------------------------------------------------------------------------
profiles <- vector("list")
num_chunks = c(1:128)
for (i in num_chunks) {
profiles[[i]] <- read_rds(paste0(results_dir, "C", str_pad(i, 3, pad = "0"), "_score_sample_patient_profiles.rds"))
}
profiles_all <- profiles[[1]]
for (i in num_chunks[-1]) {
profiles_all <- rbind(profiles_all, profiles[[i]])
}
profiles_all <- arrange(profiles_all, desc(prob.1))
profiles_top_10 <- profiles_all[1:10,]
write.csv(profiles_all, paste0(results_dir, "each_chunk_top_10_patient_profiles_128_minus_under_24_months_lookback.csv"))
write.csv(profiles_top_10, paste0(results_dir, "overall_top_10_patient_profiles_128_minus_under_24_months_lookback.csv"))
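# --- added sketch: the rbind loop above can be collapsed into one call; assumed
# --- equivalent given identical column layouts across chunks
# profiles_all <- dplyr::bind_rows(profiles) %>% arrange(desc(prob.1))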
|
/scoring/02_sum_counts_across_chunks.R
|
no_license
|
jzhao0802/strongbridge
|
R
| false | false | 4,685 |
r
|
|
i = 340
library(asSeq, lib="/nas02/home/w/e/weisun/R/Rlibs/")
# -------------------------------------------------------------------------
# read in the list of heterozygous SNPs used to extract allele-specific reads
# -------------------------------------------------------------------------
setwd("/lustre/scr/w/e/weisun/TCGA/hetSNP_EA/")
files = list.files(path = ".", pattern="hetSNP_")
sams = gsub("hetSNP_", "", files)
sams = gsub(".txt", "", sams, fixed=TRUE)
#for(i in 1:length(files)){
f1 = files[i]
sam1 = sams[i]
cat("\n", sam1, date(), "\n")
input = sprintf("../bam/%s_sorted_by_name_uniq_filtered.bam", sam1)
outputTag = sprintf("../bam/%s_asCounts_hetSNP_EA", sam1)
snpList = f1
if(! file.exists(f1)){
stop("snpList file does not exist")
}
extractAsReads(input, snpList, outputTag)
#}
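# --- added sketch (hypothetical, not in the original job script): to process every
# --- sample instead of the single hard-coded index, the commented loop above could
# --- be run in parallel, e.g.:
# parallel::mclapply(seq_along(files), function(j) {
#   extractAsReads(sprintf("../bam/%s_sorted_by_name_uniq_filtered.bam", sams[j]),
#                  files[j],
#                  sprintf("../bam/%s_asCounts_hetSNP_EA", sams[j]))
# }, mc.cores = 4)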
|
/data_preparation/R_batch3/_step2/step2_filter_asCounts.339.R
|
no_license
|
jasa-acs/Mapping-Tumor-Specific-Expression-QTLs-in-Impure-Tumor-Samples
|
R
| false | false | 809 |
r
|
|
##################################
####Orange 10 Time Series HW 2####
##################################
library(readxl)
library(lubridate)
library(tidyverse)
library(Hmisc)
library(zoo)
library(broom)
library(ggfortify)
library(uroot)
library(lmtest)
library(forecast)
library(dplyr)
library(ggplot2)
##################################
#######Well data cleaning#########
##################################
well <- read_excel("G-2866_T.xlsx", sheet = "Well")
summary(well)
hist(well$Corrected)
well_2 <- well %>%
# create a new variable which is an integer for the hour of each time
mutate(time_2 = hour(time)) %>%
# merge the data and newly created datetime variables into a variable called datetime
unite(datetime, date, time_2, sep = " ", remove = FALSE) %>%
# convert the character datetime variable to an R recognized datetime format
mutate(datetime = ymd_h(datetime)) %>%
# select only the new datetime variable and rename the Corrected variable to depth
select(datetime, depth = Corrected)
#group our data into hourly data and use the mean of depth
well_3<-well_2%>%
group_by(datetime)%>%
summarise(avg_well=mean(depth))
#find if there is any missing value
datetime=seq(
from=as.POSIXct("2007-10-1 1:00", tz="UTC"),
to=as.POSIXct("2018-6-12 23:00", tz="UTC"),
by="hour"
)
missing_hours=length(datetime)-length(well_3$datetime)
datetime<-as.data.frame(datetime)
#259
#merge the actual datetime and well_3 and impute the missing values
well_4 <- merge(datetime, well_3, by.all=c("datetime"), all.x = TRUE, all.y = TRUE )
sum(is.na(well_4$avg_well))
#impute the missing value
well_4$avg_well <- na.interp(well_4$avg_well) # original used well_4$avg, which only resolved via partial matching
sum(is.na(well_4$avg_well))
##############################
######Rain Data cleaning######
##############################
rain <- read_excel("G-2866_T.xlsx", sheet = "Rain")
summary(rain)
hist(rain$RAIN_FT)
str(rain)
dim(rain)
rain$date <- strptime(format(rain$Date, "%Y/%m/%d"), "%Y/%m/%d")
rain$time <- strptime(format(rain$Date, "%H:%M:%S"), "%H:%M:%S")
rain <- rain[,c(-1)]
rain<- rain[,c(2,3,1)]
str(rain)
rain_2 <- rain
# create a new variable which is an integer for the hour of each time
rain_2$time_2 = hour(rain_2$time)
rain_2<- rain_2 %>%
# merge the data and newly created datetime variables into a variable called datetime
unite(datetime, date, time_2, sep = " ", remove = FALSE) %>%
# convert the character datetime variable to an R recognized datetime format
mutate(datetime = ymd_h(datetime)) %>%
# select only the new datetime variable and rename the Tide_ft variable to depth
select(datetime, RAIN_FT = RAIN_FT)
#group our data into hourly data and use the sum of rain for an hour
rain_3<-rain_2%>%
group_by(datetime)%>%
summarise(sum_rain=sum(RAIN_FT))
#find if there is any missing value - UPDATE THE TIME IF NEEDED
datetime=seq(
from=as.POSIXct("2005-10-10 0:00", tz="UTC"),
to=as.POSIXct("2018-07-09 23:00", tz="UTC"),
by="hour"
)
datetime<-as.data.frame(datetime)
missing_hours=length(datetime$datetime)-length(rain_3$datetime)
print(missing_hours)
##0
#merge rain and well
well_4$avg_well <- as.numeric(well_4$avg_well) # original 'well_4$avg' relied on partial matching and created a stray column
rain_3$sum_rain <- as.numeric(rain_3$sum_rain) # original 'rain_3$sum <- rain_3$sum' was a no-op; numeric coercion assumed intended
#########################
##########Tide###########
#########################
tide <- read_csv("station_8722859.csv")
summary(tide)
hist(tide$Prediction)
tide_2 <- tide %>%
# create a new variable which is an integer for the hour of each time
mutate(time_2 = hour(Time)) %>%
# merge the data and newly created datetime variables into a variable called datetime
unite(datetime, Date, time_2, sep = " ", remove = FALSE) %>%
# convert the character datetime variable to an R recognized datetime format
mutate(datetime = ymd_h(datetime)) %>%
# select only the new datetime variable and rename the Corrected variable to depth
select(datetime,Prediction)
#group our data into hourly data and use the mean of depth
tide_3<-tide_2%>%
group_by(datetime)%>%
summarise(avg_tide=mean(Prediction))
#find if there is any missing value
datetime=seq(
from=as.POSIXct("2005-10-10 0:00", tz="UTC"),
to=as.POSIXct("2018-10-9 23:00", tz="UTC"),
by="hour"
)
missing_hours=length(datetime)-length(tide_3$datetime)
datetime<-as.data.frame(datetime)
#merge the actual datetime and tide_3 and impute the missing values
tide_4 <- merge(datetime, tide_3, by.all=c("datetime"), all.x = TRUE, all.y = TRUE )
sum(is.na(tide_4$avg_tide))
#impute the missing value
tide_4$avg_tide <- na.interp(tide_4$avg_tide)
sum(is.na(tide_4$avg_tide))
#data for 10 years
# merge well, rain, and tide first (the original created Date/Month/Hour before well_merge existed)
well_merge <- inner_join(inner_join(well_4, rain_3, by = NULL, copy = FALSE), tide_4, by = NULL, copy = FALSE)
#Create Date variable
well_merge$Date <- substr(well_merge$datetime, start = 1, stop = 10)
#Create Month variable
well_merge$Month <- substr(well_merge$datetime, start = 6, stop = 7)
#Create Hour Variable
well_merge$Hour <- substr(well_merge$datetime, start = 12, stop = 13)
write.csv(well_merge, file="merge_well.csv", row.names = FALSE)
#subset recent three years data
train_3<-well_merge[67494:93791,]
write.csv(train_3,file="merge_well_3.csv",row.names = FALSE)
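# Sketch: hard-coded row indices are brittle if the merge changes; a date-based
# filter would select the same window more robustly (cutoff assumed for illustration):
# train_3 <- well_merge %>%
#   filter(datetime >= as.POSIXct("2015-06-13 00:00", tz = "UTC"))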
################################
#########explore data###########
################################
## time series object train_3
well_ts_train3_well<-ts(train_3$avg_well, frequency = 8766/4)
well_ts_train3_tide<-ts(train_3$avg_tide, frequency = 8766)
well_ts_train3_rain<-ts(train_3$sum_rain, frequency = 8766/4)
##STL decomposition: quarter season
decomp_stl_well <- stl(well_ts_train3_well, s.window = 7)
plot(decomp_stl_well)
decomp_stl_tide <- stl(well_ts_train3_tide, s.window = 7)
plot(decomp_stl_tide)
decomp_stl_rain <- stl(well_ts_train3_rain, s.window = 7)
plot(decomp_stl_rain)
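# Sketch: the fitted STL components live in the time.series matrix, e.g. to
# inspect the trend or gauge how much variance the remainder carries (illustrative):
# trend_well <- decomp_stl_well$time.series[, "trend"]
# remainder_well <- decomp_stl_well$time.series[, "remainder"]
# var(remainder_well) / var(well_ts_train3_well) # rough share of unexplained variation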
# create a gg object using the train_3 dataframe with aesthetics: x = datetime and y = avg
ggplot(train_3, aes(datetime, avg_well)) +
# draw a line plot using the above defined aesthetics
geom_line(color = "black") +
# drop some of the ugly R thematic elements for a simple look
theme_bw() +
# label the axes and add a title
labs(x = "Date And Time (in hours)", y = "Avg Depth of Well (in feet)", title = "Avg Depth of Well From 2015-2018")
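# Sketch: the plot above could be saved for the write-up (file name assumed):
# ggsave("avg_well_depth_2015_2018.png", width = 10, height = 5)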
##################################
#######Modeling in SAS code#######
##################################
|
/Time Series/Final_data_cleaning.R
|
no_license
|
smao6/Orange_HW_10
|
R
| false | false | 6,434 |
r
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI for the application
ui <- navbarPage(
"Final Project Title",
tabPanel("Model",
fluidPage(
titlePanel("Model Title"),
sidebarLayout(
sidebarPanel(
selectInput(
"plot_type",
"Plot Type",
c("Option A" = "a", "Option B" = "b")
)),
mainPanel(plotOutput("line_plot")))
)),
tabPanel("Discussion",
titlePanel("Potential Sources"),
h3("Gun Violence Statistics"),
p("The most important part of my project
will involve statistics on gun violence. I found a Wikipedia
tbale which has information on violence in 2015. The source is
the FBI's online summary of Crime in the U.S. This is an anunual
report and I have checked and there is a 2019 version. Generally,
I want to avoid 2020 in fear of any anomalies created by the
pandemic. The wikipedia table also has a source for 2013 gun
ownership rates, but the study it linked to seems to have done
these caluclations using firearm suicide as a proxy. This sounds
dubious to me. Link to Wikipedia page:
https://en.wikipedia.org/wiki/Gun_violence_in_the_United_States_by_state#cite_note-4"),
h3("Gun Ownerhsip Statistics"),
p("Not only is the study cited old, but I really am sketptical of
its methodology, though it is apparently not an uncommon one. I
found a CBS news article which published gun ownership rates by
state and whose source was a long-term study released in April of
2020 by researches at the RAND Corporation. The base of the
calculations appear to come from surveys, combined with
'administrative data on firearm suicides, hunting licenses,
subscriptions to Guns & Ammo magazine, and background checks.'
This sounds much more trustworthy. Though it was released early in
2020, so I don't think my COVID fears really apply here."),
h3("Gun Ownerhsip Statistics"),
p("Not only is the study cited old, but I really am sketptical of
its methodology, though it is apparently not an uncommon one. I
found a CBS news article which published gun ownership rates by
state and whose source was a long-term study released in April of
2020 by researches at the RAND Corporation. The base of the
calculations appear to come from surveys, combined with
'administrative data on firearm suicides, hunting licenses,
subscriptions to Guns & Ammo magazine, and background checks.'
This sounds much more trustworthy. Though it was released early in
2020, so I don't think my COVID fears really apply here.
Link: https://www.rand.org/pubs/tools/TL354.html"),
h3("States by GDP"),
p("I found a Wikipedia article with a list of states by their GDP.
It lists states for the third quarter of 2020, but again I am
conerned that data from this time will obscure results because
of impact from COVID. The source is the Bureau of Economic
Analysis, Department of Commerce. They seem to have regular
quarterly reports of this kind. Information on the territories of
the U.S. was gotten from the World Bank, and I don't think I will
be including these in my observation, though I should make sure
other sources of mine do not."),
h3("States by GDP"),
p("I found a Wikipedia article with a list of states by their GDP.
It lists states for the third quarter of 2020, but again I am
conerned that data from this time will obscure results because
of impact from COVID. The source is the Bureau of Economic
Analysis, Department of Commerce. They seem to have regular
quarterly reports of this kind. Information on the territories of
the U.S. was gotten from the World Bank, and I don't think I will
be including these in my observation, though I should make sure
other sources of mine do not. I also wonder whether GDP is a better
approximation of what I am looking for than income. I will want
to compare the difference between these two in further analysis."),
h3("Census Data"),
p("Census Data contains many of the factors I am interested in using
in these observations: urbanity, race, and possibly, hosuehold income.
I am interested in the first two because I often here these invoked
in conversation gun violence. Particulary by those who push back
against corellation by gun violence. Though PMUS data exists and I
would love to do something on a more local level, perhaps in
Virginia, I do not know whether the preceeding data exixts for
states.")),
tabPanel("About",
titlePanel("About"),
h3("Project Background and Motivations"),
p("Hello, this is where I talk about my project."),
h3("About Me"),
p("My name is Nosa and I study Comparative Literature.
You can reach me at nlawani@college.harvard.edu."),
h3("Repo Link"),
p("https://github.com/lawanin/gov1005-recitation-week-4")))
# Define server logic (currently a placeholder)
server <- function(input, output) {}
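# A minimal sketch (not part of the original app) of server logic that would
# populate the "line_plot" output declared in the UI; the data and plots here
# are placeholders assumed for illustration only:
# server <- function(input, output) {
#   output$line_plot <- renderPlot({
#     if (input$plot_type == "a") {
#       hist(rnorm(100), main = "Option A")               # histogram of random draws
#     } else {
#       plot(cumsum(rnorm(100)), type = "l",
#            main = "Option B")                           # simple random-walk line
#     }
#   })
# }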
# Run the application
shinyApp(ui = ui, server = server)
|
/Milestone_4/app.R
|
no_license
|
BeauMeche/gov1005-recitation-week-4
|
R
| false | false | 6,280 |
r
|
#' Height (current + predicted) vs growth curves plot for males.
#'
#' This function returns a ggplot object showing the **current** and **predicted height** vs normal growth charts for the American population.
#'
#' Data for growth charts was obtained from the National Center for Health Statistics.
#'
#' Please visit \url{https://www.cdc.gov/growthcharts/percentile_data_files.htm} to learn more about this information.
#'
#' Be aware that players from populations other than the one these growth charts are based on may not be well represented.
#'
#' For females, see the documentation for **`plot_growth_female()`**
#'
#'
#' @param data A data frame. The object containing the raw data we wish to analyze.
#' @param athlete A character string with the name of the athlete we wish to plot.
#' @return A plot `(ggplot)`
#'
#' @export
#' @examples
#' plot_growth_male(data_sample, "Athlete 08")
#'
plot_growth_male <- function(data, athlete) {
curve <- matuR::curves %>%
dplyr::mutate(Years = round(Agemos / 12, 2)) %>%
dplyr::select(Gender, Years, everything(), -Agemos, -Power, -Median, -CV) %>%
dplyr::filter(Gender == "Male")
athlete <- maturation_cm(data) %>% dplyr::filter(Athlete %in% c(athlete))
plot <- ggplot2::ggplot(curve) +
ggplot2::annotate("rect", xmin = 1.9, xmax = 6, ymin = 187, ymax = 216, fill = "white") +
ggplot2::annotate("text", x = 3.9, y = 215, label = "Normal Growth Curve", color = "black", size = 3) +
ggplot2::annotate("rect", xmin = 2, xmax = 2.6, ymin = 205, ymax = 210, fill = "skyblue1") +
ggplot2::annotate("text", x = 4, y = 208, label = "3-97 Percentiles", color = "black", size = 2) +
ggplot2::annotate("rect", xmin = 2, xmax = 2.6, ymin = 200, ymax = 205, fill = "skyblue2") +
ggplot2::annotate("text", x = 4, y = 203, label = "5-95 Percentiles", color = "black", size = 2) +
ggplot2::annotate("rect", xmin = 2, xmax = 2.6, ymin = 195, ymax = 200, fill = "skyblue3") +
ggplot2::annotate("text", x = 4, y = 198, label = "10-90 Percentiles", color = "black", size = 2) +
ggplot2::annotate("rect", xmin = 2, xmax = 2.6, ymin = 190, ymax = 195, fill = "skyblue4") +
ggplot2::annotate("text", x = 4, y = 193, label = "25-75 Percentiles", color = "black", size = 2) +
ggplot2::annotate("rect", xmin = 20, xmax = 22, ymin = max(curve$P3), ymax = max(curve$P97), fill = "black", alpha = .8) +
ggplot2::annotate("text", x = 21.5, y = 75, label = "Adult Years", color = "black", size = 3) +
ggplot2::geom_ribbon(ggplot2::aes(ymin=P3, ymax=P97, x=Years), fill = "skyblue1") +
ggplot2::geom_ribbon(ggplot2::aes(ymin=P5, ymax=P95, x=Years), fill = "skyblue2") +
ggplot2::geom_ribbon(ggplot2::aes(ymin=P10, ymax=P90, x=Years), fill = "skyblue3") +
ggplot2::geom_ribbon(ggplot2::aes(ymin=P25, ymax=P75, x=Years), fill = "skyblue4") +
ggplot2::geom_line(ggplot2::aes(y=P50, x=Years), colour = "gray", linetype = 2) +
ggplot2::geom_vline(ggplot2::aes(xintercept = 20), color = "black") +
ggplot2::geom_curve(data = athlete, ggplot2::aes(x = Age, y = `Height (CM)`, xend = 16.5, yend = 135), color = "black", curvature = 0.2, size = 0.5, linetype = 1) +
ggplot2::annotate("text", x = 17, y = 125, label = "Current \n Height", color = "black", size = 3) +
ggplot2::geom_point(data = athlete, ggplot2::aes(Age, `Height (CM)`), color = "deeppink", size = 3) +
ggplot2::geom_point(data = athlete %>% dplyr::mutate(Age = 21), ggplot2::aes(Age, `Estimated Adult Height (CM)`), color = "deeppink", size = 3) +
ggplot2::geom_text(data = athlete %>% dplyr::mutate(Age = 21), ggplot2::aes(Age, `Estimated Adult Height (CM)`, label = `Estimated Adult Height (CM)`), color = "deeppink", size = 3, vjust = -1) +
ggplot2::geom_curve(data = athlete %>% dplyr::mutate(Age2 = 21), ggplot2::aes(x = Age, y = `Height (CM)`, xend = Age2, yend = `Estimated Adult Height (CM)`), color = "deeppink", curvature = -0.05, size = 0.5, linetype = 1) +
ggplot2::scale_x_continuous(breaks = seq(0, 20, by = 2.5)) +
ggplot2::ylim(75, 220) +
ggplot2::ylab("Height (CM) \n") + ggplot2::xlab("Years") +
ggplot2::labs(title = "Projected Height (CM)", subtitle = "vs standard growth curves: United States \n", caption = "For more information about growth charts visit https://www.cdc.gov/growthcharts/") +
ggplot2::theme_light() +
ggplot2::theme(axis.title.x = ggplot2::element_text(color = "grey", hjust = 1),
axis.title.y = ggplot2::element_text(color = "grey", hjust = 1),
plot.subtitle = ggplot2::element_text(color = "darkgray"),
plot.caption = ggplot2::element_text(color = "lightblue"),
panel.grid.minor = ggplot2::element_blank(),
panel.grid.major = ggplot2::element_line(linetype = 2),
legend.title = ggplot2::element_blank())
plot
}
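# Usage sketch (data_sample ships with matuR; the output file name is assumed):
# p <- plot_growth_male(data_sample, "Athlete 08")
# ggplot2::ggsave("athlete08_projected_height.png", p, width = 8, height = 6)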
|
/R/plot_growth_male.R
|
permissive
|
CFPC-performance/matuR
|
R
| false | false | 4,821 |
r
|
.onAttach <-
function(libname, pkgname) {
packageStartupMessage("\nPlease cite as: \n")
packageStartupMessage(" Hlavac, Marek (2018). stargazer: Well-Formatted Regression and Summary Statistics Tables.")
packageStartupMessage(" R package version 5.2.2. https://CRAN.R-project.org/package=stargazer \n")
}
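# Usage sketch (illustrative; any supported fitted model object works):
# m <- lm(mpg ~ wt + hp, data = mtcars)
# stargazer(m, type = "text")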
.stargazer.wrap <-
function(..., type, title, style, summary, out, out.header, covariate.labels, column.labels, column.separate,
dep.var.caption, dep.var.labels, dep.var.labels.include, align, coef, se, t, p, t.auto,
p.auto, ci, ci.custom, ci.level, ci.separator, add.lines, apply.coef, apply.se, apply.t, apply.p, apply.ci,
colnames,
column.sep.width, decimal.mark, df, digit.separate, digit.separator, digits, digits.extra,
flip, float,
float.env, font.size, header, initial.zero, intercept.bottom, intercept.top, keep, keep.stat,
label, model.names, model.numbers, multicolumn, no.space, notes, notes.align, notes.append,
notes.label, object.names, omit, omit.labels, omit.stat, omit.summary.stat, omit.table.layout,
omit.yes.no, order, ord.intercepts, perl, report, rownames,
rq.se, selection.equation, single.row, star.char, star.cutoffs, suppress.errors,
table.layout, table.placement,
zero.component, summary.logical, summary.stat, nobs, mean.sd, min.max, median, iqr, warn) {
.add.model <-
function(object.name, user.coef=NULL, user.se=NULL, user.t=NULL, user.p=NULL, auto.t=TRUE, auto.p=TRUE, user.ci.lb=NULL, user.ci.rb=NULL) {
if (class(object.name)[1] == "Glm") {
.summary.object <<- summary.glm(object.name)
}
else if (!(.model.identify(object.name) %in% c("aftreg", "coxreg","phreg","weibreg", "Glm", "bj", "cph", "lrm", "ols", "psm", "Rq"))) {
.summary.object <<- summary(object.name)
}
else {
.summary.object <<- object.name
}
if (.model.identify(object.name) == "rq") {
.summary.object <<- suppressMessages(summary(object.name, se=.format.rq.se))
}
model.num.total <- 1 # model number for multinom, etc.
if (.model.identify(object.name) == "multinom") {
if (!is.null(nrow(.summary.object$coefficients))) {
model.num.total <- nrow(.summary.object$coefficients)
}
}
for (model.num in 1:model.num.total) {
.global.models <<- append(.global.models, .model.identify(object.name))
.global.dependent.variables <<- append(.global.dependent.variables, .dependent.variable(object.name, model.num))
.global.dependent.variables.written <<- append(.global.dependent.variables.written, .dependent.variable.written(object.name, model.num))
.global.N <<- append(.global.N, .number.observations(object.name))
.global.LL <<- append(.global.LL, .log.likelihood(object.name))
.global.R2 <<- append(.global.R2, .r.squared(object.name))
.global.max.R2 <<- append(.global.max.R2, .max.r.squared(object.name))
.global.adj.R2 <<- append(.global.adj.R2, .adj.r.squared(object.name))
.global.AIC <<- append(.global.AIC, .AIC(object.name))
.global.BIC <<- append(.global.BIC, .BIC(object.name))
.global.scale <<- append(.global.scale, .get.scale(object.name))
.global.UBRE <<- append(.global.UBRE, .gcv.UBRE(object.name))
.global.sigma2 <<- append(.global.sigma2, .get.sigma2(object.name))
.global.rho <<- cbind(.global.rho, .get.rho(object.name))
.global.mills <<- cbind(.global.mills, .get.mills(object.name))
.global.theta <<- cbind(.global.theta, .get.theta(object.name))
.global.SER <<- cbind(.global.SER, .SER(object.name))
.global.F.stat <<- cbind(.global.F.stat, .F.stat(object.name))
.global.chi.stat <<- cbind(.global.chi.stat, .chi.stat(object.name))
.global.wald.stat <<- cbind(.global.wald.stat, .wald.stat(object.name))
.global.lr.stat <<- cbind(.global.lr.stat, .lr.stat(object.name))
.global.logrank.stat <<- cbind(.global.logrank.stat, .logrank.stat(object.name))
.global.null.deviance <<- cbind(.global.null.deviance, .null.deviance(object.name))
.global.residual.deviance <<- cbind(.global.residual.deviance, .residual.deviance(object.name))
max.length <- length(.global.coefficient.variables)+length(.coefficient.variables(object.name))
# add RHS variables and coefficients
coef.var <- .coefficient.variables(object.name)
.global.coef.vars.by.model <<- cbind(.global.coef.vars.by.model, coef.var)
temp.gcv <- rep(NA,each=1,times=max.length)
temp.gcv[1:length(.global.coefficient.variables)] <- .global.coefficient.variables
how.many.gcv <- length(.global.coefficient.variables)
# try to find variable
position <- 0
for (i in seq(1:length(coef.var))) {
found <- FALSE
for (j in seq(1:length(.global.coefficient.variables))) {
if (coef.var[i] == .global.coefficient.variables[j]) {
found <- TRUE
for (k in 1:how.many.gcv) {
if (coef.var[i]==temp.gcv[k]) {
position <- k
}
}
}
}
# If variable was found, no need to add it
if (found == FALSE) {
# append new variable to list of regressors
while ((position < how.many.gcv) && (!(temp.gcv[position+1] %in% coef.var))) {
position <- position + 1
}
temp.gcv <- append(temp.gcv, coef.var[i], after=position)
how.many.gcv <- how.many.gcv + 1
position <- position + 1
}
}
.global.coefficient.variables <<- temp.gcv[1:how.many.gcv]
# build up coefficients from scratch
temp.coefficients <- temp.std.errors <- temp.ci.lb <- temp.ci.rb <- temp.t.stats <- temp.p.values <- matrix(data = NA, nrow = length(.global.coefficient.variables), ncol = ncol(.global.coefficients)+1)
rownames(temp.coefficients) <- rownames(temp.std.errors) <- rownames(temp.ci.lb) <- rownames(temp.ci.rb) <- rownames(temp.t.stats) <- rownames(temp.p.values) <- .global.coefficient.variables
# fill in from previous iteration of .global coefficients
which.variable <- 0
for (row in .global.coefficient.variables) {
which.variable <- which.variable + 1
row.i <- .rename.intercept(row) # row with intercept renamed to get the omit and keep right
### if omitted variable, then advance to the next iteration of the loop --- !!! do this also for index
#skip all of this if omitted based on regular expression
omitted <- FALSE
if (!is.null(.format.omit.regexp)) {
for (i in seq(1:length(.format.omit.regexp))) {
if (length(grep(.format.omit.regexp[i], row.i, perl=.format.perl, fixed=FALSE))!=0) { omitted <- TRUE }
}
}
if (!is.null(.format.keep.regexp)) {
omitted <- TRUE
for (i in seq(1:length(.format.keep.regexp))) {
if (length(grep(.format.keep.regexp[i], row.i, perl=.format.perl, fixed=FALSE))!=0) { omitted <- FALSE }
}
}
if (!is.null(.format.omit.index)) {
for (i in seq(1:length(.format.omit.index))) {
if (.format.omit.index[i] == which.variable) { omitted <- TRUE }
}
}
if (!is.null(.format.keep.index)) {
omitted <- TRUE
for (i in seq(1:length(.format.keep.index))) {
if (.format.keep.index[i] == which.variable) { omitted <- FALSE }
}
}
if (omitted == TRUE) { next }
###
for (col in seq(1:ncol(.global.coefficients))) {
if (sum(as.vector(rownames(.global.coefficients[,col, drop=FALSE])==row))!=0) {
if (!is.null(.global.coefficients)) { temp.coefficients[row, col] <- .global.coefficients[row, col] }
if (!is.null(.global.std.errors)) { temp.std.errors[row, col] <- .global.std.errors[row, col] }
if (!is.null(.global.ci.lb)) { temp.ci.lb[row, col] <- .global.ci.lb[row, col] }
if (!is.null(.global.ci.rb)) { temp.ci.rb[row, col] <- .global.ci.rb[row, col] }
if (!is.null(.global.t.stats)) { temp.t.stats[row, col] <- .global.t.stats[row, col] }
if (!is.null(.global.p.values)) { temp.p.values[row, col] <- .global.p.values[row, col] }
}
}
feed.coef <- NA; feed.se <- NA
# coefficients and standard errors
if (!is.null(.get.coefficients(object.name, user.coef, model.num=model.num)[row])) {
temp.coefficients[row, ncol(temp.coefficients)] <- .get.coefficients(object.name, user.coef, model.num=model.num)[row]
feed.coef <- temp.coefficients[, ncol(temp.coefficients)]
}
if (!is.null(.get.standard.errors(object.name, user.se, model.num=model.num)[row])) {
temp.std.errors[row, ncol(temp.std.errors)] <- .get.standard.errors(object.name, user.se, model.num=model.num)[row]
feed.se <- temp.std.errors[, ncol(temp.std.errors)]
}
# confidence interval, left and right bound
if (!is.null(.get.ci.lb(object.name, user.ci.lb, model.num=model.num)[row])) { temp.ci.lb[row, ncol(temp.ci.lb)] <- .get.ci.lb(object.name, user.ci.lb, model.num=model.num)[row] }
if (!is.null(.get.ci.rb(object.name, user.ci.rb, model.num=model.num)[row])) { temp.ci.rb[row, ncol(temp.ci.rb)] <- .get.ci.rb(object.name, user.ci.rb, model.num=model.num)[row] }
# t-stats and p-values
#if (!is.null(user.coef)) { feed.coef <- user.coef } # feed user-defined coefficients, if available - check that this does not mess up multinom
#if (!is.null(user.se)) { feed.se <- user.se } # feed user-defined std errors, if available
if (!is.null(.get.t.stats(object.name, user.t, auto.t, feed.coef, feed.se, user.coef, user.se, model.num=model.num)[row])) { temp.t.stats[row, ncol(temp.std.errors)] <- .get.t.stats(object.name, user.t, auto.t, feed.coef, feed.se, user.coef, user.se, model.num=model.num)[row] }
if (!is.null(.get.p.values(object.name, user.p, auto.p, feed.coef, feed.se, user.coef, user.se, model.num=model.num)[row])) { temp.p.values[row, ncol(temp.std.errors)] <- .get.p.values(object.name, user.p, auto.p, feed.coef, feed.se, user.coef, user.se, model.num=model.num)[row] }
}
if (!is.null(temp.coefficients)) { .global.coefficients <<- temp.coefficients }
if (!is.null(temp.std.errors)) { .global.std.errors <<- temp.std.errors }
if (!is.null(temp.ci.lb)) { .global.ci.lb <<- temp.ci.lb }
if (!is.null(temp.ci.rb)) { .global.ci.rb <<- temp.ci.rb }
if (!is.null(temp.t.stats)) { .global.t.stats <<- temp.t.stats }
if (!is.null(temp.p.values)) { .global.p.values <<- temp.p.values }
}
}
.adj.r.squared <-
function(object.name) {
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH","Arima","coeftest","maBina", "lmer", "glmer", "nlmer", "Gls"))) {
if (model.name %in% c("heckit")) {
return(.summary.object$rSquared$R2adj)
}
if (model.name %in% c("felm")) {
return(.summary.object$r2adj)
}
if (!is.null(suppressMessages(.summary.object$adj.r.squared))) {
return(as.vector(suppressMessages(.summary.object$adj.r.squared)))
}
else if (model.name %in% c("normal.gam", "logit.gam", "probit.gam", "poisson.gam", "gam()")) {
return(as.vector(.summary.object$r.sq))
}
else if (model.name %in% c("plm")) {
return(as.vector(.summary.object$r.squared["adjrsq"]))
}
else if (model.name %in% c("ols")) {
n <- nobs(object.name)
p <- length(object.name$coefficients[names(object.name$coefficients)!="Intercept"])
r2 <- object.name$stats["R2"]
adj.r2 <- 1-(1-r2)*((n-1) / (n-p-1))
return(as.vector(adj.r2))
}
}
return(NA)
}
.adjust.settings.style <-
function(what.style) {
style <- tolower(what.style)
if (style == "all") {
.format.table.parts <<- c("=!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","-","omit","-","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","sigma2","theta(se)*(p)", "SER(df)","F statistic(df)*(p)","chi2(df)*(p)","Wald(df)*(p)","LR(df)*(p)","logrank(df)*(p)","AIC","BIC","UBRE","rho(se)*(p)","Mills(se)*(p)","residual deviance(df)*","null deviance(df)*","=!","notes")
.format.coefficient.table.parts <<- c("variable name","coefficient*","standard error","t-stat","p-value")
}
else if (style == "all2") {
.format.table.parts <<- c("=!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","-","omit","-","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","sigma2","theta(se)*(p)", "SER(df)","F statistic(df)*(p)","chi2(df)*(p)","Wald(df)*(p)","LR(df)*(p)","logrank(df)*(p)","AIC","BIC","UBRE","rho(se)*(p)","Mills(se)*(p)","residual deviance(df)*","null deviance(df)*","=!","notes")
.format.coefficient.table.parts <<- c("variable name","coefficient*","standard error")
}
# aer = American Economic Review
else if (style == "aer") {
.format.table.parts <<- c("=!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","omit","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","theta(se)*", "AIC","BIC","UBRE","rho(se)*","Mills(se)*", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","-!","notes")
.format.models.skip.if.one <<- TRUE
.format.dependent.variable.text.on <<- FALSE
.format.until.nonzero.digit <<- FALSE
.format.max.extra.digits <<- 0
.format.model.left <<- ""
.format.model.right <<- ""
.format.note <<- "\\textit{Notes:}"
.format.note.alignment <<- "l"
.format.note.content <<- c("$^{***}$Significant at the [***] percent level.","$^{**}$Significant at the [**] percent level.","$^{*}$Significant at the [*] percent level.")
}
# ajps = American Journal of Political Science
else if (style == "ajps") {
.format.table.parts <<- c("-!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","omit","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","theta(se)*", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","AIC","BIC","UBRE","rho(se)*","Mills(se)*","-!","notes")
.format.models.skip.if.one <<- TRUE
.format.dependent.variable.text.on <<- FALSE
.format.digit.separator <<- ""
.format.dependent.variables.left <<- "\\textbf{"
.format.dependent.variables.right <<- "}"
.format.column.left <<- "\\textbf{"
.format.column.right <<- "}"
.format.models.left <<- "\\textbf{"
.format.models.right <<- "}"
.format.numbers.left <<- "\\textbf{Model "
.format.numbers.right <<- "}"
.format.coefficient.table.parts <<- c("variable name","coefficient*","standard error")
.format.N <<- "N"
.format.AIC <<- "AIC"
.format.BIC <<- "BIC"
.format.chi.stat <<- "Chi-square"
.format.R2 <<- "R-squared"
.format.adj.R2 <<- "Adj. R-squared"
.format.max.R2 <<- "Max. R-squared"
.format.note <<- ""
.format.note.content <<- c("$^{***}$p $<$ [.***]; $^{**}$p $<$ [.**]; $^{*}$p $<$ [.*]")
.format.note.alignment <<- "l"
.format.s.stat.parts <<- c("-!","stat names","-","statistics1","-!","notes")
}
# ajs = American Journal of Sociology
else if (style == "ajs") {
.format.table.parts <<- c(" ","=!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","omit","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","theta(se)*", "AIC","BIC","UBRE","rho(se)*","Mills(se)*", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","-!","notes")
.format.models.skip.if.one <<- TRUE
.format.dependent.variables.capitalize <<- TRUE
.format.dependent.variable.text.on <<- FALSE
.format.numbers.left <<- ""
.format.numbers.right <<- ""
.format.until.nonzero.digit <<- FALSE
.format.max.extra.digits <<- 0
.format.model.left <<- ""
.format.model.right <<- ""
.format.note <<- "\\textit{Notes:}"
.format.note.alignment <<- "l"
.format.note.content <<- c("$^{*}$P $<$ [.*]","$^{**}$P $<$ [.**]","$^{***}$P $<$ [.***]")
.format.cutoffs <<- c(0.05, 0.01, 0.001)
.format.initial.zero <<- FALSE
}
# apsr = American Political Science Review
else if (style == "apsr") {
.format.table.parts <<- c("-!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","omit","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","theta(se)*", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","AIC","BIC","UBRE","rho(se)*","Mills(se)*","-!","notes")
.format.models.skip.if.one <<- TRUE
.format.dependent.variable.text.on <<- FALSE
.format.models.left <<- ""
.format.models.right <<- ""
.format.coefficient.table.parts <<- c("variable name","coefficient*","standard error")
.format.N <<- "N"
.format.AIC <<- "AIC"
.format.BIC <<- "BIC"
.format.chi.stat <<- "chi$^{2}$"
.format.note <<- ""
.format.note.content <<- c("$^{*}$p $<$ [.*]; $^{**}$p $<$ [.**]; $^{***}$p $<$ [.***]")
.format.note.alignment <<- "l"
.format.s.stat.parts <<- c("-!","stat names","-","statistics1","-!","notes")
}
# asq = Administrative Science Quarterly
else if (style == "asq") {
.format.table.parts <<- c("-!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","omit","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","theta(se)*", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","AIC","BIC","UBRE","rho(se)*","Mills(se)*","-!","notes")
.format.models.skip.if.one <<- TRUE
.format.dependent.variable.text.on <<- FALSE
.format.digit.separator <<- ""
.format.dependent.variables.left <<- "\\textbf{"
.format.dependent.variables.right <<- "}"
.format.column.left <<- "\\textbf{"
.format.column.right <<- "}"
.format.models.left <<- "\\textbf{"
.format.models.right <<- "}"
.format.numbers.left <<- "\\textbf{Model "
.format.numbers.right <<- "}"
.format.coefficient.table.parts <<- c("variable name","coefficient*","standard error")
.format.AIC <<- "AIC"
.format.BIC <<- "BIC"
.format.chi.stat <<- "Chi-square"
.format.R2 <<- "R-squared"
.format.adj.R2 <<- "Adj. R-squared"
.format.max.R2 <<- "Max. R-squared"
.format.note <<- ""
.format.note.content <<- c("$^{\\bullet}$p $<$ [.*]; $^{\\bullet\\bullet}$p $<$ [.**]; $^{\\bullet\\bullet\\bullet}$p $<$ [.***]")
.format.note.alignment <<- "l"
.format.s.stat.parts <<- c("-!","stat names","-","statistics1","-!","notes")
.format.stars <<- "\\bullet"
}
# asr = American Sociological Review
else if (style == "asr") {
.format.table.parts <<- c("-!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","omit","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","theta(se)", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","AIC","BIC","UBRE","rho(se)*","Mills(se)*","-!","notes")
.format.models.skip.if.one <<- TRUE
.format.dependent.variable.text.on <<- FALSE
.format.models.left <<- ""
.format.models.right <<- ""
.format.coefficient.table.parts <<- c("variable name","coefficient*")
.format.N <<- "\\textit{N}"
.format.AIC <<- "AIC"
.format.BIC <<- "BIC"
.format.chi.stat <<- "chi$^{2}$"
.format.note <<- ""
.format.note.content <<- c("$^{*}$p $<$ [.*]; $^{**}$p $<$ [.**]; $^{***}$p $<$ [.***]")
.format.cutoffs <<- c(0.05, 0.01, 0.001)
.format.note.alignment <<- "l"
.format.s.stat.parts <<- c("-!","stat names","-","statistics1","-!","notes")
}
# "demography" = Demography
else if (style == "demography") {
.format.table.parts <<- c("-!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","omit","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","theta(se)*", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","AIC","BIC","UBRE","rho(se)*","Mills(se)*","-!","notes")
.format.models.skip.if.one <<- TRUE
.format.dependent.variable.text.on <<- FALSE
.format.models.left <<- ""
.format.models.right <<- ""
.format.numbers.left <<- "Model "
.format.numbers.right <<- ""
.format.coefficient.table.parts <<- c("variable name","coefficient*","standard error")
.format.N <<- "\\textit{N}"
.format.AIC <<- "AIC"
.format.BIC <<- "BIC"
.format.chi.stat <<- "Chi-Square"
.format.note <<- ""
.format.note.content <<- c("$^{*}$p $<$ [.*]; $^{**}$p $<$ [.**]; $^{***}$p $<$ [.***]")
.format.cutoffs <<- c(0.05, 0.01, 0.001)
.format.note.alignment <<- "l"
.format.s.stat.parts <<- c("-!","stat names","-","statistics1","-!","notes")
}
# io = International Organization
else if (style == "io") {
.format.table.parts <<- c("-!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","omit","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","theta(se)*", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","AIC","BIC","UBRE","rho(se)*","Mills(se)*","-!","notes")
.format.models.skip.if.one <<- TRUE
.format.dependent.variable.text.on <<- FALSE
.format.coefficient.table.parts <<- c("variable name","coefficient*","standard error")
.format.coefficient.variables.capitalize <<- TRUE
.format.s.coefficient.variables.capitalize <<- TRUE
.format.intercept.name <<- "Constant"
.format.N <<- "\\textit{Observations}"
.format.AIC <<- "\\textit{Akaike information criterion}"
.format.BIC <<- "\\textit{Bayesian information criterion}"
.format.chi.stat <<- "\\textit{Chi-square}"
.format.logrank.stat <<- "\\textit{Score (logrank) test}"
.format.lr.stat <<- "\\textit{LR test}"
.format.max.R2 <<- "\\textit{Maximum R-squared}"
.format.R2 <<- "\\textit{R-squared}"
.format.adj.R2 <<- "\\textit{Adjusted R-squared}"
.format.UBRE <<- "\\textit{UBRE}"
.format.F.stat <<- "\\textit{F statistic}"
.format.LL <<- "\\textit{Log likelihood}"
.format.SER <<- "\\textit{Residual standard error}"
.format.null.deviance <<- "\\textit{Null deviance}"
.format.residual.deviance <<- "\\textit{Residual deviance}"
.format.scale <<- "\\textit{Scale}"
.format.wald.stat <<- "\\textit{Wald test}"
.format.note <<- "\\textit{Notes:}"
.format.note.content <<- c("$^{***}$p $<$ [.***]; $^{**}$p $<$ [.**]; $^{*}$p $<$ [.*]")
.format.note.alignment <<- "l"
.format.s.stat.parts <<- c("-!","stat names","-","statistics1","-!","notes")
}
# jpam = Journal of Policy Analysis and Management
else if (style == "jpam") {
.format.table.parts <<- c("-!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","omit","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","theta(se)*", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","AIC","BIC","UBRE","rho(se)*","Mills(se)*","-!","notes")
.format.models.skip.if.one <<- TRUE
.format.dependent.variable.text.on <<- FALSE
.format.models.left <<- ""
.format.models.right <<- ""
.format.numbers.left <<- "Model "
.format.numbers.right <<- ""
.format.numbers.roman <<- TRUE
.format.coefficient.table.parts <<- c("variable name","coefficient*","standard error")
.format.intercept.bottom <<- FALSE
.format.intercept.top <<- TRUE
.format.N <<- "N"
.format.AIC <<- "AIC"
.format.BIC <<- "BIC"
.format.note <<- "\\textit{Note:}"
.format.note.content <<- c("$^{***}$p $<$ [.***]; $^{**}$p $<$ [.**]; $^{*}$p $<$ [.*]")
.format.note.alignment <<- "l"
.format.s.stat.parts <<- c("-!","stat names","-","statistics1","-!","notes")
.format.s.statistics.names <<- cbind(c("n","N"), c("nmiss","missing"), c("mean","Mean"), c("sd","SD"), c("median","Median"), c("min","Minimum"), c("max","Maximum"), c("mad","Median Abs. Dev."), c("p","Percentile(!)"))
}
# "qje" = Quarterly Journal of Economics
else if (style=="qje") {
.format.table.parts <<- c("=!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","omit","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","theta(se)*", "AIC","BIC","UBRE","rho(se)*","Mills(se)*", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","=!","notes")
.format.dependent.variable.text.on <<- FALSE
.format.s.stat.parts <<- c("-!","stat names","=","statistics1","=!","notes")
.format.N <<- "\\textit{N}"
.format.note <<- "\\textit{Notes:}"
.format.note.content <<- c("$^{***}$Significant at the [***] percent level.", "$^{**}$Significant at the [**] percent level.", "$^{*}$Significant at the [*] percent level.")
}
# find style based on journal ("default" or other)
else if (style=="commadefault") {
.format.table.parts <<- c("=!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","-","omit","-","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","sigma2","theta(se)*", "AIC","BIC","UBRE","rho(se)*","Mills(se)*", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","=!","notes")
.format.digit.separator <<- " "
.format.decimal.character <<- ","
}
else if (style=="default") {
.format.table.parts <<- c("=!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","-","omit","-","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","sigma2","theta(se)*", "AIC","BIC","UBRE","rho(se)*","Mills(se)*", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","=!","notes")
}
}
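  # Sketch: the journal presets configured above are selected through the
  # `style` argument of stargazer() (illustrative call):
  # stargazer(m1, m2, style = "ajps", type = "text")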
.apply <-
function(auto.t, auto.p)
{
if ((!is.null(apply.coef)) || ((!is.null(apply.se)))) {
if (!is.null(apply.coef)) { .global.coefficients <<- apply(.global.coefficients, c(1,2), apply.coef) }
if (!is.null(apply.se)) { .global.std.errors <<- apply(.global.std.errors, c(1,2), apply.se) }
if (auto.t == TRUE) { .global.t.stats <<- .global.coefficients / .global.std.errors }
if (auto.p == TRUE) { .global.p.values <<- 2 * pnorm( abs( .global.t.stats ) , mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE) }
}
if (!is.null(apply.t)) { .global.t.stats <<- apply(.global.t.stats, c(1,2), apply.t) }
if (!is.null(apply.p)) { .global.p.values <<- apply(.global.p.values, c(1,2), apply.p) }
}
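  # Sketch: the apply.* hooks handled above let callers transform estimates
  # before printing, e.g. exponentiating logit coefficients into odds ratios
  # (illustrative; t-stats/p-values are recomputed from the transformed values):
  # stargazer(logit.model, apply.coef = exp, type = "text")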
.AIC <-
function(object.name) {
model.name <- .get.model.name(object.name)
if (model.name %in% c("coeftest")) {
return(NA)
}
if (model.name %in% c("lmer","lme","nlme","glmer","nlmer", "ergm", "gls", "Gls", "lagsarlm", "errorsarlm", "", "Arima")) {
return(as.vector(AIC(object.name)))
}
if (model.name %in% c("censReg")) {
return(as.vector(AIC(object.name)[1]))
}
if (model.name %in% c("fGARCH")) {
return(object.name@fit$ics["AIC"])
}
if (model.name %in% c("maBina")) {
return(as.vector(object.name$w$aic))
}
if (model.name %in% c("arima")) {
return(as.vector(object.name$aic))
}
else if (!is.null(.summary.object$aic)) {
return(as.vector(.summary.object$aic))
}
else if (!is.null(object.name$AIC)) {
return(as.vector(object.name$AIC))
}
return(NA)
}
.BIC <-
function(object.name) {
model.name <- .get.model.name(object.name)
if (model.name %in% c("coeftest","maBina","Arima")) {
return(NA)
}
if (model.name %in% c("censReg")) {
return(as.vector(BIC(object.name)[1]))
}
if (model.name %in% c("fGARCH")) {
return(object.name@fit$ics["BIC"])
}
if (model.name %in% c("lmer","lme","nlme","glmer","nlmer", "ergm", "gls", "Gls")) {
return(as.vector(BIC(object.name)))
}
if (model.name %in% c("arima")) {
return(as.vector(object.name$bic))
}
else if (!is.null(.summary.object$bic)) {
return(as.vector(.summary.object$bic))
}
else if (!is.null(object.name$BIC)) {
return(as.vector(object.name$BIC))
}
return(NA)
}
.chi.stat <-
function(object.name) {
chi.output <- as.vector(rep(NA,times=3))
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH","Arima","maBina","coeftest","lmer", "Gls", "glmer", "nlmer", "normal.gam","logit.gam","probit.gam","poisson.gam","gam()"))) {
if (!is.null(.summary.object$chi)) {
chi.value <- suppressMessages(.summary.object$chi)
df.value <- suppressMessages(.summary.object$df) - suppressMessages(.summary.object$idf)
chi.p.value <- pchisq(chi.value, df.value, ncp=0, lower.tail = FALSE, log.p = FALSE)
chi.output <- as.vector(c(chi.value, df.value, chi.p.value))
}
else if (model.name %in% c("cph", "lrm", "ols", "psm")) {
chi.value <- object.name$stat["Model L.R."]
df.value <- object.name$stat["d.f."]
chi.p.value <- pchisq(chi.value, df.value, ncp=0, lower.tail = FALSE, log.p = FALSE)
chi.output <- as.vector(c(chi.value, df.value, chi.p.value))
}
else if (model.name %in% c("probit.ss")) {
chi.value <- object.name$LRT$LRT
df.value <- object.name$LRT$df
chi.p.value <- pchisq(chi.value, df.value, ncp=0, lower.tail = FALSE, log.p = FALSE)
chi.output <- as.vector(c(chi.value, df.value, chi.p.value))
}
}
names(chi.output) <- c("statistic","df1","p-value")
return(cbind(chi.output))
}
.coefficient.table.part <-
function(part, which.variable, variable.name=NULL) {
# coefficient variable name
if (part=="variable name") {
# use intercept name for intercept, otherwise variable name
if (is.na(.format.covariate.labels[.which.variable.label])) {
if (.format.coefficient.variables.capitalize == TRUE) { cat(" ", .format.coefficient.variables.left, toupper(variable.name), .format.coefficient.variables.right, sep="") }
else { cat(" ", .format.coefficient.variables.left, variable.name, .format.coefficient.variables.right, sep="") }
}
else { cat(" ", .format.coefficient.variables.left, .format.covariate.labels[.which.variable.label], .format.coefficient.variables.right, sep="") }
}
# coefficients and stars
else if ((part=="coefficient") || (part=="coefficient*")) {
for (i in seq(1:length(.global.models))) {
if (!is.na(.global.coefficients[.global.coefficient.variables[which.variable],i])) {
# report the coefficient
cat(" & ", .iround(.global.coefficients[.global.coefficient.variables[which.variable],i],.format.round.digits),sep="")
# add stars to denote statistical significance
if (part=="coefficient*") {
p.value <- .global.p.values[.global.coefficient.variables[which.variable],i]
.enter.significance.stars(p.value)
}
}
else {
cat(" & ",sep="")
}
# if single-row, follow up with standard error / confidence interval
if ((.format.single.row == TRUE) && (("standard error" %in% .format.coefficient.table.parts) || ("standard error*" %in% .format.coefficient.table.parts))) {
if (.format.dec.mark.align == TRUE) { space.char <- "$ $"}
else { space.char <- " "}
if (!is.na(.global.std.errors[.global.coefficient.variables[which.variable],i])) {
# report standard errors or confidence intervals
.format.ci.use <- .format.ci[i]
if (is.na(.format.ci.use)) {
for (j in i:1) {
if (!is.na(.format.ci[j])) {
.format.ci.use <- .format.ci[j]
break
}
}
}
if (.format.ci.use == TRUE) {
# if ci level is NA, find the most recent set level
.format.ci.level.use <- .format.ci.level[i]
if (is.na(.format.ci.level.use)) {
for (j in i:1) {
if (!is.na(.format.ci.level[j])) {
.format.ci.level.use <- .format.ci.level[j]
break
}
}
}
z.value <- qnorm((1 + .format.ci.level.use)/2)
coef <- .global.coefficients[.global.coefficient.variables[which.variable],i]
se <- .global.std.errors[.global.coefficient.variables[which.variable],i]
ci.lower.bound <- coef - z.value * se
ci.upper.bound <- coef + z.value * se
if (!is.null(ci.custom[[i]])) {
ci.lower.bound.temp <- .global.ci.lb[.global.coefficient.variables[which.variable],i]
ci.upper.bound.temp <- .global.ci.rb[.global.coefficient.variables[which.variable],i]
if (!is.na(ci.lower.bound.temp)) (ci.lower.bound <- ci.lower.bound.temp)
if (!is.na(ci.upper.bound.temp)) (ci.upper.bound <- ci.upper.bound.temp)
}
if (!is.null(apply.ci)) {
ci.lower.bound <- do.call(apply.ci, list(ci.lower.bound))
ci.upper.bound <- do.call(apply.ci, list(ci.upper.bound))
}
if (.format.dec.mark.align == TRUE) {
hyphen <- paste("$",.format.ci.separator,"$", sep="")
}
else {
hyphen <- .format.ci.separator
}
cat(space.char, .format.std.errors.left, .iround(ci.lower.bound,.format.round.digits),hyphen,.iround(ci.upper.bound,.format.round.digits),.format.std.errors.right,sep="")
}
else {
cat(space.char, .format.std.errors.left, .iround(.global.std.errors[.global.coefficient.variables[which.variable],i],.format.round.digits),.format.std.errors.right,sep="")
}
# add stars to denote statistical significance
if ("standard error*" %in% .format.coefficient.table.parts) {
p.value <- .global.p.values[.global.coefficient.variables[which.variable],i]
.enter.significance.stars(p.value)
}
}
}
}
cat(" \\\\ \n ")
}
# standard errors
else if (((part=="standard error") || (part=="standard error*")) && (.format.single.row==FALSE)) {
for (i in seq(1:length(.global.models))) {
if (!is.na(.global.std.errors[.global.coefficient.variables[which.variable],i])) {
# report standard errors or confidence intervals
.format.ci.use <- .format.ci[i]
if (is.na(.format.ci.use)) {
for (j in i:1) {
if (!is.na(.format.ci[j])) {
.format.ci.use <- .format.ci[j]
break
}
}
}
if (.format.ci.use == TRUE) {
# if ci level is NA, find the most recent set level
.format.ci.level.use <- .format.ci.level[i]
if (is.na(.format.ci.level.use)) {
for (j in i:1) {
if (!is.na(.format.ci.level[j])) {
.format.ci.level.use <- .format.ci.level[j]
break
}
}
}
z.value <- qnorm((1 + .format.ci.level.use)/2)
coef <- .global.coefficients[.global.coefficient.variables[which.variable],i]
se <- .global.std.errors[.global.coefficient.variables[which.variable],i]
ci.lower.bound <- coef - z.value * se
ci.upper.bound <- coef + z.value * se
if (!is.null(ci.custom[[i]])) {
ci.lower.bound.temp <- .global.ci.lb[.global.coefficient.variables[which.variable],i]
ci.upper.bound.temp <- .global.ci.rb[.global.coefficient.variables[which.variable],i]
if (!is.na(ci.lower.bound.temp)) (ci.lower.bound <- ci.lower.bound.temp)
if (!is.na(ci.upper.bound.temp)) (ci.upper.bound <- ci.upper.bound.temp)
}
if (!is.null(apply.ci)) {
ci.lower.bound <- do.call(apply.ci, list(ci.lower.bound))
ci.upper.bound <- do.call(apply.ci, list(ci.upper.bound))
}
if (.format.dec.mark.align == TRUE) {
hyphen <- paste("$",.format.ci.separator,"$", sep="")
}
else {
hyphen <- .format.ci.separator
}
if (.format.dec.mark.align == TRUE) {
cat(" & \\multicolumn{1}{c}{", .format.std.errors.left, .iround(ci.lower.bound,.format.round.digits),hyphen,.iround(ci.upper.bound,.format.round.digits),.format.std.errors.right,"}",sep="")
}
else {
cat(" & ", .format.std.errors.left, .iround(ci.lower.bound,.format.round.digits),hyphen,.iround(ci.upper.bound,.format.round.digits),.format.std.errors.right,sep="")
}
}
else {
cat(" & ", .format.std.errors.left, .iround(.global.std.errors[.global.coefficient.variables[which.variable],i],.format.round.digits),.format.std.errors.right,sep="")
}
# add stars to denote statistical significance
if (part=="standard error*") {
p.value <- .global.p.values[.global.coefficient.variables[which.variable],i]
.enter.significance.stars(p.value)
}
}
else {
cat(" & ",sep="")
}
}
cat(" \\\\ \n ")
}
# p-values
else if ((part=="p-value") || (part=="p-value*")) {
for (i in seq(1:length(.global.models))) {
if (!is.na(.global.p.values[.global.coefficient.variables[which.variable],i])) {
# report p-values
cat(" & ", .format.p.values.left, .iround(.global.p.values[.global.coefficient.variables[which.variable],i],.format.round.digits,round.up.positive=TRUE),.format.p.values.right,sep="")
# add stars to denote statistical significance
if (part=="p-value*") {
p.value <- .global.p.values[.global.coefficient.variables[which.variable],i]
.enter.significance.stars(p.value)
}
}
else {
cat(" & ",sep="")
}
}
cat(" \\\\ \n ")
}
# t-statistics
else if ((part=="t-stat") || (part=="t-stat*")) {
for (i in seq(1:length(.global.models))) {
if (!is.na(.global.t.stats[.global.coefficient.variables[which.variable],i])) {
# report t-statistics
cat(" & ", .format.t.stats.left, .iround(.global.t.stats[.global.coefficient.variables[which.variable],i],.format.round.digits),.format.t.stats.right,sep="")
# add stars to denote statistical significance
if (part=="t-stat*") {
p.value <- .global.p.values[.global.coefficient.variables[which.variable],i]
.enter.significance.stars(p.value)
}
}
else {
cat(" & ",sep="")
}
}
cat(" \\\\ \n ")
}
# empty line
else if (part==" ") {
.table.empty.line()
}
# horizontal line
else if (part=="-") {
cat("\\hline ")
.table.insert.space()
cat(" \n")
}
# double horizontal line
else if (part=="=") {
cat("\\hline \n")
cat("\\hline ")
.table.insert.space()
cat(" \n")
}
}
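# .coefficient.variables: returns the vector of coefficient names for a
# fitted model object, using the extraction appropriate to each supported
# model class (e.g. names(object$coefficients), the rownames of a summary
# coefficient table, or equation-specific indices for selection models).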
.coefficient.variables <-
function(object.name) {
model.name <- .get.model.name(object.name)
if (model.name %in% c("ls", "normal", "logit", "probit", "relogit", "poisson", "negbin", "normal.gee", "logit.gee", "probit.gee", "poisson.gee", "normal.gam",
"logit.gam", "probit.gam", "poisson.gam", "normal.survey", "poisson.survey", "probit.survey", "logit.survey", "gamma", "gamma.gee", "gamma.survey",
"exp", "weibull", "coxph", "clogit", "lognorm", "tobit", "tobit(AER)", "brglm", "glm()", "Glm()", "svyglm()", "gee()", "survreg()", "gam()", "plm", "ivreg", "pmg", "lmrob", "glmrob",
"dynlm", "gls", "rq", "lagsarlm", "errorsarlm", "gmm", "mclogit")) {
return(as.vector(names(object.name$coefficients)))
}
else if (model.name %in% c("Arima")) {
return(names(object.name$coef))
}
else if (model.name %in% c("fGARCH")) {
return(rownames(object.name@fit$matcoef))
}
else if (model.name %in% c("censReg")) {
return(rownames(.summary.object$estimate))
}
else if (model.name %in% c("mnlogit")) {
return(rownames(.summary.object$CoefTable))
}
else if (model.name %in% c("lme","nlme")) {
return(rownames(.summary.object$tTable))
}
else if (model.name %in% c("felm")) {
return(row.names(object.name$coefficients))
}
else if (model.name %in% c("maBina")) {
return(as.vector(rownames(object.name$out)))
}
else if (model.name %in% c("mlogit")) {
return(as.vector(rownames(.summary.object$CoefTable)))
}
else if (model.name %in% c("hetglm")) {
return(as.vector(names(object.name$coefficients$mean)))
}
else if (model.name %in% c("selection","heckit")) {
if (!.global.sel.equation) {
indices <- .summary.object$param$index$betaO ### outcome equation
}
else {
indices <- .summary.object$param$index$betaS ### selection equation
}
return(as.vector(names(.summary.object$estimate[indices, 1])))
}
else if (model.name %in% c("probit.ss", "binaryChoice")) {
return(as.vector(names(.summary.object$estimate[,1])))
}
else if (model.name %in% c("coeftest")) {
return(as.vector(rownames(object.name)))
}
else if (model.name %in% c("clm")) {
if (.format.ordered.intercepts == FALSE) { return(as.vector(names(object.name$beta))) }
else { return(c(as.vector(names(object.name$beta)), as.vector(names(object.name$alpha)))) }
}
else if (model.name %in% c("lmer", "glmer", "nlmer", "pgmm")) {
return(as.vector(rownames(.summary.object$coefficients)))
}
else if (model.name %in% c("ergm", "rem.dyad")) {
return(as.vector(names(object.name$coef)))
}
else if (model.name %in% c("betareg")) {
return(as.vector(names(object.name$coefficients$mean)))
}
else if (model.name %in% c("zeroinfl", "hurdle")) {
if (.global.zero.component==FALSE) {
return(as.vector(names(object.name$coefficients$count)))
}
else {
return(as.vector(names(object.name$coefficients$zero)))
}
}
else if (model.name %in% c("cloglog.net", "gamma.net", "logit.net", "probit.net")) {
return(as.vector(rownames(.summary.object$coefficients)))
}
else if (model.name %in% c("rlm")) {
return(as.vector(rownames(suppressMessages(.summary.object$coefficients))))
}
else if (model.name %in% c("ologit", "oprobit", "polr()")) {
coef.temp <- as.vector(rownames(suppressMessages(.summary.object$coefficients)))
if (.format.ordered.intercepts == FALSE) { return(coef.temp[seq(from=1, to=length(coef.temp)-(length(suppressMessages(.summary.object$lev))-1))]) }
else { return(coef.temp) }
}
else if (model.name %in% c("arima")) {
return(as.vector(names(object.name$coef)))
}
else if (model.name %in% c("multinom")) {
return(as.vector(object.name$coefnames))
}
else if (model.name %in% c("weibreg", "coxreg", "phreg", "aftreg", "bj", "cph", "Gls", "lrm", "ols", "psm", "Rq")) {
return(as.vector(names(object.name$coefficients)))
}
return(NULL)
}
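# .dependent.variable: extracts the left-hand side of the model formula
# as a string, with special handling for classes that store the formula
# in non-standard slots (e.g. lme's $fixed, Arima's $call$x, and the call
# components of gmm and selection models).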
.dependent.variable <-
function(object.name, model.num=1) {
model.name <- .get.model.name(object.name)
if (model.name %in% c("lmer", "glmer", "nlmer", "gls")) {
return(as.vector(as.character(formula(object.name))[2]))
}
if (model.name %in% c("Arima")) {
return(as.character(object.name$call$x))
}
if (model.name %in% c("fGARCH")) {
return(as.character(object.name@call$data))
}
if (model.name %in% c("multinom")) {
if (!is.null(rownames(.summary.object$coefficients))) {
return(as.vector(rownames(.summary.object$coefficients)[model.num]))
}
}
if (model.name %in% c("rem.dyad", "coeftest")) {
return(as.vector(as.character(" ")))
}
if (model.name %in% c("gmm")) {
formula <- object.name$call[2]
position <- regexpr("~", formula, fixed=T)
return( .trim(substr(formula, 1, position-1)) )
}
if (model.name %in% c("selection","heckit")) {
    if (!.global.sel.equation) {
      formula <- object.name$call["outcome"]    ### outcome equation
    }
    else {
      formula <- object.name$call["selection"]    ### selection equation
    }

position <- regexpr("~", formula, fixed=T)
return( .trim(substr(formula, 1, position-1)))
}
if (model.name %in% c("probit.ss","binaryChoice")) {
formula <- object.name$call["formula"]
position <- regexpr("~", formula, fixed=T)
return( .trim(substr(formula, 1, position-1)))
}
if (model.name %in% c("maBina")) {
object.name <- object.name$w
}
if (model.name %in% c("lme")) {
object.name$call$formula <- object.name$call$fixed
}
if (model.name %in% c("nlme")) {
object.name$call$formula <- object.name$call$model
}
if (!is.null(object.name$call$formula)) {
if (is.symbol(object.name$call$formula)) {
formula.temp <- as.formula(object.name)
}
else {
formula.temp <- object.name$call$formula
}
if (length(as.vector(as.character(formula.temp)))>1) {
return(as.vector(as.character(formula.temp)[2]))
}
}
if (!is.null(object.name$formula)) {
if (is.symbol(object.name$formula)) {
formula.temp <- as.formula(object.name)
}
else {
formula.temp <- object.name$formula
}
if (length(as.vector(as.character(formula.temp)))>1) { # this is for zelig$result ones
return(as.vector(as.character(formula.temp)[2]))
}
}
if (!is.null(object.name$formula2)) {
if (is.symbol(object.name$formula2)) {
formula.temp <- as.formula(object.name)
}
else {
formula.temp <- object.name$formula2
}
if (length(as.vector(as.character(formula.temp)))>1) { # z.ls
return(as.vector(as.character(formula.temp)[2]))
}
}
return("")
}
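# .dependent.variable.written: turns the raw dependent-variable expression
# into a printable label -- unwraps survival-style outcomes such as
# Surv(time, status), drops any data-frame prefix up to the last "$", and
# escapes LaTeX special characters.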
.dependent.variable.written <-
function(object.name, model.num=1) {
model.name <- .get.model.name(object.name)
if (model.name %in% c("tobit","ologit","oprobit", "relogit", "coxph","exp","lognorm","weibull","survreg()","arima",
"aftreg", "weibreg", "coxreg", "phreg", "bj", "cph", "psm")) {
written.var <- .inside.bracket(.dependent.variable(object.name))[1]
}
else if (model.name %in% c("clogit","mclogit")) {
written.var <- .inside.bracket(.dependent.variable(object.name))[2]
}
else { written.var <- .dependent.variable(object.name, model.num) }
# some formatting changes
# remove everything before and including he last dollar sign from variable name
temp <- strsplit(written.var,"$",fixed=TRUE)
written.var <- temp[[1]][length(temp[[1]])]
# if underscore or ^, etc. in variable name, then insert an escape \ before it
written.var <- .remove.special.chars(written.var)
return(written.var)
}
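# .enter.significance.stars: prints the significance markers for a given
# p-value. The cutoff and star vectors are reversed so the loop tests the
# strictest threshold first (assuming cutoffs are specified from loosest
# to strictest); the first cutoff the p-value falls below determines the
# stars, which are wrapped in math mode unless decimal-mark alignment is on.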
.enter.significance.stars <-
function(p.value, force.math=FALSE) {
if ((!is.na(p.value)) && (!is.null(p.value))) {
if (.format.dec.mark.align == TRUE) {
c <- ""
}
else {
c <- "$"
}
if (force.math == TRUE) { c <- "$" }
cutoffs <- .format.cutoffs[length(.format.cutoffs):1]
stars <- .format.stars[length(.format.stars):1]
for (i in 1:length(cutoffs)) {
if (!is.na(cutoffs[i])) {
if (p.value < cutoffs[i]) {
cat(c,"^{",stars[i],"}",c,sep="")
break
}
}
}
}
}
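# .F.stat: returns c(statistic, df1, df2, p-value) for the model F test,
# taken from summary()'s fstatistic component (plm stores it in a slightly
# different layout); all-NA for model types without an F statistic.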
.F.stat <-
function(object.name) {
F.stat.output <- as.vector(rep(NA,times=4))
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH", "Arima", "maBina","coeftest", "lmer", "glmer", "nlmer", "Gls"))) {
if (model.name %in% c("plm")) {
F.stat.value <- .summary.object$fstatistic$statistic
df.numerator <- .summary.object$fstatistic$parameter["df1"]
df.denominator <- .summary.object$fstatistic$parameter["df2"]
F.stat.p.value <- .summary.object$fstatistic$p.value
F.stat.output <- as.vector(c(F.stat.value, df.numerator, df.denominator, F.stat.p.value))
}
else if (!is.null(suppressMessages(.summary.object$fstatistic["value"]))) {
F.stat.value <- .summary.object$fstatistic["value"]
df.numerator <- .summary.object$fstatistic["numdf"]
df.denominator <- .summary.object$fstatistic["dendf"]
F.stat.p.value <- pf(F.stat.value, df.numerator, df.denominator, lower.tail=FALSE)
F.stat.output <- as.vector(c(F.stat.value, df.numerator, df.denominator, F.stat.p.value))
}
}
names(F.stat.output) <- c("statistic","df1","df2","p-value")
return(cbind(F.stat.output))
}
.gcv.UBRE <-
function(object.name) {
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH", "Arima", "maBina", "coeftest", "lmer", "Gls", "glmer", "nlmer"))) {
if (!is.null(object.name$gcv.ubre)) {
return(as.vector(object.name$gcv.ubre))
}
}
return(NA)
}
# fill in NAs into a if b is the longer vector
.fill.NA <-
function(a, b) {
    a.temp <- a
if (length(a) >= length(b)) {
return(a.temp)
}
else {
length(a.temp) <- length(b)
return(a.temp)
}
}
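# .get.model.name: collapses the detailed identifier from .model.identify
# (e.g. "glm()#binomial#logit") down to the bare constructor name such as
# "glm()", so downstream code can branch on the model family.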
.get.model.name <-
function(object.name) {
return.value <- .model.identify(object.name)
if (substr(return.value,1,5)=="glm()") { return.value <- "glm()" }
if (substr(return.value,1,8)=="svyglm()") { return.value <- "svyglm()" }
if (substr(return.value,1,5)=="gee()") { return.value <- "gee()" }
if (substr(return.value,1,5)=="gam()") { return.value <- "gam()" }
if (substr(return.value,1,6)=="polr()") { return.value <- "polr()" }
if (substr(return.value,1,9)=="survreg()") { return.value <- "survreg()" }
return(return.value)
}
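# .get.p.values.1: returns the p-values for one model. User-supplied
# p-values take precedence; otherwise, if user-supplied coefficients or
# standard errors are present (and auto=TRUE), two-sided p-values are
# recomputed from the normal approximation
#   p = 2 * pnorm(abs(coef / se), lower.tail = FALSE),
# so, e.g., coef/se = 1.96 gives p of roughly 0.05. Failing that, the
# model-specific extraction below is used.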
.get.p.values.1 <-
function(object.name, user.given=NULL, auto=TRUE, f.coef=NULL, f.se=NULL, user.coef=NULL, user.se=NULL, model.num=1) {
if (!is.null(user.given)) {
if (.model.identify(object.name) == "multinom") {
if (!is.null(nrow(user.given))) {
user.given <- as.vector(user.given[model.num,])
}
}
return(user.given)
}
if (auto == TRUE) {
if ((!is.null(user.coef)) || (!is.null(user.se))) {
#if (.model.identify(object.name) == "multinom") {
# f.coef <- as.vector(f.coef[model.num,])
# f.se <- as.vector(f.se[model.num,])
#}
# set the lengths of the vectors to be equal to each other
coef.div <- .fill.NA(f.coef, f.se)
se.div <- .fill.NA(f.se, f.coef)
t.out <- (coef.div / se.div)
auto.return <- 2*pnorm(abs(t.out), mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE)
names(auto.return) <- names(f.coef)
return( auto.return )
}
}
model.name <- .get.model.name(object.name)
if (model.name %in% c("ls", "normal", "logit", "probit", "relogit", "poisson", "negbin", "normal.survey", "poisson.survey", "probit.survey", "logit.survey", "gamma", "gamma.survey",
"cloglog.net", "gamma.net", "logit.net", "probit.net", "brglm", "glm()", "Glm()", "svyglm()", "plm", "pgmm", "ivreg", "lmrob", "glmrob", "dynlm", "rq", "gmm","mclogit","felm")) {
return(.summary.object$coefficients[,4])
}
if (model.name %in% c("censReg")) {
return(.summary.object$estimate[,4])
}
if (model.name %in% c("mnlogit")) {
return(.summary.object$CoefTable[,4])
}
if (model.name %in% c("fGARCH")) {
return(object.name@fit$matcoef[,4])
}
if (model.name %in% c("lme", "nlme")) {
return(.summary.object$tTable[,5])
}
if (model.name %in% c("maBina")) {
return(as.vector(object.name$out[,4]))
}
if (model.name %in% c("mlogit")) {
return(as.vector(.summary.object$CoefTable[,4]))
}
if (model.name %in% c("coeftest")) {
return(as.vector(object.name[,4]))
}
if (model.name %in% c("hetglm")) {
return(as.vector(.summary.object$coefficients$mean[,4]))
}
if (model.name %in% c("selection","heckit")) {
if (!.global.sel.equation) {
indices <- .summary.object$param$index$betaO ### outcome equation
}
else {
indices <- .summary.object$param$index$betaS ### selection equation
}
return(as.vector(.summary.object$estimate[indices,4]))
}
if (model.name %in% c("probit.ss", "binaryChoice")) {
return(as.vector(.summary.object$estimate[,4]))
}
if (model.name %in% c("lagsarlm", "errorsarlm")) {
return(.summary.object$Coef[,4])
}
if (model.name %in% c("lmer", "glmer", "nlmer")) {
Vcov <- as.matrix(vcov(object.name, useScale = FALSE))
coefs <- .summary.object$coefficients[,1]
se <- sqrt(diag(Vcov))
tstat <- coefs / se
pval <- 2 * pnorm(abs(tstat), lower.tail = FALSE)
names(pval) <- names(coefs)
return(pval)
}
if (model.name %in% c("Arima")) {
coef.temp <- object.name$coef
se.temp <- sqrt(diag(object.name$var.coef))
tstat <- coef.temp / se.temp
pval <- 2 * pnorm(abs(tstat), lower.tail = FALSE)
return(pval)
}
if (model.name %in% c("ergm")) {
return(.summary.object$coefs[,4])
}
if (model.name %in% c("clm")) {
if (.format.ordered.intercepts == FALSE) {
return(.summary.object$coefficients[(length(object.name$alpha)+1):(length(object.name$coefficients)),4])
}
else {
return(.summary.object$coefficients[,4])
}
}
else if (model.name %in% c("pmg")) {
coef.temp <- .summary.object$coefficients
std.err.temp <- sqrt(diag(.summary.object$vcov))
t.stat.temp <- coef.temp / std.err.temp
df.temp <- length(.summary.object$residuals)
return( 2 * pt(abs(t.stat.temp), df=df.temp, lower.tail = FALSE, log.p = FALSE) )
}
else if (model.name %in% c("zeroinfl", "hurdle")) {
if (.global.zero.component==FALSE) {
return(.summary.object$coefficients$count[,4])
}
else {
return(.summary.object$coefficients$zero[,4])
}
}
else if (model.name %in% c("normal.gee", "logit.gee", "poisson.gee", "probit.gee", "gamma.gee", "gee()")) {
return(2*pnorm(abs(.summary.object$coefficients[,"Robust z"]), mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE))
}
else if (model.name %in% c("normal.gam", "logit.gam", "probit.gam", "poisson.gam", "gam()")) {
return(.summary.object$p.pv)
}
else if (model.name %in% c("coxph", "clogit")) {
return(.summary.object$coef[,"Pr(>|z|)"])
}
else if (model.name %in% c("exp","lognorm","weibull","tobit", "survreg()")) {
return(.summary.object$table[,"p"])
}
else if (model.name %in% c("rlm")) {
coef.temp <- suppressMessages(.summary.object$coefficients[,"t value"])
coef.temp <- 2*pnorm(abs(coef.temp[seq(from=1, to=length(coef.temp))]), mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE)
return(coef.temp)
}
else if (model.name %in% c("ologit", "oprobit", "polr()")) {
coef.temp <- suppressMessages(.summary.object$coefficients[,"t value"])
if (.format.ordered.intercepts == FALSE) { return(2*pnorm(abs(coef.temp[seq(from=1, to=length(coef.temp)-(length(suppressMessages(.summary.object$lev))-1))]), mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE)) }
else {
return( 2*pnorm(abs(coef.temp[seq(from=1, to=length(coef.temp))]), mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE) )
}
}
else if (model.name %in% c("arima")) {
return(2*pnorm( abs(object.name$coef / (sqrt(diag(object.name$var.coef))) ), mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE))
}
else if (model.name %in% c("tobit(AER)")){
return(.summary.object$coefficients[,"Pr(>|z|)"])
}
else if (model.name %in% c("multinom")) {
if (is.null(nrow(.summary.object$coefficients))) {
coef.temp <- .summary.object$coefficients
se.temp <- .summary.object$standard.errors
}
else {
coef.temp <- .summary.object$coefficients[model.num,]
se.temp <- .summary.object$standard.errors[model.num,]
}
return( 2*pnorm( abs( (coef.temp) / (se.temp) ) , mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE) )
}
else if (model.name %in% c("betareg")) {
return(.summary.object$coefficients$mean[,"Pr(>|z|)"])
}
else if (model.name %in% c("gls")) {
coef.temp <- object.name$coefficients
se.temp <- sqrt(diag(object.name$varBeta))
t.temp <- coef.temp / se.temp
p.temp <- 2*pnorm( abs( t.temp ) , mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE)
return(p.temp)
}
else if (model.name %in% c("weibreg", "coxreg", "phreg", "aftreg", "bj", "cph", "Gls", "lrm", "ols", "psm", "Rq")) {
coef.temp <- object.name$coefficients
se.temp <- sqrt(diag(object.name$var))
t.temp <- coef.temp / se.temp
p.temp <- 2*pnorm( abs( t.temp ) , mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE)
return(p.temp)
}
else if (model.name %in% c("rem.dyad")) {
coef.temp <- object.name$coef
se.temp <- sqrt(diag(object.name$cov))
t.temp <- coef.temp / se.temp
p.temp <- 2*pnorm( abs( t.temp ) , mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE)
return(p.temp)
}
return(NULL)
}
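# .get.p.values: wrapper that aligns the raw p-value vector with the
# model's coefficient names -- unnamed output is padded with NAs or
# truncated to length, while named output is matched by name so that
# coefficients without a p-value get NA.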
.get.p.values <-
function(object.name, user.given=NULL, auto=TRUE, f.coef=NULL, f.se=NULL, user.coef=NULL, user.se=NULL, model.num=1) {
out <- .get.p.values.1(object.name, user.given, auto, f.coef, f.se, user.coef, user.se, model.num)
coef.vars <- .coefficient.variables(object.name)
if (is.null(names(out))) {
if (length(out) < length(coef.vars)) {
out.temp <- rep(NA, times=length(coef.vars)-length(out))
out <- c(out, out.temp)
}
else if (length(out) > length(coef.vars)) {
out <- out[1:length(coef.vars)]
}
names(out) <- coef.vars
}
else {
out.temp <- rep(NA, times = length(coef.vars))
names(out.temp) <- coef.vars
for (i in 1:length(out)) {
name <- names(out)[i]
if (name %in% coef.vars) {
out.temp[name] <- out[i]
}
}
out <- out.temp
}
return(out)
}
.get.scale <-
function(object.name) {
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH","Arima","maBina", "coeftest", "Gls", "lmer", "glmer", "nlmer"))) {
if (!is.null(object.name$scale)) {
if (model.name %in% c("normal.gee", "logit.gee", "poisson.gee", "probit.gee", "gamma.gee", "gee()", "exp","lognorm","weibull","tobit","survreg()","tobit(AER)")) {
return(as.vector(object.name$scale))
}
}
}
return(NA)
}
.get.sigma2 <-
function(object.name) {
model.name <- .get.model.name(object.name)
if (model.name %in% c("arima","fGARCH","maBina", "coeftest", "Gls", "lmer", "glmer", "nlmer")) {
return(NA)
}
if (model.name %in% c("lagsarlm", "errorsarlm")) {
return(.summary.object$s2)
}
if (!is.null(object.name$sigma2)) {
return(as.vector(object.name$sigma2))
}
return(NA)
}
.get.rho <-
function(object.name) {
model.name <- .get.model.name(object.name)
rho.output <- as.vector(rep(NA,times=4))
if (model.name %in% c("selection")) {
i <- object.name$param$index$rho
if (is.null(i)) { i <- object.name$param$index$errTerms["rho"] }
if (!is.null(i)) {
rho.output <- as.vector(.summary.object$estimate[i,])
}
}
if (model.name %in% c("heckit")) {
if (object.name$method == "2step") {
i <- object.name$param$index$rho
rho.output <- as.vector(.summary.object$estimate[i,])
}
}
names(rho.output) <- c("statistic","se","tstat","p-value")
return(cbind(rho.output))
}
.get.mills <-
function(object.name) {
model.name <- .get.model.name(object.name)
mills.output <- as.vector(rep(NA,times=4))
if (model.name %in% c("heckit", "selection")) {
i <- object.name$param$index$Mills
if (!is.null(i)) {
mills.output <- as.vector(.summary.object$estimate[i,])
}
}
names(mills.output) <- c("statistic","se","tstat","p-value")
return(cbind(mills.output))
}
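# .get.standard.errors.1: returns the standard errors for one model,
# preferring user-supplied values and otherwise reading the appropriate
# summary component or sqrt(diag(vcov)) for each supported class.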
.get.standard.errors.1 <-
function(object.name, user.given=NULL, model.num=1) {
if (!is.null(user.given)) {
if (.model.identify(object.name) == "multinom") {
if (!is.null(nrow(user.given))) { user.given <- as.vector(user.given[model.num,]) }
}
return(user.given)
}
model.name <- .get.model.name(object.name)
if (model.name %in% c("ls", "normal", "logit", "probit", "relogit", "poisson", "negbin", "normal.survey", "poisson.survey", "probit.survey", "logit.survey", "gamma", "gamma.survey",
"cloglog.net", "gamma.net", "logit.net", "probit.net", "brglm", "glm()", "Glm()", "svyglm()", "plm", "pgmm", "ivreg", "lmrob", "glmrob", "dynlm", "gmm","mclogit")) {
return(.summary.object$coefficients[,"Std. Error"])
}
if (model.name %in% c("Arima")) {
return(sqrt(diag(object.name$var.coef)))
}
if (model.name %in% c("censReg")) {
return(.summary.object$estimate[,2])
}
if (model.name %in% c("mnlogit")) {
return(.summary.object$CoefTable[,2])
}
if (model.name %in% c("fGARCH")) {
return(object.name@fit$matcoef[,2])
}
if (model.name %in% c("lme", "nlme")) {
return(.summary.object$tTable[,2])
}
if (model.name %in% c("maBina")) {
return(as.vector(object.name$out[,2]))
}
if (model.name %in% c("mlogit")) {
return(as.vector(.summary.object$CoefTable[,2]))
}
if (model.name %in% c("coeftest")) {
return(as.vector(object.name[,2]))
}
if (model.name %in% c("selection","heckit")) {
if (!.global.sel.equation) {
indices <- .summary.object$param$index$betaO ### outcome equation
}
else {
indices <- .summary.object$param$index$betaS ### selection equation
}
return(as.vector(.summary.object$estimate[indices,2]))
}
if (model.name %in% c("probit.ss", "binaryChoice")) {
return(as.vector(.summary.object$estimate[,2]))
}
if (model.name %in% c("hetglm")) {
return(as.vector(.summary.object$coefficients$mean[,2]))
}
if (model.name %in% c("lmer", "glmer", "nlmer")) {
Vcov <- as.matrix(vcov(object.name, useScale = FALSE))
coefs <-.summary.object$coefficients[,1]
se <- sqrt(diag(Vcov))
names(se) <- names(coefs)
return(se)
}
if (model.name %in% c("lagsarlm", "errorsarlm")) {
return(.summary.object$Coef[,2])
}
if (model.name %in% c("ergm")) {
return(.summary.object$coefs[,2])
}
if (model.name %in% c("rq","felm")) {
return(.summary.object$coefficients[,2])
}
if (model.name %in% c("clm")) {
if (.format.ordered.intercepts == FALSE) {
return(.summary.object$coefficients[(length(object.name$alpha)+1):(length(object.name$coefficients)),2])
}
else {
return(.summary.object$coefficients[,2])
}
}
else if (model.name %in% c("pmg")) {
return (sqrt(diag(.summary.object$vcov)))
}
if (model.name %in% c("zeroinfl", "hurdle")) {
if (.global.zero.component == FALSE) {
return(.summary.object$coefficients$count[,"Std. Error"])
}
else {
return(.summary.object$coefficients$zero[,"Std. Error"])
}
}
else if (model.name %in% c("normal.gee", "logit.gee", "poisson.gee", "probit.gee", "gamma.gee", "gee()")) {
return(.summary.object$coefficients[,"Robust S.E."])
}
else if (model.name %in% c("normal.gam", "logit.gam", "probit.gam", "poisson.gam", "gam()")) {
temp.se <- .summary.object$se
names(temp.se) <- names(.summary.object$p.coeff)
return(temp.se)
}
else if (model.name %in% c("coxph")) {
return(.summary.object$coef[,"se(coef)"])
}
else if (model.name %in% c("clogit")) {
return(.summary.object$coef[,"se(coef)"])
}
else if (model.name %in% c("exp","lognorm","weibull","tobit","survreg()")) {
return(.summary.object$table[,"Std. Error"])
}
else if (model.name %in% c("rlm")) {
return(suppressMessages(.summary.object$coefficients[,"Std. Error"]))
}
else if (model.name %in% c("ologit", "oprobit", "polr()")) {
se.temp <- suppressMessages(.summary.object$coefficients[,"Std. Error"])
if (.format.ordered.intercepts == FALSE) { return(se.temp[seq(from=1, to=length(se.temp)-(length(suppressMessages(.summary.object$lev))-1))]) }
else { return(se.temp) }
}
else if (model.name %in% c("arima")) {
return( sqrt(diag(object.name$var.coef)) )
}
else if (model.name %in% c("tobit(AER)")){
return(.summary.object$coefficients[,"Std. Error"])
}
else if (model.name %in% c("multinom")) {
if (is.null(nrow(.summary.object$coefficients))) {
se.temp <- .summary.object$standard.errors
}
else {
se.temp <- .summary.object$standard.errors[model.num,]
}
return(se.temp)
}
else if (model.name %in% c("betareg")) {
return(.summary.object$coefficients$mean[,"Std. Error"])
}
else if (model.name %in% c("gls")) {
se.temp <- sqrt(diag(object.name$varBeta))
return(se.temp)
}
else if (model.name %in% c("weibreg", "coxreg", "phreg", "aftreg", "bj", "cph", "Gls", "lrm", "ols", "psm", "Rq")) {
return( sqrt(diag(object.name$var) ) )
}
else if (model.name %in% c("rem.dyad")) {
return( sqrt(diag(object.name$cov) ) )
}
return(NULL)
}
.get.standard.errors <-
function(object.name, user.given=NULL, model.num=1) {
out <- .get.standard.errors.1(object.name, user.given, model.num)
coef.vars <- .coefficient.variables(object.name)
if (is.null(names(out))) {
if (length(out) < length(coef.vars)) {
out.temp <- rep(NA, times=length(coef.vars)-length(out))
out <- c(out, out.temp)
}
else if (length(out) > length(coef.vars)) {
out <- out[1:length(coef.vars)]
}
names(out) <- coef.vars
}
else {
out.temp <- rep(NA, times = length(coef.vars))
names(out.temp) <- coef.vars
for (i in 1:length(out)) {
name <- names(out)[i]
if (name %in% coef.vars) {
out.temp[name] <- out[i]
}
}
out <- out.temp
}
return(out)
}
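# .get.ci.lb.1 / .get.ci.rb.1: confidence-interval bounds are only ever
# user-supplied (via ci.custom); no automatic extraction is attempted, so
# NULL is returned in the default case and the wrappers fill the bounds
# with NAs aligned to the coefficient names.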
.get.ci.lb.1 <-
function(object.name, user.given=NULL, model.num=1) {
if (!is.null(user.given)) {
if (.model.identify(object.name) == "multinom") {
if (!is.null(nrow(user.given))) { user.given <- as.vector(user.given[model.num,]) }
}
return(user.given)
}
return(NULL)
}
.get.ci.lb <-
function(object.name, user.given=NULL, model.num=1) {
out <- .get.ci.lb.1(object.name, user.given, model.num)
coef.vars <- .coefficient.variables(object.name)
if (is.null(names(out))) {
if (length(out) < length(coef.vars)) {
out.temp <- rep(NA, times=length(coef.vars)-length(out))
out <- c(out, out.temp)
}
else if (length(out) > length(coef.vars)) {
out <- out[1:length(coef.vars)]
}
names(out) <- coef.vars
}
else {
out.temp <- rep(NA, times = length(coef.vars))
names(out.temp) <- coef.vars
for (i in 1:length(out)) {
name <- names(out)[i]
if (name %in% coef.vars) {
out.temp[name] <- out[i]
}
}
out <- out.temp
}
return(out)
}
.get.ci.rb.1 <-
function(object.name, user.given=NULL, model.num=1) {
if (!is.null(user.given)) {
if (.model.identify(object.name) == "multinom") {
if (!is.null(nrow(user.given))) { user.given <- as.vector(user.given[model.num,]) }
}
return(user.given)
}
return(NULL)
}
.get.ci.rb <-
function(object.name, user.given=NULL, model.num=1) {
out <- .get.ci.rb.1(object.name, user.given, model.num)
coef.vars <- .coefficient.variables(object.name)
if (is.null(names(out))) {
if (length(out) < length(coef.vars)) {
out.temp <- rep(NA, times=length(coef.vars)-length(out))
out <- c(out, out.temp)
}
else if (length(out) > length(coef.vars)) {
out <- out[1:length(coef.vars)]
}
names(out) <- coef.vars
}
else {
out.temp <- rep(NA, times = length(coef.vars))
names(out.temp) <- coef.vars
for (i in 1:length(out)) {
name <- names(out)[i]
if (name %in% coef.vars) {
out.temp[name] <- out[i]
}
}
out <- out.temp
}
return(out)
}
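# .get.t.stats.1: returns the t/z statistics for one model. As with the
# p-values, user-supplied statistics win; with user-supplied coefficients
# or standard errors the ratio coef/se is recomputed directly; otherwise
# the model-specific summary component is read.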
.get.t.stats.1 <-
function(object.name, user.given=NULL, auto=TRUE, f.coef=NULL, f.se=NULL, user.coef=NULL, user.se=NULL, model.num=1) {
if (!is.null(user.given)) {
if (.model.identify(object.name) == "multinom") {
if (!is.null(nrow(user.given))) {
user.given <- as.vector(user.given[model.num,])
}
}
return(user.given)
}
if (auto == TRUE) {
if ((!is.null(user.coef)) || (!is.null(user.se))) {
#if (.model.identify(object.name) == "multinom") {
# f.coef <- as.vector(f.coef[model.num,])
# f.se <- as.vector(f.se[model.num,])
#}
# set the lengths of the vectors to be equal to each other
coef.div <- .fill.NA(f.coef, f.se)
se.div <- .fill.NA(f.se, f.coef)
auto.return <- coef.div / se.div
names(auto.return) <- names(f.coef)
return(auto.return)
}
}
model.name <- .get.model.name(object.name)
if (model.name %in% c("ls", "normal", "logit", "probit", "relogit", "poisson", "negbin", "normal.survey", "poisson.survey", "probit.survey", "logit.survey", "gamma", "gamma.survey",
"cloglog.net", "gamma.net", "logit.net", "probit.net", "glm()", "Glm()", "svyglm()","plm", "pgmm", "ivreg", "lmrob", "glmrob", "dynlm", "gmm", "mclogit", "felm")) {
return(.summary.object$coefficients[,3])
}
if (model.name %in% c("censReg")) {
return(.summary.object$estimate[,3])
}
if (model.name %in% c("mnlogit")) {
return(.summary.object$CoefTable[,3])
}
if (model.name %in% c("fGARCH")) {
return(object.name@fit$matcoef[,3])
}
if (model.name %in% c("lme", "nlme")) {
return(.summary.object$tTable[,4])
}
if (model.name %in% c("coeftest")) {
return(as.vector(object.name[,3]))
}
if (model.name %in% c("maBina")) {
return(as.vector(object.name$out[,3]))
}
if (model.name %in% c("mlogit")) {
return(as.vector(.summary.object$CoefTable[,3]))
}
if (model.name %in% c("selection","heckit")) {
if (!.global.sel.equation) {
indices <- .summary.object$param$index$betaO ### outcome equation
}
else {
indices <- .summary.object$param$index$betaS ### selection equation
}
return(as.vector(.summary.object$estimate[indices,3]))
}
if (model.name %in% c("probit.ss", "binaryChoice")) {
return(as.vector(.summary.object$estimate[,3]))
}
if (model.name %in% c("hetglm")) {
return(as.vector(.summary.object$coefficients$mean[,3]))
}
if (model.name %in% c("lmer", "glmer", "nlmer")) {
Vcov <- as.matrix(vcov(object.name, useScale = FALSE))
coefs <- .summary.object$coefficients[,1]
se <- sqrt(diag(Vcov))
tstat <- coefs / se
names(tstat) <- names(coefs)
return(tstat)
}
if (model.name %in% c("ergm")) {
return((.summary.object$coefs[,1])/(.summary.object$coefs[,2]))
}
if (model.name %in% c("lagsarlm", "errorsarlm")) {
return(.summary.object$Coef[,3])
}
if (model.name %in% c("rq")) {
return(.summary.object$coefficients[,3])
}
if (model.name %in% c("clm")) {
if (.format.ordered.intercepts == FALSE) {
return(.summary.object$coefficients[(length(object.name$alpha)+1):(length(object.name$coefficients)),3])
}
else {
return(.summary.object$coefficients[,3])
}
}
else if (model.name %in% c("pmg")) {
coef.temp <- .summary.object$coef
std.err.temp <- sqrt(diag(.summary.object$vcov))
t.stat.temp <- coef.temp / std.err.temp
return(t.stat.temp)
}
else if (model.name %in% c("zeroinfl", "hurdle")) {
if (.global.zero.component == FALSE) {
return(.summary.object$coefficients$count[,3])
}
else {
return(.summary.object$coefficients$zero[,3])
}
}
else if (model.name %in% c("normal.gee", "logit.gee", "poisson.gee", "probit.gee", "gamma.gee", "gee()")) {
return(.summary.object$coefficients[,"Robust z"])
}
else if (model.name %in% c("normal.gam", "logit.gam", "probit.gam", "poisson.gam", "gam()")) {
return(.summary.object$p.t)
}
else if (model.name %in% c("coxph", "clogit")) {
return(.summary.object$coef[,"z"])
}
else if (model.name %in% c("exp","lognorm","weibull", "tobit","survreg()")) {
return(.summary.object$table[,"z"])
}
else if (model.name %in% c("rlm")) {
return(suppressMessages(.summary.object$coefficients[,"t value"]))
}
else if (model.name %in% c("ologit", "oprobit", "polr()")) {
tstat.temp <- suppressMessages(.summary.object$coefficients[,"t value"])
if (.format.ordered.intercepts == FALSE) { return(tstat.temp[seq(from=1, to=length(tstat.temp)-(length(suppressMessages(.summary.object$lev))-1))]) }
else { return(tstat.temp) }
}
else if (model.name %in% c("arima")) {
return( object.name$coef / (sqrt(diag(object.name$var.coef))) )
}
else if (model.name %in% c("tobit(AER)")){
return(.summary.object$coefficients[,"z value"])
}
else if (model.name %in% c("multinom")) {
if (is.null(nrow(.summary.object$coefficients))) {
coef.temp <- .summary.object$coefficients
se.temp <- .summary.object$standard.errors
}
else {
coef.temp <- .summary.object$coefficients[model.num,]
se.temp <- .summary.object$standard.errors[model.num,]
}
return( (coef.temp) / (se.temp) )
}
else if (model.name %in% c("betareg")) {
return(.summary.object$coefficients$mean[,"z value"])
}
else if (model.name %in% c("gls")) {
coef.temp <- object.name$coefficients
se.temp <- sqrt(diag(object.name$varBeta))
return(coef.temp / se.temp)
}
else if (model.name %in% c("weibreg", "coxreg", "phreg", "aftreg", "bj", "cph", "Gls", "lrm", "ols", "psm", "Rq")) {
coef.temp <- object.name$coefficients
se.temp <- sqrt(diag(object.name$var))
return(coef.temp / se.temp )
}
else if (model.name %in% c("Arima")) {
coef.temp <- object.name$coef
se.temp <- sqrt(diag(object.name$var.coef))
return(coef.temp / se.temp )
}
else if (model.name %in% c("rem.dyad")) {
coef.temp <- object.name$coef
se.temp <- sqrt(diag(object.name$cov))
return(coef.temp / se.temp )
}
return(NULL)
}
.get.t.stats <-
function(object.name, user.given=NULL, auto=TRUE, f.coef=NULL, f.se=NULL, user.coef=NULL, user.se=NULL, model.num=1) {
out <- .get.t.stats.1(object.name, user.given, auto, f.coef, f.se, user.coef, user.se, model.num)
coef.vars <- .coefficient.variables(object.name)
if (is.null(names(out))) {
if (length(out) < length(coef.vars)) {
out.temp <- rep(NA, times=length(coef.vars)-length(out))
out <- c(out, out.temp)
}
else if (length(out) > length(coef.vars)) {
out <- out[1:length(coef.vars)]
}
names(out) <- coef.vars
}
else {
out.temp <- rep(NA, times = length(coef.vars))
names(out.temp) <- coef.vars
for (i in 1:length(out)) {
name <- names(out)[i]
if (name %in% coef.vars) {
out.temp[name] <- out[i]
}
}
out <- out.temp
}
return(out)
}
.get.theta <-
function(object.name) {
theta.output <- as.vector(rep(NA,times=4))
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH","Arima","maBina", "coeftest", "Gls", "lmer", "glmer", "nlmer"))) {
if ((!is.null(object.name$theta)) && (!is.null(object.name$SE.theta))) {
theta.value <- object.name$theta
theta.se.value <- object.name$SE.theta
theta.tstat.value <- theta.value / theta.se.value
theta.p.value <- 2*pnorm(abs(theta.tstat.value), mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE)
theta.output <- as.vector(c(theta.value, theta.se.value, theta.tstat.value, theta.p.value))
}
}
names(theta.output) <- c("statistic","se","tstat","p-value")
return(cbind(theta.output))
}
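# .inside.bracket: returns the top-level, comma-separated arguments inside
# the first parenthesized expression of a string, respecting nested
# parentheses. For example, .inside.bracket("Surv(time, status) ~ x")
# yields c("time", "status"); this is how survival-style dependent
# variables are unwrapped above.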
.inside.bracket <-
function(s) {
process.string <- ""
return.vector <- NULL
if (!is.character(s)) { return("") }
if (is.null(s)) { return("") }
if (is.na(s)) { return("") }
if (s=="") { return("") }
if (length(s) > 1) { return("") }
inside.inner.bracket <- 0
for (i in seq(from = (regexpr("(",s,fixed=TRUE)[1])+1, to = nchar(s))) {
letter <- substr(s,i,i)
if (letter == "(") { inside.inner.bracket <- inside.inner.bracket + 1 }
if (letter == ")") { inside.inner.bracket <- inside.inner.bracket - 1 }
if ((letter == ",") && (inside.inner.bracket == 0)) {
return.vector <- c(return.vector, process.string)
process.string <- ""
}
else if (inside.inner.bracket >= 0) { process.string <- paste(process.string, letter, sep="") }
else { break }
}
if (process.string != "") { return.vector <- c(return.vector, process.string) }
return (.trim(return.vector))
}
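# .iround: formats a number for table output. It rounds to decimal.places
# (optionally extending until a non-zero digit appears), rounds p-values
# upward when round.up.positive=TRUE so small values never display as 0,
# inserts digit separators according to .format.digit.separator.where,
# optionally drops the initial zero, pads the decimal part with trailing
# zeros, and typesets the negative sign in math mode.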
.iround <-
function(x, decimal.places=0, round.up.positive=FALSE, simply.output=FALSE) {
x.original <- x
first.part <- ""
if (is.na(x) || is.null(x)) { return("") }
if (simply.output == TRUE) {
if (!is.numeric(x)) { return(.remove.special.chars(x)) }
}
if (x.original < 0) { x <- abs(x) }
if (!is.na(decimal.places)) {
if ((.format.until.nonzero.digit == FALSE) || (decimal.places <= 0)) {
round.result <- round(x, digits=decimal.places)
}
else {
temp.places <- decimal.places
if (!.is.all.integers(x)) {
while ((round(x, digits=temp.places) == 0) && (temp.places < (decimal.places + .format.max.extra.digits))) {
temp.places <- temp.places + 1
}
}
round.result <- round(x, digits=temp.places)
decimal.places <- temp.places
}
if ((round.up.positive==TRUE) && (round.result < x)) { # useful for p-values that should be rounded up
if (x > (10^((-1)*(decimal.places+1)))) {
round.result <- round.result + 10^((-1)*decimal.places)
}
else { round.result <- 0 }
}
}
else { # if the decimal place is NA
round.result <- x
}
round.result.char <- as.character(format(round.result, scientific=FALSE))
split.round.result <- unlist(strsplit(round.result.char, "\\."))
## first deal with digit separator
for (i in seq(from=1, to=length(.format.digit.separator.where))) {
if (.format.digit.separator.where[i]<=0) {
.format.digit.separator.where[i] <<- -1
}
}
separator.count <- 1
length.integer.part <- nchar(split.round.result[1])
digits.in.separated.unit <- 0
for (i in seq(from=length.integer.part, to=1)) {
if ((digits.in.separated.unit == .format.digit.separator.where[separator.count]) && (substr(split.round.result[1],i,i)!="-")){
first.part <- paste(.format.digit.separator,first.part,sep="")
if (separator.count < length(.format.digit.separator.where)) { separator.count <- separator.count + 1 }
digits.in.separated.unit <- 0
}
first.part <- paste(substr(split.round.result[1],i,i),first.part,sep="")
digits.in.separated.unit <- digits.in.separated.unit + 1
}
    # remove the initial zero when there are decimal places, if requested
if (.format.initial.zero==FALSE) {
if ((round.result > 0) && (round.result < 1)) {
if ((is.na(decimal.places)) || (decimal.places > 0)) {
first.part <- ""
}
}
}
if (x.original < 0) { # use math-mode for a better looking negative sign
if (.format.dec.mark.align == TRUE) {
first.part <- paste("-", first.part, sep="")
}
else {
first.part <- paste("$-$", first.part, sep="")
}
}
# now deal with the decimal part
if (!is.na(decimal.places)) {
if (decimal.places <= 0) {
return(first.part)
}
}
if (length(split.round.result)==2) {
if (is.na(decimal.places)) { return(paste(first.part,.format.decimal.character,split.round.result[2],sep="")) }
if (nchar(split.round.result[2]) < decimal.places) {
decimal.part <- split.round.result[2]
for (i in seq(from = 1,to = (decimal.places - nchar(split.round.result[2])))) {
decimal.part <- paste(decimal.part,"0", sep="")
}
return(paste(first.part,.format.decimal.character,decimal.part,sep=""))
}
else { return(paste(first.part,.format.decimal.character,split.round.result[2],sep="")) }
}
    else if (length(split.round.result)==1) {   # integer, no decimal component
      if (is.na(decimal.places)) { return(first.part) }   # nothing to pad with
decimal.part <- ""
for (i in seq(from = 1,to = decimal.places)) {
decimal.part <- paste(decimal.part,"0", sep="")
}
return(paste(first.part,.format.decimal.character,decimal.part,sep=""))
}
else { return(NULL) }
}
is.wholenumber <-
function(x, tol = .Machine$double.eps^0.5) abs(x - round(x)) < tol
.is.all.integers <-
function(x) {
if (!is.numeric(x)) { return(FALSE) }
    if (length(x[!is.na(x)]) == length(is.wholenumber(x)[(!is.na(x)) & (is.wholenumber(x)==TRUE)])) {   # element-wise &, not scalar &&
return(TRUE)
}
else { return (FALSE) }
}
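# .log.likelihood: extracts the maximized log-likelihood from the slot
# each model class uses; when only an AIC is available it is inverted via
#   LL = -(AIC - 2k) / 2,
# with k the number of estimated coefficients.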
.log.likelihood <-
function(object.name) {
model.name <- .get.model.name(object.name)
if (model.name %in% c("coeftest","maBina","gamma.net","logit.net","probit.net","cloglog.net")) {
return(NA)
}
if (model.name %in% c("fGARCH")) {
return(object.name@fit$value)
}
if (model.name %in% c("mlogit", "mnlogit")) {
return(as.vector(object.name$logLik[1]))
}
if (model.name %in% c("arima", "betareg", "zeroinfl", "hurdle", "hetglm", "Arima")) {
return(as.vector(object.name$loglik))
}
if (model.name %in% c("selection","binaryChoice", "probit.ss")) {
return(as.vector(.summary.object$loglik))
}
if (model.name %in% c("lme","nlme","lmer", "glmer", "nlmer","censReg")) {
return(as.vector(logLik(object.name)[1]))
}
if (model.name %in% c("lagsarlm", "errorsarlm")) {
return(as.vector(.summary.object$LL))
}
if (model.name %in% c("clm", "gls")) {
return(as.vector(object.name$logLik))
}
else if (model.name %in% c("coxph", "clogit", "exp", "weibull", "lognorm","tobit", "tobit(AER)", "survreg()")) {
return(as.vector(.summary.object$loglik[2]))
}
else if (model.name %in% c("weibreg", "coxreg", "phreg", "aftreg")) {
return(as.vector(object.name$loglik[2]))
}
else if (!is.null(object.name$aic)) {
return(as.vector(-(0.5)*(object.name$aic-2*length(.summary.object$coefficients[,"Estimate"]))))
}
return(NA)
}
.logrank.stat <-
function(object.name) {
logrank.output <- as.vector(rep(NA,times=3))
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH","Arima","maBina", "coeftest", "Gls", "lmer", "glmer", "nlmer"))) {
      if (!is.null(.summary.object$sctest)) {
logrank.value <- suppressMessages(.summary.object$sctest[1])
df.value <- suppressMessages(.summary.object$sctest[2])
logrank.p.value <- suppressMessages(.summary.object$sctest[3])
logrank.output <- as.vector(c(logrank.value, df.value, logrank.p.value))
}
}
names(logrank.output) <- c("statistic","df1","p-value")
return(cbind(logrank.output))
}
.lr.stat <-
function(object.name) {
log.output <- as.vector(rep(NA,times=3))
model.name <- .get.model.name(object.name)
if (model.name %in% c("mlogit")) {
log.value <- as.vector(.summary.object$lratio$statistic["chisq"])
if (!is.null(log.value)) {
df.value <- as.vector(length(object.name$coeff))
log.p.value <- as.vector(pchisq(log.value,df.value,lower.tail=FALSE))
log.output <- as.vector(c(log.value, df.value, log.p.value))
}
}
else if (model.name %in% c("lagsarlm", "errorsarlm")) {
log.value <- as.vector(.summary.object$LR1$statistic)
df.value <- as.vector(.summary.object$LR1$parameter)
log.p.value <- as.vector(.summary.object$LR1$p.value)
log.output <- as.vector(c(log.value, df.value, log.p.value))
}
else if (!(model.name %in% c("arima","fGARCH","Arima","maBina","coeftest","Gls","lmer","glmer","nlmer"))) {
if (!is.null(.summary.object$logtest)) {
log.value <- suppressMessages(.summary.object$logtest[1])
df.value <- suppressMessages(.summary.object$logtest[2])
log.p.value <- suppressMessages(.summary.object$logtest[3])
log.output <- as.vector(c(log.value, df.value, log.p.value))
}
}
names(log.output) <- c("statistic","df1","p-value")
return(cbind(log.output))
}
.max.r.squared <-
function(object.name) {
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH","fGARCH","Arima","maBina", "coeftest", "lmer", "glmer", "nlmer", "Gls", "Arima"))) {
if (model.name %in% c("coxph", "clogit")) {
return(as.vector(.summary.object$rsq[2]))
}
}
return(NA)
}
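# .model.identify: maps a fitted object to the internal model-name string
# used throughout this file -- first by checking class(object)[1], then by
# inspecting object$call together with the family/link (for glm-like fits)
# or the distribution (for survreg fits).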
.model.identify <-
function(object.name) {
if (class(object.name)[1]=="NULL") { #### !!!!! continue this
return("NULL")
}
if (class(object.name)[1]=="Arima") {
return("Arima")
}
if (class(object.name)[1]=="fGARCH") {
return("fGARCH")
}
if (class(object.name)[1]=="censReg") {
return("censReg")
}
if (class(object.name)[1]=="ergm") {
return("ergm")
}
if (class(object.name)[1]=="mnlogit") {
return("mnlogit")
}
if (class(object.name)[1]=="lme") {
return("lme")
}
if (class(object.name)[1]=="nlme") {
return("nlme")
}
if (class(object.name)[1]=="felm") {
return("felm")
}
if (class(object.name)[1] %in% c("mclogit","mclogitRandeff")) {
return("mclogit")
}
if (class(object.name)[1]=="mlogit") {
return("mlogit")
}
if (class(object.name)[1]=="maBina") {
return("maBina")
}
if (class(object.name)[1]=="coeftest") {
return("coeftest")
}
if (class(object.name)[1]=="rem.dyad") {
return("rem.dyad")
}
if (class(object.name)[1]=="lmerMod") {
return("lmer")
}
if (class(object.name)[1]=="glmerMod") {
return("glmer")
}
if (class(object.name)[1]=="nlmerMod") {
return("nlmer")
}
if (!is.null(object.name$call)) {
if (object.name$call[1]=="lm()") { return("ls") }
else if ((object.name$call[1]=="glm()") || (object.name$call[1]=="Glm()")) {
if (object.name$family$family=="gaussian") {
if (object.name$family$link=="identity") {
return("normal")
}
}
else if (object.name$family$family=="binomial") {
if (object.name$family$link=="probit") {
return("probit")
}
if (object.name$family$link=="logit") {
return("logit")
}
}
else if (object.name$family$family=="poisson") {
if (object.name$family$link=="log") {
return("poisson")
}
}
else if (object.name$family$family=="Gamma") {
if (object.name$family$link=="inverse") {
return("gamma")
}
}
return(paste("glm()#",object.name$family$family,"#",object.name$family$link, sep=""))
}
else if (object.name$call[1]=="svyglm()") {
if (object.name$family$family=="gaussian") {
if (object.name$family$link=="identity") {
return("normal.survey")
}
}
else if ((object.name$family$family=="binomial") || (object.name$family$family=="quasibinomial")) {
if (object.name$family$link=="probit") {
return("probit.survey")
}
if (object.name$family$link=="logit") {
return("logit.survey")
}
}
else if (object.name$family$family=="poisson") {
if (object.name$family$link=="log") {
return("poisson.survey")
}
}
else if (object.name$family$family=="Gamma") {
if (object.name$family$link=="inverse") {
return("gamma.survey")
}
}
return(paste("svyglm()#",object.name$family$family,"#",object.name$family$link, sep=""))
}
else if (object.name$call[1]=="gam()") {
if (object.name$family$family=="gaussian") {
if (object.name$family$link=="identity") {
return("normal.gam")
}
}
else if (object.name$family$family=="binomial") {
if (object.name$family$link=="probit") {
return("probit.gam")
}
if (object.name$family$link=="logit") {
return("logit.gam")
}
}
else if (object.name$family$family=="poisson") {
if (object.name$family$link=="log") {
return("poisson.gam")
}
}
else if (object.name$family$family=="Gamma") {
if (object.name$family$link=="inverse") {
return("gamma.gam")
}
}
return(paste("gam()#",object.name$family$family,"#",object.name$family$link, sep=""))
}
else if (object.name$call[1]=="polr()") {
if (object.name$method=="logistic") {
return("ologit")
}
else if (object.name$method=="probit") {
return("oprobit")
}
return(paste("polr()#",object.name$method, sep=""))
}
else if (object.name$call[1]=="gee()") {
if (object.name$family$family=="gaussian") {
if (object.name$family$link=="identity") {
return("normal.gee")
}
}
else if (object.name$family$family=="binomial") {
if (object.name$family$link=="probit") {
return("probit.gee")
}
if (object.name$family$link=="logit") {
return("logit.gee")
}
}
else if (object.name$family$family=="poisson") {
if (object.name$family$link=="log") {
return("poisson.gee")
}
}
else if (object.name$family$family=="Gamma") {
if (object.name$family$link=="inverse") {
return("gamma.gee")
}
}
return(paste("gee()#",object.name$family$family,"#",object.name$family$link, sep=""))
}
else if (object.name$call[1]=="survreg()") {
if (object.name$dist=="exponential") {
return("exp")
}
else if (object.name$dist=="weibull") {
return("weibull")
}
else if (object.name$dist=="lognorm") {
return("lognormal")
}
else if (object.name$dist=="gaussian") {
return("tobit")
}
return(paste("survreg()#",object.name$dist, sep=""))
}
else if (object.name$call[1]=="glm.nb()") {
return("negbin")
}
else if (object.name$call[1]=="\"glm.nb\"()") {
return("negbin")
}
if (!is.null(object.name$userCall)) {
if (object.name$userCall[1]=="clogit()") {
return("clogit")
}
}
if (object.name$call[1]=="coxph()") {
return("coxph")
}
if (object.name$call[1]=="pmg()") {
return("pmg")
}
if (object.name$call[1]=="selection()") {
return("selection")
}
if (object.name$call[1]=="heckit()") {
return("heckit")
}
if (object.name$call[1]=="probit()") {
return("probit.ss")
}
if (object.name$call[1]=="binaryChoice()") {
return("binaryChoice")
}
if (object.name$call[1]=="brglm()") {
return("brglm")
}
if (object.name$call[1]=="gls()") {
return("gls")
}
if (object.name$call[1]=="clm()") {
return("clm")
}
if (object.name$call[1]=="lmrob()") {
return("lmrob")
}
if (object.name$call[1]=="glmrob()") {
return("glmrob")
}
if (object.name$call[1]=="dynlm()") {
return("dynlm")
}
if (object.name$call[1]=="rq()") {
return("rq")
}
if (object.name$call[1]=="gmm()") {
return("gmm")
}
if (object.name$call[1]=="lagsarlm()") {
return("lagsarlm")
}
if (object.name$call[1]=="errorsarlm()") {
return("errorsarlm")
}
if (object.name$call[1]=="rlm()") {
return("rlm")
}
if (object.name$call[1]=="aftreg()") {
return("aftreg")
}
if (object.name$call[1]=="coxreg()") {
return("coxreg")
}
if (object.name$call[1]=="phreg()") {
return("phreg")
}
if (object.name$call[1]=="weibreg()") {
return("weibreg")
}
if (object.name$call[1]=="bj()") {
return("bj")
}
if (object.name$call[1]=="cph()") {
return("cph")
}
if (object.name$call[1]=="Gls()") {
return("Gls")
}
if (object.name$call[1]=="lrm()") {
return("lrm")
}
if (object.name$call[1]=="ols()") {
return("ols")
}
if (object.name$call[1]=="psm()") {
return("psm")
}
if (object.name$call[1]=="Rq()") {
return("Rq")
}
if (object.name$call[1]=="hetglm()") {
return("hetglm")
}
else if (object.name$call[1]=="relogit()") {
return("relogit")
}
else if (object.name$call[1]=="netbinom()") {
if (object.name$call$LF=="probit") { return("probit.net") }
if (object.name$call$LF=="logit") { return("logit.net") }
if (object.name$call$LF=="cloglog") { return("cloglog.net") }
}
else if (object.name$call[1]=="netgamma()") {
return("gamma.net")
}
else if (object.name$call[1]=="zelig()") {
if (object.name$call$model %in% c("ls","normal","logit","probit","relogit","poisson","poisson.survey",
"negbinom","probit.survey","logit.survey","normal.gee","logit.gee","probit.gee",
"poisson.gee","normal.gam","logit.gam","probit.gam","poisson.gam","exp",
"coxph","weibull","lognorm","normal.survey","gamma","gamma.survey",
"gamma.gee","cloglog.net","logit.net","probit.net","gamma.net","ologit",
"oprobit","arima","tobit")) {
return(object.name$call$model)
}
else { return("unsupported zelig") }
}
else if (object.name$call[1]=="tobit()") {
return("tobit(AER)")
}
else if (object.name$call[1]=="multinom()") {
return("multinom")
}
else if (object.name$call[1]=="betareg()") {
return("betareg")
}
else if (object.name$call[1]=="zeroinfl()") {
return("zeroinfl")
}
else if (object.name$call[1]=="hurdle()") {
return("hurdle")
}
else if (object.name$call[1]=="plm()") {
return("plm")
}
else if (object.name$call[1]=="pgmm()") {
return("pgmm")
}
else if (object.name$call[1]=="ivreg()") {
return("ivreg")
}
}
return("unknown")
}
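# .new.table: (re)initializes the package-level .global.* state and fills
# it one column per model. For multinom fits, each row of the summary
# coefficient matrix (one per outcome category) becomes its own column.
# The summary object is cached in .summary.object, with special handling
# for Glm, rq, and the rms/eha classes that should not be re-summarized.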
.new.table <-
function(object.name, user.coef=NULL, user.se=NULL, user.t=NULL, user.p=NULL, auto.t=TRUE, auto.p=TRUE, user.ci.lb=NULL, user.ci.rb=NULL) {
if (class(object.name)[1] == "Glm") {
.summary.object <<- summary.glm(object.name)
}
else if (!(.model.identify(object.name) %in% c("aftreg", "coxreg","phreg","weibreg", "bj", "cph", "Gls", "lrm", "ols", "psm", "Rq"))) {
.summary.object <<- summary(object.name)
}
else {
.summary.object <<- object.name
}
if (.model.identify(object.name) == "rq") {
.summary.object <<- suppressMessages(summary(object.name, se=.format.rq.se))
}
model.num.total <- 1 # model number for multinom, etc.
if (.model.identify(object.name) == "multinom") {
if (!is.null(nrow(.summary.object$coefficients))) {
model.num.total <- nrow(.summary.object$coefficients)
}
}
# set to null
.global.models <<- NULL
.global.dependent.variables <<- NULL
.global.dependent.variables.written <<- NULL
.global.coefficient.variables <<- NULL
.global.coef.vars.by.model <<- NULL
.global.coefficients <<- NULL
.global.std.errors <<- NULL
.global.ci.lb <<- NULL
.global.ci.rb <<- NULL
.global.t.stats <<- NULL
.global.p.values <<- NULL
.global.N <<- NULL
.global.LL <<- NULL
.global.R2 <<- NULL
.global.max.R2 <<- NULL
.global.adj.R2 <<- NULL
.global.AIC <<- NULL
.global.BIC <<- NULL
.global.scale <<- NULL
.global.UBRE <<- NULL
.global.sigma2 <<- NULL
.global.theta <<- NULL
.global.rho <<- NULL
.global.mills <<- NULL
.global.SER <<- NULL
.global.F.stat <<- NULL
.global.chi.stat <<- NULL
.global.wald.stat <<- NULL
.global.lr.stat <<- NULL
.global.logrank.stat <<- NULL
.global.null.deviance <<- NULL
.global.residual.deviance <<- NULL
for (model.num in 1:model.num.total) {
.global.models <<- c(.global.models, suppressMessages(as.vector(.model.identify(object.name))))
.global.dependent.variables <<- c(.global.dependent.variables, suppressMessages(.dependent.variable(object.name, model.num)))
.global.dependent.variables.written <<- c(.global.dependent.variables.written, suppressMessages(.dependent.variable.written(object.name, model.num)))
.global.coefficient.variables <<- suppressMessages(.coefficient.variables(object.name))
.global.coef.vars.by.model <<- suppressMessages(cbind(.global.coef.vars.by.model, .global.coefficient.variables))
get.coef <- suppressMessages(.get.coefficients(object.name, user.coef, model.num=model.num))
get.se <- suppressMessages(.get.standard.errors(object.name, user.se, model.num=model.num))
.global.coefficients <<- cbind(.global.coefficients, get.coef)
.global.std.errors <<- cbind(.global.std.errors, get.se)
.global.ci.lb <<- suppressMessages(cbind(.global.ci.lb, .get.ci.lb(object.name, user.ci.lb, model.num=model.num)))
.global.ci.rb <<- suppressMessages(cbind(.global.ci.rb, .get.ci.rb(object.name, user.ci.rb, model.num=model.num)))
feed.coef <- NA; feed.se <- NA
if (!is.null(get.coef)) { feed.coef <- get.coef }
if (!is.null(get.se)) { feed.se <- get.se }
if (!is.null(user.coef)) { feed.coef <- user.coef } # feed user-defined coefficients, if available
if (!is.null(user.se)) { feed.se <- user.se } # feed user-defined std errors, if available
.global.t.stats <<- suppressMessages(cbind(.global.t.stats, .get.t.stats(object.name, user.t, auto.t, feed.coef, feed.se, user.coef, user.se, model.num=model.num)))
.global.p.values <<- suppressMessages(cbind(.global.p.values, .get.p.values(object.name, user.p, auto.p, feed.coef, feed.se, user.coef, user.se, model.num=model.num)))
.global.N <<- c(.global.N, suppressMessages(.number.observations(object.name)))
.global.LL <<- c(.global.LL, suppressMessages(.log.likelihood(object.name)))
.global.R2 <<- c(.global.R2, suppressMessages(.r.squared(object.name)))
.global.max.R2 <<- c(.global.max.R2, suppressMessages(.max.r.squared(object.name)))
.global.adj.R2 <<- c(.global.adj.R2, suppressMessages(.adj.r.squared(object.name)))
.global.AIC <<- c(.global.AIC, suppressMessages(.AIC(object.name)))
.global.BIC <<- c(.global.BIC, suppressMessages(.BIC(object.name)))
.global.scale <<- c(.global.scale, suppressMessages(.get.scale(object.name)))
.global.UBRE <<- c(.global.UBRE, suppressMessages(.gcv.UBRE(object.name)))
.global.sigma2 <<- c(.global.sigma2, suppressMessages(.get.sigma2(object.name)))
.global.rho <<- cbind(suppressMessages(.get.rho(object.name)))
.global.mills <<- cbind(suppressMessages(.get.mills(object.name)))
.global.theta <<- cbind(suppressMessages(.get.theta(object.name)))
.global.SER <<- cbind(suppressMessages(.SER(object.name)))
.global.F.stat <<- cbind(suppressMessages(.F.stat(object.name)))
.global.chi.stat <<- cbind(suppressMessages(.chi.stat(object.name)))
.global.wald.stat <<- cbind(suppressMessages(.wald.stat(object.name)))
.global.lr.stat <<- cbind(suppressMessages(.lr.stat(object.name)))
.global.logrank.stat <<- cbind(suppressMessages(.logrank.stat(object.name)))
.global.null.deviance <<- cbind(suppressMessages(.null.deviance(object.name)))
.global.residual.deviance <<- cbind(suppressMessages(.residual.deviance(object.name)))
}
}
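# Layout of the .global.* accumulators (illustrative note): .global.coefficients,
# .global.std.errors, .global.ci.lb/.global.ci.rb, .global.t.stats and
# .global.p.values are matrices with one row per coefficient variable and one
# column per model; .global.N, .global.LL, .global.R2 and friends are vectors
# with one entry per model. .new.table() fills in the first model's column(s),
# .add.model() (defined elsewhere in this file) appends the rest, and
# .publish.table() renders the accumulated state.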
.null.deviance <-
function(object.name) {
null.deviance.output <- as.vector(rep(NA,times=3))
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH","Arima","coeftest","Gls","lmer","glmer","nlmer", "ergm"))) {
if (model.name %in% c("rem.dyad", "mclogit")) {
null.deviance.value <- object.name$null.deviance
null.deviance.output <- as.vector(c(null.deviance.value, NA, NA))
}
else if (model.name %in% c("maBina")) {
null.deviance.value <- object.name$w$null.deviance
df.value <- object.name$w$df.null
null.deviance.output <- as.vector(c(null.deviance.value, df.value, NA))
}
else if (!is.null(suppressMessages(.summary.object$null.deviance))) {
null.deviance.value <- suppressMessages(.summary.object$null.deviance)
df.value <- object.name$df.null
null.deviance.output <- as.vector(c(null.deviance.value, df.value, NA))
}
else if (!is.null(object.name$null.deviance)) {
null.deviance.value <- object.name$null.deviance
df.value <- object.name$df.null
null.deviance.output <- as.vector(c(null.deviance.value, df.value, NA))
}
}
names(null.deviance.output) <- c("statistic","df1","p-value")
return(cbind(null.deviance.output))
}
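# Convention (shared with .residual.deviance, .SER, .F.stat and the other test
# statistics in this file): each getter returns a named column vector
# c(statistic = ..., df1 = ..., "p-value" = ...), with NA for whatever a given
# model class does not provide; .print.table.statistic() indexes these rows by
# name when printing.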
.number.observations <-
function(object.name) {
model.name <- .get.model.name(object.name)
if (model.name %in% c("ls", "normal", "logit", "probit", "relogit",
"poisson", "negbin", "normal.survey", "poisson.survey",
"probit.survey", "logit.survey", "gamma", "gamma.survey",
"z.arima", "brglm","glm()", "Glm()", "svyglm()")) {
return(length(object.name$residuals))
}
else if (model.name %in% c("fGARCH")) {
return(length(object.name@data))
}
else if (model.name %in% c("maBina")) {
return(length(object.name$w$residuals))
}
else if (model.name %in% c("mlogit")) {
return(sum(object.name$freq))
}
else if (model.name %in% c("felm")) {
return(object.name$N)
}
else if (model.name %in% c("mclogit")) {
return(object.name$N)
}
else if (model.name %in% c("selection", "heckit")) {
return(.summary.object$param$nObs)
}
else if (model.name %in% c("binaryChoice", "probit.ss")) {
return(object.name$param$nObs)
}
else if (model.name %in% c("lmer","glmer","nlmer")) {
return(length(resid(object.name)))
}
else if (model.name %in% c("gmm")) {
return(object.name$n)
}
else if (model.name %in% c("plm", "pgmm", "pmg", "rlm", "lmrob", "glmrob", "dynlm", "rq", "lagsarlm", "errorsarlm", "rem.dyad")) {
return(as.vector(length(object.name$residual)))
}
else if (model.name %in% c("mnlogit")) {
return(as.vector(.summary.object$model.size$N))
}
else if (model.name %in% c("hurdle", "zeroinfl")) {
return(as.vector(object.name$n))
}
else if (model.name %in% c("ivreg","clm","hetglm")) {
return(as.vector(object.name$nobs))
}
if (model.name %in% c("normal.gee", "logit.gee", "poisson.gee",
"probit.gee", "gamma.gee", "gee()", "betareg")) {
return(as.vector(.summary.object$nobs))
}
else if (model.name %in% c("normal.gam", "logit.gam", "probit.gam",
"poisson.gam", "coxph", "clogit", "exp", "lognorm", "weibull", "survreg()",
"gam()")) {
return(as.vector(.summary.object$n))
}
else if (model.name %in% c("ologit", "oprobit", "polr()")) {
return(as.vector(.summary.object$nobs))
}
else if (model.name %in% c("gls")) {
return(as.vector(object.name$dims$N))
}
else if (model.name %in% c("tobit(AER)")) {
return(as.vector(.summary.object$n["Total"]))
}
else if (model.name %in% c("Arima","censReg","lme","nlme","weibreg", "coxreg", "phreg", "aftreg", "bj", "cph", "Gls", "lrm", "ols", "psm", "Rq")) {
return(as.vector(nobs(object.name)))
}
return(NA)
}
.rename.intercept <-
function(x) {
out <- x
for (i in seq(1:length(x))) {
if (x[i] %in% .global.intercept.strings) {
out[i] <- .format.intercept.name
}
}
return(out)
}
.order.reg.table <-
function(order) {
# first, find the position of the intercept and rename the variable to be the intercept string
intercept.position <- NULL
for (i in seq(1:length(.global.coefficient.variables))) {
if (.global.coefficient.variables[i] %in% .global.intercept.strings) {
intercept.position <- i
.global.coefficient.variables[i] <<- .format.intercept.name
rownames(.global.coefficients)[i] <<- .format.intercept.name
rownames(.global.std.errors)[i] <<- .format.intercept.name
rownames(.global.ci.lb)[i] <<- .format.intercept.name
rownames(.global.ci.rb)[i] <<- .format.intercept.name
rownames(.global.t.stats)[i] <<- .format.intercept.name
rownames(.global.p.values)[i] <<- .format.intercept.name
}
}
# put intercept on bottom if necessary
if (!is.null(intercept.position)) {
# hold contents of last row in placeholder variables
placehold.coefficient.variables <- .global.coefficient.variables[-intercept.position]
intercept.coefficient.variables <- .global.coefficient.variables[intercept.position]
if (.format.intercept.bottom) {
.global.coefficient.variables <<- c(placehold.coefficient.variables, intercept.coefficient.variables)
}
if (.format.intercept.top) {
.global.coefficient.variables <<- c(intercept.coefficient.variables, placehold.coefficient.variables)
}
}
# order according to user's wishes
old.order <- 1:length(.global.coefficient.variables)
new.order <- NULL; add.these <- NULL
if (!is.null(order)) {
# if order is regular expression...
if (is.character(order)) {
not.ordered.yet <- .global.coefficient.variables
for (i in 1:length(order)) {
add.these <- grep(order[i], not.ordered.yet, perl=.format.perl, fixed=FALSE)
not.ordered.yet[add.these] <- NA
if (length(add.these) != 0) {
new.order <- c(new.order, add.these)
}
}
}
else if (is.numeric(order)) { # if order contains indices
order <- unique(order)
order <- order[order <= max(old.order)]
new.order <- old.order[order]
}
}
if (!is.null(new.order)) {
remainder <- old.order[-new.order]
new.order <- c(new.order, remainder)
}
else { new.order <- old.order }
# set the right order
.global.coefficient.variables[old.order] <<- .global.coefficient.variables[new.order]
}
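# Illustrative example: with coefficient variables
# c("age", "education", "income", "tenure"),
#   order = c("edu", "inc")   # regular expressions
# reorders them to c("education", "income", "age", "tenure") -- matched
# variables first, in pattern order, remainder appended -- while
#   order = c(3, 1)           # numeric indices
# yields c("income", "age", "education", "tenure").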
.insert.col.front <- function(d, new.col) {
# values
d.new <- d
d.new[,seq(2,ncol(d)+1)] <- d[,seq(1,ncol(d))]
d.new[,1] <- new.col
# column names
if (!is.null(colnames(d))) {
colnames(d.new)[seq(2,ncol(d)+1)] <- colnames(d)[seq(1,ncol(d))]
colnames(d.new)[1] <- ""
}
return(d.new)
}
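# e.g. (illustrative): for d <- data.frame(x = 1:2, y = 3:4),
# .insert.col.front(d, c("a", "b")) returns a data frame whose first column
# (with an empty name) is c("a", "b"), followed by the original x and y;
# .order.data.frame() uses this to prepend row names when .format.rownames is TRUE.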
.order.data.frame <-
function(d, order, summary=FALSE) {
if ((.format.rownames == TRUE) && (summary == FALSE)) { # if we want to report rownames, add them to data frame
if (!is.null(rownames(d))) { d <- .insert.col.front(d, rownames(d)) }
}
# order according to user's wishes
old.order <- 1:length(colnames(d))
new.order <- NULL; add.these <- NULL
if (!is.null(order)) {
# if order is regular expression...
if (is.character(order)) {
not.ordered.yet <- colnames(d)
for (i in 1:length(order)) {
        add.these <- grep(order[i], not.ordered.yet, perl=.format.perl, fixed=FALSE)
not.ordered.yet[add.these] <- NA
if (length(add.these) != 0) {
new.order <- c(new.order, add.these)
}
}
}
else if (is.numeric(order)) { # if order contains indices
order <- unique(order)
order <- order[order <= max(old.order)]
new.order <- old.order[order]
}
}
if (!is.null(new.order)) {
remainder <- old.order[-new.order]
new.order <- c(new.order, remainder)
}
else { new.order <- old.order }
return( d[new.order] )
}
.print.additional.lines <-
function(part.number=NULL) {
# if no additional lines, then quit the function
if (is.null(.format.add.lines)) { return(NULL) }
max.l <- length(.global.models)+1
for (line in 1:length(.format.add.lines)) {
## add columns if too few, remove if too many
if (max.l > length(.format.add.lines[[line]])) {
.format.add.lines[[line]] <- c(.format.add.lines[[line]], rep(NA, times=max.l - length(.format.add.lines[[line]])))
}
else if (max.l < length(.format.add.lines[[line]])) {
.format.add.lines[[line]] <- .format.add.lines[[line]][1:max.l]
}
## print each line
for (i in 1:max.l) {
if (!is.na(.format.add.lines[[line]][i])) {
if (i==1) {
cat(.format.add.lines[[line]][i], sep="")
}
else {
cat(" & ",.format.add.lines[[line]][i], sep="")
}
}
else {
if (i==1) {
cat(" ", sep="")
}
else {
cat(" & ", sep="")
}
}
}
cat(" \\\\ \n")
}
.table.part.published[part.number] <<- TRUE
}
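# Illustrative example: with three models in the table,
#   add.lines = list(c("Controls", "Yes", "No"))
# is padded with NA to length(.global.models) + 1 = 4 entries, so the row prints as
#   Controls & Yes & No &  \\
# (NA entries become empty cells; entries beyond the table width are dropped).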
.print.table.statistic <-
function(.global.var.name, .format.var.name, decimal.digits=.format.round.digits, part.string="", part.number=NULL, type.se=FALSE) {
# default values
report.df <- FALSE
report.p.value <- FALSE
significance.stars <- FALSE
report.se <- FALSE
report.tstat <- FALSE
intelligent.df <- .format.intelligent.df
force.math <- FALSE
# reporting of df, p-value, significance stars, standard errors, t-stats
if (length(grep("(df)", part.string,fixed=TRUE))!=0) { report.df <- TRUE }
if (length(grep("(se)", part.string,fixed=TRUE))!=0) { report.se <- TRUE }
if (length(grep("(t)", part.string,fixed=TRUE))!=0) { report.tstat <- TRUE }
if (length(grep("(p)", part.string,fixed=TRUE))!=0) { report.p.value <- TRUE }
if (length(grep("*", part.string,fixed=TRUE))!=0) { significance.stars <- TRUE }
# first for vectors (statistics without, say, degrees of freedom)
if (is.vector(.global.var.name) == TRUE) {
if (sum(!is.na(.global.var.name))!=0) {
cat (.format.var.name)
for (i in seq(1:length(.global.models))) {
if (!is.na(.global.var.name[i])) {
if (.format.dec.mark.align == TRUE) {
cat(" & \\multicolumn{1}{c}{",.iround(.global.var.name[i], decimal.digits),"}", sep="")
}
else {
cat(" & ",.iround(.global.var.name[i], decimal.digits), sep="")
}
}
else { cat(" & ", sep="") }
}
cat(" \\\\ \n")
.table.part.published[part.number] <<- TRUE
}
}
else if ((is.matrix(.global.var.name) == TRUE) && (type.se == FALSE)) { # for statistics that have degrees of freedom
if (sum(!is.na(as.vector(.global.var.name["statistic",])))!=0) {
      # intelligent df reporting (decide whether to report the df once, to the left of the statistics, or next to each statistic)
report.df.left.column <- FALSE
# whittle down unique values
df.all.together <- NULL
for (i in seq(1:length(.global.models))) {
df.string <- ""
for (j in seq(1:(nrow(.global.var.name)- 2))) {
df.string <- paste(df.string,";",as.character(.global.var.name[paste("df",as.character(j),sep=""),i]),sep="")
}
df.all.together <- append(df.all.together, df.string)
}
      # drop entries whose first df is NA
df.all.together.no.NA <- NULL
for (i in seq(1:length(df.all.together))) {
if (substr(df.all.together[i],1,3)!=";NA") { df.all.together.no.NA <- c(df.all.together.no.NA, df.all.together[i]) }
}
df.all.together.no.NA.unique <- sort(unique(df.all.together.no.NA))
# put df on the left if only one unique df in the table, and not just one column w/ given df
if (intelligent.df == TRUE) {
if ((length(df.all.together.no.NA.unique)==1) && (length(df.all.together.no.NA)>=2)) { report.df.left.column <- TRUE }
}
# write down the line
cat (.format.var.name)
# report df on left side w/ intelligent reporting
if (report.df.left.column == TRUE) {
if (report.df == TRUE) {
cat(" ",.format.df.left,sep="")
df.list <- unlist(strsplit(df.all.together.no.NA.unique[1],";"))
for (i in seq(from=2, to=length(df.list))) {
if (i>=3) { cat(.format.df.separator) }
cat(df.list[i],sep="")
}
cat(.format.df.right,sep="")
}
}
# now, go column by column
for (i in seq(1:length(.global.models))) {
if (!is.na(.global.var.name["statistic",i])) {
if (.format.dec.mark.align==TRUE) {
cat(" & \\multicolumn{1}{c}{",.iround(.global.var.name["statistic",i], decimal.digits), sep="")
force.math <- TRUE
}
else {
cat(" & ",.iround(.global.var.name["statistic",i], decimal.digits), sep="")
}
# significance stars
if ((significance.stars == TRUE) && (!is.na(.global.var.name["p-value",i]))) { .enter.significance.stars(.global.var.name["p-value",i], force.math) }
# degrees of freedom - only report by statistics if not in the left column already
if (report.df.left.column == FALSE) {
if ((report.df == TRUE) && (!is.na(.global.var.name["df1",i]))) {
cat(" ",.format.df.left,sep="")
for (j in seq(1:(nrow(.global.var.name)- 2))) {
if (!is.na(.global.var.name[paste("df",as.character(j),sep=""),i])) {
if (j>=2) { cat(.format.df.separator) }
cat(.global.var.name[paste("df",as.character(j),sep=""),i],sep="")
}
}
cat(.format.df.right,sep="")
}
}
# p-values
          if ((report.p.value == TRUE) && (!is.na(.global.var.name["p-value",i]))) {
            cat(" ",.format.p.value.left,sep="")
            cat(.iround(.global.var.name["p-value",i],.format.round.digits, round.up.positive=TRUE),sep="")
            cat(.format.p.value.right,sep="")
          }
          if (.format.dec.mark.align==TRUE) {
            cat("}")
          }
}
else { cat(" & ", sep="") }
}
cat(" \\\\ \n")
.table.part.published[part.number] <<- TRUE
}
}
else if ((is.matrix(.global.var.name) == TRUE) && (type.se == TRUE)) { # for statistics that have a standard error
if (sum(!is.na(as.vector(.global.var.name["statistic",])))!=0) {
# write down the line
cat (.format.var.name)
# now, go column by column
for (i in seq(1:length(.global.models))) {
if (!is.na(.global.var.name["statistic",i])) {
if (.format.dec.mark.align == TRUE) {
cat(" & \\multicolumn{1}{c}{",.iround(.global.var.name["statistic",i], decimal.digits), sep="")
}
else {
cat(" & ",.iround(.global.var.name["statistic",i], decimal.digits), sep="")
}
# significance stars
if ((significance.stars == TRUE) && (!is.na(.global.var.name["p-value",i]))) { .enter.significance.stars(.global.var.name["p-value",i], force.math) }
# standard errors
if ((report.se == TRUE) && (!is.na(.global.var.name["se",i]))) { cat(" ",.format.se.left,.iround(.global.var.name["se",i], decimal.digits),.format.se.right,sep="") }
# t-statistics
if ((report.tstat == TRUE) && (!is.na(.global.var.name["tstat",i]))) { cat(" ",.format.tstat.left, .iround(.global.var.name["tstat",i], decimal.digits),.format.tstat.right,sep="") }
# p-values
if ((report.p.value == TRUE) && (!is.na(.global.var.name["p-value",i]))) { cat(" ",.format.p.value.left,.iround(.global.var.name["p-value",i], decimal.digits),.format.p.value.right,sep="") }
          if (.format.dec.mark.align == TRUE) {
            cat("}")
          }
}
else { cat(" & ", sep="") }
}
cat(" \\\\ \n")
.table.part.published[part.number] <<- TRUE
}
}
}
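# part.string is scanned for literal tokens rather than parsed (illustrative):
# "(df)", "(se)", "(t)", "(p)" and "*" anywhere in the string switch on the
# reporting of degrees of freedom, standard errors, t-statistics, p-values and
# significance stars, respectively. E.g. part = "F statistic(df)*" prints the
# F statistic with its df and stars; part = "rho(se)" (dispatched with
# type.se=TRUE) prints rho with its standard error.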
.publish.table <-
function() {
.table.info.comment()
# table header
.table.header()
.table.insert.space()
.table.part.published <<- as.vector(rep(NA, times=length(.format.table.parts))) # to keep track what has been published (to deal intelligently with horizontal lines)
.publish.horizontal.line <<- TRUE # should non-compulsory horizontal lines be published? (yes, if something else published since the previous line)
if (length(.format.table.parts)>=1) {
for (i in seq(1:length(.format.table.parts))) {
.publish.table.part(part=.format.table.parts[i], which.part.number=i)
if (.table.part.published[i]==TRUE) { .publish.horizontal.line <<- TRUE }
if ((.format.table.parts[i]=="-") || (.format.table.parts[i]=="-!") || (.format.table.parts[i]=="=") || (.format.table.parts[i]=="=!")) { .publish.horizontal.line <<- FALSE }
}
}
cat("\\end{tabular} \n")
if (.format.floating == TRUE) { cat("\\end{", .format.floating.environment,"} \n", sep="") }
else if (!is.null(.format.font.size)) {
cat("\\endgroup \n",sep="")
}
}
.publish.table.part <-
function(part, which.part.number) {
.table.part.published[which.part.number] <<- FALSE
# dependent variable label line
if (part=="dependent variable label") {
if (.format.dependent.variable.text.on == TRUE) {
cat(" & \\multicolumn{",length(.global.models),"}{c}{",.format.dependent.variable.text, "} \\\\ \n", sep="")
if (.format.dependent.variable.text.underline == TRUE) { cat("\\cline{2-",length(.global.models)+1,"} \n", sep="") }
}
.table.part.published[which.part.number] <<- TRUE
}
# dependent variables
else if (part=="dependent variables") {
.table.insert.space()
cat(.format.dependent.variables.text)
how.many.columns <- 0
label.counter <- 0
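    # Consecutive models that share a dependent variable are merged into a
    # single \multicolumn cell (unless .format.multicolumn is FALSE), e.g.
    # (illustrative) dependent variables c("y1", "y1", "y2") print as:
    #   & \multicolumn{2}{c}{y1} & y2 \\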
for (i in seq(1:length(.global.models))) {
if (is.null(.format.dep.var.labels)) { .format.dep.var.labels <<- NA }
how.many.columns <- how.many.columns + 1
# write down if next column has different dependent variable, or if end of columns
different.dependent.variable <- FALSE
if (i == length(.global.models)) {different.dependent.variable <- TRUE}
else if ((as.character(.global.dependent.variables[i])) != (as.character(.global.dependent.variables[i+1]))) {different.dependent.variable <- TRUE}
if (.format.multicolumn==FALSE) { different.dependent.variable <- TRUE }
if (different.dependent.variable == TRUE) {
label.counter <- label.counter + 1
if (how.many.columns == 1) {
if (.format.dec.mark.align==TRUE) {
if (is.na(.format.dep.var.labels[label.counter])) {
if (.format.dependent.variables.capitalize == TRUE) { cat(" & \\multicolumn{1}{c}{",.format.dependent.variables.left,toupper(as.character(.global.dependent.variables.written[i])),.format.dependent.variables.right,"}", sep="") }
else { cat(" & \\multicolumn{1}{c}{",.format.dependent.variables.left,as.character(.global.dependent.variables.written[i]),.format.dependent.variables.right,"}", sep="") }
}
else { cat(" & \\multicolumn{1}{c}{",.format.dependent.variables.left,.format.dep.var.labels[label.counter],.format.dependent.variables.right,"}", sep="") }
}
else {
if (is.na(.format.dep.var.labels[label.counter])) {
if (.format.dependent.variables.capitalize == TRUE) { cat(" & ",.format.dependent.variables.left,toupper(as.character(.global.dependent.variables.written[i])),.format.dependent.variables.right, sep="") }
else { cat(" & ",.format.dependent.variables.left,as.character(.global.dependent.variables.written[i]),.format.dependent.variables.right, sep="") }
}
else { cat(" & ",.format.dependent.variables.left,.format.dep.var.labels[label.counter],.format.dependent.variables.right, sep="") }
}
}
else {
if (is.na(.format.dep.var.labels[label.counter])) {
if (.format.dependent.variables.capitalize == TRUE) {cat(" & \\multicolumn{",how.many.columns,"}{c}{",.format.dependent.variables.left,toupper(as.character(.global.dependent.variables.written[i])),.format.dependent.variables.right,"}", sep="")}
else {cat(" & \\multicolumn{",how.many.columns,"}{c}{",.format.dependent.variables.left,as.character(.global.dependent.variables.written[i]),.format.dependent.variables.right,"}", sep="")}
}
else {cat(" & \\multicolumn{",how.many.columns,"}{c}{",.format.dependent.variables.left,.format.dep.var.labels[label.counter],.format.dependent.variables.right,"}", sep="")}
}
how.many.columns <- 0
}
}
cat(" \\\\ \n")
.table.part.published[which.part.number] <<- TRUE
}
# models
else if (part=="models") {
if ((.format.model.names.include==TRUE) && ((.format.models.skip.if.one == FALSE) || ((.format.models.skip.if.one == TRUE) && (length(unique(.global.models))>=2)))) {
.table.insert.space()
cat(.format.models.text)
# rename models based on .formatting preferences
renamed.global.models <- as.matrix(rbind(.global.models, rep("", times=length(.global.models))))
for (i in seq(1:length(.global.models))) {
for (j in seq(1:ncol(.format.model.names))) {
model.strsplit <- unlist(strsplit(.global.models[i], split="#"))
if (.global.models[i]==.format.model.names[1,j]) {
renamed.global.models[1,i] <- .format.model.names[2,j]
renamed.global.models[2,i] <- .format.model.names[3,j]
}
else if ((model.strsplit[1]=="glm()") || (model.strsplit[1]=="svyglm()") || (model.strsplit[1]=="gee()") || (model.strsplit[1]=="gam()")) {
if ( .format.model.function == TRUE ) { renamed.global.models[1,i] <- paste(substr(model.strsplit[1],1,nchar(model.strsplit[1])-2),": ", .format.model.family, model.strsplit[2], sep="") }
else { renamed.global.models[1,i] <- paste(.format.model.family, model.strsplit[2], sep="")}
renamed.global.models[2,i] <- paste(.format.model.link, model.strsplit[3], sep="")
}
else if ((model.strsplit[1]=="survreg()") || (model.strsplit[1]=="polr()")) {
if ( .format.model.function == TRUE ) { renamed.global.models[1,i] <- paste(substr(model.strsplit[1],1,nchar(model.strsplit[1])-2),": ", .format.model.dist, model.strsplit[2], sep="") }
else { renamed.global.models[1,i] <- paste(.format.model.dist, model.strsplit[2], sep="")}
renamed.global.models[2,i] <- ""
}
}
}
if (sum(renamed.global.models[2,]==rep("", times=length(.global.models)))==length(.global.models)) { how.many.model.rows <- 1}
else { how.many.model.rows <- 2 }
for (row in seq(from=1, to=how.many.model.rows)) {
how.many.columns <- 0
for (i in seq(1:length(.global.models))) {
how.many.columns <- how.many.columns + 1
# write down if next column has different dependent variable, or if end of columns
different.model <- FALSE
if (i == length(.global.models)) {different.model <- TRUE}
else if ((as.character(.global.models[i])) != (as.character(.global.models[i+1]))) {different.model <- TRUE}
else if ((as.character(.global.dependent.variables[i])) != (as.character(.global.dependent.variables[i+1]))) {different.model <- TRUE} # subsume models under dependent variables
if (.format.multicolumn==FALSE) { different.model <- TRUE }
if (different.model == TRUE) {
if (how.many.columns == 1) {
if (.format.dec.mark.align == TRUE) {
cat(" & \\multicolumn{1}{c}{",.format.models.left,as.character(renamed.global.models[row,i]),.format.models.right,"}", sep="")
}
else {
cat(" & ",.format.models.left,as.character(renamed.global.models[row,i]),.format.models.right, sep="")
}
}
else {cat(" & \\multicolumn{",how.many.columns,"}{c}{",.format.models.left,as.character(renamed.global.models[row,i]),.format.models.right,"}", sep="")}
how.many.columns <- 0
}
}
cat(" \\\\ \n")
}
# underline models
if (.format.underline.models == TRUE) {
how.many.columns <- 0
for (i in seq(1:length(.global.models))) {
how.many.columns <- how.many.columns + 1
# underline if next column has different dependent variable, or if end of columns
different.model <- FALSE
if (i == length(.global.models)) {different.model <- TRUE}
else if ((as.character(.global.models[i])) != (as.character(.global.models[i+1]))) {different.model <- TRUE}
else if ((as.character(.global.dependent.variables[i])) != (as.character(.global.dependent.variables[i+1]))) {different.model <- TRUE} # subsume models under dependent variables
if (different.model== TRUE) {
cat("\\cline{",(i-how.many.columns+1)+1,"-",i+1,"} ",sep="")
how.many.columns <- 0
}
}
cat("\n")
}
.table.part.published[which.part.number] <<- TRUE
}
}
# column labels
else if (part=="columns") {
if (!is.null(.format.column.labels)) {
if (is.null(.format.column.separate)) { .format.column.separate <- 1 }
# adjust column.separate to have the same number of columns as the table
models.in.table <- length(.global.models)
models.in.col <- 0
for (i in seq(1:length(.format.column.separate))) { # count up how many models in column.separate
models.in.col <- models.in.col + .format.column.separate[i]
}
excess <- models.in.table - models.in.col
# if too few column labels, add ones to column.separate
if (excess > 0) {
last.index <- length(.format.column.separate)
for (i in seq(1:excess)) {
.format.column.separate[last.index + i] <- 1
}
}
# if too many column labels, then cut down
if (excess < 0) {
col.total <- 0
new.format.column.separate <- NULL
for(i in seq(1:length(.format.column.separate))) {
col.total <- col.total + .format.column.separate[i]
if (col.total > models.in.table) {
new.format.column.separate[i] <- .format.column.separate[i] - (col.total - models.in.table)
if (new.format.column.separate[i] == 0) { new.format.column.separate <- new.format.column.separate[-i] }
break
}
else {
new.format.column.separate[i] <- .format.column.separate[i]
}
}
.format.column.separate <- new.format.column.separate
}
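      # Worked example (illustrative): with 5 models in the table,
      #   column.separate = c(2, 2) is padded to c(2, 2, 1), and
      #   column.separate = c(3, 4) is trimmed to c(3, 2),
      # so the labels always span exactly length(.global.models) model columns.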
# output column labels
col.position <- 1
for (i in seq(1:length(.format.column.separate))) {
if (is.null(.format.column.labels[col.position])) { .format.column.labels[col.position] <- "" }
if (is.na(.format.column.labels[col.position])) { .format.column.labels[col.position] <- "" }
if (.format.column.separate[i]==1) {
if (.format.dec.mark.align==TRUE) {
cat(" & \\multicolumn{1}{c}{",.format.column.left,.format.column.labels[col.position],.format.column.right,"}", sep="")
}
else {
cat(" & ",.format.column.left,.format.column.labels[col.position],.format.column.right, sep="")
}
}
else {
cat(" & \\multicolumn{",.format.column.separate[i],"}{c}{",.format.column.left,.format.column.labels[col.position],.format.column.right,"}", sep="")
}
col.position <- col.position + 1
}
cat(" \\\\ \n")
}
}
# numbers
else if (part=="numbers") {
if ((.format.model.numbers == TRUE) && (length(.global.models)>1)) {
.table.insert.space()
cat(.format.numbers.text)
for (i in seq(1:length(.global.models))) {
if (.format.dec.mark.align==TRUE) {
if (.format.numbers.roman == TRUE) { cat(" & \\multicolumn{1}{c}{",.format.numbers.left,.roman.numeral(i),.format.numbers.right,"}", sep="") }
else { cat(" & \\multicolumn{1}{c}{",.format.numbers.left,i,.format.numbers.right,"}", sep="") }
}
else {
if (.format.numbers.roman == TRUE) { cat(" & ",.format.numbers.left,.roman.numeral(i),.format.numbers.right, sep="") }
else { cat(" & ",.format.numbers.left,i,.format.numbers.right, sep="") }
}
}
cat("\\\\ \n")
.table.part.published[which.part.number] <<- TRUE
}
}
  # object names
else if (part=="objects") {
if (.format.object.names == TRUE) {
.table.insert.space()
for (i in seq(1:length(.global.models))) {
if (.format.dec.mark.align==TRUE) {
cat(" & \\multicolumn{1}{c}{",.global.object.names[i],"}", sep="")
}
else {
cat(" & ",.global.object.names[i], sep="")
}
}
cat("\\\\ \n")
.table.part.published[which.part.number] <<- TRUE
}
}
## coefficients
else if (part=="coefficients") {
.which.variable.label <<- 0
if (is.null(.format.covariate.labels)) { .format.covariate.labels <<- NA }
# then, enter the coefficients
for (i in seq(1:length(.global.coefficient.variables))) { .table.enter.coefficients(i) }
.table.part.published[which.part.number] <<- TRUE
}
# number of observations
else if (part=="N") { .print.table.statistic(.global.var.name=.global.N, .format.var.name=.format.N, decimal.digits=0, part.number=which.part.number) }
  # omitted-coefficient indicator rows (e.g., "Fixed effects? Yes/No")
else if (part=="omit") {
if ((!is.null(.format.omit.regexp)) && (!is.null(.format.omit.labels))) {
.format.omit.table <<- matrix(.format.omit.no, nrow=length(.format.omit.regexp), ncol=length(.global.models))
for (i in seq(1:length(.global.models))) {
for (j in seq(1:length(.format.omit.regexp))) {
for (k in seq(1:length(.global.coef.vars.by.model[,i]))) {
relevant.coef.var <- .global.coef.vars.by.model[k,i]
if (length(grep(.format.omit.regexp[j], relevant.coef.var, perl=.format.perl, fixed=FALSE))!=0) {
.format.omit.table[j,i] <<- .format.omit.yes
}
}
}
}
for (i in seq(1:length(.format.omit.regexp))) {
cat (.format.omit.labels[i])
for (j in seq(1:length(.global.models))) {
if (.format.dec.mark.align == TRUE) {
cat(" & \\multicolumn{1}{c}{",.format.omit.table[i,j],"}", sep="")
}
else {
cat(" & ",.format.omit.table[i,j], sep="")
}
}
cat(" \\\\ \n")
}
.table.part.published[which.part.number] <<- TRUE
}
}
# R-squared
else if (part=="R-squared") { .print.table.statistic(.global.var.name=.global.R2, .format.var.name=.format.R2, part.number=which.part.number) }
# max R-squared
else if (part=="max R-squared") { .print.table.statistic(.global.var.name=.global.max.R2, .format.var.name=.format.max.R2, part.number=which.part.number) }
# adjusted R-squared
else if (part=="adjusted R-squared") { .print.table.statistic(.global.var.name=.global.adj.R2, .format.var.name=.format.adj.R2, part.number=which.part.number) }
# log likelihood
else if (part=="log likelihood") { .print.table.statistic(.global.var.name=.global.LL, .format.var.name=.format.LL, part.number=which.part.number) }
# Akaike Information Criterion (AIC)
else if (part=="AIC") { .print.table.statistic(.global.var.name=.global.AIC, .format.var.name=.format.AIC, part.number=which.part.number) }
# Bayesian Information Criterion (BIC)
else if (part=="BIC") { .print.table.statistic(.global.var.name=.global.BIC, .format.var.name=.format.BIC, part.number=which.part.number) }
# Scale Parameter
else if (part=="scale") { .print.table.statistic(.global.var.name=.global.scale, .format.var.name=.format.scale, part.number=which.part.number) }
# UBRE
else if (part=="UBRE") { .print.table.statistic(.global.var.name=.global.UBRE, .format.var.name=.format.UBRE, part.number=which.part.number) }
# sigma2
else if (part=="sigma2") { .print.table.statistic(.global.var.name=.global.sigma2, .format.var.name=.format.sigma2, part.number=which.part.number) }
## with degrees of freedom
# residual standard error (sigma); standard error of the regression
else if (substr(part,1,nchar("SER"))=="SER") { .print.table.statistic(.global.var.name=.global.SER, .format.var.name=.format.SER, part.string=part, part.number=which.part.number) }
# F-statistic
else if (substr(part,1,nchar("F statistic"))=="F statistic") { .print.table.statistic(.global.var.name=.global.F.stat, .format.var.name=.format.F.stat, part.string=part, part.number=which.part.number) }
# theta
else if (substr(part,1,nchar("theta"))=="theta") { .print.table.statistic(.global.var.name=.global.theta, .format.var.name=.format.theta, part.string=part, part.number=which.part.number, type.se=TRUE) }
# rho
else if (substr(part,1,nchar("rho"))=="rho") { .print.table.statistic(.global.var.name=.global.rho, .format.var.name=.format.rho, part.string=part, part.number=which.part.number, type.se=TRUE) }
# Inverse Mills ratio
else if (substr(part,1,nchar("Mills"))=="Mills") { .print.table.statistic(.global.var.name=.global.mills, .format.var.name=.format.mills, part.string=part, part.number=which.part.number, type.se=TRUE) }
# Chi-squared
else if (substr(part,1,nchar("chi2"))=="chi2") { .print.table.statistic(.global.var.name=.global.chi.stat, .format.var.name=.format.chi.stat, part.string=part, part.number=which.part.number) }
# Wald Test
else if (substr(part,1,nchar("Wald"))=="Wald") { .print.table.statistic(.global.var.name=.global.wald.stat, .format.var.name=.format.wald.stat, part.string=part, part.number=which.part.number) }
# LR Test
else if (substr(part,1,nchar("LR"))=="LR") { .print.table.statistic(.global.var.name=.global.lr.stat, .format.var.name=.format.lr.stat, part.string=part, part.number=which.part.number) }
# Score (Logrank) Test
else if (substr(part,1,nchar("logrank"))=="logrank") { .print.table.statistic(.global.var.name=.global.logrank.stat, .format.var.name=.format.logrank.stat, part.string=part, part.number=which.part.number) }
# null deviance
else if (substr(part,1,nchar("null deviance"))=="null deviance") { .print.table.statistic(.global.var.name=.global.null.deviance, .format.var.name=.format.null.deviance, part.string=part, part.number=which.part.number) }
# residual deviance
else if (substr(part,1,nchar("residual deviance"))=="residual deviance") { .print.table.statistic(.global.var.name=.global.residual.deviance, .format.var.name=.format.residual.deviance, part.string=part, part.number=which.part.number) }
##
# single horizontal line, no matter what
else if (part=="-!") {
cat("\\hline ")
.table.insert.space()
cat(" \n")
.table.part.published[which.part.number] <<- TRUE
}
# single horizontal line, optional
else if (part=="-") {
if (.publish.horizontal.line==TRUE) {
cat("\\hline ")
.table.insert.space()
cat(" \n")
.table.part.published[which.part.number] <<- TRUE
}
}
# double horizontal line, no matter what
else if (part=="=!") {
cat("\\hline \n")
cat("\\hline ")
.table.insert.space()
cat(" \n")
.table.part.published[which.part.number] <<- TRUE
}
# double horizontal line
else if (part=="=") {
if (.publish.horizontal.line==TRUE) {
cat("\\hline \n")
cat("\\hline ")
.table.insert.space()
cat(" \n")
.table.part.published[which.part.number] <<- TRUE
}
}
# notes
else if (part=="notes") {
if (.format.note != "") { cat(.format.note) }
for (i in seq(1:length(.format.note.content))) {
# print individual notes
if (.format.note == "") { cat("\\multicolumn{",length(.global.models)+1,"}{",.format.note.alignment,"}{",.format.note.content[i],"} \\\\ \n", sep="") }
else { cat(" & \\multicolumn{",length(.global.models),"}{",.format.note.alignment,"}{",.format.note.content[i],"} \\\\ \n", sep="") }
}
.table.part.published[which.part.number] <<- TRUE
}
# empty line
else if (part==" ") {
.table.empty.line();
.table.part.published[which.part.number] <<- TRUE
}
# additional lines
else if (part=="additional") { .print.additional.lines(part.number=which.part.number) }
}
.r.squared <-
function(object.name) {
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH","Arima","maBina","coeftest","nlmer", "glmer", "lmer","Gls","Arima"))) {
if (model.name %in% c("heckit")) {
return(.summary.object$rSquared$R2)
}
if (model.name %in% c("felm")) {
return(.summary.object$r2)
}
if (model.name %in% c("mlogit")) {
return(.summary.object$mfR2[1])
}
if (model.name %in% c("plm")) {
return(as.vector(.summary.object$r.squared["rsq"]))
}
else if (model.name %in% c("betareg")) {
return(as.vector(.summary.object$pseudo.r.squared))
}
else if (!is.null(.summary.object$r.squared)) {
return(as.vector(.summary.object$r.squared))
}
else if (model.name %in% c("coxph", "clogit")) {
return(as.vector(.summary.object$rsq[1]))
}
else if (model.name %in% c("pmg")) {
return(as.vector(.summary.object$rsqr))
}
else if (model.name %in% c("cph","lrm","ols","psm")) {
return(as.vector(object.name$stats["R2"]))
}
}
return(NA)
}
.remove.special.chars <-
function(s) {
if (!is.character(s)) { s.out <- as.character(s) }
else { s.out <- s }
# this has to go first
s.out <- gsub("\\","\\textbackslash ",s.out,fixed=TRUE)
# basic special characters
s.out <- gsub("_","\\_",s.out,fixed=TRUE)
s.out <- gsub("#","\\#",s.out,fixed=TRUE)
s.out <- gsub("~","\\textasciitilde",s.out,fixed=TRUE)
s.out <- gsub("{","\\{",s.out,fixed=TRUE)
s.out <- gsub("}","\\}",s.out,fixed=TRUE)
s.out <- gsub("%","\\%",s.out,fixed=TRUE)
s.out <- gsub("$","\\$",s.out,fixed=TRUE)
# pre-defined text-mode commands (add more?)
s.out <- gsub("*","\\textasteriskcentered ",s.out,fixed=TRUE)
s.out <- gsub("|","\\textbar ",s.out,fixed=TRUE)
s.out <- gsub(">","\\textgreater ",s.out,fixed=TRUE)
s.out <- gsub("<","\\textless ",s.out,fixed=TRUE)
# more substitutions
s.out <- gsub("^","$\\hat{\\mkern6mu}$",s.out,fixed=TRUE)
return(s.out)
}
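# e.g. (illustrative): .remove.special.chars("size_50%") returns the string
# "size\\_50\\%" (the LaTeX source size\_50\%), and ">" becomes
# "\\textgreater "; backslashes are escaped first so that the escapes
# introduced by the later substitutions are not themselves re-escaped.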
.residual.deviance <-
function(object.name) {
residual.deviance.output <- as.vector(rep(NA,times=3))
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH","Arima","coeftest", "Gls","multinom","lmer","glmer","nlmer"))) {
if (model.name %in% c("rem.dyad")) {
residual.deviance.value <- object.name$residual.deviance
residual.deviance.output <- as.vector(c(residual.deviance.value, NA, NA))
}
else if (model.name %in% c("mclogit")) {
residual.deviance.value <- object.name$deviance
residual.deviance.output <- as.vector(c(residual.deviance.value, NA, NA))
}
else if (model.name %in% c("maBina")) {
residual.deviance.value <- object.name$w$deviance
df.value <- object.name$w$df.residual
residual.deviance.output <- as.vector(c(residual.deviance.value, df.value, NA))
}
else if (!is.null(.summary.object$deviance)) {
residual.deviance.value <- suppressMessages(.summary.object$deviance)
df.value <- object.name$df.residual
residual.deviance.output <- as.vector(c(residual.deviance.value, df.value, NA))
}
else if (!is.null(object.name$deviance)) {
residual.deviance.value <- object.name$deviance
df.value <- object.name$df.residual
residual.deviance.output <- as.vector(c(residual.deviance.value, df.value, NA))
}
}
names(residual.deviance.output) <- c("statistic","df1","p-value")
return(cbind(residual.deviance.output))
}
.roman.numeral <-
function(regular.number) {
# unique representation only for integers between 1 and 3899
if ((regular.number < 1) || (regular.number > 3899)) {
return(NULL)
}
else {
roman.output <- ""
number.remaining <- regular.number
while (number.remaining > 999) {
roman.output <- paste(roman.output, "M", sep="")
number.remaining <- number.remaining - 1000
}
if (number.remaining > 899) {
roman.output <- paste(roman.output, "CM", sep="")
number.remaining <- number.remaining - 900
}
if (number.remaining > 499) {
roman.output <- paste(roman.output, "D", sep="")
number.remaining <- number.remaining - 500
}
if (number.remaining > 399) {
roman.output <- paste(roman.output, "CD", sep="")
number.remaining <- number.remaining - 400
}
while (number.remaining > 99) {
roman.output <- paste(roman.output, "C", sep="")
number.remaining <- number.remaining - 100
}
if (number.remaining > 89) {
roman.output <- paste(roman.output, "XC", sep="")
number.remaining <- number.remaining - 90
}
if (number.remaining > 49) {
roman.output <- paste(roman.output, "L", sep="")
number.remaining <- number.remaining - 50
}
if (number.remaining > 39) {
roman.output <- paste(roman.output, "XL", sep="")
number.remaining <- number.remaining - 40
}
while (number.remaining > 9) {
roman.output <- paste(roman.output, "X", sep="")
number.remaining <- number.remaining - 10
}
if (number.remaining > 8) {
roman.output <- paste(roman.output, "IX", sep="")
number.remaining <- number.remaining - 9
}
if (number.remaining > 4) {
roman.output <- paste(roman.output, "V", sep="")
number.remaining <- number.remaining - 5
}
if (number.remaining > 3) {
roman.output <- paste(roman.output, "IV", sep="")
number.remaining <- number.remaining - 4
}
while (number.remaining > 0) {
roman.output <- paste(roman.output, "I", sep="")
number.remaining <- number.remaining - 1
}
return(roman.output)
}
}
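# e.g. (illustrative): .roman.numeral(4) == "IV", .roman.numeral(1949) == "MCMXLIX",
# .roman.numeral(2024) == "MMXXIV"; out-of-range inputs (< 1 or > 3899) return
# NULL. Used for model numbers when .format.numbers.roman is TRUE.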
.SER <-
function(object.name) {
SER.output <- as.vector(rep(NA,times=3))
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","lme","nlme","fGARCH","Arima","maBina","coeftest","lmer","glmer","nlmer","gls","Gls"))) {
if (model.name %in% c("felm")) {
SER.output <- as.vector(c(.summary.object$rse, .summary.object$rdf, NA))
}
else if (!is.null(suppressMessages(.summary.object$sigma))) {
sigma.value <-suppressMessages(.summary.object$sigma)
if (model.name %in% c("rlm")) {
df.residual.value <- .summary.object$df[2]
}
else {
df.residual.value <- object.name$df.residual
}
SER.output <- as.vector(c(sigma.value, df.residual.value, NA))
}
}
names(SER.output) <- c("statistic","df1","p-value")
return(cbind(SER.output))
}
.stargazer.reg.table <-
function(...) {
list.of.models <- as.list(list(...))
how.many.models <- length(list.of.models)
# find how many models user wants to customize
# max.user <- max(length(coef),length(se),length(t),length(p),length(ci.custom))
length(coef) <<- length(se) <<- length(t) <<- length(p) <<- length(ci.custom) <<- how.many.models
if (how.many.models >= 1) {
suppressMessages(.new.table(list.of.models[[1]], user.coef=coef[[1]], user.se=se[[1]], user.t=t[[1]], user.p=p[[1]], auto.t=t.auto, auto.p=p.auto, user.ci.lb=ci.custom[[1]][,1], user.ci.rb=ci.custom[[1]][,2]))
if (how.many.models >= 2) {
for (i in seq(from = 2,to = how.many.models)) {
#if (i <= max.user) {
suppressMessages(.add.model(list.of.models[[i]], user.coef=coef[[i]], user.se=se[[i]], user.t=t[[i]], user.p=p[[i]], auto.t=t.auto, auto.p=p.auto, user.ci.lb=ci.custom[[i]][,1], user.ci.rb=ci.custom[[i]][,2]))
#}
#else {
# suppressMessages(.add.model(list.of.models[[i]], user.coef=NULL, user.se=NULL, user.t=NULL, user.p=NULL, auto.t=t.auto, auto.p=p.auto, user.ci.lb=NULL, user.ci.rb=NULL))
#}
}
}
.apply(auto.t=t.auto, auto.p=p.auto)
.order.reg.table(order)
suppressMessages(.publish.table())
}
}
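# Note (illustrative): coef, se, t, p and ci.custom are the user-supplied lists
# from the public stargazer() call; the length<- assignment above extends them
# to one entry per model, and empty entries simply fall through to the values
# extracted automatically inside .new.table() / .add.model().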
.set.font.size <-
function() {
if (!is.null(.format.font.size)) {
cat("\\", .format.font.size," \n", sep="")
}
}
.floating.header <-
function() {
if (.format.floating==TRUE) {
cat("\\begin{", .format.floating.environment,"}[", .format.table.placement,"] \\centering \n",sep="")
cat(" \\caption{", .format.title, "} \n",sep="")
cat(" \\label{", .format.label, "} \n",sep="")
.set.font.size()
}
else if (!is.null(.format.font.size)) { # set font size using begingroup
cat("\\begingroup \n", sep="")
.set.font.size()
}
}
.data.frame.table.header <-
function(object) {
.floating.header()
.formatting.alignment <- paste("@{\\extracolsep{",.format.column.sep.width,"}} ", sep="")
for (i in seq(1:(length(names(object))))) {
if (.format.dec.mark.align == FALSE) {
.formatting.alignment <- paste(.formatting.alignment, "c", sep="")
}
else {
.formatting.alignment <- paste(.formatting.alignment, "D{", .format.decimal.character,"}{", .format.decimal.character,"}{-", .format.s.round.digits,"} ", sep="")
}
}
#
cat("\\begin{tabular}{",.formatting.alignment,"} \n",sep="")
}
.stargazer.data.frame.table <-
function(object) {
# flip objects
if (.format.flip == TRUE) {
# keep row- and column names
obj.rownames <- rownames(object)
obj.colnames <- colnames(object)
object <- as.data.frame(t(object))
colnames(object) <- obj.rownames
rownames(object) <- obj.colnames
}
if ((nrow(object) < 1) || (ncol(object) < 1)) {
cat("% Error: Data frame must have at least one row and one column.\n")
}
else {
object <- .order.data.frame(object, order)
.table.info.comment()
#create table header
.data.frame.table.header(object)
.table.insert.space()
.table.part.published <<- as.vector(rep(NA, times=length(.format.s.stat.parts))) # to keep track what has been published (to deal intelligently with horizontal lines)
.publish.horizontal.line <<- TRUE # should non-compulsory horizontal lines be published? (yes, if something else published since the previous line)
if (length(.format.s.stat.parts)>=1) {
for (i in seq(1:length(.format.s.stat.parts))) {
.data.frame.table.part(object,.format.s.stat.parts[i], which.part.number = i)
if (.table.part.published[i]==TRUE) { .publish.horizontal.line <<- TRUE }
if ((.format.s.stat.parts[i]=="-") || (.format.s.stat.parts[i]=="-!") || (.format.s.stat.parts[i]=="=") || (.format.s.stat.parts[i]=="=!")) { .publish.horizontal.line <<- FALSE }
}
}
cat("\\end{tabular} \n")
if (.format.floating == TRUE) { cat("\\end{", .format.floating.environment,"} \n", sep="") }
else if (!is.null(.format.font.size)) {
cat("\\endgroup \n",sep="")
}
}
}
.data.frame.table.part <-
function(object, part, which.part.number) {
.table.part.published[which.part.number] <<- FALSE
if ((part=="stat names") && (.format.colnames==TRUE)) {
x.which <- 0
if (is.null(.format.covariate.labels)) { .format.covariate.labels <<- NA }
for (x in seq(1:length(names(object)))) {
omitted <- FALSE
if (!is.null(.format.omit.regexp)) {
for (j in seq(1:length(.format.omit.regexp))) {
if (length(grep(.format.omit.regexp[j], names(object)[x], perl=.format.perl, fixed=FALSE))!=0) { omitted <- TRUE }
}
}
if (!is.null(.format.keep.regexp)) {
omitted <- TRUE
for (j in seq(1:length(.format.keep.regexp))) {
if (length(grep(.format.keep.regexp[j], names(object)[x], perl=.format.perl, fixed=FALSE))!=0) { omitted <- FALSE }
}
}
if (!is.null(.format.omit.index)) {
for (j in seq(1:length(.format.omit.index))) {
if (.format.omit.index[j] == x) { omitted <- TRUE }
}
}
if (!is.null(.format.keep.index)) {
omitted <- TRUE
for (j in seq(1:length(.format.keep.index))) {
if (.format.keep.index[j] == x) { omitted <- FALSE }
}
}
if (omitted == FALSE) {
x.which <- x.which + 1
if (x >= 2) { cat(" & ", sep="")}
# if underscore or ^ in variable name, then insert an escape \ before it
name.printed <- .remove.special.chars(names(object)[x])
if (is.na(.format.covariate.labels[x.which])) {
if (.format.coefficient.variables.capitalize == TRUE) { name.printed <- toupper(name.printed) }
}
else { name.printed <- .format.covariate.labels[x.which] }
if (.format.dec.mark.align==TRUE) {
cat("\\multicolumn{1}{c}{",.format.s.coefficient.variables.left, name.printed,.format.s.coefficient.variables.right,"}", sep="")
}
else {
cat(.format.s.coefficient.variables.left, name.printed,.format.s.coefficient.variables.right, sep="")
}
}
}
cat(" \\\\ \n")
.table.part.published[which.part.number] <<- TRUE
}
if (substr(part,1,10)=="statistics") {
for (y in seq(1:nrow(object))) {
for (x in seq(1:length(names(object)))) {
omitted <- FALSE
if (!is.null(.format.omit.regexp)) {
for (j in seq(1:length(.format.omit.regexp))) {
if (length(grep(.format.omit.regexp[j], names(object)[x], perl=.format.perl, fixed=FALSE))!=0) { omitted <- TRUE }
}
}
if (!is.null(.format.keep.regexp)) {
omitted <- TRUE
for (j in seq(1:length(.format.keep.regexp))) {
if (length(grep(.format.keep.regexp[j], names(object)[x], perl=.format.perl, fixed=FALSE))!=0) { omitted <- FALSE }
}
}
if (!is.null(.format.omit.index)) {
for (j in seq(1:length(.format.omit.index))) {
if (.format.omit.index[j] == x) { omitted <- TRUE }
}
}
if (!is.null(.format.keep.index)) {
omitted <- TRUE
for (j in seq(1:length(.format.keep.index))) {
if (.format.keep.index[j] == x) { omitted <- FALSE }
}
}
if (omitted == FALSE) {
if (x >= 2) { cat(" & ", sep="") }
.how.much.to.round <- .format.round.digits
if (is.numeric(object[y,x])) {
if (.is.all.integers(object[y,x])) { .how.much.to.round <- 0 }
rounded.object <- .iround(object[y,x], .how.much.to.round)
if (.format.dec.mark.align==TRUE) {
cat(rounded.object, sep="")
}
else {
cat("$", rounded.object, "$",sep="")
}
}
else {
adjusted.object <- .remove.special.chars(object[y, x])
if (is.na(adjusted.object)) { adjusted.object <- "" }
if (.format.dec.mark.align==TRUE) {
cat("\\multicolumn{1}{c}{", adjusted.object, "}", sep="")
}
else {
cat(adjusted.object, sep="")
}
}
}
}
# add empty lines
      how.many.empty.lines <- suppressWarnings(as.numeric(substr(part,11,nchar(part))))
if (is.na(how.many.empty.lines)) { how.many.empty.lines <- 1 }
for (j in seq(1:how.many.empty.lines)) {
cat(" \\\\ \n")
}
}
.table.part.published[which.part.number] <<- TRUE
}
# notes
else if ((part=="notes") && (!is.null(.format.s.note.content))) {
if (.format.s.note != "") cat(.format.s.note)
for (i in seq(1:length(.format.s.note.content))) {
if (.format.s.note == "") { cat("\\multicolumn{",length(names(object)),"}{",.format.s.note.alignment,"}{",.format.s.note.content[i],"} \\\\ \n", sep="") }
else { cat(" & \\multicolumn{",length(names(object)),"}{",.format.s.note.alignment,"}{",.format.s.note.content[i],"} \\\\ \n", sep="") }
}
.table.part.published[which.part.number] <<- TRUE
}
# empty line
else if (part==" ") {
.table.empty.line()
.table.part.published[which.part.number] <<- TRUE
}
# horizontal line
else if (part=="-!") {
cat("\\hline ")
.table.insert.space()
cat(" \n")
.table.part.published[which.part.number] <<- TRUE
}
else if (part=="-") {
if (.publish.horizontal.line==TRUE) {
cat("\\hline ")
.table.insert.space()
cat(" \n")
.table.part.published[which.part.number] <<- TRUE
}
}
# double horizontal line
else if (part=="=!") {
cat("\\hline \n")
cat("\\hline ")
.table.insert.space()
cat(" \n")
.table.part.published[which.part.number] <<- TRUE
}
else if (part=="=") {
if (.publish.horizontal.line==TRUE) {
cat("\\hline \n")
cat("\\hline ")
.table.insert.space()
cat(" \n")
.table.part.published[which.part.number] <<- TRUE
}
}
}
.stargazer.summ.stat.table <-
function(object) {
if (length(names(object)) < 1) {
cat("% Error: Data frame columns do not have any names.\n")
}
else if ((nrow(object) < 1) || (ncol(object) < 1)) {
cat("% Error: Data frame must have at least one row and one column.\n")
}
else {
object <- .order.data.frame(object, order, summary=T)
.table.info.comment()
# create table header
.summ.stat.table.header(object)
.table.insert.space()
for (i in seq(1:length(.format.s.stat.parts))) {
.summ.stat.table.part(object,.format.s.stat.parts[i])
}
cat("\\end{tabular} \n")
if (.format.floating == TRUE) { cat("\\end{", .format.floating.environment,"} \n", sep="") }
else if (!is.null(.format.font.size)) {
cat("\\endgroup \n",sep="")
}
}
}
.summ.stat.publish.statistic <-
function(object, which.variable, which.statistic) {
if ((is.numeric(object[,which.variable]) == TRUE) || ((is.logical(object[,which.variable])) && (.format.summ.logical==TRUE))) {
if ((is.logical(object[,which.variable])) && (.format.summ.logical==TRUE)) {
      temp.var <- rep(NA, times=length(object[,which.variable]))
temp.var[object[,which.variable]==TRUE] <- 1
temp.var[object[,which.variable]==FALSE] <- 0
}
else {
temp.var <- object[,which.variable]
}
which.statistic <- tolower(which.statistic)
if (which.statistic == "n") {
return(.iround(sum(!is.na(temp.var)), 0))
}
else if (which.statistic == "nmiss") {
return(.iround(sum(is.na(temp.var)), 0))
}
else if (which.statistic == "mean") {
return(.iround(mean(temp.var, na.rm=TRUE), .format.s.round.digits))
}
else if (which.statistic == "median") {
median.value <- median(temp.var, na.rm=TRUE)
if (.is.all.integers(temp.var) == FALSE) { how.much.to.round <- .format.s.round.digits }
else {
if (.is.all.integers(median.value) == TRUE) { how.much.to.round <- 0 }
else { how.much.to.round <- 1 }
}
return(.iround(median.value, how.much.to.round))
}
else if (which.statistic == "sd") {
return(.iround(sd(temp.var, na.rm=TRUE), .format.s.round.digits))
}
else if (which.statistic == "min") {
if (.is.all.integers(temp.var) == FALSE) { how.much.to.round <- .format.s.round.digits }
else { how.much.to.round <- 0 }
return(.iround(min(temp.var, na.rm=TRUE), how.much.to.round))
}
else if (which.statistic == "max") {
if (.is.all.integers(temp.var) == FALSE) { how.much.to.round <- .format.s.round.digits }
else { how.much.to.round <- 0 }
return(.iround(max(temp.var, na.rm=TRUE), how.much.to.round))
}
else if (which.statistic == "mad") {
return(.iround(mad(temp.var, na.rm=TRUE), .format.s.round.digits))
}
else if (substr(which.statistic,1,1) == "p") {
percentile.value <- quantile(temp.var, as.numeric(substr(which.statistic,2,nchar(which.statistic))) / 100, na.rm=TRUE)
if (.is.all.integers(temp.var) == FALSE) { how.much.to.round <- .format.s.round.digits }
else {
if (.is.all.integers(percentile.value) == TRUE) { how.much.to.round <- 0 }
else { how.much.to.round <- 1 }
}
return(.iround(percentile.value, how.much.to.round))
}
}
else { return(NA) }
}
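# e.g. (illustrative): which.statistic accepts "n", "nmiss", "mean", "median",
# "sd", "min", "max", "mad" and percentile codes of the form "pNN", so
# .summ.stat.publish.statistic(df, 2, "p25") returns the 25th percentile of the
# second column. Rounding adapts to the data: all-integer columns get 0 decimal
# digits for median/min/max/percentiles (1 digit when, e.g., the median falls
# between two integers), while mean, sd and mad always use .format.s.round.digits.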
.summ.stat.table.header <-
function(object) {
.floating.header()
#
.formatting.alignment <- paste("@{\\extracolsep{",.format.column.sep.width,"}}l", sep="")
if (.format.flip == FALSE) { width <- length(.format.s.statistics.list) }
else { width <- length(.summ.stat.included(object)) }
for (i in seq(1:width)) {
if (.format.dec.mark.align == FALSE) {
.formatting.alignment <- paste(.formatting.alignment, "c", sep="")
}
else {
.formatting.alignment <- paste(.formatting.alignment, "D{", .format.decimal.character,"}{", .format.decimal.character,"}{-", .format.s.round.digits,"} ", sep="")
}
}
#
cat("\\begin{tabular}{",.formatting.alignment,"} \n",sep="")
}
# figure out which variables are included --> returns indices of included variables
.summ.stat.included <-
function(object) {
included <- NULL
for (i in seq(1:length(names(object)))) {
# skip all of this if omitted based on regular expression
omitted <- FALSE
if ((is.numeric(object[,i]) == TRUE) || (is.logical(object[,i]) && (.format.summ.logical==TRUE))) {
# also omit if all missing values
if (!any(!is.na(object[,i]))) { omitted <- TRUE }
if (!is.null(.format.omit.regexp)) {
for (j in seq(1:length(.format.omit.regexp))) {
if (length(grep(.format.omit.regexp[j], names(object)[i], perl=.format.perl, fixed=FALSE))!=0) { omitted <- TRUE }
}
}
if (!is.null(.format.keep.regexp)) {
omitted <- TRUE
for (j in seq(1:length(.format.keep.regexp))) {
if (length(grep(.format.keep.regexp[j], names(object)[i], perl=.format.perl, fixed=FALSE))!=0) { omitted <- FALSE }
}
}
if (!is.null(.format.omit.index)) {
for (j in seq(1:length(.format.omit.index))) {
if (.format.omit.index[j] == i) { omitted <- TRUE }
}
}
if (!is.null(.format.keep.index)) {
omitted <- TRUE
for (j in seq(1:length(.format.keep.index))) {
if (.format.keep.index[j] == i) { omitted <- FALSE }
}
}
}
else { omitted <- TRUE }
if (omitted == FALSE) { included <- c(included, i) }
}
return(included)
}
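# Illustrative example: for data.frame(id = 1:3, name = c("a","b","c"),
# score = c(NA, 2.5, 3.1)), .summ.stat.included() returns c(1, 3): the character
# column is skipped, as would be an all-NA numeric column or any column filtered
# out by the omit/keep regular expressions and indices.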
.summ.stat.table.part <-
function(object, part) {
included <- .summ.stat.included(object)
# with summary statistics, always publish horizontal line
.publish.horizontal.line <<- TRUE
if (part=="stat names") {
cat(.format.s.statistics.names.label, sep="")
if (.format.flip == FALSE) {
if (length(.format.s.statistics.list)>=1) {
for (i in seq(1:length(.format.s.statistics.list))) {
for (j in seq(1:ncol(.format.s.statistics.names))) {
if ((substr(.format.s.statistics.list[i],1,1)=="p") && (substr(.format.s.statistics.list[i],1,1)==.format.s.statistics.names[1,j])) {
cat(" & \\multicolumn{1}{c}{", .format.s.statistics.names.left, sub("!", substr(.format.s.statistics.list[i],2,nchar(.format.s.statistics.list[i])), .format.s.statistics.names[2,j], ignore.case =FALSE, fixed=TRUE), .format.s.statistics.names.right,"}", sep="")
}
else if (.format.s.statistics.list[i]==.format.s.statistics.names[1,j]) {
cat(" & \\multicolumn{1}{c}{", .format.s.statistics.names.left, .format.s.statistics.names[2,j], .format.s.statistics.names.right, "}", sep="")
}
}
}
}
}
else { # flipped summary statistic table
if (is.null(.format.covariate.labels)) { .format.covariate.labels <<- NA }
i.label <- 0
for (i in included) {
i.label <- i.label + 1
# if underscore in variable name, then insert an escape \ before it
name.printed <- .remove.special.chars(names(object)[i])
cat(" & ")
if (is.na(.format.covariate.labels[i.label])) {
if ( .format.s.coefficient.variables.capitalize == TRUE) { cat(.format.s.coefficient.variables.left, toupper(name.printed), .format.s.coefficient.variables.right, sep="") }
else { cat(.format.s.coefficient.variables.left, name.printed, .format.s.coefficient.variables.right, sep="") }
}
else { cat(.format.s.coefficient.variables.left, .format.covariate.labels[i.label], .format.s.coefficient.variables.right, sep="") }
}
}
cat(" \\\\ \n")
}
if (substr(part,1,10)=="statistics") {
if (is.null(.format.covariate.labels)) { .format.covariate.labels <<- NA }
if (.format.flip == FALSE) {
i.label <- 0
for (i in included) {
i.label <- i.label + 1
# if underscore in variable name, then insert an escape \ before it
name.printed <- .remove.special.chars(names(object)[i])
if (is.na(.format.covariate.labels[i.label])) {
if ( .format.s.coefficient.variables.capitalize == TRUE) { cat(.format.s.coefficient.variables.left, toupper(name.printed), .format.s.coefficient.variables.right, sep="") }
else { cat(.format.s.coefficient.variables.left, name.printed, .format.s.coefficient.variables.right, sep="") }
}
else { cat(.format.s.coefficient.variables.left, .format.covariate.labels[i.label], .format.s.coefficient.variables.right, sep="") }
if (length(.format.s.statistics.list)>=1) {
for (j in seq(1:length(.format.s.statistics.list))) {
# if aligning decimal marks, need to use multicolumn for anything w/o decimal mark
if (.format.dec.mark.align == FALSE) { # not aligning
cat(" & ", .summ.stat.publish.statistic(object, i, .format.s.statistics.list[j]), sep="")
}
else { # aligning
if (.is.all.integers(.summ.stat.publish.statistic(object, i, .format.s.statistics.list[j]))) {
cat(" & \\multicolumn{1}{c}{", .summ.stat.publish.statistic(object, i, .format.s.statistics.list[j]),"}", sep="")
}
else {
cat(" & ", .summ.stat.publish.statistic(object, i, .format.s.statistics.list[j]), sep="")
}
}
}
}
# add empty lines
          how.many.empty.lines <- suppressWarnings(as.numeric(substr(part,11,nchar(part))))
if (is.na(how.many.empty.lines)) { how.many.empty.lines <- 1 }
for (j in seq(1:how.many.empty.lines)) {
cat(" \\\\ \n")
}
}
}
else { # flipped
if (length(.format.s.statistics.list)>=1) {
for (i in seq(1:length(.format.s.statistics.list))) {
for (j in seq(1:ncol(.format.s.statistics.names))) {
if ((substr(.format.s.statistics.list[i],1,1)=="p") && (substr(.format.s.statistics.list[i],1,1)==.format.s.statistics.names[1,j])) {
cat(.format.s.statistics.names.left, sub("!", substr(.format.s.statistics.list[i],2,nchar(.format.s.statistics.list[i])), .format.s.statistics.names[2,j], ignore.case =FALSE, fixed=TRUE), .format.s.statistics.names.right, sep="")
}
else if (.format.s.statistics.list[i]==.format.s.statistics.names[1,j]) {
cat(.format.s.statistics.names.left, .format.s.statistics.names[2,j], .format.s.statistics.names.right, sep="")
}
}
for (j in included) {
# if aligning decimal marks, need to use multicolumn for anything w/o decimal mark
if (.format.dec.mark.align == FALSE) { # not aligning
cat(" & ", .summ.stat.publish.statistic(object, j, .format.s.statistics.list[i]), sep="")
}
else { # aligning
if (.is.all.integers(.summ.stat.publish.statistic(object, j, .format.s.statistics.list[i]))) {
cat(" & \\multicolumn{1}{c}{", .summ.stat.publish.statistic(object, j, .format.s.statistics.list[i]),"}", sep="")
}
else {
cat(" & ", .summ.stat.publish.statistic(object, j, .format.s.statistics.list[i]), sep="")
}
}
}
# add empty lines
how.many.empty.lines <- as.numeric(substr(part,11,nchar(part)))
if (is.na(how.many.empty.lines)) { how.many.empty.lines <- 1 }
for (k in seq(1:how.many.empty.lines)) {
cat(" \\\\ \n")
}
}
}
}
}
# notes
else if ((part=="notes") && (!is.null(.format.s.note.content))) {
if (.format.s.note != "") cat(.format.s.note)
if (.format.s.note=="") { offset <- 1 }
else { offset <- 0 }
if (.format.flip == FALSE) { width <- length(.format.s.statistics.list)+ offset }
else { width <- length(included) + offset }
for (i in seq(1:length(.format.s.note.content))) {
if (.format.s.note == "") { cat("\\multicolumn{",width,"}{",.format.s.note.alignment,"}{",.format.s.note.content[i],"} \\\\ \n", sep="") }
else { cat(" & \\multicolumn{",width,"}{",.format.s.note.alignment,"}{",.format.s.note.content[i],"} \\\\ \n", sep="") }
}
}
# empty line
else if (part==" ") {
.table.empty.line()
}
# horizontal line
else if (part=="-!") {
cat("\\hline ")
.table.insert.space()
cat(" \n")
}
else if (part=="-") {
if (.publish.horizontal.line==TRUE) {
cat("\\hline ")
.table.insert.space()
cat(" \n")
}
}
# double horizontal line
else if (part=="=!") {
cat("\\hline \n")
cat("\\hline ")
.table.insert.space()
cat(" \n")
}
else if (part=="=") {
if (.publish.horizontal.line==TRUE) {
cat("\\hline \n")
cat("\\hline ")
.table.insert.space()
cat(" \n")
}
}
}
.table.empty.line <-
function() {
if (.format.no.space == FALSE) {
cat(" ")
for (i in seq(1:length(.global.models))) {
cat("& ")
}
cat("\\\\ \n")
}
}
.table.enter.coefficients <-
function(which.variable) {
if (which.variable > length(.global.coefficients)) {
return();
}
local.coefficient.var.name <- .global.coefficient.variables[which.variable]
#skip all of this if omitted based on regular expression
omitted <- FALSE
if (!is.null(.format.omit.regexp)) {
for (i in seq(1:length(.format.omit.regexp))) {
if (length(grep(.format.omit.regexp[i], local.coefficient.var.name, perl=.format.perl, fixed=FALSE))!=0) { omitted <- TRUE }
}
}
if (!is.null(.format.keep.regexp)) {
omitted <- TRUE
for (i in seq(1:length(.format.keep.regexp))) {
if (length(grep(.format.keep.regexp[i], local.coefficient.var.name, perl=.format.perl, fixed=FALSE))!=0) { omitted <- FALSE }
}
}
if (!is.null(.format.omit.index)) {
for (i in seq(1:length(.format.omit.index))) {
if (.format.omit.index[i] == which.variable) { omitted <- TRUE }
}
}
if (!is.null(.format.keep.index)) {
omitted <- TRUE
for (i in seq(1:length(.format.keep.index))) {
if (.format.keep.index[i] == which.variable) { omitted <- FALSE }
}
}
if (omitted == FALSE) {
.which.variable.label <<- .which.variable.label + 1
# remove final -TRUE (added by Zelig) from dummy variables
if (substr(local.coefficient.var.name, nchar(local.coefficient.var.name)-3, nchar(local.coefficient.var.name)) == "TRUE") {
### only remove TRUE if added by Zelig, rather than pre-existing in the formula name
if (length(grep(local.coefficient.var.name, .global.formulas.rhs,fixed=TRUE))==0) {
local.coefficient.var.name <- substr(local.coefficient.var.name, 1, nchar(local.coefficient.var.name)-4)
}
}
# remove everything before and including the last dollar sign from the variable name
temp <- strsplit(local.coefficient.var.name,"$",fixed=TRUE)
local.coefficient.var.name <- temp[[1]][length(temp[[1]])]
# if underscore or ^ in variable name, then insert an escape \ before it
local.coefficient.var.name <- .remove.special.chars(local.coefficient.var.name)
if (length(.format.coefficient.table.parts)>=1) {
for (i in seq(1:length(.format.coefficient.table.parts))) {
.coefficient.table.part(part=.format.coefficient.table.parts[i], which.variable, variable.name=local.coefficient.var.name)
}
}
}
}
.table.header <-
function() {
.floating.header()
#
.formatting.alignment <- paste("@{\\extracolsep{",.format.column.sep.width,"}}l", sep="")
for (i in seq(1:length(.global.models))) {
if (.format.dec.mark.align==FALSE) {
.formatting.alignment <- paste(.formatting.alignment, "c", sep="")
}
else {
.formatting.alignment <- paste(.formatting.alignment, "D{", .format.decimal.character,"}{", .format.decimal.character,"}{-", .format.round.digits,"} ", sep="")
}
}
#
cat("\\begin{tabular}{",.formatting.alignment,"} \n",sep="")
}
.table.info.comment <-
function() {
cat("\n")
if (.format.header==TRUE) {
cat("% Table created by ", .global.package.name, " v.", .global.package.version, " by ", .global.package.author.name, ", ", .global.package.author.affiliation, ". E-mail: ", .global.package.author.email, "\n", sep="")
cat("% Date and time:", format(Sys.time(), "%a, %b %d, %Y - %X"))
cat("\n")
required.latex.packages <- NULL
if (.format.dec.mark.align==TRUE) { required.latex.packages <- c(required.latex.packages, "dcolumn") }
if (.format.floating.environment=="sidewaystable") { required.latex.packages <- c(required.latex.packages, "rotating") }
if (!is.null(required.latex.packages)) {
cat("% Requires LaTeX packages: ")
for (i in 1:length(required.latex.packages)){
cat(required.latex.packages[i]," ", sep="")
}
cat("\n")
}
}
}
.table.insert.space <-
function() {
cat("\\\\[",.format.space.size,"]",sep="")
}
.trim <-
function (x) gsub("^\\s+|\\s+$", "", x)
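# Example: strips leading/trailing whitespace only; interior spacing is kept.
#   .trim("  Model 1  ")   # returns "Model 1"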
.wald.stat <-
function(object.name) {
wald.output <- as.vector(rep(NA,times=3))
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH","Arima","maBina","coeftest", "Gls", "ivreg","lmer","glmer","nlmer"))) {
if (!is.null(.summary.object$waldtest)) {
wald.value <- suppressMessages(.summary.object$waldtest[1])
df.value <- suppressMessages(.summary.object$waldtest[2])
wald.p.value <- suppressMessages(.summary.object$waldtest[3])
wald.output <- as.vector(c(wald.value, df.value, wald.p.value))
}
else if (model.name %in% c("tobit(AER)")) {
wald.value <- .summary.object$wald
df.value <- .summary.object$df - .summary.object$idf
wald.p.value <- pchisq(wald.value, df.value, lower.tail=FALSE)
wald.output <- as.vector(c(wald.value, df.value, wald.p.value))
}
else if (model.name %in% c("lagsarlm", "errorsarlm")) {
wald.value <- as.vector(.summary.object$Wald1$statistic)
df.value <- as.vector(.summary.object$Wald1$parameter)
wald.p.value <- as.vector(.summary.object$Wald1$p.value)
wald.output <- as.vector(c(wald.value, df.value, wald.p.value))
}
}
names(wald.output) <- c("statistic","df1","p-value")
return(cbind(wald.output))
}
.get.coefficients.1 <-
function(object.name, user.given=NULL, model.num=1) {
if (!is.null(user.given)) {
if (.model.identify(object.name) == "multinom") {
if (!is.null(nrow(user.given))) { user.given <- as.vector(user.given[model.num,]) }
}
return(user.given)
}
model.name <- .get.model.name(object.name)
if (model.name %in% c("ls", "normal", "logit", "probit", "relogit", "poisson", "negbin", "normal.survey", "poisson.survey", "probit.survey", "logit.survey", "gamma", "gamma.survey",
"cloglog.net", "gamma.net", "logit.net", "probit.net", "brglm", "glm()", "Glm()", "svyglm()", "plm", "pgmm", "ivreg", "lmrob", "glmrob", "dynlm", "gmm", "mclogit")) {
return(.summary.object$coefficients[,"Estimate"])
}
if (model.name %in% c("Arima")) {
return(object.name$coef)
}
if (model.name %in% c("censReg")) {
return(.summary.object$estimate[,1])
}
if (model.name %in% c("mnlogit")) {
return(.summary.object$CoefTable[,1])
}
if (model.name %in% c("fGARCH")) {
return(object.name@fit$matcoef[,1])
}
if (model.name %in% c("lme","nlme")) {
return(.summary.object$tTable[,1])
}
if (model.name %in% c("maBina")) {
return(as.vector(object.name$out[,1]))
}
if (model.name %in% c("mlogit")) {
return(as.vector(.summary.object$CoefTable[,1]))
}
if (model.name %in% c("coeftest")) {
return(as.vector(object.name[,1]))
}
if (model.name %in% c("selection", "heckit")) {
if (!.global.sel.equation) {
indices <- .summary.object$param$index$betaO ### outcome equation
}
else {
indices <- .summary.object$param$index$betaS ### selection equation
}
return(as.vector(.summary.object$estimate[indices,1]))
}
if (model.name %in% c("probit.ss", "binaryChoice")) {
return(as.vector(.summary.object$estimate[,1]))
}
if (model.name %in% c("hetglm")) {
return(as.vector(.summary.object$coefficients$mean[,1]))
}
if (model.name %in% c("lmer","glmer","nlmer")) {
coefs <- .summary.object$coefficients[,1]
return(coefs)
}
if (model.name %in% c("ergm")) {
return(.summary.object$coefs[,1])
}
if (model.name %in% c("lagsarlm", "errorsarlm")) {
return(.summary.object$Coef[,1])
}
if (model.name %in% c("rq","felm")) {
return(.summary.object$coefficients[,1])
}
if (model.name %in% c("clm")) {
if (.format.ordered.intercepts == FALSE) {
return(.summary.object$coefficients[(length(object.name$alpha)+1):(length(object.name$coefficients)),1])
}
else {
return(.summary.object$coefficients[,1])
}
}
else if (model.name %in% c("pmg")) {
return(.summary.object$coefficients)
}
else if (model.name %in% c("zeroinfl", "hurdle")) {
if (.global.zero.component==FALSE) {
return(.summary.object$coefficients$count[,"Estimate"])
}
else {
return(.summary.object$coefficients$zero[,"Estimate"])
}
}
else if (model.name %in% c("normal.gee", "logit.gee", "probit.gee", "poisson.gee", "gamma.gee", "gee()")) {
return(.summary.object$coefficients[,"Estimate"])
}
else if (model.name %in% c("normal.gam", "logit.gam", "probit.gam", "poisson.gam", "gam()")) {
return(.summary.object$p.coeff)
}
else if (model.name %in% c("coxph", "clogit")) {
return(.summary.object$coef[,"coef"])
}
else if (model.name %in% c("exp","lognorm","weibull","tobit","survreg()")) {
return(.summary.object$table[,"Value"])
}
else if (model.name %in% c("rlm")) {
return(suppressMessages(.summary.object$coefficients[,"Value"]))
}
else if (model.name %in% c("ologit", "oprobit", "polr()")) {
coef.temp <- suppressMessages(.summary.object$coefficients[,"Value"])
if (.format.ordered.intercepts == FALSE) { return(coef.temp[seq(from=1, to=length(coef.temp)-(length(suppressMessages(.summary.object$lev))-1))]) }
else { return(coef.temp) }
}
else if (model.name %in% c("arima", "rem.dyad")) {
return( object.name$coef )
}
else if (model.name %in% c("tobit(AER)")){
return(.summary.object$coefficients[,"Estimate"])
}
else if (model.name %in% c("multinom")){
if (is.null(nrow(.summary.object$coefficients))) {
coef.temp <- .summary.object$coefficients
}
else {
coef.temp <- .summary.object$coefficients[model.num,]
}
return(coef.temp)
}
else if (model.name %in% c("betareg")){
return(.summary.object$coefficients$mean[,"Estimate"])
}
else if (model.name %in% c("gls")) {
coef.temp <- object.name$coefficients
return(coef.temp)
}
else if (model.name %in% c("weibreg", "coxreg", "phreg", "aftreg", "bj", "cph", "Gls", "lrm", "ols", "psm", "Rq")) {
return( object.name$coefficients )
}
else { return(NULL) }
}
.get.coefficients <-
function(object.name, user.given=NULL, model.num=1) {
out <- .get.coefficients.1(object.name, user.given, model.num)
coef.vars <- .coefficient.variables(object.name)
if (is.null(names(out))) {
if (length(out) < length(coef.vars)) {
out.temp <- rep(NA, times=length(coef.vars)-length(out))
out <- c(out, out.temp)
}
else if (length(out) > length(coef.vars)) {
out <- out[1:length(coef.vars)]
}
names(out) <- coef.vars
}
else {
out.temp <- rep(NA, times = length(coef.vars))
names(out.temp) <- coef.vars
for (i in 1:length(out)) {
name <- names(out)[i]
if (name %in% coef.vars) {
out.temp[name] <- out[i]
}
}
out <- out.temp
}
return(out)
}
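# A standalone sketch of the name-matching step above (hypothetical names):
# variables a model does not report are left as NA, so coefficient rows line
# up across models.
if (FALSE) {
  out <- c(x1 = 1.2, x3 = -0.4)        # coefficients reported by one model
  coef.vars <- c("x1", "x2", "x3")     # full variable list for the table
  out.temp <- rep(NA, times = length(coef.vars))
  names(out.temp) <- coef.vars
  out.temp[names(out)] <- out
  out.temp                             # c(x1 = 1.2, x2 = NA, x3 = -0.4)
}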
.turn.into.list <-
function(x) {
if (is.vector(x) || is.matrix(x)) {
if (!is.list(x)) { return(as.list(x)) }
}
return(x)
}
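# Examples: named vectors become named lists; lists pass through unchanged.
#   .turn.into.list(c(a = 1, b = 2))   # list(a = 1, b = 2)
#   .turn.into.list(list(1, 2))        # returned as-is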
.is.list.numeric <-
function(x) {
# tolerate NA or NULL
if (is.null(x)) { return(TRUE) }
if (!is.list(x)) { return(FALSE) }
for (i in 1:length(x)) {
elem <- x[[i]]
      if (!is.null(elem)) {
        # tolerate numeric vectors and all-NA placeholders; anything else fails
        # (the original vector-|| subscript errors in R >= 4.3)
        if (!(is.numeric(elem) || all(is.na(elem)))) { return(FALSE) }
      }
}
return(TRUE)
}
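# Examples (hypothetical values): NULL and all-NA elements are tolerated,
# character content is not.
#   .is.list.numeric(list(c(1, 2), NULL, NA))   # TRUE
#   .is.list.numeric(list(c(1, 2), "a"))        # FALSE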
.is.list.numeric.matrix <-
function(x) {
# tolerate NA or NULL
if (is.null(x)) { return(TRUE) }
if (!is.list(x)) { return(FALSE) }
for (i in 1:length(x)) {
elem <- as.matrix(x[[i]])
      if (!is.null(elem)) {
        # same tolerance as .is.list.numeric, applied to the matrix elements
        if (!(is.numeric(elem) || all(is.na(elem)))) { return(FALSE) }
      }
}
return(TRUE)
}
.get.file.extension <-
function (path) {
split <- strsplit(path, "\\.")[[1]]
return( tolower(split[length(split)]) )
}
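# Example: the extension is whatever follows the last dot, lower-cased.
#   .get.file.extension("results/table.HTML")   # returns "html"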
############## TEXT AND HTML MODE ##############
.split.line <- # split line of a LaTeX table into constituent parts separated by &
function(s) {
# remove the "\\\\"
s <- gsub("\\\\", "", s, fixed=TRUE)
s <- paste(" ",s," ", sep="")
return(.trim(strsplit(s, " &", fixed=TRUE)[[1]]))
}
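# Example (hypothetical row): the trailing \\ is stripped, then cells are
# split on the " &" separator and trimmed.
#   .split.line("Constant & 1.20 & 3.40 \\\\")
#   # returns c("Constant", "1.20", "3.40")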
.remove.extra.spaces <-
function(s) {
new.s <- ""
space <- FALSE
for (i in 1:nchar(s)) {
s.i <- substr(s,i,i)
if (s.i == " ") {
if (space == FALSE) {
space <- TRUE
new.s <- paste(new.s, s.i, sep="")
}
}
else {
space <- FALSE
new.s <- paste(new.s, s.i, sep="")
}
}
return(new.s)
}
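# Example: runs of spaces collapse to a single space.
#   .remove.extra.spaces("a    b  c")   # returns "a b c"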
strpos <-
function(x, s) {
return( regexpr(x, s, fixed=TRUE)[1] )
}
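# Examples: note the argument order (pattern first, string second), fixed
# (non-regex) matching, and the -1 return value when there is no match.
#   strpos("{", "\\cline{2-3}")   # returns 7
#   strpos("|", "abc")            # returns -1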
is.alphanumeric <-
function(s) {
  # TRUE when the string consists entirely of digits and/or letters;
  # the combined class also covers digit-only and letter-only strings
  return(grepl("^[[:digit:][:alpha:]]+$", s))
}
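# Examples:
#   is.alphanumeric("x1")    # TRUE
#   is.alphanumeric("x_1")   # FALSE (underscore is neither digit nor letter)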
.replace.latex.symbols <-
function (s) {
latex.replace <- NULL
latex.replace <- cbind(latex.replace, c("\\textbackslash","\\"), c("\\_","_"), c("\\#","#"), c("\\textasciitilde","~"), c("\\{","{"), c("\\}","}"), c("\\%","%"))
latex.replace <- cbind(latex.replace, c("\\textasteriskcentered","*"), c("\\textbar","|"), c("\\textgreater",">"), c("\\textless","<"), c("$\\hat{\\mkern6mu}$","^"))
# Greek letters
latex.replace <- cbind(latex.replace, c("\\alpha","alpha"), c("\\beta","beta"), c("\\gamma","gamma"), c("\\delta","delta"), c("\\epsilon","epsilon"), c("\\varepsilon","epsilon"), c("\\zeta","zeta"))
latex.replace <- cbind(latex.replace, c("\\eta","eta"), c("\\theta","theta"), c("\\vartheta","theta"), c("\\iota","iota"), c("\\kappa","kappa"), c("\\lambda","lambda"), c("\\mu","mu"))
latex.replace <- cbind(latex.replace, c("\\nu","nu"), c("\\xi","xi"), c("\\pi","pi"), c("\\varpi","pi"), c("\\rho","rho"), c("\\varrho","rho"), c("\\sigma","sigma"))
latex.replace <- cbind(latex.replace, c("\\varsigma","sigma"), c("\\tau","tau"), c("\\upsilon","upsilon"), c("\\phi","phi"), c("\\varphi","phi"), c("\\chi","chi"), c("\\psi","psi"))
latex.replace <- cbind(latex.replace, c("\\omega","omega"), c("\\Gamma","gamma"), c("\\Delta","delta"), c("\\Theta","theta"), c("\\Lambda","lambda"), c("\\Xi","xi"), c("\\Pi","pi"))
latex.replace <- cbind(latex.replace, c("\\Sigma","sigma"), c("\\Upsilon","upsilon"), c("\\Phi","phi"), c("\\Psi","psi"), c("\\Omega","omega"))
s.out <- s
for (item in 1:ncol(latex.replace)) {
symbol <- latex.replace[1, item]
replacement <- latex.replace[2, item]
# quick check if any latex characters
symbol.regexp <- gsub("\\","\\\\",symbol,fixed=TRUE)
symbol.regexp <- gsub("{","\\{",symbol.regexp,fixed=TRUE)
symbol.regexp <- gsub("}","\\}",symbol.regexp,fixed=TRUE)
symbol.regexp <- gsub("$","\\$",symbol.regexp,fixed=TRUE)
symbol.regexp <- paste(symbol.regexp, "[^[:alnum:]_]+", sep="")
pos <- 1
while (pos <= nchar(s.out)) {
if (length(grep(symbol.regexp, s.out))==0) { break }
s.pre <- substr(s.out, 1, pos-1)
s.pos.char <- substr(s.out, pos, pos)
s.post <- substr(s.out, pos + nchar(symbol), nchar(s.out))
if (substr(s.out, pos, pos+nchar(symbol)-1) == symbol) {
if (!is.alphanumeric(substr(s.post, 1, 1))) {
s.out <- paste(s.pre, replacement, s.post, sep="")
pos <- pos + nchar(replacement) - 1   # advance past the replacement text
}
}
pos <- pos + 1
}
}
return(s.out)
}
.remove.control.sequences <-
function (s, type="text") {
s <- paste(" ",s, " ", sep="")
# replace latex symbols
s <- .replace.latex.symbols(s)
# remove dollar signs (escaped underscores and carets are unescaped at the end)
s <- gsub("\\$", "", s)
# remove extra spaces
s <- .remove.extra.spaces(s)
# add: replace some sequences with corresponding letters
# walk through the string
i <- 1
new.s <- ""
control.sequence <- ""
while (i <= nchar(s)) {
s.i0 <- substr(s, i-1, i)
s.i <- substr(s, i, i)
s.i2 <- substr(s, i, i+1)
if ((s.i %in% c("\\", "_", "^")) && (!(s.i2 %in% c("\\_","\\^"))) && (!(s.i0 %in% c("\\_","\\^"))) ) {
remainder.s <- substr(s, i+1, nchar(s)) # if control character not followed by curly brace
if ((strpos(" ", remainder.s) < strpos("{", remainder.s)) || (strpos("{", remainder.s)==-1)) {
i <- i + strpos(" ", remainder.s) + 1
}
else { # control character followed by curly brace
control.sequence <- substr(s, i, i+strpos("{", remainder.s)-1)
if (type=="html") {
if (control.sequence == "\\textit") { new.s <- paste(new.s,"<em>",sep="") }
if (control.sequence == "\\textbf") { new.s <- paste(new.s,"<strong>",sep="") }
if (control.sequence == "_") { new.s <- paste(new.s,"<sub>",sep="") }
if (control.sequence == "^") { new.s <- paste(new.s,"<sup>",sep="") }
}
if (type=="mmd") {
if (control.sequence == "\\textit") { new.s <- paste(new.s,"*",sep="") }
if (control.sequence == "\\textbf") { new.s <- paste(new.s,"**",sep="") }
if (control.sequence == "~") { new.s <- paste(new.s,"~",sep="") }
if (control.sequence == "^") { new.s <- paste(new.s,"^",sep="") }
}
s.sub <- substr(remainder.s, strpos("{", remainder.s), nchar(remainder.s))
open.brackets <- 0
bracket.start <- bracket.end <- strpos("{", s.sub)
for (j in 1:nchar(s.sub)) {
s.sub.j <- substr(s.sub, j, j)
if (s.sub.j == "{") {
open.brackets <- open.brackets + 1
if (open.brackets == 1) { bracket.start <- j + 1 }
}
if (s.sub.j == "}") {
open.brackets <- open.brackets - 1
if (open.brackets == 0) { bracket.end <- j - 1 }
}
if (!(s.sub.j %in% c("{","}"))) {
if (open.brackets == 0) { break }
}
}
if (bracket.end < bracket.start) {
examine.substring <- ""
}
else {
examine.substring <- substr(s.sub, bracket.start, bracket.end)
}
new.s <- paste(new.s, .remove.control.sequences(examine.substring, type=type), sep="")
if (type=="html") {
if (control.sequence == "\\textit") { new.s <- paste(new.s,"</em>",sep="") }
if (control.sequence == "\\textbf") { new.s <- paste(new.s,"</strong>",sep="") }
if (control.sequence == "_") { new.s <- paste(new.s,"</sub>",sep="") }
if (control.sequence == "^") { new.s <- paste(new.s,"</sup>",sep="") }
}
if (type=="mmd") {
if (control.sequence == "\\textit") { new.s <- paste(new.s,"*",sep="") }
if (control.sequence == "\\textbf") { new.s <- paste(new.s,"**",sep="") }
if (control.sequence == "~") { new.s <- paste(new.s,"~",sep="") }
if (control.sequence == "^") { new.s <- paste(new.s,"^",sep="") }
}
i <- i + strpos("{", remainder.s) + bracket.end + 1
}
}
else { # not inside a control sequence
new.s <- paste(new.s, s.i, sep="")
i <- i + 1
}
}
# unescape underscores and carets
new.s <- gsub("\\_", "_", new.s, fixed=TRUE)
new.s <- gsub("\\^", "^", new.s, fixed=TRUE)
return(.trim(new.s))
}
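# A minimal illustration of the recursion above (hypothetical inputs):
if (FALSE) {
  .remove.control.sequences("\\textbf{Model 1}", type = "html")
  # returns "<strong>Model 1</strong>"
  .remove.control.sequences("\\textbf{Model 1}", type = "text")
  # returns "Model 1" (text mode drops the formatting, keeps the content)
}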
.text.cline <-
function (cline, max.length, line.char="-") {
for (i in 1:length(cline)) {
if ((cline[i]==0) && (sum(cline[i:length(cline)]) != 0)) {
.repeat.char(" ", rep=max.length[i]+1, new.line=FALSE)
}
else if (cline[i]>=1) {
underline.len <- 0
for (j in i:(i+cline[i]-1)) {
underline.len <- underline.len + max.length[j] + 1
}
underline.len <- underline.len - 1
.repeat.char(line.char, rep=underline.len, new.line=FALSE)
if ((sum(cline[i:length(cline)]) != cline[i])) { cat(" ") }
}
}
cat("\n")
}
.html.cline <-
function (cline) {
cat("<tr>")
for (i in 1:length(cline)) {
if ((cline[i]==0) && (sum(cline[i:length(cline)]) != 0)) {
cat("<td></td>")
}
else if (cline[i]>=1) {
cat("<td colspan=\"",cline[i],"\" style=\"border-bottom: 1px solid black\"></td>",sep="")
}
}
cat("</tr>\n")
}
.mmd.cline <-
function (cline) {
# no support for cline in MMD as far as I am aware
}
.text.horizontal.line <-
function (line.char="-", max.length) {
horizontal.length <- 0
for (i in 1:length(max.length)) {
horizontal.length <- horizontal.length + max.length[i] + 1
}
horizontal.length <- horizontal.length - 1
.repeat.char(line.char, rep=horizontal.length, new.line=TRUE)
}
.html.horizontal.line <-
function (how.many.columns) {
cat("<tr><td colspan=\"",how.many.columns,"\" style=\"border-bottom: 1px solid black\"></td></tr>",sep="")
}
.mmd.horizontal.line <-
function (how.many.columns) {
# no support for hline in MMD as far as I am aware
}
.text.output <-
function(all.latex.code) {
how.many.tables <- 0
start.lines <- NULL
for (i in 1:length(all.latex.code)) {
if (all.latex.code[i] %in% c("")) {
how.many.tables <- how.many.tables + 1
start.lines <- c(start.lines, i)
}
}
for (table.number in 1:how.many.tables) {
if (table.number < how.many.tables) {
latex.code <- all.latex.code[start.lines[table.number]:start.lines[table.number+1]]
}
else {
latex.code <- all.latex.code[start.lines[table.number]:length(all.latex.code)]
}
how.many.columns <- .get.number.of.columns(latex.code)
r <- 0
matrices <- .matrices(latex.code, how.many.columns)
t <- matrices[[1]]
c <- matrices[[2]]
j <- matrices[[3]]
max.l <- .text.column.width(t, c)
w <- .width.matrix(c, max.l)
cat("\n")
for (row in 1:length(latex.code)) {
line <- latex.code[row]
if (substr(line, nchar(line)-2, nchar(line)) == "\\\\ ") {
r <- r + 1
.text.output.line(t, r, w, c, j)
}
else if (strpos("\\caption{", line) != -1) {
inside.caption <- substr(.trim(line), 10, nchar(.trim(line))-1)
text.title <- .trim(.remove.control.sequences(inside.caption))
if (text.title != "") { cat(.remove.control.sequences(inside.caption),"\n", sep="") }
}
else if (strpos("\\cline{", line) != -1) {
s <- paste(" ", line, " ", sep="")
cline <- rep(0, times=how.many.columns)
while (strpos("\\cline{", s) != -1) {
from <- strpos("\\cline{", s) + 7
to <- strpos("}", s) - 1
underline.columns <- substr(s, from, to)
split.columns <- strsplit(underline.columns,"-", fixed=TRUE)[[1]]
col.underline.begin <- as.numeric(split.columns[1])
col.underline.number <- as.numeric(split.columns[2]) - col.underline.begin + 1
cline[col.underline.begin] <- col.underline.number
s <- substr(s, to+1, nchar(s))
.text.cline(cline, max.l)
}
}
else if (strpos("\\hline",line) != -1) {
if (!(is.na(latex.code[row+1]))) {
if (strpos("\\hline", latex.code[row+1]) != -1) {
.text.horizontal.line("=", max.l)
}
else {
if (strpos("\\hline", latex.code[row-1]) == -1) {
.text.horizontal.line("-", max.l)
}
}
}
else {
if (strpos("\\hline", latex.code[row-1]) == -1) {
.text.horizontal.line("-", max.l)
}
}
}
}
}
}
.html.output <-
function(all.latex.code) {
how.many.tables <- 0
start.lines <- NULL
for (i in 1:length(all.latex.code)) {
if (all.latex.code[i] %in% c("")) {
how.many.tables <- how.many.tables + 1
start.lines <- c(start.lines, i)
}
}
for (table.number in 1:how.many.tables) {
if (table.number < how.many.tables) {
latex.code <- all.latex.code[start.lines[table.number]:start.lines[table.number+1]]
}
else {
latex.code <- all.latex.code[start.lines[table.number]:length(all.latex.code)]
}
how.many.columns <- .get.number.of.columns(latex.code)
r <- 0
matrices <- .matrices(latex.code, how.many.columns, type="html")
t <- matrices[[1]]
c <- matrices[[2]]
j <- matrices[[3]]
max.l <- .text.column.width(t, c)
w <- .width.matrix(c, max.l)
cat("\n")
cat("<table style=\"text-align:center\">")
for (row in 1:length(latex.code)) {
line <- latex.code[row]
if (substr(line, nchar(line)-2, nchar(line)) == "\\\\ ") {
r <- r + 1
.html.output.line(t, r, w, c, j)
}
else if (strpos("\\caption{", line) != -1) {
inside.caption <- substr(.trim(line), 10, nchar(.trim(line))-1)
text.title <- .trim(.remove.control.sequences(inside.caption, type="html"))
if (text.title != "") { cat("<caption><strong>",.remove.control.sequences(inside.caption, type="html"),"</strong></caption>\n", sep="") }
}
else if (strpos("\\cline{", line) != -1) {
s <- paste(" ", line, " ", sep="")
cline <- rep(0, times=how.many.columns)
while (strpos("\\cline{", s) != -1) {
from <- strpos("\\cline{", s) + 7
to <- strpos("}", s) - 1
underline.columns <- substr(s, from, to)
split.columns <- strsplit(underline.columns,"-", fixed=TRUE)[[1]]
col.underline.begin <- as.numeric(split.columns[1])
col.underline.number <- as.numeric(split.columns[2]) - col.underline.begin + 1
cline[col.underline.begin] <- col.underline.number
s <- substr(s, to+1, nchar(s))
.html.cline(cline)
}
}
else if (strpos("\\hline",line) != -1) {
if (!(is.na(latex.code[row+1]))) {
if (strpos("\\hline", latex.code[row+1]) != -1) {
.html.horizontal.line(how.many.columns)
}
else {
if (strpos("\\hline", latex.code[row-1]) == -1) {
.html.horizontal.line(how.many.columns)
}
}
}
else {
if (strpos("\\hline", latex.code[row-1]) == -1) {
.html.horizontal.line(how.many.columns)
}
}
}
}
cat("</table>\n")
}
}
.mmd.output <-
function(all.latex.code) {
how.many.tables <- 0
start.lines <- NULL
for (i in 1:length(all.latex.code)) {
if (all.latex.code[i] %in% c("")) {
how.many.tables <- how.many.tables + 1
start.lines <- c(start.lines, i)
}
}
for (table.number in 1:how.many.tables) {
if (table.number < how.many.tables) {
latex.code <- all.latex.code[start.lines[table.number]:start.lines[table.number+1]]
}
else {
latex.code <- all.latex.code[start.lines[table.number]:length(all.latex.code)]
}
how.many.columns <- .get.number.of.columns(latex.code)
r <- 0
matrices <- .matrices(latex.code, how.many.columns, type="mmd")
t <- matrices[[1]]
c <- matrices[[2]]
j <- matrices[[3]]
max.l <- .text.column.width(t, c)
w <- .width.matrix(c, max.l)
cat("\n")
cat("<table style=\"text-align:center\">")
for (row in 1:length(latex.code)) {
line <- latex.code[row]
if (substr(line, nchar(line)-2, nchar(line)) == "\\\\ ") {
r <- r + 1
.mmd.output.line(t, r, w, c, j)
}
else if (strpos("\\caption{", line) != -1) {
inside.caption <- substr(.trim(line), 10, nchar(.trim(line))-1)
text.title <- .trim(.remove.control.sequences(inside.caption, type="mmd"))
if (text.title != "") { cat("**",.remove.control.sequences(inside.caption, type="mmd"),"***\n", sep="") }
### ADD THE REQUISITE NUMBER OF |s
}
else if (strpos("\\cline{", line) != -1) {
s <- paste(" ", line, " ", sep="")
cline <- rep(0, times=how.many.columns)
while (strpos("\\cline{", s) != -1) {
from <- strpos("\\cline{", s) + 7
to <- strpos("}", s) - 1
underline.columns <- substr(s, from, to)
split.columns <- strsplit(underline.columns,"-", fixed=TRUE)[[1]]
col.underline.begin <- as.numeric(split.columns[1])
col.underline.number <- as.numeric(split.columns[2]) - col.underline.begin + 1
cline[col.underline.begin] <- col.underline.number
s <- substr(s, to+1, nchar(s))
.mmd.cline(cline)
}
}
else if (strpos("\\hline",line) != -1) {
if (!(is.na(latex.code[row+1]))) {
if (strpos("\\hline", latex.code[row+1]) != -1) {
.mmd.horizontal.line(how.many.columns)
}
else {
if (strpos("\\hline", latex.code[row-1]) == -1) {
.mmd.horizontal.line(how.many.columns)
}
}
}
else {
if (strpos("\\hline", latex.code[row-1]) == -1) {
.mmd.horizontal.line(how.many.columns)
}
}
}
}
cat("</table>\n")
}
}
.text.output.line <-
function(text.matrix, row, width.matrix, column.matrix, justification.matrix) {
real.c <- 0 # "real" column position
for (c in 1:ncol(text.matrix)) {
real.c <- real.c + column.matrix[row,c]
justify <- justification.matrix[row, c]
if (!(is.na(text.matrix[row,c]))) {
.just.cat(text.matrix[row, c], width=width.matrix[row, c], justify=justify)
if (real.c < ncol(text.matrix)) { cat(" ",sep="")}
}
}
cat("\n")
}
.html.output.line <-
function(text.matrix, row, width.matrix, column.matrix, justification.matrix) {
real.c <- 0 # "real" column position
cat("<tr>")
for (c in 1:ncol(text.matrix)) {
cm <- column.matrix[row,c]
real.c <- real.c + cm
justify <- justification.matrix[row, c]
if (!(is.na(text.matrix[row,c]))) {
cat("<td")
if (cm > 1) { cat(" colspan=\"",cm,"\"", sep="") }
if (justify == "l") { cat(" style=\"text-align:left\"", sep="") }
if (justify == "r") { cat(" style=\"text-align:right\"", sep="") }
cat(">")
.just.cat(text.matrix[row, c], width=width.matrix[row, c], justify="n")
cat("</td>")
}
}
cat("</tr>\n")
}
.mmd.output.line <-
function(text.matrix, row, width.matrix, column.matrix, justification.matrix) {
real.c <- 0 # "real" column position
for (c in 1:ncol(text.matrix)) {
cm <- column.matrix[row,c]
real.c <- real.c + cm
justify <- justification.matrix[row, c]
if (!(is.na(text.matrix[row,c]))) {
.just.cat(text.matrix[row, c], width=width.matrix[row, c], justify=justify)
for (i in 1:cm) { cat("|") }
}
}
cat("\n")
}
.width.matrix <-
function(column.matrix, max.length) {
w.matrix <- matrix(NA, nrow = nrow(column.matrix), ncol = ncol(column.matrix))
# enter single widths first
for (r in 1:nrow(column.matrix)) {
for (c in 1:ncol(column.matrix)) {
w.matrix[r,c] <- max.length[c]
}
}
# think about multicolumns
for (r in 1:nrow(column.matrix)) {
from.c <- 0 # from which column do I start hoovering up widths?
for (c in 1:ncol(column.matrix)) {
from.c <- from.c+1
if (column.matrix[r,c] >= 2) {
total.width <- 0
for (i in from.c:(from.c+column.matrix[r,c]-1)) {
total.width <- total.width + max.length[i] + 1
if (i > from.c) {
for (j in i:ncol(column.matrix)) {
if ((j+1) <= ncol(column.matrix)) {
w.matrix[r,j] <- w.matrix[r, j+1]
w.matrix[r,j+1] <- NA
}
else {
w.matrix[r,j] <- NA
}
}
}
}
w.matrix[r,c] <- total.width - 1
from.c <- from.c + column.matrix[r,c] - 1
}
}
}
return(w.matrix)
}
.text.column.width <-
function(text.matrix, column.matrix) {
max.length = rep(1, times=ncol(column.matrix))
temp.text.matrix <- text.matrix
# first, get the maximum width of single columns
for (r in 1:nrow(text.matrix)) {
for (c in 1:ncol(text.matrix)) {
real.c <- 0 # 'real' column number, adjusted for multicolumn
for (i in 1:c) {
real.c <- real.c + column.matrix[r, i]
}
if (real.c <= ncol(text.matrix)) {
if (column.matrix[r,c] == 1) { # only look at singles here
if (nchar(text.matrix[r,c]) > max.length[real.c]) { max.length[real.c] <- nchar(text.matrix[r,c]) }
}
}
}
}
# think about multicolumns
for (r in 1:nrow(text.matrix)) {
for (c in 1:ncol(text.matrix)) {
if (!is.na(column.matrix[r,c])) {
if (column.matrix[r,c] >= 2) { # only look at multicolumns
total.width <- 0
for (i in c:(c+column.matrix[r,c]-1)) {
total.width <- total.width + max.length[i]
}
while (total.width < nchar(text.matrix[r,c])) { # if does not fit into single columns, widen the maxima
relevant.maxima <- NULL
for (i in c:(c+column.matrix[r,c]-1)) {
relevant.maxima <- c(relevant.maxima, max.length[i])
if (max.length[i] == min(relevant.maxima)) {
total.width <- 0
for (j in c:(c+column.matrix[r,c]-1)) {
total.width <- total.width + max.length[j]
}
if (total.width < nchar(text.matrix[r,c])) { max.length[i] <- max.length[i] + 1 }
}
}
}
}
}
}
}
return(max.length)
}
.text.table.rows <-
function(latex.code) {
# figure out how many columns
rows <- 0
for (i in 1:length(latex.code)) {
line <- latex.code[i]
if (substr(line, nchar(line)-2, nchar(line)) == "\\\\ ") {
rows <- rows + 1
}
}
return(rows)
}
.get.number.of.columns <-
function(latex.code) {
formatting.string <- ""
for (i in 1:length(latex.code)) {
line <- latex.code[i]
if ((substr(line, 1, 7) == "\\begin{") && (regexpr("}}",line,fixed=TRUE)[[1]] != -1)) {
formatting.string <- substr(line, regexpr("}}",line,fixed=TRUE)[[1]]+2, nchar(line)-1)
}
}
columns <- 0
for (i in 1:nchar(formatting.string)) {
if (substring(formatting.string, i, i) %in% c("l", "c", "r", "D")) { columns <- columns + 1 }
}
return(columns)
}
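# Example (hypothetical header line): the alignment string after the "}}" of
# the \extracolsep preamble is scanned for column letters.
if (FALSE) {
  .get.number.of.columns("\\begin{tabular}{@{\\extracolsep{5pt}}lcc}")
  # returns 3 (one "l" and two "c" columns)
}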
.matrices <-
function(latex.code, how.many.columns, type="text") {
rows <- .text.table.rows(latex.code)
t.matrix <- matrix(NA, nrow = rows, ncol = how.many.columns)
c.matrix <- matrix(1, nrow = rows, ncol = how.many.columns)
j.matrix <- matrix(NA, nrow = rows, ncol = how.many.columns)
line.content.j <- rep("c", how.many.columns)
# put strings into matrix
row <- 0
for (i in 1:length(latex.code)) {
line <- latex.code[i]
if (substr(line, nchar(line)-2, nchar(line)) == "\\\\ ") {
row <- row + 1
line.content <- .split.line(.remove.control.sequences(line, type=type))
length(line.content) <- how.many.columns
t.matrix[row,] <- line.content
line.content.j[1] <- "l"
line.content.j[2:how.many.columns] <- "c"
line.split <- .split.line(line)
# add in column widths
line.column <- rep(1, how.many.columns)
for (j in 1:length(line.split)) {
no.of.columns <- 0
if (regexpr("\\multicolumn{", line.split[j], fixed=TRUE) != -1) {
# text
multicolumn.no <- substr(line.split[j], regexpr("{", line.split[j], fixed=TRUE)+1, regexpr("}", line.split[j], fixed=TRUE)-1)
no.of.columns <- as.numeric(multicolumn.no)
# justification
from <- regexpr("}{", line.split[j], fixed=TRUE)+2
rest.of.expression <- substr(line.split[j], from, nchar(line.split[j]))
to <- regexpr("}", rest.of.expression, fixed=TRUE) - 1
justification <- substr(rest.of.expression, 1, to)
line.content.j[j] <- justification
}
else {
no.of.columns <- 1
}
line.column[j] <- no.of.columns
}
# column
length(line.column) <- how.many.columns
c.matrix[row,] <- line.column
# justification
length(line.content.j) <- how.many.columns
j.matrix[row,] <- line.content.j
}
}
return(list(t.matrix,c.matrix,j.matrix))
}
.repeat.char <-
function(ch, rep=1, new.line=FALSE) {
if (rep >= 1) {
out.str <- ""
for (i in 1:rep) {
out.str <- paste(out.str, ch, sep="")
}
if (new.line == TRUE) { out.str <- paste(out.str, "\n", sep="")}
cat(out.str)
}
}
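# Example: prints "-----" followed by a newline.
#   .repeat.char("-", rep = 5, new.line = TRUE)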
.just.cat <- # cat that justifies the string within the given width
function(s, width, offset.char=" ", justify="c"){
len <- nchar(s)
if (width <= len) {
cat(s)
}
else {
if (justify == "c") {
offset <- (width - len) %/% 2
.repeat.char(offset.char, offset)
cat(s)
.repeat.char(offset.char, width - len - offset)
}
else if (justify == "l") {
cat(s)
.repeat.char(offset.char, width - len)
}
else if (justify == "r") {
.repeat.char(offset.char, width - len)
cat(s)
}
else if (justify == "n") { # no justification, just output
cat(s)
}
}
}
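# Example (hypothetical width): centering a 2-character string in width 6
# pads with two spaces on each side.
#   .just.cat("ab", width = 6, justify = "c")   # prints "  ab  "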
############## OUTPUT INTO FILE ##############
### !!!! - add packages
.output.tex <-
function (file.out, content, header) {
header.tex <- "\\documentclass{article}\n"
required.latex.packages <- NULL
if (.format.dec.mark.align==TRUE) { required.latex.packages <- c(required.latex.packages, "dcolumn") }
if (.format.floating.environment=="sidewaystable") { required.latex.packages <- c(required.latex.packages, "rotating") }
if (!is.null(required.latex.packages)) {
for (i in 1:length(required.latex.packages)) {
header.tex <- paste(header.tex, "\\usepackage{", required.latex.packages[i], "}\n", sep="")
}
}
if (header == TRUE) {
cat(
header.tex,
"\\begin{document}",
paste(content, collapse="\n"),
"\\end{document}\n",
sep="\n",
file = file.out
)
} else {
cat(
paste(content, collapse="\n"),
sep="\n",
file = file.out
)
}
}
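# A sketch of the wrapper produced when header == TRUE (here assuming dcolumn
# is pulled in by decimal-mark alignment); the table code goes in the middle:
#
#   \documentclass{article}
#   \usepackage{dcolumn}
#   \begin{document}
#   ... table code ...
#   \end{document}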
.output.html <-
function (file.out, content, header) {
if (header == TRUE) {
cat(
"<!DOCTYPE html>",
"<html>",
"<body>",
paste(content, collapse="\n"),
"</body>",
"</html>\n",
sep="\n",
file = file.out
)
} else {
cat(
paste(content, collapse="\n"),
sep="\n",
file = file.out
)
}
}
.output.txt <-
function (file.out, content, header) {
cat(
paste(content, collapse="\n"),
sep="\n",
file = file.out
)
}
# !!! - work on this more in a later version
.output.pdf <-
function (file.out, content) {
tex.temp.file <- tempfile("temp", fileext=".tex")
.output.tex(tex.temp.file, content, header=TRUE)
capture.output(system(paste( "pdflatex --interaction=nonstopmode", shQuote(tex.temp.file)), show.output.on.console = FALSE ))
}
.output.file <-
function (out, latex.code, text.out, html.out, type, out.header) {
for (i in 1:length(out)) {
if (.get.file.extension(out[i])=="tex") { .output.tex(out[i], latex.code, out.header) }
# else if (.get.file.extension(out[i])=="pdf") { .output.pdf(out[i], latex.code) }
else if (.get.file.extension(out[i])=="txt") { .output.txt(out[i], text.out, out.header) }
else if ((.get.file.extension(out[i])=="html") || (.get.file.extension(out[i])=="htm")) {
.output.html(out[i], html.out, out.header)
}
else { # if another extension, do latex or text based on 'type'
if (type == "latex") { .output.tex(out[i], latex.code, out.header) }
else if (type == "text") { .output.txt(out[i], text.out, out.header) }
else if (type == "html") { .output.html(out[i], html.out, out.header) }
}
}
}
###########################################
.get.objects <-
function(list.of.objects) {
objects <- list()
for (i in 1:length(list.of.objects)) {
current.object <- list.of.objects[[i]]
if (class(current.object)[1] == "list") {
objects <- append(objects, .get.objects(current.object))
}
else {
objects <- append(objects, list(current.object))
}
}
return(objects)
}
# extract object names from the ... string
.get.object.names <- function(s) {
object.names <- NULL
inside <- .inside.bracket(s)
for (i in 1:length(inside)) {
if (substr(inside[i],1,nchar("list("))=="list(") {
object.names <- c(object.names, .get.object.names(inside[i]))
}
else {
object.names <- c(object.names, inside[i])
}
}
return(object.names)
}
###########################################
## invisible output
invisible.output <- NULL
latex.code <- NULL
text.out <- NULL
## error handling
error.present <- "\n"
# get object names --- !!! CHECK ORDER
object.names.string <- deparse(substitute(list(...))) ### for further processing to extract object names
.global.object.names.all <- .get.object.names(object.names.string)
# get objects
list.of.objects <- list(...)
objects <- as.list(.get.objects(list.of.objects))
how.many.objects <- length(objects)
# should we include a summary statistics table when given a data frame
.global.summary <- rep(TRUE, times=how.many.objects)
## check if argument input is ok
.format.rownames <- TRUE
.format.colnames <- TRUE
# flip the table?
.format.flip <- flip
if (how.many.objects < 1) { error.present <- c(error.present, "% Error: At least one object is required.\n") }
else {
# identify objects
for (i in seq(1:how.many.objects)) {
if (is.data.frame(objects[[i]])) {
obj.rownames <- rownames(objects[[i]])
if (is.null(obj.rownames)) { .format.rownames <- FALSE }
}
else if ((is.matrix(objects[[i]])) && (class(objects[[i]])[1] != "coeftest")) {
.global.summary[i] <- FALSE # content output default for matrices
obj.rownames <- rownames(objects[[i]])
obj.colnames <- colnames(objects[[i]])
if (is.null(obj.rownames)) {
if (.format.flip == FALSE) { .format.rownames <- FALSE }
else { .format.colnames <- FALSE }
obj.rownames <- as.character(c(1:nrow(objects[[i]])))
}
if (is.null(obj.colnames)) {
if (.format.flip == FALSE) { .format.colnames <- FALSE }
else { .format.rownames <- FALSE }
obj.colnames <- as.character(c(1:ncol(objects[[i]])))
}
objects[[i]] <- as.data.frame(objects[[i]])
colnames(objects[[i]]) <- obj.colnames
}
else if (is.vector(objects[[i]])) {
.global.summary[i] <- FALSE # content output default for vectors
obj.names <- names(objects[[i]])
if (is.null(obj.names)) {
.format.colnames <- FALSE
.format.rownames <- FALSE
obj.names <- as.character(c(1:length(objects[[i]])))
}
objects[[i]] <- as.data.frame(t(objects[[i]]))
names(objects[[i]]) <- obj.names
if (.format.flip == TRUE) { .format.colnames <- FALSE }
else { .format.rownames <- FALSE }
}
if (!is.data.frame(objects[[i]])) {
# if zelig$result relevant, identify this automatically
if (class(objects[[i]])[1] %in% c("coeftest","lmerMod","glmerMod","nlmerMod","fGARCH")) { # use this to eliminate lmer, glmer, nlmer
if (.model.identify(objects[[i]])=="unknown") { error.present <- c(error.present, "% Error: Unrecognized object type.\n",i) }
}
else {
if (!is.null(objects[[i]]$zelig.call)) {
  formula <- NULL   # initialize so we do not pick up stats::formula below
  if (!is.null(objects[[i]]$formula)) { formula <- objects[[i]]$formula }
  objects[[i]] <- objects[[i]]$result
  if (!is.null(formula)) { objects[[i]]$formula2 <- formula }
}
###
if (is.atomic(objects[[i]]) && (!is.null(objects[[i]]))) { error.present <- c(error.present, "% Error: Unrecognized object type.\n") }
else if (.model.identify(objects[[i]])=="unknown") { error.present <- c(error.present, "% Error: Unrecognized object type.\n") }
else if (.model.identify(objects[[i]])=="unsupported zelig") { error.present <- c(error.present, "% Error: Unsupported 'zelig' model.\n") }
}
}
}
}
if (!is.character(type)) { error.present <- c(error.present, "% Error: Argument 'type' must be of type 'character.'\n") }
if (length(type) != 1) { error.present <- c(error.present, "% Error: Argument 'type' must be of length 1.'\n") }
if (is.character(type)) {
if (!(tolower(type) %in% c("latex", "text", "html"))) {
error.present <- c(error.present, "% Error: 'style' must be either 'latex' (default), 'html' or 'text.'\n")
}
}
if (!is.character(title)) { error.present <- c(error.present, "% Error: Argument 'title' must be of type 'character.'\n") }
if (!is.character(style)) { error.present <- c(error.present, "% Error: Argument 'style' must be of type 'character.'\n") }
if (length(style) != 1) { error.present <- c(error.present, "% Error: Argument 'style' must be of length 1.'\n") }
if (is.character(style)) {
if (!(tolower(style) %in% c("all","all2","default","commadefault","aer","ajps","ajs","asq","asr","apsr","demography","io","jpam","qje"))) {
error.present <- c(error.present, "% Error: 'style' not recognized'\n")
}
}
if ((!is.logical(summary)) && (!is.null(summary))) { error.present <- c(error.present, "% Error: Argument 'summary' must be NULL, or of type 'logical' (TRUE/FALSE) \n") }
if ((!is.character(out)) && (!is.null(out))) { error.present <- c(error.present, "% Error: Argument 'out' must be NULL (default), or a vector of type 'character.' \n") }
if (!is.logical(out.header)) { error.present <- c(error.present, "% Error: Argument 'out.header' be of type 'logical' (TRUE/FALSE) \n") }
if ((!is.numeric(column.separate)) && (!is.null(column.separate))) { error.present <- c(error.present, "% Error: Argument 'column.separate' must be NULL (default), a vector of type 'numeric.'\n") }
if ((!is.character(column.labels)) && (!is.null(column.labels))) { error.present <- c(error.present, "% Error: Argument 'column.labels' must be NULL (default), or a vector of type 'character.'\n") }
if ((!is.character(covariate.labels)) && (!is.null(covariate.labels))) { error.present <- c(error.present, "% Error: Argument 'covariate.labels' must be NULL (default), or a vector of type 'character.'\n") }
if ((!is.character(dep.var.labels)) && (!is.null(dep.var.labels))) { error.present <- c(error.present, "% Error: Argument 'dep.var.labels' must be NULL (default), or a vector of type 'character.'\n") }
if ((!is.logical(dep.var.labels.include)) && (!is.null(dep.var.labels.include))) { error.present <- c(error.present, "% Error: Argument 'dep.var.labels.include' must be NULL (default), or of type 'logical' (TRUE/FALSE) \n") }
if ((length(dep.var.labels.include) != 1) && (!is.null(dep.var.labels.include))) { error.present <- c(error.present, "% Error: Argument 'dep.var.labels.include' must be of length 1.'\n") }
if ((!is.character(dep.var.caption)) && (!is.null(dep.var.caption))) { error.present <- c(error.present, "% Error: Argument 'dep.var.caption must be NULL (default), or of type 'character.'\n") }
if ((length(dep.var.caption) != 1) && (!is.null(dep.var.caption))) { error.present <- c(error.present, "% Error: Argument 'dep.var.caption' must be of length 1.'\n") }
coef <- .turn.into.list(coef); se <- .turn.into.list(se)
t <- .turn.into.list(t); p <- .turn.into.list(p)
if ((!.is.list.numeric(coef))) { error.present <- c(error.present, "% Error: Argument 'coef' must be NULL (default), or a list of numeric vectors.\n") }
if ((!.is.list.numeric(se))) { error.present <- c(error.present, "% Error: Argument 'se' must be NULL (default), or a list of numeric vectors.\n") }
if ((!.is.list.numeric(t))) { error.present <- c(error.present, "% Error: Argument 't' must be NULL (default), or a list of numeric vectors.\n") }
if ((!.is.list.numeric(p))) { error.present <- c(error.present, "% Error: Argument 'p' must be NULL (default), or a list of numeric vectors.\n") }
if (!is.logical(t.auto)) { error.present <- c(error.present, "% Error: Argument 't.auto' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(t.auto) != 1) { error.present <- c(error.present, "% Error: Argument 't.auto' must be of length 1.'\n") }
if (!is.logical(p.auto)) { error.present <- c(error.present, "% Error: Argument 'p.auto' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(p.auto) != 1) { error.present <- c(error.present, "% Error: Argument 'p.auto' must be of length 1.'\n") }
if (!is.logical(align)) { error.present <- c(error.present, "% Error: Argument 'align' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(align) != 1) { error.present <- c(error.present, "% Error: Argument 'align' must be of length 1.'\n") }
if (!is.logical(ci)) { error.present <- c(error.present, "% Error: Argument 'ci' must be of type 'logical' (TRUE/FALSE) \n") }
ci.custom <- .turn.into.list(ci.custom)
if ((!.is.list.numeric.matrix(ci.custom))) { error.present <- c(error.present, "% Error: Argument 'ci.custom' must be NULL (default), or a list of numeric matrices. \n") }
else if (!is.null(ci.custom)) {
l <- length(ci.custom)
bad.dimension <- FALSE
for (i in 1:l) {
if (!is.null(ci.custom[[i]])) {
if (ncol(ci.custom[[i]]) != 2 ) { bad.dimension <- TRUE }
}
}
if (bad.dimension) { error.present <- c(error.present, "% Error: The numeric matrix in 'ci.custom' must have two columns (lower bound and upper bound, respectively). \n") }
}
if (!is.numeric(ci.level)) { error.present <- c(error.present, "% Error: Argument 'ci.level' must be of type 'numeric.' \n") }
if ((!is.character(ci.separator)) && (!is.null(ci.separator))) { error.present <- c(error.present, "% Error: Argument 'ci.separator' must be NULL (default), or of type 'character.'\n") }
if ((length(ci.separator) != 1) && (!is.null(ci.separator))) { error.present <- c(error.present, "% Error: Argument 'ci.separator' must be of length 1.'\n") }
add.lines <- .turn.into.list(add.lines)
if ((!is.list(add.lines)) && (!is.null(add.lines))) { error.present <- c(error.present, "% Error: Argument 'add.lines' must be NULL (default), or a list of vectors. \n") }
if (!is.null(add.lines)) {
if (length(add.lines) < 1) { error.present <- c(error.present, "% Error: The list in argument 'add.lines' must be of length 1 or more. \n") }
if (!all(unlist(lapply(add.lines, is.vector)))) { error.present <- c(error.present, "% Error: Argument 'add.lines' must be NULL (default), or a list of vectors. \n") }
}
if ((!is.function(apply.coef)) && (!is.null(apply.coef))) { error.present <- c(error.present, "% Error: Argument 'apply.coef' must be NULL (default), or a function.'\n") }
if ((!is.function(apply.se)) && (!is.null(apply.se))) { error.present <- c(error.present, "% Error: Argument 'apply.se' must be NULL (default), or a function.'\n") }
if ((!is.function(apply.t)) && (!is.null(apply.t))) { error.present <- c(error.present, "% Error: Argument 'apply.t' must be NULL (default), or a function.'\n") }
if ((!is.function(apply.p)) && (!is.null(apply.p))) { error.present <- c(error.present, "% Error: Argument 'apply.p' must be NULL (default), or a function.'\n") }
if ((!is.function(apply.ci)) && (!is.null(apply.ci))) { error.present <- c(error.present, "% Error: Argument 'apply.ci' must be NULL (default), or a function.'\n") }
if (!is.character(column.sep.width)) { error.present <- c(error.present, "% Error: Argument 'column.sep.width' must be of type 'character.'\n") }
if (length(column.sep.width) != 1) { error.present <- c(error.present, "% Error: Argument 'column.sep.width' must be of length 1.'\n") }
if ((!is.character(decimal.mark)) && (!is.null(decimal.mark))) { error.present <- c(error.present, "% Error: Argument 'decimal.mark' must be NULL (default), or of type 'character.'\n") }
if ((length(decimal.mark) != 1) && (!is.null(decimal.mark))) { error.present <- c(error.present, "% Error: Argument 'decimal.mark' must be of length 1.'\n") }
if (!is.logical(df)) { error.present <- c(error.present, "% Error: Argument 'df' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(df) != 1) { error.present <- c(error.present, "% Error: Argument 'df' must be of length 1.'\n") }
if ((!is.numeric(digit.separate)) && (!is.null(digit.separate)) && (!is.character(digit.separate))) { error.present <- c(error.present, "% Error: Argument 'digit.separate' must be NULL (default), a vector of type 'numeric,' or of type 'character.' \n") }
if (is.character(digit.separate)) {
if (!(digit.separate %in% c("lakh","japan","china"))) { error.present <- c(error.present, "% Error: If argument 'digit.separate' is of type character, it must be one of \"lakh\"/\"china\"/\"japan\".\n") }
}
if ((!is.character(digit.separator)) && (!is.null(digit.separator))) { error.present <- c(error.present, "% Error: Argument 'digit.separator' must be NULL (default), or of type 'character.'\n") }
if ((length(digit.separator) != 1) && (!is.null(digit.separator))) { error.present <- c(error.present, "% Error: Argument 'digit.separator' must be of length 1.'\n") }
if ((!is.numeric(digits)) && (!is.null(digits))) {
if (!is.na(digits)) { error.present <- c(error.present, "% Error: Argument 'digits' must be NULL (default), or of type 'numeric.'\n") }
}
if ((length(digits) != 1) && (!is.null(digits))) {
if (!is.na(digits)) { error.present <- c(error.present, "% Error: Argument 'digits' must be of length 1.'\n") }
}
if (!is.null(digits)) {
if (!is.na(digits)) {
if ((digits<0) && (is.numeric(digits))) { error.present <- c(error.present, "% Error: Argument 'digits' must be >= 0.'\n") }
}
}
if ((!is.numeric(digits.extra)) && (!is.null(digits.extra))) { error.present <- c(error.present, "% Error: Argument 'digits.extra' must be NULL (default), or of type 'numeric.'\n") }
if ((length(digits.extra) != 1) && (!is.null(digits.extra))) { error.present <- c(error.present, "% Error: Argument 'digits.extra' must be of length 1.'\n") }
if (!is.null(digits.extra)) {
if ((digits.extra<0) && (is.numeric(digits.extra))) { error.present <- c(error.present, "% Error: Argument 'digits.extra' must be >= 0.'\n") }
}
if (!is.logical(flip)) { error.present <- c(error.present, "% Error: Argument 'flip' must be of type 'logical' (TRUE/FALSE) \n") }
if ((length(flip) != 1) && (!is.null(flip))) { error.present <- c(error.present, "% Error: Argument 'flip' must be of length 1.'\n") }
if (!is.logical(float)) { error.present <- c(error.present, "% Error: Argument 'float' must be of type 'logical' (TRUE/FALSE) \n") }
if ((length(float) != 1) && (!is.null(float))) { error.present <- c(error.present, "% Error: Argument 'float' must be of length 1.'\n") }
if (!(float.env %in% c("table","table*","sidewaystable"))) { error.present <- c(error.present, "% Error: Argument 'float.env' must be one of \"table\", \"table*\" or \"sidewaystable\".\n") }
if (length(float.env) != 1) { error.present <- c(error.present, "% Error: Argument 'float.env' must be of length 1.'\n") }
if (!is.null(font.size)) {
if (!(font.size %in% c("tiny","scriptsize","footnotesize","small","normalsize","large","Large","LARGE","huge","Huge"))) { error.present <- c(error.present, "% Error: Argument 'font.size' must be NULL (default), or one of the available font sizes. See documentation.") }
}
if ((length(font.size) != 1) && (!is.null(font.size))) { error.present <- c(error.present, "% Error: Argument 'font.size' must be of length 1.'\n") }
if (!is.logical(header)) { error.present <- c(error.present, "% Error: Argument 'header' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(header) != 1) { error.present <- c(error.present, "% Error: Argument 'header' must be of length 1.'\n") }
if ((!is.logical(initial.zero)) && (!is.null(initial.zero))) { error.present <- c(error.present, "% Error: Argument 'initial.zero' must be NULL (default), or of type 'logical' (TRUE/FALSE) \n") }
if ((length(initial.zero) != 1) && (!is.null(initial.zero))) { error.present <- c(error.present, "% Error: Argument 'initial.zero' must be of length 1.'\n") }
if (!is.logical(intercept.bottom)) { error.present <- c(error.present, "% Error: Argument 'intercept.bottom' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(intercept.bottom) != 1) { error.present <- c(error.present, "% Error: Argument 'intercept.bottom' must be of length 1.'\n") }
if (!is.logical(intercept.top)) { error.present <- c(error.present, "% Error: Argument 'intercept.top' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(intercept.top) != 1) { error.present <- c(error.present, "% Error: Argument 'intercept.top' must be of length 1.'\n") }
if (intercept.top && intercept.bottom) { error.present <- c(error.present, "% Error: Arguments 'intercept.bottom' and 'intercept.top' cannot both be TRUE. \n")}
if ((!is.character(keep)) && (!is.numeric(keep)) && (!is.null(keep))) { error.present <- c(error.present, "% Error: Argument 'keep' must be NULL (default; all variables kept), or a vector of type 'character' or 'numeric.'\n") }
if ((!is.character(keep.stat)) && (!is.null(keep.stat))) { error.present <- c(error.present, "% Error: Argument 'keep.stat' must be NULL (default), or a vector of type 'character.'\n") }
keep.stat.acceptable <- c("all","n","rsq","adj.rsq","max.rsq","ll","aic","bic","scale","ubre","rho(se)*","Mills(se)*","sigma2","ser","f","theta","chi2","wald","lr","logrank","null.dev","res.dev") # list of statistic codes that are acceptable
if (is.character(keep.stat)) {
is.acceptable <- unique(tolower(keep.stat) %in% keep.stat.acceptable)
if (length(is.acceptable)>1) { is.acceptable <- FALSE }
if (!is.acceptable) { error.present <- c(error.present, "% Error: Unknown statistic in 'keep.stat' argument.\n") }
}
if (!is.character(label)) { error.present <- c(error.present, "% Error: Argument 'label' must be of type 'character.'\n") }
if ((!is.logical(model.names)) && (!is.null(model.names))) { error.present <- c(error.present, "% Error: Argument 'model.names' must be NULL (default), or of type 'logical' (TRUE/FALSE) \n") }
if ((length(model.names) != 1) && (!is.null(model.names))) { error.present <- c(error.present, "% Error: Argument 'model.names' must be of length 1.'\n") }
if ((!is.logical(model.numbers)) && (!is.null(model.numbers))) { error.present <- c(error.present, "% Error: Argument 'model.numbers' must be NULL (default), or of type 'logical' (TRUE/FALSE) \n") }
if ((length(model.numbers) != 1) && (!is.null(model.numbers))) { error.present <- c(error.present, "% Error: Argument 'model.numbers' must be of length 1.'\n") }
if (!is.logical(multicolumn)) { error.present <- c(error.present, "% Error: Argument 'multicolumn' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(multicolumn) != 1) { error.present <- c(error.present, "% Error: Argument 'multicolumn' must be of length 1.'\n") }
if ((!is.logical(no.space)) && (!is.null(no.space))) { error.present <- c(error.present, "% Error: Argument 'no.space' must be NULL (default), or of type 'logical' (TRUE/FALSE) \n") }
if ((length(no.space) != 1) && (!is.null(no.space))) { error.present <- c(error.present, "% Error: Argument 'no.space' must be of length 1.'\n") }
if ((!is.character(notes)) && (!is.null(notes))) { error.present <- c(error.present, "% Error: Argument 'notes' must be NULL (default), or a vector of type 'character.'\n") }
if (!is.null(notes.align)) {
if (!(tolower(notes.align) %in% c("l","c","r"))) { error.present <- c(error.present, "% Error: Argument 'notes.align' must be NULL (default), or \"l\"/\"c\"/\"r\".\n") }
}
if ((length(notes.align) != 1) && (!is.null(notes.align))) { error.present <- c(error.present, "% Error: Argument 'notes.align' must be of length 1.'\n") }
if (!is.logical(notes.append)) { error.present <- c(error.present, "% Error: Argument 'notes.append' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(notes.append) != 1) { error.present <- c(error.present, "% Error: Argument 'notes.append' must be of length 1.'\n") }
if ((!is.character(notes.label)) && (!is.null(notes.label))) { error.present <- c(error.present, "% Error: Argument 'notes.label' must be NULL (default), or of type 'character.'\n") }
if ((length(notes.label) != 1) && (!is.null(notes.label))) { error.present <- c(error.present, "% Error: Argument 'notes.label' must be of length 1.'\n") }
if (!is.logical(object.names)) { error.present <- c(error.present, "% Error: Argument 'object.names' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(object.names) != 1) { error.present <- c(error.present, "% Error: Argument 'object.names' must be of length 1.'\n") }
if ((!is.character(omit)) && (!is.numeric(omit)) && (!is.null(omit))) { error.present <- c(error.present, "% Error: Argument 'omit' must be NULL (default; no omissions), or a vector of type 'character' or 'numeric.'\n") }
if ((!is.character(omit.labels)) && (!is.null(omit.labels))) { error.present <- c(error.present, "% Error: Argument 'omit.labels' must be NULL (default; no labels), or a vector of type 'character.'\n") }
if (!is.null(omit.labels)) {
if (length(omit) != length(omit.labels)) { error.present <- c(error.present, "% Error: Argument 'omit.labels' must be NULL (default; no labels), or equal in length to 'omit.'\n") }
}
if ((!is.character(omit.stat)) && (!is.null(omit.stat))) { error.present <- c(error.present, "% Error: Argument 'omit.stat' must be NULL (default), or a vector of type 'character.'\n") }
omit.stat.acceptable <- c("all","n","rsq","adj.rsq","max.rsq","ll","aic","bic","scale","ubre","rho(se)*","Mills(se)*","sigma2","ser","f","theta","chi2","wald","lr","logrank","null.dev","res.dev") # list of statistic codes that are acceptable
if (is.character(omit.stat)) {
is.acceptable <- unique(tolower(omit.stat) %in% omit.stat.acceptable)
if (length(is.acceptable)>1) { is.acceptable <- FALSE }
if (!is.acceptable) { error.present <- c(error.present, "% Error: Unknown statistic in 'omit.stat' argument.\n") }
}
if ((!is.character(omit.summary.stat)) && (!is.null(omit.summary.stat))) { error.present <- c(error.present, "% Error: Argument 'omit.summary.stat' must be NULL (default), or a vector of type 'character.'\n") }
omit.summary.stat.acceptable <- c("n","mean","sd","min","p25","median","p75","max")
if (is.character(omit.summary.stat)) {
is.acceptable <- unique(tolower(omit.summary.stat) %in% omit.summary.stat.acceptable)
if (length(is.acceptable)>1) { is.acceptable <- FALSE }
if (!is.acceptable) { error.present <- c(error.present, "% Error: Unknown statistic in 'omit.summary.stat' argument.\n") }
}
if ((!is.character(omit.yes.no)) && (!is.null(omit.yes.no))) { error.present <- c(error.present, "% Error: Argument 'omit.yes.no' must be a vector of type 'character.'\n") }
if ((length(omit.yes.no) != 2) && (!is.null(omit.yes.no))) { error.present <- c(error.present, "% Error: Argument 'omit.yes.no' must be of length 2.'\n") }
if ((!is.character(order)) && (!is.numeric(order)) && (!is.null(order))) { error.present <- c(error.present, "% Error: Argument 'order' must be NULL (default; no reordering), or a vector of type 'character' or 'numeric.'\n") }
if (!is.logical(ord.intercepts)) { error.present <- c(error.present, "% Error: Argument 'ord.intercepts' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(ord.intercepts) != 1) { error.present <- c(error.present, "% Error: Argument 'ord.intercepts' must be of length 1.'\n") }
if (!is.logical(perl)) { error.present <- c(error.present, "% Error: Argument 'perl' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(perl) != 1) { error.present <- c(error.present, "% Error: Argument 'perl' must be of length 1.'\n") }
if (!(is.logical(colnames)) && (!is.null(colnames))) { error.present <- c(error.present, "% Error: Argument 'colnames' must be NULL, or of type 'logical' (TRUE/FALSE) \n") }
if ((length(colnames) != 1) && (!is.null(colnames))) { error.present <- c(error.present, "% Error: Argument 'colnames' must be of length 1.'\n") }
if (!(is.logical(rownames)) && (!is.null(rownames))) { error.present <- c(error.present, "% Error: Argument 'rownames' must be NULL, or of type 'logical' (TRUE/FALSE) \n") }
if ((length(rownames) != 1) && (!is.null(rownames))) { error.present <- c(error.present, "% Error: Argument 'rownames' must be of length 1.'\n") }
if (!is.character(rq.se)) { error.present <- c(error.present, "% Error: Argument 'rq.se' must be of type 'character.' \n") }
if (length(rq.se) != 1) { error.present <- c(error.present, "% Error: Argument 'rq.se' must be of length 1.'\n") }
if (is.character(rq.se)) {
if (!(rq.se %in% c("iid", "nid", "ker", "boot"))) { error.present <- c(error.present, "% Error: Argument 'rq.se' must be one of: 'iid', 'nid', 'ker' or 'boot.' \n") }
}
if (!is.logical(selection.equation)) { error.present <- c(error.present, "% Error: Argument 'selection.equation' must be of type 'logical' (TRUE/FALSE) \n") }
if ((length(selection.equation) != 1) && (!is.null(selection.equation))) { error.present <- c(error.present, "% Error: Argument 'selection.equation' must be of length 1.'\n") }
if (!is.logical(single.row)) { error.present <- c(error.present, "% Error: Argument 'single.row' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(single.row) != 1) { error.present <- c(error.present, "% Error: Argument 'single.row' must be of length 1.'\n") }
if ((!is.character(star.char)) && (!is.null(star.char))) { error.present <- c(error.present, "% Error: Argument 'star.char' must be NULL (default), or of type 'character.'\n") }
if ((!(length(star.char) >= 1)) && (!is.null(star.char))) { error.present <- c(error.present, "% Error: Argument 'star.char' must be at least of length 1.'\n") }
if (!is.null(star.cutoffs)) {
if (sum(is.na(star.cutoffs)) != length(star.cutoffs)) {
if (!is.numeric(star.cutoffs)) { error.present <- c(error.present, "% Error: Argument 'star.cutoffs' must be NULL (default), or a vector of type 'numeric.'\n") }
}
if ( !(length(star.cutoffs) >= 1) && (!is.null(star.cutoffs))) { error.present <- c(error.present, "% Error: Argument 'star.cutoffs' must be a vector with at least one element.\n") }
if (sum(star.cutoffs[!is.na(star.cutoffs)] == sort(star.cutoffs, decreasing = TRUE, na.last=NA)) != length(star.cutoffs[!is.na(star.cutoffs)])) { error.present <- c(error.present, "% Error: The elements of 'star.cutoffs' must be in weakly decreasing order.\n") }
}
if ((!is.character(summary.stat)) && (!is.null(summary.stat))) { error.present <- c(error.present, "% Error: Argument 'summary.stat' must be NULL (default), or a vector of type 'character.'\n") }
summary.stat.acceptable <- c("n","mean","sd","min","p25","median","p75","max") # list of statistic codes that are acceptable
if (is.character(summary.stat)) {
is.acceptable <- unique(tolower(summary.stat) %in% summary.stat.acceptable)
if (length(is.acceptable)>1) { is.acceptable <- FALSE }
if (!is.acceptable) { error.present <- c(error.present, "% Error: Unknown statistic in 'summary.stat' argument.\n") }
}
if ((!is.character(table.layout)) && (!is.null(table.layout))) { error.present <- c(error.present, "% Error: Argument 'table.layout' must be of type 'character.'\n") }
if ((length(table.layout) != 1) && (!is.null(table.layout))) { error.present <- c(error.present, "% Error: Argument 'table.layout' must be of length 1.'\n") }
if (is.character(table.layout) && (length(table.layout)==1)) { # test if report only contains allowed letters
layout.error <- FALSE
for (i in 1:nchar(table.layout)) {
ch <- substring(table.layout,i,i)
if (!(ch %in% c("=","-","!","l","d","m","c","#","b","t","o","a","s","n"))) (layout.error <- TRUE)
}
if (layout.error) { error.present <- c(error.present, "% Error: Invalid characters in 'table.layout'. See package documentation. \n") }
}
if ((!is.character(omit.table.layout)) && (!is.null(omit.table.layout))) { error.present <- c(error.present, "% Error: Argument 'omit.table.layout' must be of type 'character.'\n") }
if ((length(omit.table.layout) != 1) && (!is.null(omit.table.layout))) { error.present <- c(error.present, "% Error: Argument 'omit.table.layout' must be of length 1.'\n") }
if (is.character(omit.table.layout) && (length(omit.table.layout)==1)) { # test if report only contains allowed letters
layout.error <- FALSE
for (i in 1:nchar(omit.table.layout)) {
ch <- substring(omit.table.layout,i,i)
if (!(ch %in% c("=","-","!","l","d","m","c","#","b","t","o","a","s","n"))) (layout.error <- TRUE)
}
if (layout.error) { error.present <- c(error.present, "% Error: Invalid characters in 'omit.table.layout'. See package documentation. \n") }
}
if (!is.character(table.placement)) { error.present <- c(error.present, "% Error: Argument 'table.placement' must be of type 'character.'\n") }
if (length(table.placement) != 1) { error.present <- c(error.present, "% Error: Argument 'table.placement' must be of length 1.'\n") }
if (is.character(table.placement) && (length(table.placement)==1)) { # test if table.placement only contains allowed letters
tp.error <- FALSE
for (i in 1:nchar(table.placement)) {
ch <- substring(table.placement,i,i)
if (!(ch %in% c("h","t","b","p","!","H"))) (tp.error <- TRUE)
}
if (tp.error) { error.present <- c(error.present, "% Error: Argument 'table.placement' can only consist of \"h\",\"t\",\"b\",\"p\",\"!\",\"H\".\n") }
}
if ((!is.character(report)) && (!is.null(report))) { error.present <- c(error.present, "% Error: Argument 'report' must be of type 'character.'\n") }
if ((length(report) != 1) && (!is.null(report))) { error.present <- c(error.present, "% Error: Argument 'report' must be of length 1.'\n") }
if (is.character(report) && (length(report)==1)) { # test if report only contains allowed letters
report.error <- FALSE
for (i in 1:nchar(report)) {
ch <- substring(report,i,i)
if (!(ch %in% c("v","c","s","t","p","*"))) (report.error <- TRUE)
}
if (report.error) { error.present <- c(error.present, "% Error: Argument 'report' can only consist of \"v\",\"c\",\"s\",\"t\",\"p\",\"*\".\n") }
}
if (!is.logical(zero.component)) { error.present <- c(error.present, "% Error: Argument 'zero.component' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(zero.component) != 1) { error.present <- c(error.present, "% Error: Argument 'zero.component' must be of length 1.'\n") }
if (!is.logical(summary.logical)) { error.present <- c(error.present, "% Error: Argument 'summary.logical' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(summary.logical) != 1) { error.present <- c(error.present, "% Error: Argument 'summary.logical' must be of length 1.'\n") }
if (!is.logical(nobs)) { error.present <- c(error.present, "% Error: Argument 'nobs' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(nobs) != 1) { error.present <- c(error.present, "% Error: Argument 'nobs' must be of length 1.'\n") }
if (!is.logical(mean.sd)) { error.present <- c(error.present, "% Error: Argument 'mean.sd' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(mean.sd) != 1) { error.present <- c(error.present, "% Error: Argument 'mean.sd' must be of length 1.'\n") }
if (!is.logical(min.max)) { error.present <- c(error.present, "% Error: Argument 'min.max' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(min.max) != 1) { error.present <- c(error.present, "% Error: Argument 'min.max' must be of length 1.'\n") }
if (!is.logical(median)) { error.present <- c(error.present, "% Error: Argument 'median' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(median) != 1) { error.present <- c(error.present, "% Error: Argument 'median' must be of length 1.'\n") }
if (!is.logical(iqr)) { error.present <- c(error.present, "% Error: Argument 'iqr' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(iqr) != 1) { error.present <- c(error.present, "% Error: Argument 'iqr' must be of length 1.'\n") }
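# Note on the checks above (illustrative): error.present starts as a length-1
# vector and every failed check appends one "% Error: ..." string, so the
# table below is only built while length(error.present) == 1. For example,
# keep.stat = 42 fails the is.character(keep.stat) test and suppresses all
# output generation further down.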
## decide what style to use here: start with all settings, and then make adjustment based on desired journal
# initialize pseudo-global variables at NULL
.summary.object <- NULL
.global.dependent.variables.written <- NULL
.global.coefficients <- NULL
.format.model.left <- NULL
.format.model.right <- NULL
.which.variable.label <- NULL
.return.value <- NULL
.publish.horizontal.line <- NULL
.table.part.published <- NULL
.format.omit.table <- NULL
# info about the package and author
.global.package.name <- "stargazer"
.global.package.version <- "5.2.2"
.global.package.author.name <- "Marek Hlavac"
.global.package.author.affiliation <- "Harvard University"
.global.package.author.email <- "hlavac at fas.harvard.edu"
# statistics (.global variables)
.global.formulas.rhs <- NULL
.global.models <- NULL
.global.dependent.variables <- NULL
.global.coefficient.variables <- NULL
.global.coef.vars.by.model <- NULL ## list of coefficient variables by model - to be used by omit, omit.labels, etc
.global.std.errors <- NULL
.global.ci.lb <- NULL
.global.ci.rb <- NULL
.global.t.stats <- NULL
.global.p.values <- NULL
.global.N <- NULL
.global.LL <- NULL
.global.R2 <- NULL
.global.mills <- NULL
.global.max.R2 <- NULL # maximum possible R2
.global.adj.R2 <- NULL
.global.AIC <- NULL
.global.BIC <- NULL
.global.scale <- NULL # estimated scale parameter (gee)
.global.UBRE <- NULL # UBRE score (GAM)
.global.sigma2 <- NULL # sigma2 from arima
.global.theta <- NULL # theta from negative binomial
.global.rho <- NULL
.global.sel.equation <- NULL # selection equation, as opposed to the default outcome equation, in heckit and selection models
.global.zero.component <- NULL # zero, as opposed to count, component in hurdle and zeroinfl
# with degrees of freedom
.global.SER <- NULL # residual standard error; standard error of the regression
.global.F.stat <- NULL # F-statistic for the regression
.global.chi.stat <- NULL # chi-squared statistic
.global.wald.stat <- NULL # Wald test statistic (for coxph)
.global.lr.stat <- NULL # LR test statistic (for coxph)
.global.logrank.stat <- NULL # Score (logrank) test (for coxph)
.global.null.deviance <- NULL
.global.residual.deviance <- NULL
# intercept strings
.global.intercept.strings <- c("(Intercept)", "(intercept)","Intercept")
# .formatting: Default
.format.space.size <- "-1.8ex"
.format.dependent.variable.text <- "\\textit{Dependent variable:}"
.format.dependent.variable.text.underline <- TRUE
.format.dependent.variable.text.on <- TRUE
.format.dep.var.labels <- NULL
.format.covariate.labels <- NULL
.format.add.lines <- NULL
.format.dependent.variables.text <- ""
.format.underline.dependent.variables <- TRUE
.format.dependent.variables.left <- ""
.format.dependent.variables.right <- ""
.format.dependent.variables.capitalize <- FALSE
.format.ordered.intercepts <- TRUE
# column labels
.format.column.left <- ""
.format.column.right <- ""
# model numbers
.format.model.numbers <- TRUE
# common headers for multiple columns?
.format.multicolumn <- TRUE
# names for models
.format.model.names.include <- TRUE
.format.model.names <- NULL
.format.model.names <- cbind(c("aov","ANOVA",""), c("arima","ARIMA",""), c("Arima","ARIMA",""), c("blogit","bivariate","logistic"))
.format.model.names <- cbind(.format.model.names, c("bprobit","bivariate","probit"), c("betareg", "beta",""), c("chopit","compound hierarchical","ordered probit"))
.format.model.names <- cbind(.format.model.names, c("clm","cumulative","link"), c("censReg", "censored", "regression"), c("cloglog.net","network compl.","log log"), c("clogit","conditional","logistic"), c("coxph","Cox","prop. hazards"))
.format.model.names <- cbind(.format.model.names, c("dynlm","dynamic","linear"), c("lagsarlm","spatial","autoregressive"), c("errorsarlm","spatial","error"))
.format.model.names <- cbind(.format.model.names, c("ei.dynamic","Quinn dynamic","ecological inference"), c("ei.hier","$2 \\times 2$ hierarchical","ecological inference"))
.format.model.names <- cbind(.format.model.names, c("ei.RxC","hierarchical multinomial-Dirichlet","ecological inference"), c("exp","exponential",""), c("ergm","exponential family","random graph"))
.format.model.names <- cbind(.format.model.names, c("factor.bayes","Bayesian","factor analysis"), c("factor.mix","mixed data","factor analysis"))
.format.model.names <- cbind(.format.model.names, c("factor.ord","ordinal data","factor analysis"), c("fGARCH","GARCH",""), c("gamma","gamma",""))
.format.model.names <- cbind(.format.model.names, c("gamma.gee","gamma generalized","estimating equation"), c("gamma.mixed","mixed effects","gamma"))
.format.model.names <- cbind(.format.model.names, c("gamma.net","network","gamma"), c("gamma.survey","survey-weighted","gamma"), c("glmrob","robust","GLM"), c("gls","generalized","least squares"))
.format.model.names <- cbind(.format.model.names, c("gmm","GMM",""), c("rem.dyad", "relational", "event (dyadic)"))
.format.model.names <- cbind(.format.model.names, c("irt1d","IRT","(1-dim.)"), c("irtkd","IRT","(k-dim.)"))
.format.model.names <- cbind(.format.model.names, c("logit","logistic",""), c("logit.bayes","Bayesian","logistic"))
.format.model.names <- cbind(.format.model.names, c("logit.gam","GAM","(logistic)"), c("logit.gee","logistic generalized","estimating equation"))
.format.model.names <- cbind(.format.model.names, c("logit.mixed","mixed effects","logistic"), c("logit.net","network","logistic"))
.format.model.names <- cbind(.format.model.names, c("logit.survey","survey-weighted","logistic"), c("lognorm","log-normal",""))
.format.model.names <- cbind(.format.model.names, c("lmer","linear","mixed-effects"), c("glmer","generalized linear","mixed-effects"), c("nlmer","non-linear","mixed-effects"))
.format.model.names <- cbind(.format.model.names, c("ls","OLS",""), c("ls.mixed","mixed effect","linear"), c("lme","linear","mixed effects"), c("lmrob","MM-type","linear"))
.format.model.names <- cbind(.format.model.names, c("ls.net","network","least squares"), c("mlogit","multinomial","logistic"), c("mnlogit","multinomial","logit"))
.format.model.names <- cbind(.format.model.names, c("mlogit.bayes","Bayesian","multinomial logistic"), c("negbin","negative","binomial"), c("normal","normal",""))
.format.model.names <- cbind(.format.model.names, c("multinom","multinomial log-linear","(neural networks)"), c("nlme","non-linear","mixed effects"))
.format.model.names <- cbind(.format.model.names, c("normal.bayes","Bayesian","normal"), c("normal.gam","GAM","(continuous)"))
.format.model.names <- cbind(.format.model.names, c("normal.gee","normal generalized","estimating equation"), c("normal.net","network","normal"))
.format.model.names <- cbind(.format.model.names, c("normal.survey","survey-weighted","normal"), c("ologit","ordered","logistic"))
.format.model.names <- cbind(.format.model.names, c("oprobit","ordered","probit"), c("oprobit.bayes","Bayesian","ordered probit"))
.format.model.names <- cbind(.format.model.names, c("pmg","mean","groups"), c("poisson","Poisson",""), c("poisson.bayes","Bayesian","Poisson"))
.format.model.names <- cbind(.format.model.names, c("poisson.gam","GAM","(count)"), c("poisson.mixed","mixed effects","Poisson"))
.format.model.names <- cbind(.format.model.names, c("poisson.survey","survey-weighted","Poisson"), c("poisson.gee","Poisson generalized","estimating equation"))
.format.model.names <- cbind(.format.model.names, c("probit","probit",""), c("probit.bayes","Bayesian","probit"))
.format.model.names <- cbind(.format.model.names, c("probit.gam","GAM","(probit)"), c("probit.gee","probit generalized","estimating equation"))
.format.model.names <- cbind(.format.model.names, c("probit.mixed","mixed effects","probit"), c("probit.net","network","probit"))
.format.model.names <- cbind(.format.model.names, c("probit.survey","survey-weighted","probit"), c("relogit","rare events","logistic"))
.format.model.names <- cbind(.format.model.names, c("rq","quantile","regression"))
.format.model.names <- cbind(.format.model.names, c("rlm","robust","linear"), c("sur","SUR",""), c("threesls","3SLS",""))
.format.model.names <- cbind(.format.model.names, c("tobit","Tobit",""), c("tobit(AER)","Tobit",""), c("tobit.bayes","Bayesian","Tobit"))
.format.model.names <- cbind(.format.model.names, c("twosls","2SLS",""), c("weibull","Weibull",""))
.format.model.names <- cbind(.format.model.names, c("zeroinfl","zero-inflated","count data"), c("hurdle","hurdle",""))
.format.model.names <- cbind(.format.model.names, c("plm","panel","linear"), c("pgmm","panel","GMM"), c("ivreg","instrumental","variable"))
.format.model.names <- cbind(.format.model.names, c("coxreg","Cox",""), c("mlreg","ML","prop. hazards"), c("weibreg","Weibull",""))
.format.model.names <- cbind(.format.model.names, c("aftreg","accelerated"," failure time"), c("phreg","parametric","prop. hazards"))
.format.model.names <- cbind(.format.model.names, c("bj","Buckley-James",""), c("cph","Cox",""), c("Gls","generalized","least squares"), c("lrm","logistic",""))
.format.model.names <- cbind(.format.model.names, c("ols","OLS",""), c("psm","parametric","survival"), c("Rq","quantile","regression"))
.format.model.names <- cbind(.format.model.names, c("hetglm","heteroskedastic","GLM"), c("coeftest","coefficient","test"))
.format.model.names <- cbind(.format.model.names, c("heckit","Heckman","selection"), c("selection","selection",""))
.format.model.names <- cbind(.format.model.names, c("probit.ss","probit",""), c("binaryChoice","binary","choice"))
.format.model.names <- cbind(.format.model.names, c("brglm","GLM","(bias reduction)"), c("maBina","binary model","(marginal effect)"))
.format.model.names <- cbind(.format.model.names, c("mclogit","mixed","conditional logit"))
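# Illustrative lookup (an assumption about how this matrix is consumed later
# in the package): each column maps a model code to a two-line column header,
# e.g. .format.model.names[2:3, .format.model.names[1, ] == "coxph"] yields
# c("Cox", "prop. hazards"), typeset as a stacked label above that column.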
# if you use, say, glm() that does not correspond to one of the pre-defined models, put this as family and link
.format.model.function <- TRUE
.format.model.family <- ""
.format.model.dist <- ""
.format.model.link <- "link = "
## names for journal/output styles
# economics
.journal.style.names <- cbind(c("aer","American Economic Review"), c("qje","Quarterly Journal of Economics"), c("econometrica","Econometrica"))
.journal.style.names <- cbind(.journal.style.names, c("jpe","Journal of Political Economy"), c("jel","Journal of Economic Literature"))
.journal.style.names <- cbind(.journal.style.names, c("jep","Journal of Economic Perspectives"))
.format.coefficient.variables.capitalize <- FALSE
.format.coefficient.variables.left <- ""
.format.coefficient.variables.right <- ""
.format.coefficient.table.parts <- c("variable name","coefficient*","standard error"," ")
## .formatting of numeric output
# keep initial zeros?
.format.initial.zero <- TRUE
# if all zeros, keep going until you find a non-zero digit
.format.until.nonzero.digit <- TRUE
.format.max.extra.digits <- 2
## thresholds for the significance stars
.format.stars <- "*"
.format.cutoffs <- c(0.1, 0.05, 0.01)
.format.std.errors.left <- "("
.format.std.errors.right <- ")"
.format.p.values.left <- "["
.format.p.values.right <- "]"
.format.t.stats.left <- "t = "
.format.t.stats.right <- ""
.format.models.text <- ""
.format.models.left <- "\\textit{"
.format.models.right <- "}"
.format.underline.models <- FALSE
.format.models.skip.if.one <- TRUE # skip models section if only one model in table?
.format.object.names <- FALSE
.format.numbers.text <- ""
.format.numbers.left <- "("
.format.numbers.right <- ")"
.format.numbers.roman <- FALSE
.format.digit.separator.where <- c(3) # how 'often' to separate digits (e.g., thousands separator = 3)
.format.digit.separator <- ","
.format.ci.separator <- ", "
.format.round.digits <- 3
# for decimal comma use: .format.decimal.character <- "{,}"
.format.decimal.character <- "."
.format.dec.mark.align <- FALSE
# degrees of freedom - report or not?
.format.df <- TRUE
.format.table.parts <- c("=!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","-","omit","-","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","scale","sigma2","theta(se)*", "AIC","BIC","UBRE","rho(se)*","Mills(se)*", "SER(df)","F statistic(df)*(p)","chi2(df)*(p)","Wald(df)*(p)","LR(df)*(p)","logrank(df)*(p)","null deviance(df)","residual deviance(df)","=!","notes")
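# Reading guide for .format.table.parts (a sketch of the conventions, inferred
# from how the strings are manipulated below): "=" / "-" request double /
# single horizontal rules, a trailing "!" marks the rule as mandatory, plain
# names ("N", "R-squared", ...) are statistic rows, and suffixes such as
# "(df)", "(se)", "*", and "(p)" ask for degrees of freedom, standard errors,
# significance stars, and p-values on that row.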
.format.omit.regexp <- NULL
.format.omit.labels <- NULL
.format.omit.yes <- "Yes"
.format.omit.no <- "No"
.format.keep.regexp <- NULL
.format.N <- "Observations"
.format.LL <- "Log Likelihood"
.format.R2 <- "R$^{2}$"
.format.max.R2 <- "Max. Possible R$^{2}$"
.format.adj.R2 <- "Adjusted R$^{2}$"
.format.scale <- "Scale Parameter"
.format.UBRE <- "UBRE"
.format.rho <- "$\\rho$"
.format.mills <- "Inverse Mills Ratio"
.format.AIC <- "Akaike Inf. Crit."
.format.BIC <- "Bayesian Inf. Crit."
.format.sigma2 <- "$\\sigma^{2}$"
.format.theta <- "$\\theta$"
.format.SER <- "Residual Std. Error"
.format.F.stat <- "F Statistic"
.format.chi.stat <- "$\\chi^{2}$"
.format.wald.stat <- "Wald Test"
.format.lr.stat <- "LR Test"
.format.logrank.stat <- "Score (Logrank) Test"
.format.null.deviance <- "Null Deviance"
.format.residual.deviance <- "Residual Deviance"
.format.df.left <- "(df = "
.format.df.right <- ")"
.format.df.separator <- "; "
.format.intelligent.df <- TRUE
# this is for se, tstat, p.values at the bottom of the table, by statistics
.format.se.left <- " ("
.format.se.right <- ")"
.format.tstat.left <- " (z = "
.format.tstat.right <- ")"
.format.p.value.left <- "["
.format.p.value.right <- "]"
.format.intercept.name <- "Constant"
.format.intercept.bottom <- TRUE
.format.note <- "\\textit{Note:} "
.format.note.alignment <- "r"
.format.note.content <- c("$^{*}$p$<$[0.*]; $^{**}$p$<$[0.**]; $^{***}$p$<$[0.***]")
#### summary statistic table
.format.s.statistics.names <- cbind(c("n","N"), c("nmiss","missing"), c("mean","Mean"), c("sd","St. Dev."), c("median","Median"), c("min","Min"), c("max","Max"), c("mad","Median Abs. Dev."), c("p","Pctl(!)"))
.format.s.stat.parts <- c("=!","stat names","-","statistics1","-!","notes")
.format.s.statistics.list <- c("n","mean","sd","min","p25","median","p75","max")
.format.s.statistics.names.left <- ""
.format.s.statistics.names.right <- ""
.format.s.statistics.names.label <- "Statistic"
.format.s.coefficient.variables.capitalize <- FALSE
.format.s.coefficient.variables.left <- ""
.format.s.coefficient.variables.right <- ""
.format.s.round.digits <- 3
.format.s.note <- ""
.format.s.note.alignment <- "l"
.format.s.note.content <- NULL
####
.adjust.settings.style(style)
# continue only if no errors
if (length(error.present) == 1) {
# summary statistic table or regular table of data frame contents
if (!is.null(summary)) {
# make sure summary is as long as the number of objects
if (length(summary) > how.many.objects) { summary <- summary[1:how.many.objects] }
if (length(summary) < how.many.objects) { length(summary) <- how.many.objects }
# fill in values of summary; if NA, keep default
for (i in 1:how.many.objects) {
if (!is.na(summary[i])) {
.global.summary[i] <- summary[i]
}
else if (i > 1) { # if NA fill in previous value of summary
.global.summary[i] <- summary[i-1]
}
}
}
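# Example of the recycling above (hypothetical call with three objects):
# summary = c(TRUE, FALSE) is padded to length 3 with NA, and the NA inherits
# the preceding value, so .global.summary ends up c(TRUE, FALSE, FALSE) --
# a trailing NA means "same treatment as the object before it".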
## use formatting arguments
# header with name, version, etc.
.format.header <- header
# no empty lines? single row for coefficient and std.error/CI?
.format.single.row <- single.row
if (.format.single.row == TRUE) { .format.no.space <- TRUE }
else { .format.no.space <- FALSE }
if (!is.null(no.space)) { .format.no.space <- no.space }
# font size
.format.font.size <- font.size
# floating, floating environment, etc.
.format.floating <- float
.format.floating.environment <- float.env
.format.table.placement <- table.placement
.format.column.sep.width <- column.sep.width
# if not case-sensitive, transfer to lower case
if (!is.null(digit.separate)) { digit.separate <- tolower(digit.separate) }
# report df?
.format.df <- df
if (.format.df == FALSE) {
.format.table.parts <- gsub("(df)", "", .format.table.parts, fixed=TRUE)
}
# column, dependent variable and covariate labels
.format.column.labels <- column.labels
.format.column.separate <- column.separate
.format.covariate.labels <- covariate.labels
.format.dep.var.labels <- dep.var.labels
.format.add.lines <- add.lines
if (dep.var.labels.include == FALSE) {
.format.table.parts <- .format.table.parts[.format.table.parts!="dependent variables"]
}
if (!is.null(dep.var.caption)) {
if (dep.var.caption == "") {
.format.table.parts <- .format.table.parts[.format.table.parts!="dependent variable label"]
}
else {
.format.dependent.variable.text <- dep.var.caption
}
}
# confidence intervals
.format.ci <- ci
.format.ci.level <- ci.level
if (!is.null(ci.separator)) { .format.ci.separator <- ci.separator }
if (!is.null(ci.custom)) { .format.ci <- TRUE }
# omit
.format.omit.regexp <- omit
.format.omit.index <- omit
if (is.character(omit)) { .format.omit.index <- NULL }
if (is.numeric(omit)) { .format.omit.regexp <- NULL }
.format.omit.labels <- omit.labels
if (!is.null(omit.yes.no)) {
.format.omit.yes <- omit.yes.no[1]
.format.omit.no <- omit.yes.no[2]
}
# keep
.format.keep.regexp <- keep
.format.keep.index <- keep
if (is.character(keep)) { .format.keep.index <- NULL }
if (is.numeric(keep)) { .format.keep.regexp <- NULL }
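# Dispatch sketch for omit/keep: a character vector is treated as regular
# expressions matched against coefficient names (with perl = .format.perl),
# a numeric vector as 1-based indices into the coefficient rows. E.g.
# (hypothetical) omit = "year" drops every covariate whose name matches
# "year", while omit = c(2, 4) drops the second and fourth rows.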
# remove omitted statistics from table parts
if (!is.null(omit.stat)) {
.lower.omit.stat <- tolower(omit.stat) # make it all lower-case
if ("all" %in% .lower.omit.stat) { .lower.omit.stat <- omit.stat.acceptable }
if ("n" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="N"] }
if ("rsq" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="R-squared"] }
if ("adj.rsq" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="adjusted R-squared"] }
if ("max.rsq" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="max R-squared"] }
if ("ll" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="log likelihood"] }
if ("scale" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="scale"] }
if ("sigma2" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="sigma2"] }
if ("theta" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,5)!="theta"] }
if ("aic" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="AIC"] }
if ("bic" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="BIC"] }
if ("ubre" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="UBRE"] }
if ("rho" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,3)!="rho"] }
if ("mills" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,5)!="Mills"] }
if ("ser" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,3)!="SER"] }
if ("f" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,11)!="F statistic"] }
if ("chi2" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,4)!="chi2"] }
if ("wald" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,4)!="Wald"] }
if ("lr" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,2)!="LR"] }
if ("logrank" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,7)!="logrank"] }
if ("null.dev" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,13)!="null deviance"] }
if ("res.dev" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,17)!="residual deviance"] }
}
# keep statistics in the table
if (!is.null(keep.stat)) {
.lower.keep.stat <- tolower(keep.stat) # make it all lower-case
# do this by omitting everything except what you keep
.lower.omit.stat <- c("n","rsq","adj.rsq","max.rsq","ll","aic","bic","scale","ubre","rho","mills","sigma2","ser","f","theta","chi2","wald","lr","logrank","null.dev","res.dev")
.lower.omit.stat <- .lower.omit.stat[!(.lower.omit.stat %in% .lower.keep.stat) ]
if ("n" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="N"] }
if ("rsq" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="R-squared"] }
if ("adj.rsq" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="adjusted R-squared"] }
if ("max.rsq" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="max R-squared"] }
if ("ll" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="log likelihood"] }
if ("scale" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="scale"] }
if ("sigma2" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="sigma2"] }
if ("theta" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,5)!="theta"] }
if ("aic" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="AIC"] }
if ("bic" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="BIC"] }
if ("ubre" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="UBRE"] }
if ("rho" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,3)!="rho"] }
if ("mills" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,5)!="Mills"] }
if ("ser" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,3)!="SER"] }
if ("f" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,11)!="F statistic"] }
if ("chi2" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,4)!="chi2"] }
if ("wald" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,4)!="Wald"] }
if ("lr" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,2)!="LR"] }
if ("logrank" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,7)!="logrank"] }
if ("null.dev" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,13)!="null deviance"] }
if ("res.dev" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,17)!="residual deviance"] }
}
# keep statistics in table parts
if (!is.null(keep.stat)) {
.lower.keep.stat <- tolower(keep.stat) # make it all lower-case
keep.stat.acceptable <- c("all","n","rsq","adj.rsq","max.rsq","ll","aic","bic","scale","ubre","rho(se)*","Mills(se)*","sigma2","ser","f","theta","chi2","wald","lr","logrank","null.dev","res.dev") # list of statistic codes that are acceptable
remove.stats <- keep.stat.acceptable[!(keep.stat.acceptable %in% .lower.keep.stat)]
.format.table.parts <- .format.table.parts[!(.format.table.parts %in% remove.stats)]
}
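# Usage sketch (hypothetical models m1, m2 -- not part of this file):
#   stargazer(m1, m2, keep.stat = c("n", "rsq"))
# lower-cases the codes, builds the complement against the acceptable list,
# and strips every other statistic row from .format.table.parts, so only the
# "N" (Observations) and "R-squared" rows survive.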
# digits, initial.zeros, decimal characters
if (!is.null(decimal.mark)) { .format.decimal.character <- decimal.mark }
if (!is.null(align)) { .format.dec.mark.align <- align }
if (!is.null(digit.separator)) { .format.digit.separator <- digit.separator }
if (!is.null(initial.zero)) { .format.initial.zero <- initial.zero }
if (!is.null(digit.separate)) {
if (digit.separate=="lakh") { .format.digit.separator.where <- c(3,2) } # lakhs
else if ((digit.separate=="china") || (digit.separate=="japan")) { .format.digit.separator.where <- 4 }
else { .format.digit.separator.where <- digit.separate}
}
if (!is.null(digits)) {
.format.round.digits <- digits
.format.s.round.digits <- digits
}
if (!is.null(digits.extra)) {
.format.max.extra.digits <- digits.extra
if (digits.extra>=1) { .format.until.nonzero.digit <- TRUE }
else { .format.until.nonzero.digit <- FALSE }
}
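# Example of the digits/digits.extra interaction (illustrative): with
# digits = 2 and digits.extra = 3, a coefficient of 0.00041 is not printed as
# "0.00"; because .format.until.nonzero.digit is TRUE, rounding is extended,
# up to 3 extra decimal places, until a non-zero digit appears: "0.0004".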
# intercept top and bottom
if (!is.null(intercept.top)) { .format.intercept.top <- intercept.top }
if (!is.null(intercept.bottom)) { .format.intercept.bottom <- intercept.bottom }
# model names, numbers and multicolumn
if (!is.null(model.names)) {
.format.model.names.include <- model.names
if (model.names == TRUE) { .format.models.skip.if.one <- FALSE }
}
if (!is.null(model.numbers)) { .format.model.numbers <- model.numbers }
.format.multicolumn <- multicolumn
# object names
.format.object.names <- object.names
# report coefs, std errs, t, p?
if (!is.null(report)) {
.format.coefficient.table.parts <- NULL
for (i in 1:nchar(report)) {
component.letter <- substr(report, i, i)
if (component.letter == "v") { .format.coefficient.table.parts <- append(.format.coefficient.table.parts, "variable name") }
if (component.letter == "c") { .format.coefficient.table.parts <- append(.format.coefficient.table.parts, "coefficient") }
if (component.letter == "s") { .format.coefficient.table.parts <- append(.format.coefficient.table.parts, "standard error") }
if (component.letter == "t") { .format.coefficient.table.parts <- append(.format.coefficient.table.parts, "t-stat") }
if (component.letter == "p") { .format.coefficient.table.parts <- append(.format.coefficient.table.parts, "p-value") }
if ((component.letter == "*") && (i > 1)) {
l <- length(.format.coefficient.table.parts)
if ((.format.coefficient.table.parts[l] != "variable name") && (substr(report,i-1,i-1) != "*")) {
.format.coefficient.table.parts[l] <- paste(.format.coefficient.table.parts[l],"*",sep="")
}
}
}
.format.coefficient.table.parts <- append(.format.coefficient.table.parts, " ")
}
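# Worked example of the parser above (hypothetical call): report = "vc*stp"
# produces .format.coefficient.table.parts of
#   c("variable name", "coefficient*", "standard error", "t-stat", "p-value", " ")
# -- a "*" attaches significance stars to the element parsed just before it,
# here the coefficient.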
# significance stars
if (!is.null(star.cutoffs)) {
# assign cutoff values
.format.cutoffs <- star.cutoffs
}
if (!is.null(star.char)) {
.format.stars <- star.char
}
for (i in 1:length(.format.cutoffs)) {
if (is.na(.format.stars[i])) {
.format.stars[i] <- paste(rep(.format.stars[1], i), sep="", collapse="")
}
}
.format.stars <- .format.stars[1:length(.format.cutoffs)]
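# Example of the star recycling above (illustrative): with the default
# star.char "*" and cutoffs c(0.1, 0.05, 0.01), missing entries are filled by
# repeating the first character i times, giving .format.stars ==
# c("*", "**", "***"); an explicit star.char = c("+", "*", "**") is kept as
# supplied.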
# selection equation
.global.sel.equation <- selection.equation
# colnames and rownames
if (!is.null(rownames)) { .format.rownames <- rownames }
if (!is.null(colnames)) { .format.colnames <- colnames }
# zero vs. count component
.global.zero.component <- zero.component
# notes
replace.dec.mark <- function(s) { return (gsub(".", .format.decimal.character, s, fixed=TRUE))}
# replace star cutoffs in the notes section
for (i in 1:length(.format.cutoffs)) {
if (!is.na(.format.stars[i])) {
star.string <- paste(rep("*", i), sep="", collapse="")
.format.note.content <- gsub(paste("[.",star.string,"]",sep=""), replace.dec.mark(gsub("^[0]+", "",.format.cutoffs[i])), .format.note.content, fixed=TRUE)
.format.note.content <- gsub(paste("[0.",star.string,"]",sep=""), replace.dec.mark(.format.cutoffs[i]), .format.note.content, fixed=TRUE)
.format.note.content <- gsub(paste("[",star.string,"]",sep=""), replace.dec.mark(.format.cutoffs[i]*100), .format.note.content, fixed=TRUE)
}
}
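# Placeholder expansion example (illustrative, default cutoffs c(0.1, 0.05,
# 0.01)): in the note template, "[0.*]" becomes "0.1", "[.*]" becomes ".1"
# (leading zero stripped), and "[*]" becomes "10" (the cutoff as a percent),
# each with "." swapped for the configured decimal mark.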
if (!is.null(notes)) {
if (notes.append == TRUE) {
.format.note.content <- c(.format.note.content, notes)
.format.s.note.content <- c(.format.s.note.content, notes)
}
else {
.format.note.content <- notes
.format.s.note.content <- notes
}
}
if (!is.null(notes.align)) {
.format.note.alignment <- notes.align
.format.s.note.alignment <- notes.align
}
if (!is.null(notes.label)) {
.format.note <- notes.label
.format.s.note <- notes.label
}
# ordered probit/logit, etc. - report intercepts?
.format.ordered.intercepts <- ord.intercepts
# perl-compatible regular expressions?
.format.perl <- perl
# standard error for quantile regression
.format.rq.se <- rq.se
# report logical variables in summary statistics tables?
.format.summ.logical <- summary.logical
# summary statistics - what statistics to report - !!! this needs to come before summary.stat and omit.summary.stat
if (!nobs) { .format.s.statistics.list <- .format.s.statistics.list[.format.s.statistics.list!="n"] }
if (!mean.sd) { .format.s.statistics.list <- .format.s.statistics.list[(.format.s.statistics.list!="mean")&(.format.s.statistics.list!="sd")]}
if (!min.max) { .format.s.statistics.list <- .format.s.statistics.list[(.format.s.statistics.list!="min")&(.format.s.statistics.list!="max")]}
if (!median) { .format.s.statistics.list <- .format.s.statistics.list[.format.s.statistics.list!="median"] }
if (!iqr) { .format.s.statistics.list <- .format.s.statistics.list[(.format.s.statistics.list!="p25")&(.format.s.statistics.list!="p75")]}
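# Effect of the toggles above (illustrative): nobs = FALSE drops "n",
# mean.sd = FALSE drops both "mean" and "sd", and iqr = FALSE drops the
# "p25"/"p75" columns, so a call leaving only min.max and median TRUE reports
# Min, Median, and Max for each variable.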
# keep summary statistics
if (!is.null(summary.stat)) {
.lower.keep.summary.stat <- tolower(summary.stat) # make it all lower-case
.format.s.statistics.list <- .lower.keep.summary.stat
}
# remove omitted statistics from table parts
if (!is.null(omit.summary.stat)) {
.lower.omit.summary.stat <- tolower(omit.summary.stat) # make it all lower-case
.format.s.statistics.list <- .format.s.statistics.list[!(.format.s.statistics.list %in% .lower.omit.summary.stat)]
}
# table layout
.format.table.parts.nonstat <- c("=","-","-!","=!","dependent variable label",
"dependent variables","models","columns","numbers",
"objects","coefficients","omit","additional","notes")
# these are the non-model statistics parts of the table
if (!is.null(table.layout)) {
.format.table.parts.new <- NULL
for (i in 1:nchar(table.layout)) {
component.letter <- substr(table.layout, i, i)
if (component.letter == "=") { .format.table.parts.new <- append(.format.table.parts.new, "=") }
if (component.letter == "-") { .format.table.parts.new <- append(.format.table.parts.new, "-") }
if ((component.letter == "!") && (i > 1)) {
# attach "!" to the most recently appended part, which must be a rule
l <- length(.format.table.parts.new)
if (.format.table.parts.new[l] %in% c("-","=")) {
.format.table.parts.new[l] <- paste(.format.table.parts.new[l], "!", sep="")
}
}
if (component.letter == "l") { .format.table.parts.new <- append(.format.table.parts.new, "dependent variable label") }
if (component.letter == "d") { .format.table.parts.new <- append(.format.table.parts.new, "dependent variables") }
if (component.letter == "m") {
.format.table.parts.new <- append(.format.table.parts.new, "models")
.format.model.names.include <- TRUE
}
if (component.letter == "c") { .format.table.parts.new <- append(.format.table.parts.new, "columns") }
if (component.letter == "#") {
.format.table.parts.new <- append(.format.table.parts.new, "numbers")
.format.model.numbers <- TRUE
}
if (component.letter == "b") {
.format.table.parts.new <- append(.format.table.parts.new, "objects")
.format.object.names <- TRUE
}
if (component.letter == "t") { .format.table.parts.new <- append(.format.table.parts.new, "coefficients") }
if (component.letter == "o") { .format.table.parts.new <- append(.format.table.parts.new, "omit") }
if (component.letter == "a") { .format.table.parts.new <- append(.format.table.parts.new, "additional") }
if (component.letter == "n") { .format.table.parts.new <- append(.format.table.parts.new, "notes") }
if (component.letter == "s") {
.format.table.parts.new <- append(.format.table.parts.new,
.format.table.parts[!(.format.table.parts %in% .format.table.parts.nonstat)])
}
}
.format.table.parts <- .format.table.parts.new
}
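# Worked table.layout example (hypothetical call):
#   stargazer(m1, m2, table.layout = "=ld#-t-s=n")
# rebuilds .format.table.parts as: double rule, dependent-variable caption and
# names, model numbers, single rule, coefficients, single rule, the model
# statistics ("s" re-inserts every non-structural part still present), a
# closing double rule, and the notes line.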
# now omit table parts
if (!is.null(omit.table.layout)) {
for (i in 1:nchar(omit.table.layout)) {
component.letter <- substr(omit.table.layout, i, i)
if (component.letter == "=") { .format.table.parts <- .format.table.parts[.format.table.parts!="="] }
if (component.letter == "-") { .format.table.parts <- .format.table.parts[.format.table.parts!="-"] }
if ((component.letter == "!") && (i > 1)) {
if (substr(omit.table.layout, i-1, i-1) == "=") { .format.table.parts <- .format.table.parts[.format.table.parts!="=!"] }
if (substr(omit.table.layout, i-1, i-1) == "-") { .format.table.parts <- .format.table.parts[.format.table.parts!="-!"] }
}
if (component.letter == "l") { .format.table.parts <- .format.table.parts[.format.table.parts!="dependent variable label"] }
if (component.letter == "d") { .format.table.parts <- .format.table.parts[.format.table.parts!="dependent variables"] }
if (component.letter == "m") { .format.table.parts <- .format.table.parts[.format.table.parts!="models"] }
if (component.letter == "c") { .format.table.parts <- .format.table.parts[.format.table.parts!="columns"] }
if (component.letter == "#") { .format.table.parts <- .format.table.parts[.format.table.parts!="numbers"] }
if (component.letter == "b") { .format.table.parts <- .format.table.parts[.format.table.parts!="objects"] }
if (component.letter == "t") { .format.table.parts <- .format.table.parts[.format.table.parts!="coefficients"] }
if (component.letter == "o") { .format.table.parts <- .format.table.parts[.format.table.parts!="omit"] }
if (component.letter == "a") { .format.table.parts <- .format.table.parts[.format.table.parts!="additional"] }
if (component.letter == "n") { .format.table.parts <- .format.table.parts[.format.table.parts!="notes"] }
if (component.letter == "s") { .format.table.parts <- .format.table.parts[.format.table.parts %in% .format.table.parts.nonstat] }
}
}
# intelligent division of regression tables vs. summary statistics tables
regression.table.objects <- NULL
number.of.table <- 0
title.table <- NULL
label.table <- NULL
for (i in seq(1:how.many.objects)) {
if (is.data.frame(objects[[i]])==TRUE) {
if (!is.null(regression.table.objects)) {
number.of.table <- number.of.table + 1 # allows for multiple table titles and labels
if (!is.na(title[number.of.table])) { .format.title <- title[number.of.table] }
else { .format.title <- title[length(title)] }
if (!is.na(label[number.of.table])) { .format.label <- label[number.of.table] }
else { .format.label <- label[length(label)] }
if (type == "latex") {
do.call(.stargazer.reg.table, as.list(objects[regression.table.objects]))
invisible.output <- latex.code <- c(invisible.output, invisible(capture.output(do.call(.stargazer.reg.table, as.list(objects[regression.table.objects])),file=NULL)) )
}
else if ((type == "text") || (type == "html") || (type == "mmd") ) {
latex.code <- c(latex.code, invisible(capture.output(do.call(.stargazer.reg.table, as.list(objects[regression.table.objects])),file=NULL)) )
}
}
number.of.table <- number.of.table + 1
if (!is.na(title[number.of.table])) { .format.title <- title[number.of.table] }
else { .format.title <- title[length(title)] }
if (!is.na(label[number.of.table])) { .format.label <- label[number.of.table] }
else { .format.label <- label[length(label)] }
if (.global.summary[i]==TRUE) {
if (type == "latex") {
.stargazer.summ.stat.table(objects[[i]])
invisible.output <- latex.code <- c(invisible.output, invisible(capture.output(.stargazer.summ.stat.table(objects[[i]]),file=NULL)) )
}
else if ((type == "text") || (type == "html") || (type == "mmd")) {
latex.code <- c(latex.code, invisible(capture.output(.stargazer.summ.stat.table(objects[[i]]),file=NULL)) )
}
}
else {
if (type == "latex") {
.stargazer.data.frame.table(objects[[i]])
invisible.output <- latex.code <- c(invisible.output, invisible(capture.output(.stargazer.data.frame.table(objects[[i]]),file=NULL)) )
}
else if ((type == "text") || (type == "html") || (type == "mmd")) {
latex.code <- c(latex.code, invisible(capture.output(.stargazer.data.frame.table(objects[[i]]),file=NULL)) )
}
}
regression.table.objects <- NULL
}
else {
regression.table.objects <- c(regression.table.objects, i)
.global.object.names <- .global.object.names.all[regression.table.objects]
}
}
if (!is.null(regression.table.objects)) {
number.of.table <- number.of.table + 1
if (!is.na(title[number.of.table])) { .format.title <- title[number.of.table] }
else { .format.title <- title[length(title)] }
if (!is.na(label[number.of.table])) { .format.label <- label[number.of.table] }
else { .format.label <- label[length(label)] }
if (type == "latex") {
do.call(.stargazer.reg.table, as.list(objects[regression.table.objects]))
invisible.output <- latex.code <- c(invisible.output, invisible(capture.output(do.call(.stargazer.reg.table, as.list(objects[regression.table.objects])),file=NULL)) )
}
else if ((type == "text") || (type == "html") || (type == "mmd")) {
latex.code <- c(latex.code, invisible(capture.output(do.call(.stargazer.reg.table, as.list(objects[regression.table.objects])),file=NULL)) )
}
}
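# Dispatch sketch for the loop above: objects are scanned in order;
# consecutive model objects are pooled into a single regression table, while
# each data frame first flushes the pending pool and then renders on its own
# -- as a summary-statistics table when its .global.summary flag is TRUE,
# otherwise as a verbatim data-frame table. Titles and labels are consumed
# one per emitted table, recycling their last element.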
# don't do text output or file outputs if there are errors
if (type == "text") {
.text.output(latex.code)
invisible.output <- invisible(capture.output(.text.output(latex.code)))
}
else if (type == "html") {
.html.output(latex.code)
invisible.output <- invisible(capture.output(.html.output(latex.code)))
}
else if (type == "mmd") {
.mmd.output(latex.code)
invisible.output <- invisible(capture.output(.mmd.output(latex.code)))
}
if (length(out) >= 1) {
text.out <- invisible(capture.output(.text.output(latex.code)))
html.out <- invisible(capture.output(.html.output(latex.code)))
.output.file(out, latex.code, text.out, html.out, type, out.header)
}
}
else {
if (suppress.errors == FALSE) {
cat(error.present, sep="")
invisible.output <- latex.code <- error.present
}
else {
invisible.output <- latex.code <- ""
}
}
options(warn=warn)
return(invisible(invisible.output))
}
.onAttach <-
function(libname, pkgname) {
packageStartupMessage("\nPlease cite as: \n")
packageStartupMessage(" Hlavac, Marek (2018). stargazer: Well-Formatted Regression and Summary Statistics Tables.")
packageStartupMessage(" R package version 5.2.2. https://CRAN.R-project.org/package=stargazer \n")
}
.stargazer.wrap <-
function(..., type, title, style, summary, out, out.header, covariate.labels, column.labels, column.separate,
dep.var.caption, dep.var.labels, dep.var.labels.include, align, coef, se, t, p, t.auto,
p.auto, ci, ci.custom, ci.level, ci.separator, add.lines, apply.coef, apply.se, apply.t, apply.p, apply.ci,
colnames,
column.sep.width, decimal.mark, df, digit.separate, digit.separator, digits, digits.extra,
flip, float,
float.env, font.size, header, initial.zero, intercept.bottom, intercept.top, keep, keep.stat,
label, model.names, model.numbers, multicolumn, no.space, notes, notes.align, notes.append,
notes.label, object.names, omit, omit.labels, omit.stat, omit.summary.stat, omit.table.layout,
omit.yes.no, order, ord.intercepts, perl, report, rownames,
rq.se, selection.equation, single.row, star.char, star.cutoffs, suppress.errors,
table.layout, table.placement,
zero.component, summary.logical, summary.stat, nobs, mean.sd, min.max, median, iqr, warn) {
.add.model <-
function(object.name, user.coef=NULL, user.se=NULL, user.t=NULL, user.p=NULL, auto.t=TRUE, auto.p=TRUE, user.ci.lb=NULL, user.ci.rb=NULL) {
if (class(object.name)[1] == "Glm") {
.summary.object <<- summary.glm(object.name)
}
else if (!(.model.identify(object.name) %in% c("aftreg", "coxreg","phreg","weibreg", "Glm", "bj", "cph", "lrm", "ols", "psm", "Rq"))) {
.summary.object <<- summary(object.name)
}
else {
.summary.object <<- object.name
}
if (.model.identify(object.name) == "rq") {
.summary.object <<- suppressMessages(summary(object.name, se=.format.rq.se))
}
model.num.total <- 1 # model number for multinom, etc.
if (.model.identify(object.name) == "multinom") {
if (!is.null(nrow(.summary.object$coefficients))) {
model.num.total <- nrow(.summary.object$coefficients)
}
}
for (model.num in 1:model.num.total) {
.global.models <<- append(.global.models, .model.identify(object.name))
.global.dependent.variables <<- append(.global.dependent.variables, .dependent.variable(object.name, model.num))
.global.dependent.variables.written <<- append(.global.dependent.variables.written, .dependent.variable.written(object.name, model.num))
.global.N <<- append(.global.N, .number.observations(object.name))
.global.LL <<- append(.global.LL, .log.likelihood(object.name))
.global.R2 <<- append(.global.R2, .r.squared(object.name))
.global.max.R2 <<- append(.global.max.R2, .max.r.squared(object.name))
.global.adj.R2 <<- append(.global.adj.R2, .adj.r.squared(object.name))
.global.AIC <<- append(.global.AIC, .AIC(object.name))
.global.BIC <<- append(.global.BIC, .BIC(object.name))
.global.scale <<- append(.global.scale, .get.scale(object.name))
.global.UBRE <<- append(.global.UBRE, .gcv.UBRE(object.name))
.global.sigma2 <<- append(.global.sigma2, .get.sigma2(object.name))
.global.rho <<- cbind(.global.rho, .get.rho(object.name))
.global.mills <<- cbind(.global.mills, .get.mills(object.name))
.global.theta <<- cbind(.global.theta, .get.theta(object.name))
.global.SER <<- cbind(.global.SER, .SER(object.name))
.global.F.stat <<- cbind(.global.F.stat, .F.stat(object.name))
.global.chi.stat <<- cbind(.global.chi.stat, .chi.stat(object.name))
.global.wald.stat <<- cbind(.global.wald.stat, .wald.stat(object.name))
.global.lr.stat <<- cbind(.global.lr.stat, .lr.stat(object.name))
.global.logrank.stat <<- cbind(.global.logrank.stat, .logrank.stat(object.name))
.global.null.deviance <<- cbind(.global.null.deviance, .null.deviance(object.name))
.global.residual.deviance <<- cbind(.global.residual.deviance, .residual.deviance(object.name))
max.length <- length(.global.coefficient.variables)+length(.coefficient.variables(object.name))
# add RHS variables and coefficients
coef.var <- .coefficient.variables(object.name)
.global.coef.vars.by.model <<- cbind(.global.coef.vars.by.model, coef.var)
temp.gcv <- rep(NA,each=1,times=max.length)
temp.gcv[1:length(.global.coefficient.variables)] <- .global.coefficient.variables
how.many.gcv <- length(.global.coefficient.variables)
# try to find variable
position <- 0
for (i in seq(1:length(coef.var))) {
found <- FALSE
for (j in seq(1:length(.global.coefficient.variables))) {
if (coef.var[i] == .global.coefficient.variables[j]) {
found <- TRUE
for (k in 1:how.many.gcv) {
if (coef.var[i]==temp.gcv[k]) {
position <- k
}
}
}
}
# If variable was found, no need to add it
if (found == FALSE) {
# append new variable to list of regressors
while ((position < how.many.gcv) && (!(temp.gcv[position+1] %in% coef.var))) {
position <- position + 1
}
temp.gcv <- append(temp.gcv, coef.var[i], after=position)
how.many.gcv <- how.many.gcv + 1
position <- position + 1
}
}
.global.coefficient.variables <<- temp.gcv[1:how.many.gcv]
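# Merge sketch (illustrative): temp.gcv accumulates the union of coefficient
# names across models while preserving relative order -- a new name is
# spliced in right after the last name the models share, so terms (a, b, d)
# followed by (a, c, d) combine to the row order (a, b, c, d).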
# build up coefficients from scratch
temp.coefficients <- temp.std.errors <- temp.ci.lb <- temp.ci.rb <- temp.t.stats <- temp.p.values <- matrix(data = NA, nrow = length(.global.coefficient.variables), ncol = ncol(.global.coefficients)+1)
rownames(temp.coefficients) <- rownames(temp.std.errors) <- rownames(temp.ci.lb) <- rownames(temp.ci.rb) <- rownames(temp.t.stats) <- rownames(temp.p.values) <- .global.coefficient.variables
# fill in from previous iteration of .global coefficients
which.variable <- 0
for (row in .global.coefficient.variables) {
which.variable <- which.variable + 1
row.i <- .rename.intercept(row) # row with intercept renamed to get the omit and keep right
### if omitted variable, then advance to the next iteration of the loop --- !!! do this also for index
#skip all of this if omitted based on regular expression
omitted <- FALSE
if (!is.null(.format.omit.regexp)) {
for (i in seq(1:length(.format.omit.regexp))) {
if (length(grep(.format.omit.regexp[i], row.i, perl=.format.perl, fixed=FALSE))!=0) { omitted <- TRUE }
}
}
if (!is.null(.format.keep.regexp)) {
omitted <- TRUE
for (i in seq(1:length(.format.keep.regexp))) {
if (length(grep(.format.keep.regexp[i], row.i, perl=.format.perl, fixed=FALSE))!=0) { omitted <- FALSE }
}
}
if (!is.null(.format.omit.index)) {
for (i in seq(1:length(.format.omit.index))) {
if (.format.omit.index[i] == which.variable) { omitted <- TRUE }
}
}
if (!is.null(.format.keep.index)) {
omitted <- TRUE
for (i in seq(1:length(.format.keep.index))) {
if (.format.keep.index[i] == which.variable) { omitted <- FALSE }
}
}
if (omitted == TRUE) { next }
###
for (col in seq(1:ncol(.global.coefficients))) {
if (sum(as.vector(rownames(.global.coefficients[,col, drop=FALSE])==row))!=0) {
if (!is.null(.global.coefficients)) { temp.coefficients[row, col] <- .global.coefficients[row, col] }
if (!is.null(.global.std.errors)) { temp.std.errors[row, col] <- .global.std.errors[row, col] }
if (!is.null(.global.ci.lb)) { temp.ci.lb[row, col] <- .global.ci.lb[row, col] }
if (!is.null(.global.ci.rb)) { temp.ci.rb[row, col] <- .global.ci.rb[row, col] }
if (!is.null(.global.t.stats)) { temp.t.stats[row, col] <- .global.t.stats[row, col] }
if (!is.null(.global.p.values)) { temp.p.values[row, col] <- .global.p.values[row, col] }
}
}
feed.coef <- NA; feed.se <- NA
# coefficients and standard errors
if (!is.null(.get.coefficients(object.name, user.coef, model.num=model.num)[row])) {
temp.coefficients[row, ncol(temp.coefficients)] <- .get.coefficients(object.name, user.coef, model.num=model.num)[row]
feed.coef <- temp.coefficients[, ncol(temp.coefficients)]
}
if (!is.null(.get.standard.errors(object.name, user.se, model.num=model.num)[row])) {
temp.std.errors[row, ncol(temp.std.errors)] <- .get.standard.errors(object.name, user.se, model.num=model.num)[row]
feed.se <- temp.std.errors[, ncol(temp.std.errors)]
}
# confidence interval, left and right bound
if (!is.null(.get.ci.lb(object.name, user.ci.lb, model.num=model.num)[row])) { temp.ci.lb[row, ncol(temp.ci.lb)] <- .get.ci.lb(object.name, user.ci.lb, model.num=model.num)[row] }
if (!is.null(.get.ci.rb(object.name, user.ci.rb, model.num=model.num)[row])) { temp.ci.rb[row, ncol(temp.ci.rb)] <- .get.ci.rb(object.name, user.ci.rb, model.num=model.num)[row] }
# t-stats and p-values
#if (!is.null(user.coef)) { feed.coef <- user.coef } # feed user-defined coefficients, if available - check that this does not mess up multinom
#if (!is.null(user.se)) { feed.se <- user.se } # feed user-defined std errors, if available
if (!is.null(.get.t.stats(object.name, user.t, auto.t, feed.coef, feed.se, user.coef, user.se, model.num=model.num)[row])) { temp.t.stats[row, ncol(temp.t.stats)] <- .get.t.stats(object.name, user.t, auto.t, feed.coef, feed.se, user.coef, user.se, model.num=model.num)[row] }
if (!is.null(.get.p.values(object.name, user.p, auto.p, feed.coef, feed.se, user.coef, user.se, model.num=model.num)[row])) { temp.p.values[row, ncol(temp.p.values)] <- .get.p.values(object.name, user.p, auto.p, feed.coef, feed.se, user.coef, user.se, model.num=model.num)[row] }
}
if (!is.null(temp.coefficients)) { .global.coefficients <<- temp.coefficients }
if (!is.null(temp.std.errors)) { .global.std.errors <<- temp.std.errors }
if (!is.null(temp.ci.lb)) { .global.ci.lb <<- temp.ci.lb }
if (!is.null(temp.ci.rb)) { .global.ci.rb <<- temp.ci.rb }
if (!is.null(temp.t.stats)) { .global.t.stats <<- temp.t.stats }
if (!is.null(temp.p.values)) { .global.p.values <<- temp.p.values }
}
}
.adj.r.squared <-
function(object.name) {
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH","Arima","coeftest","maBina", "lmer", "glmer", "nlmer", "Gls"))) {
if (model.name %in% c("heckit")) {
return(.summary.object$rSquared$R2adj)
}
if (model.name %in% c("felm")) {
return(.summary.object$r2adj)
}
if (!is.null(suppressMessages(.summary.object$adj.r.squared))) {
return(as.vector(suppressMessages(.summary.object$adj.r.squared)))
}
else if (model.name %in% c("normal.gam", "logit.gam", "probit.gam", "poisson.gam", "gam()")) {
return(as.vector(.summary.object$r.sq))
}
else if (model.name %in% c("plm")) {
return(as.vector(.summary.object$r.squared["adjrsq"]))
}
else if (model.name %in% c("ols")) {
n <- nobs(object.name)
p <- length(object.name$coefficients[names(object.name$coefficients)!="Intercept"])
r2 <- object.name$stats["R2"]
adj.r2 <- 1-(1-r2)*((n-1) / (n-p-1))
return(as.vector(adj.r2))
}
}
return(NA)
}
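# Illustrative sketch of the 'ols' branch above: the small-sample correction is
# adj.R2 = 1 - (1 - R2) * (n - 1) / (n - p - 1). The inputs below are made up
# for illustration, not read from any model object.
if (FALSE) {
  n <- 100; p <- 3; r2 <- 0.40
  1 - (1 - r2) * ((n - 1) / (n - p - 1))   # 0.38125
}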
.adjust.settings.style <-
function(what.style) {
style <- tolower(what.style)
if (style == "all") {
.format.table.parts <<- c("=!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","-","omit","-","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","sigma2","theta(se)*(p)", "SER(df)","F statistic(df)*(p)","chi2(df)*(p)","Wald(df)*(p)","LR(df)*(p)","logrank(df)*(p)","AIC","BIC","UBRE","rho(se)*(p)","Mills(se)*(p)","residual deviance(df)*","null deviance(df)*","=!","notes")
.format.coefficient.table.parts <<- c("variable name","coefficient*","standard error","t-stat","p-value")
}
else if (style == "all2") {
.format.table.parts <<- c("=!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","-","omit","-","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","sigma2","theta(se)*(p)", "SER(df)","F statistic(df)*(p)","chi2(df)*(p)","Wald(df)*(p)","LR(df)*(p)","logrank(df)*(p)","AIC","BIC","UBRE","rho(se)*(p)","Mills(se)*(p)","residual deviance(df)*","null deviance(df)*","=!","notes")
.format.coefficient.table.parts <<- c("variable name","coefficient*","standard error")
}
# aer = American Economic Review
else if (style == "aer") {
.format.table.parts <<- c("=!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","omit","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","theta(se)*", "AIC","BIC","UBRE","rho(se)*","Mills(se)*", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","-!","notes")
.format.models.skip.if.one <<- TRUE
.format.dependent.variable.text.on <<- FALSE
.format.until.nonzero.digit <<- FALSE
.format.max.extra.digits <<- 0
.format.model.left <<- ""
.format.model.right <<- ""
.format.note <<- "\\textit{Notes:}"
.format.note.alignment <<- "l"
.format.note.content <<- c("$^{***}$Significant at the [***] percent level.","$^{**}$Significant at the [**] percent level.","$^{*}$Significant at the [*] percent level.")
}
# ajps = American Journal of Political Science
else if (style == "ajps") {
.format.table.parts <<- c("-!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","omit","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","theta(se)*", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","AIC","BIC","UBRE","rho(se)*","Mills(se)*","-!","notes")
.format.models.skip.if.one <<- TRUE
.format.dependent.variable.text.on <<- FALSE
.format.digit.separator <<- ""
.format.dependent.variables.left <<- "\\textbf{"
.format.dependent.variables.right <<- "}"
.format.column.left <<- "\\textbf{"
.format.column.right <<- "}"
.format.models.left <<- "\\textbf{"
.format.models.right <<- "}"
.format.numbers.left <<- "\\textbf{Model "
.format.numbers.right <<- "}"
.format.coefficient.table.parts <<- c("variable name","coefficient*","standard error")
.format.N <<- "N"
.format.AIC <<- "AIC"
.format.BIC <<- "BIC"
.format.chi.stat <<- "Chi-square"
.format.R2 <<- "R-squared"
.format.adj.R2 <<- "Adj. R-squared"
.format.max.R2 <<- "Max. R-squared"
.format.note <<- ""
.format.note.content <<- c("$^{***}$p $<$ [.***]; $^{**}$p $<$ [.**]; $^{*}$p $<$ [.*]")
.format.note.alignment <<- "l"
.format.s.stat.parts <<- c("-!","stat names","-","statistics1","-!","notes")
}
# ajs = American Journal of Sociology
else if (style == "ajs") {
.format.table.parts <<- c(" ","=!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","omit","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","theta(se)*", "AIC","BIC","UBRE","rho(se)*","Mills(se)*", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","-!","notes")
.format.models.skip.if.one <<- TRUE
.format.dependent.variables.capitalize <<- TRUE
.format.dependent.variable.text.on <<- FALSE
.format.numbers.left <<- ""
.format.numbers.right <<- ""
.format.until.nonzero.digit <<- FALSE
.format.max.extra.digits <<- 0
.format.model.left <<- ""
.format.model.right <<- ""
.format.note <<- "\\textit{Notes:}"
.format.note.alignment <<- "l"
.format.note.content <<- c("$^{*}$P $<$ [.*]","$^{**}$P $<$ [.**]","$^{***}$P $<$ [.***]")
.format.cutoffs <<- c(0.05, 0.01, 0.001)
.format.initial.zero <<- FALSE
}
# apsr = American Political Science Review
else if (style == "apsr") {
.format.table.parts <<- c("-!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","omit","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","theta(se)*", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","AIC","BIC","UBRE","rho(se)*","Mills(se)*","-!","notes")
.format.models.skip.if.one <<- TRUE
.format.dependent.variable.text.on <<- FALSE
.format.models.left <<- ""
.format.models.right <<- ""
.format.coefficient.table.parts <<- c("variable name","coefficient*","standard error")
.format.N <<- "N"
.format.AIC <<- "AIC"
.format.BIC <<- "BIC"
.format.chi.stat <<- "chi$^{2}$"
.format.note <<- ""
.format.note.content <<- c("$^{*}$p $<$ [.*]; $^{**}$p $<$ [.**]; $^{***}$p $<$ [.***]")
.format.note.alignment <<- "l"
.format.s.stat.parts <<- c("-!","stat names","-","statistics1","-!","notes")
}
# asq = Administrative Science Quarterly
else if (style == "asq") {
.format.table.parts <<- c("-!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","omit","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","theta(se)*", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","AIC","BIC","UBRE","rho(se)*","Mills(se)*","-!","notes")
.format.models.skip.if.one <<- TRUE
.format.dependent.variable.text.on <<- FALSE
.format.digit.separator <<- ""
.format.dependent.variables.left <<- "\\textbf{"
.format.dependent.variables.right <<- "}"
.format.column.left <<- "\\textbf{"
.format.column.right <<- "}"
.format.models.left <<- "\\textbf{"
.format.models.right <<- "}"
.format.numbers.left <<- "\\textbf{Model "
.format.numbers.right <<- "}"
.format.coefficient.table.parts <<- c("variable name","coefficient*","standard error")
.format.AIC <<- "AIC"
.format.BIC <<- "BIC"
.format.chi.stat <<- "Chi-square"
.format.R2 <<- "R-squared"
.format.adj.R2 <<- "Adj. R-squared"
.format.max.R2 <<- "Max. R-squared"
.format.note <<- ""
.format.note.content <<- c("$^{\\bullet}$p $<$ [.*]; $^{\\bullet\\bullet}$p $<$ [.**]; $^{\\bullet\\bullet\\bullet}$p $<$ [.***]")
.format.note.alignment <<- "l"
.format.s.stat.parts <<- c("-!","stat names","-","statistics1","-!","notes")
.format.stars <<- "\\bullet"
}
# asr = American Sociological Review
else if (style == "asr") {
.format.table.parts <<- c("-!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","omit","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","theta(se)", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","AIC","BIC","UBRE","rho(se)*","Mills(se)*","-!","notes")
.format.models.skip.if.one <<- TRUE
.format.dependent.variable.text.on <<- FALSE
.format.models.left <<- ""
.format.models.right <<- ""
.format.coefficient.table.parts <<- c("variable name","coefficient*")
.format.N <<- "\\textit{N}"
.format.AIC <<- "AIC"
.format.BIC <<- "BIC"
.format.chi.stat <<- "chi$^{2}$"
.format.note <<- ""
.format.note.content <<- c("$^{*}$p $<$ [.*]; $^{**}$p $<$ [.**]; $^{***}$p $<$ [.***]")
.format.cutoffs <<- c(0.05, 0.01, 0.001)
.format.note.alignment <<- "l"
.format.s.stat.parts <<- c("-!","stat names","-","statistics1","-!","notes")
}
# "demography" = Demography
else if (style == "demography") {
.format.table.parts <<- c("-!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","omit","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","theta(se)*", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","AIC","BIC","UBRE","rho(se)*","Mills(se)*","-!","notes")
.format.models.skip.if.one <<- TRUE
.format.dependent.variable.text.on <<- FALSE
.format.models.left <<- ""
.format.models.right <<- ""
.format.numbers.left <<- "Model "
.format.numbers.right <<- ""
.format.coefficient.table.parts <<- c("variable name","coefficient*","standard error")
.format.N <<- "\\textit{N}"
.format.AIC <<- "AIC"
.format.BIC <<- "BIC"
.format.chi.stat <<- "Chi-Square"
.format.note <<- ""
.format.note.content <<- c("$^{*}$p $<$ [.*]; $^{**}$p $<$ [.**]; $^{***}$p $<$ [.***]")
.format.cutoffs <<- c(0.05, 0.01, 0.001)
.format.note.alignment <<- "l"
.format.s.stat.parts <<- c("-!","stat names","-","statistics1","-!","notes")
}
# io = International Organization
else if (style == "io") {
.format.table.parts <<- c("-!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","omit","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","theta(se)*", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","AIC","BIC","UBRE","rho(se)*","Mills(se)*","-!","notes")
.format.models.skip.if.one <<- TRUE
.format.dependent.variable.text.on <<- FALSE
.format.coefficient.table.parts <<- c("variable name","coefficient*","standard error")
.format.coefficient.variables.capitalize <<- TRUE
.format.s.coefficient.variables.capitalize <<- TRUE
.format.intercept.name <<- "Constant"
.format.N <<- "\\textit{Observations}"
.format.AIC <<- "\\textit{Akaike information criterion}"
.format.BIC <<- "\\textit{Bayesian information criterion}"
.format.chi.stat <<- "\\textit{Chi-square}"
.format.logrank.stat <<- "\\textit{Score (logrank) test}"
.format.lr.stat <<- "\\textit{LR test}"
.format.max.R2 <<- "\\textit{Maximum R-squared}"
.format.R2 <<- "\\textit{R-squared}"
.format.adj.R2 <<- "\\textit{Adjusted R-squared}"
.format.UBRE <<- "\\textit{UBRE}"
.format.F.stat <<- "\\textit{F statistic}"
.format.LL <<- "\\textit{Log likelihood}"
.format.SER <<- "\\textit{Residual standard error}"
.format.null.deviance <<- "\\textit{Null deviance}"
.format.residual.deviance <<- "\\textit{Residual deviance}"
.format.scale <<- "\\textit{Scale}"
.format.wald.stat <<- "\\textit{Wald test}"
.format.note <<- "\\textit{Notes:}"
.format.note.content <<- c("$^{***}$p $<$ [.***]; $^{**}$p $<$ [.**]; $^{*}$p $<$ [.*]")
.format.note.alignment <<- "l"
.format.s.stat.parts <<- c("-!","stat names","-","statistics1","-!","notes")
}
# jpam = Journal of Policy Analysis and Management
else if (style == "jpam") {
.format.table.parts <<- c("-!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","omit","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","theta(se)*", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","AIC","BIC","UBRE","rho(se)*","Mills(se)*","-!","notes")
.format.models.skip.if.one <<- TRUE
.format.dependent.variable.text.on <<- FALSE
.format.models.left <<- ""
.format.models.right <<- ""
.format.numbers.left <<- "Model "
.format.numbers.right <<- ""
.format.numbers.roman <<- TRUE
.format.coefficient.table.parts <<- c("variable name","coefficient*","standard error")
.format.intercept.bottom <<- FALSE
.format.intercept.top <<- TRUE
.format.N <<- "N"
.format.AIC <<- "AIC"
.format.BIC <<- "BIC"
.format.note <<- "\\textit{Note:}"
.format.note.content <<- c("$^{***}$p $<$ [.***]; $^{**}$p $<$ [.**]; $^{*}$p $<$ [.*]")
.format.note.alignment <<- "l"
.format.s.stat.parts <<- c("-!","stat names","-","statistics1","-!","notes")
.format.s.statistics.names <<- cbind(c("n","N"), c("nmiss","missing"), c("mean","Mean"), c("sd","SD"), c("median","Median"), c("min","Minimum"), c("max","Maximum"), c("mad","Median Abs. Dev."), c("p","Percentile(!)"))
}
# "qje" = Quarterly Journal of Economics
else if (style=="qje") {
.format.table.parts <<- c("=!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","omit","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","theta(se)*", "AIC","BIC","UBRE","rho(se)*","Mills(se)*", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","=!","notes")
.format.dependent.variable.text.on <<- FALSE
.format.s.stat.parts <<- c("-!","stat names","=","statistics1","=!","notes")
.format.N <<- "\\textit{N}"
.format.note <<- "\\textit{Notes:}"
.format.note.content <<- c("$^{***}$Significant at the [***] percent level.", "$^{**}$Significant at the [**] percent level.", "$^{*}$Significant at the [*] percent level.")
}
# "commadefault" = default style with a comma as the decimal mark and a space as the digit separator
else if (style=="commadefault") {
.format.table.parts <<- c("=!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","-","omit","-","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","sigma2","theta(se)*", "AIC","BIC","UBRE","rho(se)*","Mills(se)*", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","=!","notes")
.format.digit.separator <<- " "
.format.decimal.character <<- ","
}
else if (style=="default") {
.format.table.parts <<- c("=!","dependent variable label","dependent variables","models","columns","numbers","objects","-","coefficients","-","omit","-","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","sigma2","theta(se)*", "AIC","BIC","UBRE","rho(se)*","Mills(se)*", "SER(df)","F statistic(df)*","chi2(df)*","Wald(df)*","LR(df)*","logrank(df)*","=!","notes")
}
}
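# Illustrative usage of the presets above, assuming the exported stargazer()
# entry point dispatches here through its 'style' argument:
if (FALSE) {
  m <- lm(mpg ~ wt, data = mtcars)
  stargazer(m, style = "aer")    # American Economic Review conventions
  stargazer(m, style = "apsr")   # American Political Science Review conventions
}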
.apply <-
function(auto.t, auto.p)
{
if ((!is.null(apply.coef)) || ((!is.null(apply.se)))) {
if (!is.null(apply.coef)) { .global.coefficients <<- apply(.global.coefficients, c(1,2), apply.coef) }
if (!is.null(apply.se)) { .global.std.errors <<- apply(.global.std.errors, c(1,2), apply.se) }
if (auto.t == TRUE) { .global.t.stats <<- .global.coefficients / .global.std.errors }
if (auto.p == TRUE) { .global.p.values <<- 2 * pnorm( abs( .global.t.stats ) , mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE) }
}
if (!is.null(apply.t)) { .global.t.stats <<- apply(.global.t.stats, c(1,2), apply.t) }
if (!is.null(apply.p)) { .global.p.values <<- apply(.global.p.values, c(1,2), apply.p) }
}
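# Illustrative sketch of the recomputation above: when a coefficient transform
# such as exp() is applied (e.g., for odds ratios), z statistics and two-sided
# normal p-values are rebuilt from the transformed matrices. Made-up numbers:
if (FALSE) {
  coefs <- matrix(c(0.5, -0.2), nrow = 2)
  ses   <- matrix(c(0.1,  0.1), nrow = 2)
  z <- coefs / ses
  2 * pnorm(abs(z), mean = 0, sd = 1, lower.tail = FALSE)   # two-sided p-values
}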
.AIC <-
function(object.name) {
model.name <- .get.model.name(object.name)
if (model.name %in% c("coeftest")) {
return(NA)
}
if (model.name %in% c("lmer","lme","nlme","glmer","nlmer", "ergm", "gls", "Gls", "lagsarlm", "errorsarlm", "", "Arima")) {
return(as.vector(AIC(object.name)))
}
if (model.name %in% c("censReg")) {
return(as.vector(AIC(object.name)[1]))
}
if (model.name %in% c("fGARCH")) {
return(object.name@fit$ics["AIC"])
}
if (model.name %in% c("maBina")) {
return(as.vector(object.name$w$aic))
}
if (model.name %in% c("arima")) {
return(as.vector(object.name$aic))
}
else if (!is.null(.summary.object$aic)) {
return(as.vector(.summary.object$aic))
}
else if (!is.null(object.name$AIC)) {
return(as.vector(object.name$AIC))
}
return(NA)
}
.BIC <-
function(object.name) {
model.name <- .get.model.name(object.name)
if (model.name %in% c("coeftest","maBina","Arima")) {
return(NA)
}
if (model.name %in% c("censReg")) {
return(as.vector(BIC(object.name)[1]))
}
if (model.name %in% c("fGARCH")) {
return(object.name@fit$ics["BIC"])
}
if (model.name %in% c("lmer","lme","nlme","glmer","nlmer", "ergm", "gls", "Gls")) {
return(as.vector(BIC(object.name)))
}
if (model.name %in% c("arima")) {
return(as.vector(object.name$bic))
}
else if (!is.null(.summary.object$bic)) {
return(as.vector(.summary.object$bic))
}
else if (!is.null(object.name$BIC)) {
return(as.vector(object.name$BIC))
}
return(NA)
}
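# Illustrative note: the branches above read model-specific slots; the
# stats-package generics compute the same information criteria directly for
# ordinary fits. Hypothetical usage, unrelated to the extractors themselves:
if (FALSE) {
  m <- glm(am ~ wt, family = binomial, data = mtcars)
  AIC(m)   # matches summary(m)$aic, the slot read above for glm-style models
  BIC(m)
}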
.chi.stat <-
function(object.name) {
chi.output <- as.vector(rep(NA,times=3))
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH","Arima","maBina","coeftest","lmer", "Gls", "glmer", "nlmer", "normal.gam","logit.gam","probit.gam","poisson.gam","gam()"))) {
if (!is.null(.summary.object$chi)) {
chi.value <- suppressMessages(.summary.object$chi)
df.value <- suppressMessages(.summary.object$df) - suppressMessages(.summary.object$idf)
chi.p.value <- pchisq(chi.value, df.value, ncp=0, lower.tail = FALSE, log.p = FALSE)
chi.output <- as.vector(c(chi.value, df.value, chi.p.value))
}
else if (model.name %in% c("cph", "lrm", "ols", "psm")) {
chi.value <- object.name$stat["Model L.R."]
df.value <- object.name$stat["d.f."]
chi.p.value <- pchisq(chi.value, df.value, ncp=0, lower.tail = FALSE, log.p = FALSE)
chi.output <- as.vector(c(chi.value, df.value, chi.p.value))
}
else if (model.name %in% c("probit.ss")) {
chi.value <- object.name$LRT$LRT
df.value <- object.name$LRT$df
chi.p.value <- pchisq(chi.value, df.value, ncp=0, lower.tail = FALSE, log.p = FALSE)
chi.output <- as.vector(c(chi.value, df.value, chi.p.value))
}
}
names(chi.output) <- c("statistic","df1","p-value")
return(cbind(chi.output))
}
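# Illustrative sketch of the p-value computation used above, with made-up
# numbers: a model chi-square of 12.3 on 4 degrees of freedom.
if (FALSE) {
  pchisq(12.3, df = 4, ncp = 0, lower.tail = FALSE, log.p = FALSE)   # ~0.0153
}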
.coefficient.table.part <-
function(part, which.variable, variable.name=NULL) {
# coefficient variable name
if (part=="variable name") {
# use intercept name for intercept, otherwise variable name
if (is.na(.format.covariate.labels[.which.variable.label])) {
if (.format.coefficient.variables.capitalize == TRUE) { cat(" ", .format.coefficient.variables.left, toupper(variable.name), .format.coefficient.variables.right, sep="") }
else { cat(" ", .format.coefficient.variables.left, variable.name, .format.coefficient.variables.right, sep="") }
}
else { cat(" ", .format.coefficient.variables.left, .format.covariate.labels[.which.variable.label], .format.coefficient.variables.right, sep="") }
}
# coefficients and stars
else if ((part=="coefficient") || (part=="coefficient*")) {
for (i in seq(1:length(.global.models))) {
if (!is.na(.global.coefficients[.global.coefficient.variables[which.variable],i])) {
# report the coefficient
cat(" & ", .iround(.global.coefficients[.global.coefficient.variables[which.variable],i],.format.round.digits),sep="")
# add stars to denote statistical significance
if (part=="coefficient*") {
p.value <- .global.p.values[.global.coefficient.variables[which.variable],i]
.enter.significance.stars(p.value)
}
}
else {
cat(" & ",sep="")
}
# if single-row, follow up with standard error / confidence interval
if ((.format.single.row == TRUE) && (("standard error" %in% .format.coefficient.table.parts) || ("standard error*" %in% .format.coefficient.table.parts))) {
if (.format.dec.mark.align == TRUE) { space.char <- "$ $"}
else { space.char <- " "}
if (!is.na(.global.std.errors[.global.coefficient.variables[which.variable],i])) {
# report standard errors or confidence intervals
.format.ci.use <- .format.ci[i]
if (is.na(.format.ci.use)) {
for (j in i:1) {
if (!is.na(.format.ci[j])) {
.format.ci.use <- .format.ci[j]
break
}
}
}
if (.format.ci.use == TRUE) {
# if ci level is NA, find the most recent set level
.format.ci.level.use <- .format.ci.level[i]
if (is.na(.format.ci.level.use)) {
for (j in i:1) {
if (!is.na(.format.ci.level[j])) {
.format.ci.level.use <- .format.ci.level[j]
break
}
}
}
z.value <- qnorm((1 + .format.ci.level.use)/2)
coef <- .global.coefficients[.global.coefficient.variables[which.variable],i]
se <- .global.std.errors[.global.coefficient.variables[which.variable],i]
ci.lower.bound <- coef - z.value * se
ci.upper.bound <- coef + z.value * se
if (!is.null(ci.custom[[i]])) {
ci.lower.bound.temp <- .global.ci.lb[.global.coefficient.variables[which.variable],i]
ci.upper.bound.temp <- .global.ci.rb[.global.coefficient.variables[which.variable],i]
if (!is.na(ci.lower.bound.temp)) { ci.lower.bound <- ci.lower.bound.temp }
if (!is.na(ci.upper.bound.temp)) { ci.upper.bound <- ci.upper.bound.temp }
}
if (!is.null(apply.ci)) {
ci.lower.bound <- do.call(apply.ci, list(ci.lower.bound))
ci.upper.bound <- do.call(apply.ci, list(ci.upper.bound))
}
if (.format.dec.mark.align == TRUE) {
hyphen <- paste("$",.format.ci.separator,"$", sep="")
}
else {
hyphen <- .format.ci.separator
}
cat(space.char, .format.std.errors.left, .iround(ci.lower.bound,.format.round.digits),hyphen,.iround(ci.upper.bound,.format.round.digits),.format.std.errors.right,sep="")
}
else {
cat(space.char, .format.std.errors.left, .iround(.global.std.errors[.global.coefficient.variables[which.variable],i],.format.round.digits),.format.std.errors.right,sep="")
}
# add stars to denote statistical significance
if ("standard error*" %in% .format.coefficient.table.parts) {
p.value <- .global.p.values[.global.coefficient.variables[which.variable],i]
.enter.significance.stars(p.value)
}
}
}
}
cat(" \\\\ \n ")
}
# standard errors
else if (((part=="standard error") || (part=="standard error*")) && (.format.single.row==FALSE)) {
for (i in seq(1:length(.global.models))) {
if (!is.na(.global.std.errors[.global.coefficient.variables[which.variable],i])) {
# report standard errors or confidence intervals
.format.ci.use <- .format.ci[i]
if (is.na(.format.ci.use)) {
for (j in i:1) {
if (!is.na(.format.ci[j])) {
.format.ci.use <- .format.ci[j]
break
}
}
}
if (.format.ci.use == TRUE) {
# if ci level is NA, find the most recent set level
.format.ci.level.use <- .format.ci.level[i]
if (is.na(.format.ci.level.use)) {
for (j in i:1) {
if (!is.na(.format.ci.level[j])) {
.format.ci.level.use <- .format.ci.level[j]
break
}
}
}
z.value <- qnorm((1 + .format.ci.level.use)/2)
coef <- .global.coefficients[.global.coefficient.variables[which.variable],i]
se <- .global.std.errors[.global.coefficient.variables[which.variable],i]
ci.lower.bound <- coef - z.value * se
ci.upper.bound <- coef + z.value * se
if (!is.null(ci.custom[[i]])) {
ci.lower.bound.temp <- .global.ci.lb[.global.coefficient.variables[which.variable],i]
ci.upper.bound.temp <- .global.ci.rb[.global.coefficient.variables[which.variable],i]
if (!is.na(ci.lower.bound.temp)) { ci.lower.bound <- ci.lower.bound.temp }
if (!is.na(ci.upper.bound.temp)) { ci.upper.bound <- ci.upper.bound.temp }
}
if (!is.null(apply.ci)) {
ci.lower.bound <- do.call(apply.ci, list(ci.lower.bound))
ci.upper.bound <- do.call(apply.ci, list(ci.upper.bound))
}
if (.format.dec.mark.align == TRUE) {
hyphen <- paste("$",.format.ci.separator,"$", sep="")
}
else {
hyphen <- .format.ci.separator
}
if (.format.dec.mark.align == TRUE) {
cat(" & \\multicolumn{1}{c}{", .format.std.errors.left, .iround(ci.lower.bound,.format.round.digits),hyphen,.iround(ci.upper.bound,.format.round.digits),.format.std.errors.right,"}",sep="")
}
else {
cat(" & ", .format.std.errors.left, .iround(ci.lower.bound,.format.round.digits),hyphen,.iround(ci.upper.bound,.format.round.digits),.format.std.errors.right,sep="")
}
}
else {
cat(" & ", .format.std.errors.left, .iround(.global.std.errors[.global.coefficient.variables[which.variable],i],.format.round.digits),.format.std.errors.right,sep="")
}
# add stars to denote statistical significance
if (part=="standard error*") {
p.value <- .global.p.values[.global.coefficient.variables[which.variable],i]
.enter.significance.stars(p.value)
}
}
else {
cat(" & ",sep="")
}
}
cat(" \\\\ \n ")
}
# p-values
else if ((part=="p-value") || (part=="p-value*")) {
for (i in seq(1:length(.global.models))) {
if (!is.na(.global.p.values[.global.coefficient.variables[which.variable],i])) {
# report p-values
cat(" & ", .format.p.values.left, .iround(.global.p.values[.global.coefficient.variables[which.variable],i],.format.round.digits,round.up.positive=TRUE),.format.p.values.right,sep="")
# add stars to denote statistical significance
if (part=="p-value*") {
p.value <- .global.p.values[.global.coefficient.variables[which.variable],i]
.enter.significance.stars(p.value)
}
}
else {
cat(" & ",sep="")
}
}
cat(" \\\\ \n ")
}
# t-statistics
else if ((part=="t-stat") || (part=="t-stat*")) {
for (i in seq(1:length(.global.models))) {
if (!is.na(.global.t.stats[.global.coefficient.variables[which.variable],i])) {
# report t-statistics
cat(" & ", .format.t.stats.left, .iround(.global.t.stats[.global.coefficient.variables[which.variable],i],.format.round.digits),.format.t.stats.right,sep="")
# add stars to denote statistical significance
if (part=="t-stat*") {
p.value <- .global.p.values[.global.coefficient.variables[which.variable],i]
.enter.significance.stars(p.value)
}
}
else {
cat(" & ",sep="")
}
}
cat(" \\\\ \n ")
}
# empty line
else if (part==" ") {
.table.empty.line()
}
# horizontal line
else if (part=="-") {
cat("\\hline ")
.table.insert.space()
cat(" \n")
}
# double horizontal line
else if (part=="=") {
cat("\\hline \n")
cat("\\hline ")
.table.insert.space()
cat(" \n")
}
}
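# Illustrative sketch of the normal-approximation interval assembled above:
# z = qnorm((1 + level)/2), bounds = coef -/+ z * se. Made-up numbers:
if (FALSE) {
  coef <- 0.5; se <- 0.2; level <- 0.95
  z <- qnorm((1 + level) / 2)            # ~1.96
  c(coef - z * se, coef + z * se)        # ~0.108, ~0.892
}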
.coefficient.variables <-
function(object.name) {
model.name <- .get.model.name(object.name)
if (model.name %in% c("ls", "normal", "logit", "probit", "relogit", "poisson", "negbin", "normal.gee", "logit.gee", "probit.gee", "poisson.gee", "normal.gam",
"logit.gam", "probit.gam", "poisson.gam", "normal.survey", "poisson.survey", "probit.survey", "logit.survey", "gamma", "gamma.gee", "gamma.survey",
"exp", "weibull", "coxph", "clogit", "lognorm", "tobit", "tobit(AER)", "brglm", "glm()", "Glm()", "svyglm()", "gee()", "survreg()", "gam()", "plm", "ivreg", "pmg", "lmrob", "glmrob",
"dynlm", "gls", "rq", "lagsarlm", "errorsarlm", "gmm", "mclogit")) {
return(as.vector(names(object.name$coefficients)))
}
else if (model.name %in% c("Arima")) {
return(names(object.name$coef))
}
else if (model.name %in% c("fGARCH")) {
return(rownames(object.name@fit$matcoef))
}
else if (model.name %in% c("censReg")) {
return(rownames(.summary.object$estimate))
}
else if (model.name %in% c("mnlogit")) {
return(rownames(.summary.object$CoefTable))
}
else if (model.name %in% c("lme","nlme")) {
return(rownames(.summary.object$tTable))
}
else if (model.name %in% c("felm")) {
return(row.names(object.name$coefficients))
}
else if (model.name %in% c("maBina")) {
return(as.vector(rownames(object.name$out)))
}
else if (model.name %in% c("mlogit")) {
return(as.vector(rownames(.summary.object$CoefTable)))
}
else if (model.name %in% c("hetglm")) {
return(as.vector(names(object.name$coefficients$mean)))
}
else if (model.name %in% c("selection","heckit")) {
if (!.global.sel.equation) {
indices <- .summary.object$param$index$betaO ### outcome equation
}
else {
indices <- .summary.object$param$index$betaS ### selection equation
}
return(as.vector(names(.summary.object$estimate[indices, 1])))
}
else if (model.name %in% c("probit.ss", "binaryChoice")) {
return(as.vector(names(.summary.object$estimate[,1])))
}
else if (model.name %in% c("coeftest")) {
return(as.vector(rownames(object.name)))
}
else if (model.name %in% c("clm")) {
if (.format.ordered.intercepts == FALSE) { return(as.vector(names(object.name$beta))) }
else { return(c(as.vector(names(object.name$beta)), as.vector(names(object.name$alpha)))) }
}
else if (model.name %in% c("lmer", "glmer", "nlmer", "pgmm")) {
return(as.vector(rownames(.summary.object$coefficients)))
}
else if (model.name %in% c("ergm", "rem.dyad")) {
return(as.vector(names(object.name$coef)))
}
else if (model.name %in% c("betareg")) {
return(as.vector(names(object.name$coefficients$mean)))
}
else if (model.name %in% c("zeroinfl", "hurdle")) {
if (.global.zero.component==FALSE) {
return(as.vector(names(object.name$coefficients$count)))
}
else {
return(as.vector(names(object.name$coefficients$zero)))
}
}
else if (model.name %in% c("cloglog.net", "gamma.net", "logit.net", "probit.net")) {
return(as.vector(rownames(.summary.object$coefficients)))
}
else if (model.name %in% c("rlm")) {
return(as.vector(rownames(suppressMessages(.summary.object$coefficients))))
}
else if (model.name %in% c("ologit", "oprobit", "polr()")) {
coef.temp <- as.vector(rownames(suppressMessages(.summary.object$coefficients)))
if (.format.ordered.intercepts == FALSE) { return(coef.temp[seq(from=1, to=length(coef.temp)-(length(suppressMessages(.summary.object$lev))-1))]) }
else { return(coef.temp) }
}
else if (model.name %in% c("arima")) {
return(as.vector(names(object.name$coef)))
}
else if (model.name %in% c("multinom")) {
return(as.vector(object.name$coefnames))
}
else if (model.name %in% c("weibreg", "coxreg", "phreg", "aftreg", "bj", "cph", "Gls", "lrm", "ols", "psm", "Rq")) {
return(as.vector(names(object.name$coefficients)))
}
return(NULL)
}
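# Note: .coefficient.variables() is the single source of truth for coefficient
# row names; the .get.* extractors below re-align their output against this
# vector, so the coefficient, SE, CI, t-statistic and p-value matrices always
# share compatible rows.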
.dependent.variable <-
function(object.name, model.num=1) {
model.name <- .get.model.name(object.name)
if (model.name %in% c("lmer", "glmer", "nlmer", "gls")) {
return(as.vector(as.character(formula(object.name))[2]))
}
if (model.name %in% c("Arima")) {
return(as.character(object.name$call$x))
}
if (model.name %in% c("fGARCH")) {
return(as.character(object.name@call$data))
}
if (model.name %in% c("multinom")) {
if (!is.null(rownames(.summary.object$coefficients))) {
return(as.vector(rownames(.summary.object$coefficients)[model.num]))
}
}
if (model.name %in% c("rem.dyad", "coeftest")) {
return(as.vector(as.character(" ")))
}
if (model.name %in% c("gmm")) {
formula <- object.name$call[2]
position <- regexpr("~", formula, fixed=TRUE)
return( .trim(substr(formula, 1, position-1)) )
}
if (model.name %in% c("selection","heckit")) {
if (!.global.sel.equation) {
formula <- object.name$call["outcome"] ### outcome
}
else {
formula <- object.name$call["selection"] ### outcome
}
position <- regexpr("~", formula, fixed=TRUE)
return( .trim(substr(formula, 1, position-1)))
}
if (model.name %in% c("probit.ss","binaryChoice")) {
formula <- object.name$call["formula"]
position <- regexpr("~", formula, fixed=TRUE)
return( .trim(substr(formula, 1, position-1)))
}
if (model.name %in% c("maBina")) {
object.name <- object.name$w
}
if (model.name %in% c("lme")) {
object.name$call$formula <- object.name$call$fixed
}
if (model.name %in% c("nlme")) {
object.name$call$formula <- object.name$call$model
}
if (!is.null(object.name$call$formula)) {
if (is.symbol(object.name$call$formula)) {
formula.temp <- as.formula(object.name)
}
else {
formula.temp <- object.name$call$formula
}
if (length(as.vector(as.character(formula.temp)))>1) {
return(as.vector(as.character(formula.temp)[2]))
}
}
if (!is.null(object.name$formula)) {
if (is.symbol(object.name$formula)) {
formula.temp <- as.formula(object.name)
}
else {
formula.temp <- object.name$formula
}
if (length(as.vector(as.character(formula.temp)))>1) { # this is for zelig$result ones
return(as.vector(as.character(formula.temp)[2]))
}
}
if (!is.null(object.name$formula2)) {
if (is.symbol(object.name$formula2)) {
formula.temp <- as.formula(object.name)
}
else {
formula.temp <- object.name$formula2
}
if (length(as.vector(as.character(formula.temp)))>1) { # z.ls
return(as.vector(as.character(formula.temp)[2]))
}
}
return("")
}
.dependent.variable.written <-
function(object.name, model.num=1) {
model.name <- .get.model.name(object.name)
if (model.name %in% c("tobit","ologit","oprobit", "relogit", "coxph","exp","lognorm","weibull","survreg()","arima",
"aftreg", "weibreg", "coxreg", "phreg", "bj", "cph", "psm")) {
written.var <- .inside.bracket(.dependent.variable(object.name))[1]
}
else if (model.name %in% c("clogit","mclogit")) {
written.var <- .inside.bracket(.dependent.variable(object.name))[2]
}
else { written.var <- .dependent.variable(object.name, model.num) }
# some formatting changes
# remove everything before and including the last dollar sign from the variable name
temp <- strsplit(written.var,"$",fixed=TRUE)
written.var <- temp[[1]][length(temp[[1]])]
# if underscore or ^, etc. in variable name, then insert an escape \ before it
written.var <- .remove.special.chars(written.var)
return(written.var)
}
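# Illustrative sketch of the cleanup above: everything up to and including the
# last "$" is dropped, so a variable passed as "df$y" is printed as "y".
if (FALSE) {
  written.var <- "df$y"
  temp <- strsplit(written.var, "$", fixed = TRUE)
  temp[[1]][length(temp[[1]])]   # "y"
}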
.enter.significance.stars <-
function(p.value, force.math=FALSE) {
if ((!is.na(p.value)) && (!is.null(p.value))) {
if (.format.dec.mark.align == TRUE) {
c <- ""
}
else {
c <- "$"
}
if (force.math == TRUE) { c <- "$" }
cutoffs <- .format.cutoffs[length(.format.cutoffs):1]
stars <- .format.stars[length(.format.stars):1]
for (i in 1:length(cutoffs)) {
if (!is.na(cutoffs[i])) {
if (p.value < cutoffs[i]) {
cat(c,"^{",stars[i],"}",c,sep="")
break
}
}
}
}
}
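# Illustrative sketch of the star logic above: thresholds are scanned from the
# most to the least stringent, and the first cutoff the p-value beats wins.
# Standalone re-implementation with made-up default cutoffs:
if (FALSE) {
  stars.for <- function(p, cutoffs = c(0.1, 0.05, 0.01),
                        stars = c("*", "**", "***")) {
    for (i in rev(seq_along(cutoffs))) {
      if (!is.na(cutoffs[i]) && p < cutoffs[i]) return(stars[i])
    }
    ""
  }
  stars.for(0.003)   # "***"
  stars.for(0.07)    # "*"
}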
.F.stat <-
function(object.name) {
F.stat.output <- as.vector(rep(NA,times=4))
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH", "Arima", "maBina","coeftest", "lmer", "glmer", "nlmer", "Gls"))) {
if (model.name %in% c("plm")) {
F.stat.value <- .summary.object$fstatistic$statistic
df.numerator <- .summary.object$fstatistic$parameter["df1"]
df.denominator <- .summary.object$fstatistic$parameter["df2"]
F.stat.p.value <- .summary.object$fstatistic$p.value
F.stat.output <- as.vector(c(F.stat.value, df.numerator, df.denominator, F.stat.p.value))
}
else if (!is.null(suppressMessages(.summary.object$fstatistic["value"]))) {
F.stat.value <- .summary.object$fstatistic["value"]
df.numerator <- .summary.object$fstatistic["numdf"]
df.denominator <- .summary.object$fstatistic["dendf"]
F.stat.p.value <- pf(F.stat.value, df.numerator, df.denominator, lower.tail=FALSE)
F.stat.output <- as.vector(c(F.stat.value, df.numerator, df.denominator, F.stat.p.value))
}
}
names(F.stat.output) <- c("statistic","df1","df2","p-value")
return(cbind(F.stat.output))
}
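# Illustrative sketch of the p-value computation above, with made-up numbers:
# an F statistic of 5.2 on (3, 96) degrees of freedom.
if (FALSE) {
  pf(5.2, df1 = 3, df2 = 96, lower.tail = FALSE)   # roughly 0.002
}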
.gcv.UBRE <-
function(object.name) {
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH", "Arima", "maBina", "coeftest", "lmer", "Gls", "glmer", "nlmer"))) {
if (!is.null(object.name$gcv.ubre)) {
return(as.vector(object.name$gcv.ubre))
}
}
return(NA)
}
# pad vector a with NAs to the length of b when b is the longer vector
.fill.NA <-
function(a, b) {
a.temp <- a; b.temp <- b
if (length(a) >= length(b)) {
return(a.temp)
}
else {
length(a.temp) <- length(b)
return(a.temp)
}
}
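# Illustrative usage of .fill.NA(), with made-up vectors:
if (FALSE) {
  a <- c(1, 2); b <- c(10, 20, 30)
  .fill.NA(a, b)   # 1 2 NA  (a padded to length(b))
  .fill.NA(b, a)   # 10 20 30  (unchanged; b is at least as long)
}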
.get.model.name <-
function(object.name) {
return.value <- .model.identify(object.name)
if (substr(return.value,1,5)=="glm()") { return.value <- "glm()" }
if (substr(return.value,1,8)=="svyglm()") { return.value <- "svyglm()" }
if (substr(return.value,1,5)=="gee()") { return.value <- "gee()" }
if (substr(return.value,1,5)=="gam()") { return.value <- "gam()" }
if (substr(return.value,1,6)=="polr()") { return.value <- "polr()" }
if (substr(return.value,1,9)=="survreg()") { return.value <- "survreg()" }
return(return.value)
}
.get.p.values.1 <-
function(object.name, user.given=NULL, auto=TRUE, f.coef=NULL, f.se=NULL, user.coef=NULL, user.se=NULL, model.num=1) {
if (!is.null(user.given)) {
if (.model.identify(object.name) == "multinom") {
if (!is.null(nrow(user.given))) {
user.given <- as.vector(user.given[model.num,])
}
}
return(user.given)
}
if (auto == TRUE) {
if ((!is.null(user.coef)) || (!is.null(user.se))) {
#if (.model.identify(object.name) == "multinom") {
# f.coef <- as.vector(f.coef[model.num,])
# f.se <- as.vector(f.se[model.num,])
#}
# set the lengths of the vectors to be equal to each other
coef.div <- .fill.NA(f.coef, f.se)
se.div <- .fill.NA(f.se, f.coef)
t.out <- (coef.div / se.div)
auto.return <- 2*pnorm(abs(t.out), mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE)
names(auto.return) <- names(f.coef)
return( auto.return )
}
}
model.name <- .get.model.name(object.name)
if (model.name %in% c("ls", "normal", "logit", "probit", "relogit", "poisson", "negbin", "normal.survey", "poisson.survey", "probit.survey", "logit.survey", "gamma", "gamma.survey",
"cloglog.net", "gamma.net", "logit.net", "probit.net", "brglm", "glm()", "Glm()", "svyglm()", "plm", "pgmm", "ivreg", "lmrob", "glmrob", "dynlm", "rq", "gmm","mclogit","felm")) {
return(.summary.object$coefficients[,4])
}
if (model.name %in% c("censReg")) {
return(.summary.object$estimate[,4])
}
if (model.name %in% c("mnlogit")) {
return(.summary.object$CoefTable[,4])
}
if (model.name %in% c("fGARCH")) {
return(object.name@fit$matcoef[,4])
}
if (model.name %in% c("lme", "nlme")) {
return(.summary.object$tTable[,5])
}
if (model.name %in% c("maBina")) {
return(as.vector(object.name$out[,4]))
}
if (model.name %in% c("mlogit")) {
return(as.vector(.summary.object$CoefTable[,4]))
}
if (model.name %in% c("coeftest")) {
return(as.vector(object.name[,4]))
}
if (model.name %in% c("hetglm")) {
return(as.vector(.summary.object$coefficients$mean[,4]))
}
if (model.name %in% c("selection","heckit")) {
if (!.global.sel.equation) {
indices <- .summary.object$param$index$betaO ### outcome equation
}
else {
indices <- .summary.object$param$index$betaS ### selection equation
}
return(as.vector(.summary.object$estimate[indices,4]))
}
if (model.name %in% c("probit.ss", "binaryChoice")) {
return(as.vector(.summary.object$estimate[,4]))
}
if (model.name %in% c("lagsarlm", "errorsarlm")) {
return(.summary.object$Coef[,4])
}
if (model.name %in% c("lmer", "glmer", "nlmer")) {
Vcov <- as.matrix(vcov(object.name, useScale = FALSE))
coefs <- .summary.object$coefficients[,1]
se <- sqrt(diag(Vcov))
tstat <- coefs / se
pval <- 2 * pnorm(abs(tstat), lower.tail = FALSE)
names(pval) <- names(coefs)
return(pval)
}
if (model.name %in% c("Arima")) {
coef.temp <- object.name$coef
se.temp <- sqrt(diag(object.name$var.coef))
tstat <- coef.temp / se.temp
pval <- 2 * pnorm(abs(tstat), lower.tail = FALSE)
return(pval)
}
if (model.name %in% c("ergm")) {
return(.summary.object$coefs[,4])
}
if (model.name %in% c("clm")) {
if (.format.ordered.intercepts == FALSE) {
return(.summary.object$coefficients[(length(object.name$alpha)+1):(length(object.name$coefficients)),4])
}
else {
return(.summary.object$coefficients[,4])
}
}
else if (model.name %in% c("pmg")) {
coef.temp <- .summary.object$coefficients
std.err.temp <- sqrt(diag(.summary.object$vcov))
t.stat.temp <- coef.temp / std.err.temp
df.temp <- length(.summary.object$residuals)
return( 2 * pt(abs(t.stat.temp), df=df.temp, lower.tail = FALSE, log.p = FALSE) )
}
else if (model.name %in% c("zeroinfl", "hurdle")) {
if (.global.zero.component==FALSE) {
return(.summary.object$coefficients$count[,4])
}
else {
return(.summary.object$coefficients$zero[,4])
}
}
else if (model.name %in% c("normal.gee", "logit.gee", "poisson.gee", "probit.gee", "gamma.gee", "gee()")) {
return(2*pnorm(abs(.summary.object$coefficients[,"Robust z"]), mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE))
}
else if (model.name %in% c("normal.gam", "logit.gam", "probit.gam", "poisson.gam", "gam()")) {
return(.summary.object$p.pv)
}
else if (model.name %in% c("coxph", "clogit")) {
return(.summary.object$coef[,"Pr(>|z|)"])
}
else if (model.name %in% c("exp","lognorm","weibull","tobit", "survreg()")) {
return(.summary.object$table[,"p"])
}
else if (model.name %in% c("rlm")) {
coef.temp <- suppressMessages(.summary.object$coefficients[,"t value"])
coef.temp <- 2*pnorm(abs(coef.temp[seq(from=1, to=length(coef.temp))]), mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE)
return(coef.temp)
}
else if (model.name %in% c("ologit", "oprobit", "polr()")) {
coef.temp <- suppressMessages(.summary.object$coefficients[,"t value"])
if (.format.ordered.intercepts == FALSE) { return(2*pnorm(abs(coef.temp[seq(from=1, to=length(coef.temp)-(length(suppressMessages(.summary.object$lev))-1))]), mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE)) }
else {
return( 2*pnorm(abs(coef.temp[seq(from=1, to=length(coef.temp))]), mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE) )
}
}
else if (model.name %in% c("arima")) {
return(2*pnorm( abs(object.name$coef / (sqrt(diag(object.name$var.coef))) ), mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE))
}
else if (model.name %in% c("tobit(AER)")){
return(.summary.object$coefficients[,"Pr(>|z|)"])
}
else if (model.name %in% c("multinom")) {
if (is.null(nrow(.summary.object$coefficients))) {
coef.temp <- .summary.object$coefficients
se.temp <- .summary.object$standard.errors
}
else {
coef.temp <- .summary.object$coefficients[model.num,]
se.temp <- .summary.object$standard.errors[model.num,]
}
return( 2*pnorm( abs( (coef.temp) / (se.temp) ) , mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE) )
}
else if (model.name %in% c("betareg")) {
return(.summary.object$coefficients$mean[,"Pr(>|z|)"])
}
else if (model.name %in% c("gls")) {
coef.temp <- object.name$coefficients
se.temp <- sqrt(diag(object.name$varBeta))
t.temp <- coef.temp / se.temp
p.temp <- 2*pnorm( abs( t.temp ) , mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE)
return(p.temp)
}
else if (model.name %in% c("weibreg", "coxreg", "phreg", "aftreg", "bj", "cph", "Gls", "lrm", "ols", "psm", "Rq")) {
coef.temp <- object.name$coefficients
se.temp <- sqrt(diag(object.name$var))
t.temp <- coef.temp / se.temp
p.temp <- 2*pnorm( abs( t.temp ) , mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE)
return(p.temp)
}
else if (model.name %in% c("rem.dyad")) {
coef.temp <- object.name$coef
se.temp <- sqrt(diag(object.name$cov))
t.temp <- coef.temp / se.temp
p.temp <- 2*pnorm( abs( t.temp ) , mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE)
return(p.temp)
}
return(NULL)
}
.get.p.values <-
function(object.name, user.given=NULL, auto=TRUE, f.coef=NULL, f.se=NULL, user.coef=NULL, user.se=NULL, model.num=1) {
out <- .get.p.values.1(object.name, user.given, auto, f.coef, f.se, user.coef, user.se, model.num)
coef.vars <- .coefficient.variables(object.name)
if (is.null(names(out))) {
if (length(out) < length(coef.vars)) {
out.temp <- rep(NA, times=length(coef.vars)-length(out))
out <- c(out, out.temp)
}
else if (length(out) > length(coef.vars)) {
out <- out[1:length(coef.vars)]
}
names(out) <- coef.vars
}
else {
out.temp <- rep(NA, times = length(coef.vars))
names(out.temp) <- coef.vars
for (i in 1:length(out)) {
name <- names(out)[i]
if (name %in% coef.vars) {
out.temp[name] <- out[i]
}
}
out <- out.temp
}
return(out)
}
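# Illustrative sketch of the alignment step in the wrapper above: a named,
# partial statistics vector is re-indexed against the model's coefficient
# names, with NA for anything missing. Made-up inputs:
if (FALSE) {
  coef.vars <- c("x1", "x2", "x3")
  out <- c(x3 = 0.04, x1 = 0.20)
  out.temp <- rep(NA_real_, length(coef.vars))
  names(out.temp) <- coef.vars
  out.temp[names(out)] <- out
  out.temp   # x1 = 0.20, x2 = NA, x3 = 0.04
}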
.get.scale <-
function(object.name) {
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH","Arima","maBina", "coeftest", "Gls", "lmer", "glmer", "nlmer"))) {
if (!is.null(object.name$scale)) {
if (model.name %in% c("normal.gee", "logit.gee", "poisson.gee", "probit.gee", "gamma.gee", "gee()", "exp","lognorm","weibull","tobit","survreg()","tobit(AER)")) {
return(as.vector(object.name$scale))
}
}
}
return(NA)
}
.get.sigma2 <-
function(object.name) {
model.name <- .get.model.name(object.name)
if (model.name %in% c("arima","fGARCH","maBina", "coeftest", "Gls", "lmer", "glmer", "nlmer")) {
return(NA)
}
if (model.name %in% c("lagsarlm", "errorsarlm")) {
return(.summary.object$s2)
}
if (!is.null(object.name$sigma2)) {
return(as.vector(object.name$sigma2))
}
return(NA)
}
.get.rho <-
function(object.name) {
model.name <- .get.model.name(object.name)
rho.output <- as.vector(rep(NA,times=4))
if (model.name %in% c("selection")) {
i <- object.name$param$index$rho
if (is.null(i)) { i <- object.name$param$index$errTerms["rho"] }
if (!is.null(i)) {
rho.output <- as.vector(.summary.object$estimate[i,])
}
}
if (model.name %in% c("heckit")) {
if (object.name$method == "2step") {
i <- object.name$param$index$rho
rho.output <- as.vector(.summary.object$estimate[i,])
}
}
names(rho.output) <- c("statistic","se","tstat","p-value")
return(cbind(rho.output))
}
.get.mills <-
function(object.name) {
model.name <- .get.model.name(object.name)
mills.output <- as.vector(rep(NA,times=4))
if (model.name %in% c("heckit", "selection")) {
i <- object.name$param$index$Mills
if (!is.null(i)) {
mills.output <- as.vector(.summary.object$estimate[i,])
}
}
names(mills.output) <- c("statistic","se","tstat","p-value")
return(cbind(mills.output))
}
.get.standard.errors.1 <-
function(object.name, user.given=NULL, model.num=1) {
if (!is.null(user.given)) {
if (.model.identify(object.name) == "multinom") {
if (!is.null(nrow(user.given))) { user.given <- as.vector(user.given[model.num,]) }
}
return(user.given)
}
model.name <- .get.model.name(object.name)
if (model.name %in% c("ls", "normal", "logit", "probit", "relogit", "poisson", "negbin", "normal.survey", "poisson.survey", "probit.survey", "logit.survey", "gamma", "gamma.survey",
"cloglog.net", "gamma.net", "logit.net", "probit.net", "brglm", "glm()", "Glm()", "svyglm()", "plm", "pgmm", "ivreg", "lmrob", "glmrob", "dynlm", "gmm","mclogit")) {
return(.summary.object$coefficients[,"Std. Error"])
}
if (model.name %in% c("Arima")) {
return(sqrt(diag(object.name$var.coef)))
}
if (model.name %in% c("censReg")) {
return(.summary.object$estimate[,2])
}
if (model.name %in% c("mnlogit")) {
return(.summary.object$CoefTable[,2])
}
if (model.name %in% c("fGARCH")) {
return(object.name@fit$matcoef[,2])
}
if (model.name %in% c("lme", "nlme")) {
return(.summary.object$tTable[,2])
}
if (model.name %in% c("maBina")) {
return(as.vector(object.name$out[,2]))
}
if (model.name %in% c("mlogit")) {
return(as.vector(.summary.object$CoefTable[,2]))
}
if (model.name %in% c("coeftest")) {
return(as.vector(object.name[,2]))
}
if (model.name %in% c("selection","heckit")) {
if (!.global.sel.equation) {
indices <- .summary.object$param$index$betaO ### outcome equation
}
else {
indices <- .summary.object$param$index$betaS ### selection equation
}
return(as.vector(.summary.object$estimate[indices,2]))
}
if (model.name %in% c("probit.ss", "binaryChoice")) {
return(as.vector(.summary.object$estimate[,2]))
}
if (model.name %in% c("hetglm")) {
return(as.vector(.summary.object$coefficients$mean[,2]))
}
if (model.name %in% c("lmer", "glmer", "nlmer")) {
Vcov <- as.matrix(vcov(object.name, useScale = FALSE))
coefs <-.summary.object$coefficients[,1]
se <- sqrt(diag(Vcov))
names(se) <- names(coefs)
return(se)
}
if (model.name %in% c("lagsarlm", "errorsarlm")) {
return(.summary.object$Coef[,2])
}
if (model.name %in% c("ergm")) {
return(.summary.object$coefs[,2])
}
if (model.name %in% c("rq","felm")) {
return(.summary.object$coefficients[,2])
}
if (model.name %in% c("clm")) {
if (.format.ordered.intercepts == FALSE) {
return(.summary.object$coefficients[(length(object.name$alpha)+1):(length(object.name$coefficients)),2])
}
else {
return(.summary.object$coefficients[,2])
}
}
else if (model.name %in% c("pmg")) {
return (sqrt(diag(.summary.object$vcov)))
}
if (model.name %in% c("zeroinfl", "hurdle")) {
if (.global.zero.component == FALSE) {
return(.summary.object$coefficients$count[,"Std. Error"])
}
else {
return(.summary.object$coefficients$zero[,"Std. Error"])
}
}
else if (model.name %in% c("normal.gee", "logit.gee", "poisson.gee", "probit.gee", "gamma.gee", "gee()")) {
return(.summary.object$coefficients[,"Robust S.E."])
}
else if (model.name %in% c("normal.gam", "logit.gam", "probit.gam", "poisson.gam", "gam()")) {
temp.se <- .summary.object$se
names(temp.se) <- names(.summary.object$p.coeff)
return(temp.se)
}
else if (model.name %in% c("coxph")) {
return(.summary.object$coef[,"se(coef)"])
}
else if (model.name %in% c("clogit")) {
return(.summary.object$coef[,"se(coef)"])
}
else if (model.name %in% c("exp","lognorm","weibull","tobit","survreg()")) {
return(.summary.object$table[,"Std. Error"])
}
else if (model.name %in% c("rlm")) {
return(suppressMessages(.summary.object$coefficients[,"Std. Error"]))
}
else if (model.name %in% c("ologit", "oprobit", "polr()")) {
se.temp <- suppressMessages(.summary.object$coefficients[,"Std. Error"])
if (.format.ordered.intercepts == FALSE) { return(se.temp[seq(from=1, to=length(se.temp)-(length(suppressMessages(.summary.object$lev))-1))]) }
else { return(se.temp) }
}
else if (model.name %in% c("arima")) {
return( sqrt(diag(object.name$var.coef)) )
}
else if (model.name %in% c("tobit(AER)")){
return(.summary.object$coefficients[,"Std. Error"])
}
else if (model.name %in% c("multinom")) {
if (is.null(nrow(.summary.object$coefficients))) {
se.temp <- .summary.object$standard.errors
}
else {
se.temp <- .summary.object$standard.errors[model.num,]
}
return(se.temp)
}
else if (model.name %in% c("betareg")) {
return(.summary.object$coefficients$mean[,"Std. Error"])
}
else if (model.name %in% c("gls")) {
se.temp <- sqrt(diag(object.name$varBeta))
return(se.temp)
}
else if (model.name %in% c("weibreg", "coxreg", "phreg", "aftreg", "bj", "cph", "Gls", "lrm", "ols", "psm", "Rq")) {
return( sqrt(diag(object.name$var) ) )
}
else if (model.name %in% c("rem.dyad")) {
return( sqrt(diag(object.name$cov) ) )
}
return(NULL)
}
.get.standard.errors <-
function(object.name, user.given=NULL, model.num=1) {
out <- .get.standard.errors.1(object.name, user.given, model.num)
coef.vars <- .coefficient.variables(object.name)
if (is.null(names(out))) {
if (length(out) < length(coef.vars)) {
out.temp <- rep(NA, times=length(coef.vars)-length(out))
out <- c(out, out.temp)
}
else if (length(out) > length(coef.vars)) {
out <- out[1:length(coef.vars)]
}
names(out) <- coef.vars
}
else {
out.temp <- rep(NA, times = length(coef.vars))
names(out.temp) <- coef.vars
for (i in 1:length(out)) {
name <- names(out)[i]
if (name %in% coef.vars) {
out.temp[name] <- out[i]
}
}
out <- out.temp
}
return(out)
}
.get.ci.lb.1 <-
function(object.name, user.given=NULL, model.num=1) {
if (!is.null(user.given)) {
if (.model.identify(object.name) == "multinom") {
if (!is.null(nrow(user.given))) { user.given <- as.vector(user.given[model.num,]) }
}
return(user.given)
}
return(NULL)
}
.get.ci.lb <-
function(object.name, user.given=NULL, model.num=1) {
out <- .get.ci.lb.1(object.name, user.given, model.num)
coef.vars <- .coefficient.variables(object.name)
if (is.null(names(out))) {
if (length(out) < length(coef.vars)) {
out.temp <- rep(NA, times=length(coef.vars)-length(out))
out <- c(out, out.temp)
}
else if (length(out) > length(coef.vars)) {
out <- out[1:length(coef.vars)]
}
names(out) <- coef.vars
}
else {
out.temp <- rep(NA, times = length(coef.vars))
names(out.temp) <- coef.vars
for (i in 1:length(out)) {
name <- names(out)[i]
if (name %in% coef.vars) {
out.temp[name] <- out[i]
}
}
out <- out.temp
}
return(out)
}
.get.ci.rb.1 <-
function(object.name, user.given=NULL, model.num=1) {
if (!is.null(user.given)) {
if (.model.identify(object.name) == "multinom") {
if (!is.null(nrow(user.given))) { user.given <- as.vector(user.given[model.num,]) }
}
return(user.given)
}
return(NULL)
}
.get.ci.rb <-
function(object.name, user.given=NULL, model.num=1) {
out <- .get.ci.rb.1(object.name, user.given, model.num)
coef.vars <- .coefficient.variables(object.name)
if (is.null(names(out))) {
if (length(out) < length(coef.vars)) {
out.temp <- rep(NA, times=length(coef.vars)-length(out))
out <- c(out, out.temp)
}
else if (length(out) > length(coef.vars)) {
out <- out[1:length(coef.vars)]
}
names(out) <- coef.vars
}
else {
out.temp <- rep(NA, times = length(coef.vars))
names(out.temp) <- coef.vars
for (i in 1:length(out)) {
name <- names(out)[i]
if (name %in% coef.vars) {
out.temp[name] <- out[i]
}
}
out <- out.temp
}
return(out)
}
.get.t.stats.1 <-
function(object.name, user.given=NULL, auto=TRUE, f.coef=NULL, f.se=NULL, user.coef=NULL, user.se=NULL, model.num=1) {
if (!is.null(user.given)) {
if (.model.identify(object.name) == "multinom") {
if (!is.null(nrow(user.given))) {
user.given <- as.vector(user.given[model.num,])
}
}
return(user.given)
}
if (auto == TRUE) {
if ((!is.null(user.coef)) || (!is.null(user.se))) {
#if (.model.identify(object.name) == "multinom") {
# f.coef <- as.vector(f.coef[model.num,])
# f.se <- as.vector(f.se[model.num,])
#}
# set the lengths of the vectors to be equal to each other
coef.div <- .fill.NA(f.coef, f.se)
se.div <- .fill.NA(f.se, f.coef)
auto.return <- coef.div / se.div
names(auto.return) <- names(f.coef)
return(auto.return)
}
}
model.name <- .get.model.name(object.name)
if (model.name %in% c("ls", "normal", "logit", "probit", "relogit", "poisson", "negbin", "normal.survey", "poisson.survey", "probit.survey", "logit.survey", "gamma", "gamma.survey",
"cloglog.net", "gamma.net", "logit.net", "probit.net", "glm()", "Glm()", "svyglm()","plm", "pgmm", "ivreg", "lmrob", "glmrob", "dynlm", "gmm", "mclogit", "felm")) {
return(.summary.object$coefficients[,3])
}
if (model.name %in% c("censReg")) {
return(.summary.object$estimate[,3])
}
if (model.name %in% c("mnlogit")) {
return(.summary.object$CoefTable[,3])
}
if (model.name %in% c("fGARCH")) {
return(object.name@fit$matcoef[,3])
}
if (model.name %in% c("lme", "nlme")) {
return(.summary.object$tTable[,4])
}
if (model.name %in% c("coeftest")) {
return(as.vector(object.name[,3]))
}
if (model.name %in% c("maBina")) {
return(as.vector(object.name$out[,3]))
}
if (model.name %in% c("mlogit")) {
return(as.vector(.summary.object$CoefTable[,3]))
}
if (model.name %in% c("selection","heckit")) {
if (!.global.sel.equation) {
indices <- .summary.object$param$index$betaO ### outcome equation
}
else {
indices <- .summary.object$param$index$betaS ### selection equation
}
return(as.vector(.summary.object$estimate[indices,3]))
}
if (model.name %in% c("probit.ss", "binaryChoice")) {
return(as.vector(.summary.object$estimate[,3]))
}
if (model.name %in% c("hetglm")) {
return(as.vector(.summary.object$coefficients$mean[,3]))
}
if (model.name %in% c("lmer", "glmer", "nlmer")) {
Vcov <- as.matrix(vcov(object.name, useScale = FALSE))
coefs <- .summary.object$coefficients[,1]
se <- sqrt(diag(Vcov))
tstat <- coefs / se
names(tstat) <- names(coefs)
return(tstat)
}
if (model.name %in% c("ergm")) {
return((.summary.object$coefs[,1])/(.summary.object$coefs[,2]))
}
if (model.name %in% c("lagsarlm", "errorsarlm")) {
return(.summary.object$Coef[,3])
}
if (model.name %in% c("rq")) {
return(.summary.object$coefficients[,3])
}
if (model.name %in% c("clm")) {
if (.format.ordered.intercepts == FALSE) {
return(.summary.object$coefficients[(length(object.name$alpha)+1):(length(object.name$coefficients)),3])
}
else {
return(.summary.object$coefficients[,3])
}
}
else if (model.name %in% c("pmg")) {
coef.temp <- .summary.object$coef
std.err.temp <- sqrt(diag(.summary.object$vcov))
t.stat.temp <- coef.temp / std.err.temp
return(t.stat.temp)
}
else if (model.name %in% c("zeroinfl", "hurdle")) {
if (.global.zero.component == FALSE) {
return(.summary.object$coefficients$count[,3])
}
else {
return(.summary.object$coefficients$zero[,3])
}
}
else if (model.name %in% c("normal.gee", "logit.gee", "poisson.gee", "probit.gee", "gamma.gee", "gee()")) {
return(.summary.object$coefficients[,"Robust z"])
}
else if (model.name %in% c("normal.gam", "logit.gam", "probit.gam", "poisson.gam", "gam()")) {
return(.summary.object$p.t)
}
else if (model.name %in% c("coxph", "clogit")) {
return(.summary.object$coef[,"z"])
}
else if (model.name %in% c("exp","lognorm","weibull", "tobit","survreg()")) {
return(.summary.object$table[,"z"])
}
else if (model.name %in% c("rlm")) {
return(suppressMessages(.summary.object$coefficients[,"t value"]))
}
else if (model.name %in% c("ologit", "oprobit", "polr()")) {
tstat.temp <- suppressMessages(.summary.object$coefficients[,"t value"])
if (.format.ordered.intercepts == FALSE) { return(tstat.temp[seq(from=1, to=length(tstat.temp)-(length(suppressMessages(.summary.object$lev))-1))]) }
else { return(tstat.temp) }
}
else if (model.name %in% c("arima")) {
return( object.name$coef / (sqrt(diag(object.name$var.coef))) )
}
else if (model.name %in% c("tobit(AER)")){
return(.summary.object$coefficients[,"z value"])
}
else if (model.name %in% c("multinom")) {
if (is.null(nrow(.summary.object$coefficients))) {
coef.temp <- .summary.object$coefficients
se.temp <- .summary.object$standard.errors
}
else {
coef.temp <- .summary.object$coefficients[model.num,]
se.temp <- .summary.object$standard.errors[model.num,]
}
return( (coef.temp) / (se.temp) )
}
else if (model.name %in% c("betareg")) {
return(.summary.object$coefficients$mean[,"z value"])
}
else if (model.name %in% c("gls")) {
coef.temp <- object.name$coefficients
se.temp <- sqrt(diag(object.name$varBeta))
return(coef.temp / se.temp)
}
else if (model.name %in% c("weibreg", "coxreg", "phreg", "aftreg", "bj", "cph", "Gls", "lrm", "ols", "psm", "Rq")) {
coef.temp <- object.name$coefficients
se.temp <- sqrt(diag(object.name$var))
return(coef.temp / se.temp )
}
else if (model.name %in% c("Arima")) {
coef.temp <- object.name$coef
se.temp <- sqrt(diag(object.name$var.coef))
return(coef.temp / se.temp )
}
else if (model.name %in% c("rem.dyad")) {
coef.temp <- object.name$coef
se.temp <- sqrt(diag(object.name$cov))
return(coef.temp / se.temp )
}
return(NULL)
}
.get.t.stats <-
function(object.name, user.given=NULL, auto=TRUE, f.coef=NULL, f.se=NULL, user.coef=NULL, user.se=NULL, model.num=1) {
out <- .get.t.stats.1(object.name, user.given, auto, f.coef, f.se, user.coef, user.se, model.num)
coef.vars <- .coefficient.variables(object.name)
if (is.null(names(out))) {
if (length(out) < length(coef.vars)) {
out.temp <- rep(NA, times=length(coef.vars)-length(out))
out <- c(out, out.temp)
}
else if (length(out) > length(coef.vars)) {
out <- out[1:length(coef.vars)]
}
names(out) <- coef.vars
}
else {
out.temp <- rep(NA, times = length(coef.vars))
names(out.temp) <- coef.vars
for (i in 1:length(out)) {
name <- names(out)[i]
if (name %in% coef.vars) {
out.temp[name] <- out[i]
}
}
out <- out.temp
}
return(out)
}
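# Note (added for clarity): .get.t.stats() reconciles the raw statistics with
# the model's coefficient names -- an unnamed vector is padded with NA or
# truncated to the number of coefficients, while a named vector is matched
# name-by-name, with NA for any coefficient that has no supplied statistic.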
.get.theta <-
function(object.name) {
theta.output <- as.vector(rep(NA,times=4))
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH","Arima","maBina", "coeftest", "Gls", "lmer", "glmer", "nlmer"))) {
if ((!is.null(object.name$theta)) && (!is.null(object.name$SE.theta))) {
theta.value <- object.name$theta
theta.se.value <- object.name$SE.theta
theta.tstat.value <- theta.value / theta.se.value
theta.p.value <- 2*pnorm(abs(theta.tstat.value), mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE)
theta.output <- as.vector(c(theta.value, theta.se.value, theta.tstat.value, theta.p.value))
}
}
names(theta.output) <- c("statistic","se","tstat","p-value")
return(cbind(theta.output))
}
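# Note (added for clarity): the theta p-value above is a two-sided test
# against the standard normal, p = 2 * P(Z > |theta / se|).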
.inside.bracket <-
function(s) {
process.string <- ""
return.vector <- NULL
if (!is.character(s)) { return("") }
if (is.null(s)) { return("") }
if (is.na(s)) { return("") }
if (s=="") { return("") }
if (length(s) > 1) { return("") }
inside.inner.bracket <- 0
for (i in seq(from = (regexpr("(",s,fixed=TRUE)[1])+1, to = nchar(s))) {
letter <- substr(s,i,i)
if (letter == "(") { inside.inner.bracket <- inside.inner.bracket + 1 }
if (letter == ")") { inside.inner.bracket <- inside.inner.bracket - 1 }
if ((letter == ",") && (inside.inner.bracket == 0)) {
return.vector <- c(return.vector, process.string)
process.string <- ""
}
else if (inside.inner.bracket >= 0) { process.string <- paste(process.string, letter, sep="") }
else { break }
}
if (process.string != "") { return.vector <- c(return.vector, process.string) }
return (.trim(return.vector))
}
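# Illustrative example (comment only, not run): .inside.bracket() splits the
# top-level, comma-separated arguments inside the first parenthesis of a
# string, leaving nested parentheses intact:
#   .inside.bracket("foo(a, b(c, d), e)")   # returns c("a", "b(c, d)", "e")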
.iround <-
function(x, decimal.places=0, round.up.positive=FALSE, simply.output=FALSE) {
x.original <- x
first.part <- ""
if (is.na(x) || is.null(x)) { return("") }
if (simply.output == TRUE) {
if (!is.numeric(x)) { return(.remove.special.chars(x)) }
}
if (x.original < 0) { x <- abs(x) }
if (!is.na(decimal.places)) {
if ((.format.until.nonzero.digit == FALSE) || (decimal.places <= 0)) {
round.result <- round(x, digits=decimal.places)
}
else {
temp.places <- decimal.places
if (!.is.all.integers(x)) {
while ((round(x, digits=temp.places) == 0) && (temp.places < (decimal.places + .format.max.extra.digits))) {
temp.places <- temp.places + 1
}
}
round.result <- round(x, digits=temp.places)
decimal.places <- temp.places
}
if ((round.up.positive==TRUE) && (round.result < x)) { # useful for p-values that should be rounded up
if (x > (10^((-1)*(decimal.places+1)))) {
round.result <- round.result + 10^((-1)*decimal.places)
}
else { round.result <- 0 }
}
}
else { # if the decimal place is NA
round.result <- x
}
round.result.char <- as.character(format(round.result, scientific=FALSE))
split.round.result <- unlist(strsplit(round.result.char, "\\."))
## first deal with digit separator
for (i in seq(from=1, to=length(.format.digit.separator.where))) {
if (.format.digit.separator.where[i]<=0) {
.format.digit.separator.where[i] <<- -1
}
}
separator.count <- 1
length.integer.part <- nchar(split.round.result[1])
digits.in.separated.unit <- 0
for (i in seq(from=length.integer.part, to=1)) {
if ((digits.in.separated.unit == .format.digit.separator.where[separator.count]) && (substr(split.round.result[1],i,i)!="-")){
first.part <- paste(.format.digit.separator,first.part,sep="")
if (separator.count < length(.format.digit.separator.where)) { separator.count <- separator.count + 1 }
digits.in.separated.unit <- 0
}
first.part <- paste(substr(split.round.result[1],i,i),first.part,sep="")
digits.in.separated.unit <- digits.in.separated.unit + 1
}
# remove the initial zero when there are decimal places, if that is requested
if (.format.initial.zero==FALSE) {
if ((round.result > 0) && (round.result < 1)) {
if ((is.na(decimal.places)) || (decimal.places > 0)) {
first.part <- ""
}
}
}
if (x.original < 0) { # use math-mode for a better looking negative sign
if (.format.dec.mark.align == TRUE) {
first.part <- paste("-", first.part, sep="")
}
else {
first.part <- paste("$-$", first.part, sep="")
}
}
# now deal with the decimal part
if (!is.na(decimal.places)) {
if (decimal.places <= 0) {
return(first.part)
}
}
if (length(split.round.result)==2) {
if (is.na(decimal.places)) { return(paste(first.part,.format.decimal.character,split.round.result[2],sep="")) }
if (nchar(split.round.result[2]) < decimal.places) {
decimal.part <- split.round.result[2]
for (i in seq(from = 1,to = (decimal.places - nchar(split.round.result[2])))) {
decimal.part <- paste(decimal.part,"0", sep="")
}
return(paste(first.part,.format.decimal.character,decimal.part,sep=""))
}
else { return(paste(first.part,.format.decimal.character,split.round.result[2],sep="")) }
}
else if (length(split.round.result)==1) {
if (is.na(decimal.places)) { return(first.part) }   # integer result and no rounding requested
decimal.part <- ""
for (i in seq(from = 1,to = decimal.places)) {
decimal.part <- paste(decimal.part,"0", sep="")
}
return(paste(first.part,.format.decimal.character,decimal.part,sep=""))
}
else { return(NULL) }
}
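# Illustrative example (comment only, not run; the output depends on the
# package-level formatting globals such as .format.digit.separator,
# .format.digit.separator.where and .format.decimal.character, assumed here
# to be ",", 3 and "." respectively):
#   .iround(1234.5678, 2)   # "1,234.57"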
is.wholenumber <-
function(x, tol = .Machine$double.eps^0.5) abs(x - round(x)) < tol
.is.all.integers <-
function(x) {
if (!is.numeric(x)) { return(FALSE) }
if (length(x[!is.na(x)]) == length(is.wholenumber(x)[(!is.na(x)) & (is.wholenumber(x)==TRUE)])) {
return(TRUE)
}
else { return (FALSE) }
}
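# Illustrative example (comment only, not run): NA entries are ignored, so
#   .is.all.integers(c(1, 2, NA))   # TRUE
#   .is.all.integers(c(1.5, 2))     # FALSE
#   .is.all.integers("1")           # FALSE (non-numeric input)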
.log.likelihood <-
function(object.name) {
model.name <- .get.model.name(object.name)
if (model.name %in% c("coeftest","maBina","gamma.net","logit.net","probit.net","cloglog.net")) {
return(NA)
}
if (model.name %in% c("fGARCH")) {
return(object.name@fit$value)
}
if (model.name %in% c("mlogit", "mnlogit")) {
return(as.vector(object.name$logLik[1]))
}
if (model.name %in% c("arima", "betareg", "zeroinfl", "hurdle", "hetglm", "Arima")) {
return(as.vector(object.name$loglik))
}
if (model.name %in% c("selection","binaryChoice", "probit.ss")) {
return(as.vector(.summary.object$loglik))
}
if (model.name %in% c("lme","nlme","lmer", "glmer", "nlmer","censReg")) {
return(as.vector(logLik(object.name)[1]))
}
if (model.name %in% c("lagsarlm", "errorsarlm")) {
return(as.vector(.summary.object$LL))
}
if (model.name %in% c("clm", "gls")) {
return(as.vector(object.name$logLik))
}
else if (model.name %in% c("coxph", "clogit", "exp", "weibull", "lognorm","tobit", "tobit(AER)", "survreg()")) {
return(as.vector(.summary.object$loglik[2]))
}
else if (model.name %in% c("weibreg", "coxreg", "phreg", "aftreg")) {
return(as.vector(object.name$loglik[2]))
}
else if (!is.null(object.name$aic)) {
return(as.vector(-(0.5)*(object.name$aic-2*length(.summary.object$coefficients[,"Estimate"]))))
}
return(NA)
}
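# Note (added for clarity): the final fallback above inverts
# AIC = 2k - 2*logLik, i.e. logLik = -(AIC - 2k) / 2, where k is the number
# of estimated coefficients.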
.logrank.stat <-
function(object.name) {
logrank.output <- as.vector(rep(NA,times=3))
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH","Arima","maBina", "coeftest", "Gls", "lmer", "glmer", "nlmer"))) {
if (!is.null(.summary.object$sctest)) {
logrank.value <- suppressMessages(.summary.object$sctest[1])
df.value <- suppressMessages(.summary.object$sctest[2])
logrank.p.value <- suppressMessages(.summary.object$sctest[3])
logrank.output <- as.vector(c(logrank.value, df.value, logrank.p.value))
}
}
names(logrank.output) <- c("statistic","df1","p-value")
return(cbind(logrank.output))
}
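# Note (added for clarity): for survival models the score (logrank) test is
# read from summary()'s `sctest` slot and reported as (statistic, df, p-value).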
.lr.stat <-
function(object.name) {
log.output <- as.vector(rep(NA,times=3))
model.name <- .get.model.name(object.name)
if (model.name %in% c("mlogit")) {
log.value <- as.vector(.summary.object$lratio$statistic["chisq"])
if (!is.null(log.value)) {
df.value <- as.vector(length(object.name$coeff))
log.p.value <- as.vector(pchisq(log.value,df.value,lower.tail=FALSE))
log.output <- as.vector(c(log.value, df.value, log.p.value))
}
}
else if (model.name %in% c("lagsarlm", "errorsarlm")) {
log.value <- as.vector(.summary.object$LR1$statistic)
df.value <- as.vector(.summary.object$LR1$parameter)
log.p.value <- as.vector(.summary.object$LR1$p.value)
log.output <- as.vector(c(log.value, df.value, log.p.value))
}
else if (!(model.name %in% c("arima","fGARCH","Arima","maBina","coeftest","Gls","lmer","glmer","nlmer"))) {
if (!is.null(.summary.object$logtest)) {
log.value <- suppressMessages(.summary.object$logtest[1])
df.value <- suppressMessages(.summary.object$logtest[2])
log.p.value <- suppressMessages(.summary.object$logtest[3])
log.output <- as.vector(c(log.value, df.value, log.p.value))
}
}
names(log.output) <- c("statistic","df1","p-value")
return(cbind(log.output))
}
.max.r.squared <-
function(object.name) {
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH","fGARCH","Arima","maBina", "coeftest", "lmer", "glmer", "nlmer", "Gls", "Arima"))) {
if (model.name %in% c("coxph", "clogit")) {
return(as.vector(.summary.object$rsq[2]))
}
}
return(NA)
}
.model.identify <-
function(object.name) {
if (class(object.name)[1]=="NULL") { #### !!!!! continue this
return("NULL")
}
if (class(object.name)[1]=="Arima") {
return("Arima")
}
if (class(object.name)[1]=="fGARCH") {
return("fGARCH")
}
if (class(object.name)[1]=="censReg") {
return("censReg")
}
if (class(object.name)[1]=="ergm") {
return("ergm")
}
if (class(object.name)[1]=="mnlogit") {
return("mnlogit")
}
if (class(object.name)[1]=="lme") {
return("lme")
}
if (class(object.name)[1]=="nlme") {
return("nlme")
}
if (class(object.name)[1]=="felm") {
return("felm")
}
if (class(object.name)[1] %in% c("mclogit","mclogitRandeff")) {
return("mclogit")
}
if (class(object.name)[1]=="mlogit") {
return("mlogit")
}
if (class(object.name)[1]=="maBina") {
return("maBina")
}
if (class(object.name)[1]=="coeftest") {
return("coeftest")
}
if (class(object.name)[1]=="rem.dyad") {
return("rem.dyad")
}
if (class(object.name)[1]=="lmerMod") {
return("lmer")
}
if (class(object.name)[1]=="glmerMod") {
return("glmer")
}
if (class(object.name)[1]=="nlmerMod") {
return("nlmer")
}
if (!is.null(object.name$call)) {
if (object.name$call[1]=="lm()") { return("ls") }
else if ((object.name$call[1]=="glm()") || (object.name$call[1]=="Glm()")) {
if (object.name$family$family=="gaussian") {
if (object.name$family$link=="identity") {
return("normal")
}
}
else if (object.name$family$family=="binomial") {
if (object.name$family$link=="probit") {
return("probit")
}
if (object.name$family$link=="logit") {
return("logit")
}
}
else if (object.name$family$family=="poisson") {
if (object.name$family$link=="log") {
return("poisson")
}
}
else if (object.name$family$family=="Gamma") {
if (object.name$family$link=="inverse") {
return("gamma")
}
}
return(paste("glm()#",object.name$family$family,"#",object.name$family$link, sep=""))
}
else if (object.name$call[1]=="svyglm()") {
if (object.name$family$family=="gaussian") {
if (object.name$family$link=="identity") {
return("normal.survey")
}
}
else if ((object.name$family$family=="binomial") || (object.name$family$family=="quasibinomial")) {
if (object.name$family$link=="probit") {
return("probit.survey")
}
if (object.name$family$link=="logit") {
return("logit.survey")
}
}
else if (object.name$family$family=="poisson") {
if (object.name$family$link=="log") {
return("poisson.survey")
}
}
else if (object.name$family$family=="Gamma") {
if (object.name$family$link=="inverse") {
return("gamma.survey")
}
}
return(paste("svyglm()#",object.name$family$family,"#",object.name$family$link, sep=""))
}
else if (object.name$call[1]=="gam()") {
if (object.name$family$family=="gaussian") {
if (object.name$family$link=="identity") {
return("normal.gam")
}
}
else if (object.name$family$family=="binomial") {
if (object.name$family$link=="probit") {
return("probit.gam")
}
if (object.name$family$link=="logit") {
return("logit.gam")
}
}
else if (object.name$family$family=="poisson") {
if (object.name$family$link=="log") {
return("poisson.gam")
}
}
else if (object.name$family$family=="Gamma") {
if (object.name$family$link=="inverse") {
return("gamma.gam")
}
}
return(paste("gam()#",object.name$family$family,"#",object.name$family$link, sep=""))
}
else if (object.name$call[1]=="polr()") {
if (object.name$method=="logistic") {
return("ologit")
}
else if (object.name$method=="probit") {
return("oprobit")
}
return(paste("polr()#",object.name$method, sep=""))
}
else if (object.name$call[1]=="gee()") {
if (object.name$family$family=="gaussian") {
if (object.name$family$link=="identity") {
return("normal.gee")
}
}
else if (object.name$family$family=="binomial") {
if (object.name$family$link=="probit") {
return("probit.gee")
}
if (object.name$family$link=="logit") {
return("logit.gee")
}
}
else if (object.name$family$family=="poisson") {
if (object.name$family$link=="log") {
return("poisson.gee")
}
}
else if (object.name$family$family=="Gamma") {
if (object.name$family$link=="inverse") {
return("gamma.gee")
}
}
return(paste("gee()#",object.name$family$family,"#",object.name$family$link, sep=""))
}
else if (object.name$call[1]=="survreg()") {
if (object.name$dist=="exponential") {
return("exp")
}
else if (object.name$dist=="weibull") {
return("weibull")
}
else if (object.name$dist=="lognorm") {
return("lognormal")
}
else if (object.name$dist=="gaussian") {
return("tobit")
}
return(paste("survreg()#",object.name$dist, sep=""))
}
else if (object.name$call[1]=="glm.nb()") {
return("negbin")
}
else if (object.name$call[1]=="\"glm.nb\"()") {
return("negbin")
}
if (!is.null(object.name$userCall)) {
if (object.name$userCall[1]=="clogit()") {
return("clogit")
}
}
if (object.name$call[1]=="coxph()") {
return("coxph")
}
if (object.name$call[1]=="pmg()") {
return("pmg")
}
if (object.name$call[1]=="selection()") {
return("selection")
}
if (object.name$call[1]=="heckit()") {
return("heckit")
}
if (object.name$call[1]=="probit()") {
return("probit.ss")
}
if (object.name$call[1]=="binaryChoice()") {
return("binaryChoice")
}
if (object.name$call[1]=="brglm()") {
return("brglm")
}
if (object.name$call[1]=="gls()") {
return("gls")
}
if (object.name$call[1]=="clm()") {
return("clm")
}
if (object.name$call[1]=="lmrob()") {
return("lmrob")
}
if (object.name$call[1]=="glmrob()") {
return("glmrob")
}
if (object.name$call[1]=="dynlm()") {
return("dynlm")
}
if (object.name$call[1]=="rq()") {
return("rq")
}
if (object.name$call[1]=="gmm()") {
return("gmm")
}
if (object.name$call[1]=="lagsarlm()") {
return("lagsarlm")
}
if (object.name$call[1]=="errorsarlm()") {
return("errorsarlm")
}
if (object.name$call[1]=="rlm()") {
return("rlm")
}
if (object.name$call[1]=="aftreg()") {
return("aftreg")
}
if (object.name$call[1]=="coxreg()") {
return("coxreg")
}
if (object.name$call[1]=="phreg()") {
return("phreg")
}
if (object.name$call[1]=="weibreg()") {
return("weibreg")
}
if (object.name$call[1]=="bj()") {
return("bj")
}
if (object.name$call[1]=="cph()") {
return("cph")
}
if (object.name$call[1]=="Gls()") {
return("Gls")
}
if (object.name$call[1]=="lrm()") {
return("lrm")
}
if (object.name$call[1]=="ols()") {
return("ols")
}
if (object.name$call[1]=="psm()") {
return("psm")
}
if (object.name$call[1]=="Rq()") {
return("Rq")
}
if (object.name$call[1]=="hetglm()") {
return("hetglm")
}
else if (object.name$call[1]=="relogit()") {
return("relogit")
}
else if (object.name$call[1]=="netbinom()") {
if (object.name$call$LF=="probit") { return("probit.net") }
if (object.name$call$LF=="logit") { return("logit.net") }
if (object.name$call$LF=="cloglog") { return("cloglog.net") }
}
else if (object.name$call[1]=="netgamma()") {
return("gamma.net")
}
else if (object.name$call[1]=="zelig()") {
if (object.name$call$model %in% c("ls","normal","logit","probit","relogit","poisson","poisson.survey",
"negbinom","probit.survey","logit.survey","normal.gee","logit.gee","probit.gee",
"poisson.gee","normal.gam","logit.gam","probit.gam","poisson.gam","exp",
"coxph","weibull","lognorm","normal.survey","gamma","gamma.survey",
"gamma.gee","cloglog.net","logit.net","probit.net","gamma.net","ologit",
"oprobit","arima","tobit")) {
return(object.name$call$model)
}
else { return("unsupported zelig") }
}
else if (object.name$call[1]=="tobit()") {
return("tobit(AER)")
}
else if (object.name$call[1]=="multinom()") {
return("multinom")
}
else if (object.name$call[1]=="betareg()") {
return("betareg")
}
else if (object.name$call[1]=="zeroinfl()") {
return("zeroinfl")
}
else if (object.name$call[1]=="hurdle()") {
return("hurdle")
}
else if (object.name$call[1]=="plm()") {
return("plm")
}
else if (object.name$call[1]=="pgmm()") {
return("pgmm")
}
else if (object.name$call[1]=="ivreg()") {
return("ivreg")
}
}
return("unknown")
}
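# Illustrative examples (comment only, not run): the returned string keys
# into the model-name lists used throughout this file:
#   .model.identify(lm(mpg ~ wt, data = mtcars))                      # "ls"
#   .model.identify(glm(am ~ wt, family = binomial, data = mtcars))   # "logit"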
.new.table <-
function(object.name, user.coef=NULL, user.se=NULL, user.t=NULL, user.p=NULL, auto.t=TRUE, auto.p=TRUE, user.ci.lb=NULL, user.ci.rb=NULL) {
if (class(object.name)[1] == "Glm") {
.summary.object <<- summary.glm(object.name)
}
else if (!(.model.identify(object.name) %in% c("aftreg", "coxreg","phreg","weibreg", "bj", "cph", "Gls", "lrm", "ols", "psm", "Rq"))) {
.summary.object <<- summary(object.name)
}
else {
.summary.object <<- object.name
}
if (.model.identify(object.name) == "rq") {
.summary.object <<- suppressMessages(summary(object.name, se=.format.rq.se))
}
model.num.total <- 1 # model number for multinom, etc.
if (.model.identify(object.name) == "multinom") {
if (!is.null(nrow(.summary.object$coefficients))) {
model.num.total <- nrow(.summary.object$coefficients)
}
}
# set to null
.global.models <<- NULL
.global.dependent.variables <<- NULL
.global.dependent.variables.written <<- NULL
.global.coefficient.variables <<- NULL
.global.coef.vars.by.model <<- NULL
.global.coefficients <<- NULL
.global.std.errors <<- NULL
.global.ci.lb <<- NULL
.global.ci.rb <<- NULL
.global.t.stats <<- NULL
.global.p.values <<- NULL
.global.N <<- NULL
.global.LL <<- NULL
.global.R2 <<- NULL
.global.max.R2 <<- NULL
.global.adj.R2 <<- NULL
.global.AIC <<- NULL
.global.BIC <<- NULL
.global.scale <<- NULL
.global.UBRE <<- NULL
.global.sigma2 <<- NULL
.global.theta <<- NULL
.global.rho <<- NULL
.global.mills <<- NULL
.global.SER <<- NULL
.global.F.stat <<- NULL
.global.chi.stat <<- NULL
.global.wald.stat <<- NULL
.global.lr.stat <<- NULL
.global.logrank.stat <<- NULL
.global.null.deviance <<- NULL
.global.residual.deviance <<- NULL
for (model.num in 1:model.num.total) {
.global.models <<- c(.global.models, suppressMessages(as.vector(.model.identify(object.name))))
.global.dependent.variables <<- c(.global.dependent.variables, suppressMessages(.dependent.variable(object.name, model.num)))
.global.dependent.variables.written <<- c(.global.dependent.variables.written, suppressMessages(.dependent.variable.written(object.name, model.num)))
.global.coefficient.variables <<- suppressMessages(.coefficient.variables(object.name))
.global.coef.vars.by.model <<- suppressMessages(cbind(.global.coef.vars.by.model, .global.coefficient.variables))
get.coef <- suppressMessages(.get.coefficients(object.name, user.coef, model.num=model.num))
get.se <- suppressMessages(.get.standard.errors(object.name, user.se, model.num=model.num))
.global.coefficients <<- cbind(.global.coefficients, get.coef)
.global.std.errors <<- cbind(.global.std.errors, get.se)
.global.ci.lb <<- suppressMessages(cbind(.global.ci.lb, .get.ci.lb(object.name, user.ci.lb, model.num=model.num)))
.global.ci.rb <<- suppressMessages(cbind(.global.ci.rb, .get.ci.rb(object.name, user.ci.rb, model.num=model.num)))
feed.coef <- NA; feed.se <- NA
if (!is.null(get.coef)) { feed.coef <- get.coef }
if (!is.null(get.se)) { feed.se <- get.se }
if (!is.null(user.coef)) { feed.coef <- user.coef } # feed user-defined coefficients, if available
if (!is.null(user.se)) { feed.se <- user.se } # feed user-defined std errors, if available
.global.t.stats <<- suppressMessages(cbind(.global.t.stats, .get.t.stats(object.name, user.t, auto.t, feed.coef, feed.se, user.coef, user.se, model.num=model.num)))
.global.p.values <<- suppressMessages(cbind(.global.p.values, .get.p.values(object.name, user.p, auto.p, feed.coef, feed.se, user.coef, user.se, model.num=model.num)))
.global.N <<- c(.global.N, suppressMessages(.number.observations(object.name)))
.global.LL <<- c(.global.LL, suppressMessages(.log.likelihood(object.name)))
.global.R2 <<- c(.global.R2, suppressMessages(.r.squared(object.name)))
.global.max.R2 <<- c(.global.max.R2, suppressMessages(.max.r.squared(object.name)))
.global.adj.R2 <<- c(.global.adj.R2, suppressMessages(.adj.r.squared(object.name)))
.global.AIC <<- c(.global.AIC, suppressMessages(.AIC(object.name)))
.global.BIC <<- c(.global.BIC, suppressMessages(.BIC(object.name)))
.global.scale <<- c(.global.scale, suppressMessages(.get.scale(object.name)))
.global.UBRE <<- c(.global.UBRE, suppressMessages(.gcv.UBRE(object.name)))
.global.sigma2 <<- c(.global.sigma2, suppressMessages(.get.sigma2(object.name)))
# accumulate one column per model, as with coefficients above (needed when,
# e.g., multinom yields several equations)
.global.rho <<- cbind(.global.rho, suppressMessages(.get.rho(object.name)))
.global.mills <<- cbind(.global.mills, suppressMessages(.get.mills(object.name)))
.global.theta <<- cbind(.global.theta, suppressMessages(.get.theta(object.name)))
.global.SER <<- cbind(.global.SER, suppressMessages(.SER(object.name)))
.global.F.stat <<- cbind(.global.F.stat, suppressMessages(.F.stat(object.name)))
.global.chi.stat <<- cbind(.global.chi.stat, suppressMessages(.chi.stat(object.name)))
.global.wald.stat <<- cbind(.global.wald.stat, suppressMessages(.wald.stat(object.name)))
.global.lr.stat <<- cbind(.global.lr.stat, suppressMessages(.lr.stat(object.name)))
.global.logrank.stat <<- cbind(.global.logrank.stat, suppressMessages(.logrank.stat(object.name)))
.global.null.deviance <<- cbind(.global.null.deviance, suppressMessages(.null.deviance(object.name)))
.global.residual.deviance <<- cbind(.global.residual.deviance, suppressMessages(.residual.deviance(object.name)))
}
}
.null.deviance <-
function(object.name) {
null.deviance.output <- as.vector(rep(NA,times=3))
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH","Arima","coeftest","Gls","lmer","glmer","nlmer", "ergm"))) {
if (model.name %in% c("rem.dyad", "mclogit")) {
null.deviance.value <- object.name$null.deviance
null.deviance.output <- as.vector(c(null.deviance.value, NA, NA))
}
else if (model.name %in% c("maBina")) {
null.deviance.value <- object.name$w$null.deviance
df.value <- object.name$w$df.null
null.deviance.output <- as.vector(c(null.deviance.value, df.value, NA))
}
else if (!is.null(suppressMessages(.summary.object$null.deviance))) {
null.deviance.value <- suppressMessages(.summary.object$null.deviance)
df.value <- object.name$df.null
null.deviance.output <- as.vector(c(null.deviance.value, df.value, NA))
}
else if (!is.null(object.name$null.deviance)) {
null.deviance.value <- object.name$null.deviance
df.value <- object.name$df.null
null.deviance.output <- as.vector(c(null.deviance.value, df.value, NA))
}
}
names(null.deviance.output) <- c("statistic","df1","p-value")
return(cbind(null.deviance.output))
}
.number.observations <-
function(object.name) {
model.name <- .get.model.name(object.name)
if (model.name %in% c("ls", "normal", "logit", "probit", "relogit",
"poisson", "negbin", "normal.survey", "poisson.survey",
"probit.survey", "logit.survey", "gamma", "gamma.survey",
"z.arima", "brglm","glm()", "Glm()", "svyglm()")) {
return(length(object.name$residuals))
}
else if (model.name %in% c("fGARCH")) {
return(length(object.name@data))
}
else if (model.name %in% c("maBina")) {
return(length(object.name$w$residuals))
}
else if (model.name %in% c("mlogit")) {
return(sum(object.name$freq))
}
else if (model.name %in% c("felm")) {
return(object.name$N)
}
else if (model.name %in% c("mclogit")) {
return(object.name$N)
}
else if (model.name %in% c("selection", "heckit")) {
return(.summary.object$param$nObs)
}
else if (model.name %in% c("binaryChoice", "probit.ss")) {
return(object.name$param$nObs)
}
else if (model.name %in% c("lmer","glmer","nlmer")) {
return(length(resid(object.name)))
}
else if (model.name %in% c("gmm")) {
return(object.name$n)
}
else if (model.name %in% c("plm", "pgmm", "pmg", "rlm", "lmrob", "glmrob", "dynlm", "rq", "lagsarlm", "errorsarlm", "rem.dyad")) {
return(as.vector(length(object.name$residual)))
}
else if (model.name %in% c("mnlogit")) {
return(as.vector(.summary.object$model.size$N))
}
else if (model.name %in% c("hurdle", "zeroinfl")) {
return(as.vector(object.name$n))
}
else if (model.name %in% c("ivreg","clm","hetglm")) {
return(as.vector(object.name$nobs))
}
if (model.name %in% c("normal.gee", "logit.gee", "poisson.gee",
"probit.gee", "gamma.gee", "gee()", "betareg")) {
return(as.vector(.summary.object$nobs))
}
else if (model.name %in% c("normal.gam", "logit.gam", "probit.gam",
"poisson.gam", "coxph", "clogit", "exp", "lognorm", "weibull", "survreg()",
"gam()")) {
return(as.vector(.summary.object$n))
}
else if (model.name %in% c("ologit", "oprobit", "polr()")) {
return(as.vector(.summary.object$nobs))
}
else if (model.name %in% c("gls")) {
return(as.vector(object.name$dims$N))
}
else if (model.name %in% c("tobit(AER)")) {
return(as.vector(.summary.object$n["Total"]))
}
else if (model.name %in% c("Arima","censReg","lme","nlme","weibreg", "coxreg", "phreg", "aftreg", "bj", "cph", "Gls", "lrm", "ols", "psm", "Rq")) {
return(as.vector(nobs(object.name)))
}
return(NA)
}
.rename.intercept <-
function(x) {
out <- x
for (i in seq(1:length(x))) {
if (x[i] %in% .global.intercept.strings) {
out[i] <- .format.intercept.name
}
}
return(out)
}
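# Illustrative example (comment only, not run; assumes "(Intercept)" is among
# .global.intercept.strings and .format.intercept.name is "Constant"):
#   .rename.intercept(c("(Intercept)", "x1"))   # c("Constant", "x1")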
.order.reg.table <-
function(order) {
# first, find the position of the intercept and rename the variable to be the intercept string
intercept.position <- NULL
for (i in seq(1:length(.global.coefficient.variables))) {
if (.global.coefficient.variables[i] %in% .global.intercept.strings) {
intercept.position <- i
.global.coefficient.variables[i] <<- .format.intercept.name
rownames(.global.coefficients)[i] <<- .format.intercept.name
rownames(.global.std.errors)[i] <<- .format.intercept.name
rownames(.global.ci.lb)[i] <<- .format.intercept.name
rownames(.global.ci.rb)[i] <<- .format.intercept.name
rownames(.global.t.stats)[i] <<- .format.intercept.name
rownames(.global.p.values)[i] <<- .format.intercept.name
}
}
# put intercept on bottom if necessary
if (!is.null(intercept.position)) {
# hold contents of last row in placeholder variables
placehold.coefficient.variables <- .global.coefficient.variables[-intercept.position]
intercept.coefficient.variables <- .global.coefficient.variables[intercept.position]
if (.format.intercept.bottom) {
.global.coefficient.variables <<- c(placehold.coefficient.variables, intercept.coefficient.variables)
}
if (.format.intercept.top) {
.global.coefficient.variables <<- c(intercept.coefficient.variables, placehold.coefficient.variables)
}
}
# order according to user's wishes
old.order <- 1:length(.global.coefficient.variables)
new.order <- NULL; add.these <- NULL
if (!is.null(order)) {
# if order is regular expression...
if (is.character(order)) {
not.ordered.yet <- .global.coefficient.variables
for (i in 1:length(order)) {
add.these <- grep(order[i], not.ordered.yet, perl=.format.perl, fixed=FALSE)
not.ordered.yet[add.these] <- NA
if (length(add.these) != 0) {
new.order <- c(new.order, add.these)
}
}
}
else if (is.numeric(order)) { # if order contains indices
order <- unique(order)
order <- order[order <= max(old.order)]
new.order <- old.order[order]
}
}
if (!is.null(new.order)) {
remainder <- old.order[-new.order]
new.order <- c(new.order, remainder)
}
else { new.order <- old.order }
# set the right order
.global.coefficient.variables[old.order] <<- .global.coefficient.variables[new.order]
}
.insert.col.front <- function(d, new.col) {
# values
d.new <- d
d.new[,seq(2,ncol(d)+1)] <- d[,seq(1,ncol(d))]
d.new[,1] <- new.col
# column names
if (!is.null(colnames(d))) {
colnames(d.new)[seq(2,ncol(d)+1)] <- colnames(d)[seq(1,ncol(d))]
colnames(d.new)[1] <- ""
}
return(d.new)
}
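# Illustrative example (comment only, not run): prepends new.col as an
# unnamed first column, shifting the existing columns one position right:
#   d <- data.frame(a = 1:2, b = 3:4)
#   .insert.col.front(d, c("r1", "r2"))   # column names: "", "a", "b"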
.order.data.frame <-
function(d, order, summary=FALSE) {
if ((.format.rownames == TRUE) && (summary == FALSE)) { # if we want to report rownames, add them to data frame
if (!is.null(rownames(d))) { d <- .insert.col.front(d, rownames(d)) }
}
# order according to user's wishes
old.order <- 1:length(colnames(d))
new.order <- NULL; add.these <- NULL
if (!is.null(order)) {
# if order is regular expression...
if (is.character(order)) {
not.ordered.yet <- colnames(d)
for (i in 1:length(order)) {
add.these <- grep(order[i], not.ordered.yet, perl=.format.perl, fixed=FALSE)
not.ordered.yet[add.these] <- NA
if (length(add.these) != 0) {
new.order <- c(new.order, add.these)
}
}
}
else if (is.numeric(order)) { # if order contains indices
order <- unique(order)
order <- order[order <= max(old.order)]
new.order <- old.order[order]
}
}
if (!is.null(new.order)) {
remainder <- old.order[-new.order]
new.order <- c(new.order, remainder)
}
else { new.order <- old.order }
return( d[new.order] )
}
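# Illustrative example (comment only, not run; assumes .format.perl has its
# usual default): columns matching the regular expressions in `order` move to
# the front, the rest keep their original order:
#   d <- data.frame(beta = 1, alpha = 2, gamma = 3)
#   .order.data.frame(d, order = "^alpha", summary = TRUE)   # alpha, beta, gamma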
.print.additional.lines <-
function(part.number=NULL) {
# if no additional lines, then quit the function
if (is.null(.format.add.lines)) { return(NULL) }
max.l <- length(.global.models)+1
for (line in 1:length(.format.add.lines)) {
## add columns if too few, remove if too many
if (max.l > length(.format.add.lines[[line]])) {
.format.add.lines[[line]] <- c(.format.add.lines[[line]], rep(NA, times=max.l - length(.format.add.lines[[line]])))
}
else if (max.l < length(.format.add.lines[[line]])) {
.format.add.lines[[line]] <- .format.add.lines[[line]][1:max.l]
}
## print each line
for (i in 1:max.l) {
if (!is.na(.format.add.lines[[line]][i])) {
if (i==1) {
cat(.format.add.lines[[line]][i], sep="")
}
else {
cat(" & ",.format.add.lines[[line]][i], sep="")
}
}
else {
if (i==1) {
cat(" ", sep="")
}
else {
cat(" & ", sep="")
}
}
}
cat(" \\\\ \n")
}
.table.part.published[part.number] <<- TRUE
}
.print.table.statistic <-
function(.global.var.name, .format.var.name, decimal.digits=.format.round.digits, part.string="", part.number=NULL, type.se=FALSE) {
# default values
report.df <- FALSE
report.p.value <- FALSE
significance.stars <- FALSE
report.se <- FALSE
report.tstat <- FALSE
intelligent.df <- .format.intelligent.df
force.math <- FALSE
# reporting of df, p-value, significance stars, standard errors, t-stats
if (length(grep("(df)", part.string,fixed=TRUE))!=0) { report.df <- TRUE }
if (length(grep("(se)", part.string,fixed=TRUE))!=0) { report.se <- TRUE }
if (length(grep("(t)", part.string,fixed=TRUE))!=0) { report.tstat <- TRUE }
if (length(grep("(p)", part.string,fixed=TRUE))!=0) { report.p.value <- TRUE }
if (length(grep("*", part.string,fixed=TRUE))!=0) { significance.stars <- TRUE }
# first for vectors (statistics without, say, degrees of freedom)
if (is.vector(.global.var.name) == TRUE) {
if (sum(!is.na(.global.var.name))!=0) {
cat (.format.var.name)
for (i in seq(1:length(.global.models))) {
if (!is.na(.global.var.name[i])) {
if (.format.dec.mark.align == TRUE) {
cat(" & \\multicolumn{1}{c}{",.iround(.global.var.name[i], decimal.digits),"}", sep="")
}
else {
cat(" & ",.iround(.global.var.name[i], decimal.digits), sep="")
}
}
else { cat(" & ", sep="") }
}
cat(" \\\\ \n")
.table.part.published[part.number] <<- TRUE
}
}
else if ((is.matrix(.global.var.name) == TRUE) && (type.se == FALSE)) { # for statistics that have degrees of freedom
if (sum(!is.na(as.vector(.global.var.name["statistic",])))!=0) {
# intelligent df reporting (figure out whether only report it on left side, or also)
report.df.left.column <- FALSE
# whittle down unique values
df.all.together <- NULL
for (i in seq(1:length(.global.models))) {
df.string <- ""
for (j in seq(1:(nrow(.global.var.name)- 2))) {
df.string <- paste(df.string,";",as.character(.global.var.name[paste("df",as.character(j),sep=""),i]),sep="")
}
df.all.together <- append(df.all.together, df.string)
}
# remove entries whose degrees of freedom are NA
df.all.together.no.NA <- NULL
for (i in seq(1:length(df.all.together))) {
if (substr(df.all.together[i],1,3)!=";NA") { df.all.together.no.NA <- c(df.all.together.no.NA, df.all.together[i]) }
}
df.all.together.no.NA.unique <- sort(unique(df.all.together.no.NA))
# put df on the left if only one unique df in the table, and not just one column w/ given df
if (intelligent.df == TRUE) {
if ((length(df.all.together.no.NA.unique)==1) && (length(df.all.together.no.NA)>=2)) { report.df.left.column <- TRUE }
}
# write down the line
cat (.format.var.name)
# report df on left side w/ intelligent reporting
if (report.df.left.column == TRUE) {
if (report.df == TRUE) {
cat(" ",.format.df.left,sep="")
df.list <- unlist(strsplit(df.all.together.no.NA.unique[1],";"))
for (i in seq(from=2, to=length(df.list))) {
if (i>=3) { cat(.format.df.separator) }
cat(df.list[i],sep="")
}
cat(.format.df.right,sep="")
}
}
# now, go column by column
for (i in seq(1:length(.global.models))) {
if (!is.na(.global.var.name["statistic",i])) {
if (.format.dec.mark.align==TRUE) {
cat(" & \\multicolumn{1}{c}{",.iround(.global.var.name["statistic",i], decimal.digits), sep="")
force.math <- TRUE
}
else {
cat(" & ",.iround(.global.var.name["statistic",i], decimal.digits), sep="")
}
# significance stars
if ((significance.stars == TRUE) && (!is.na(.global.var.name["p-value",i]))) { .enter.significance.stars(.global.var.name["p-value",i], force.math) }
# degrees of freedom - only report by statistics if not in the left column already
if (report.df.left.column == FALSE) {
if ((report.df == TRUE) && (!is.na(.global.var.name["df1",i]))) {
cat(" ",.format.df.left,sep="")
for (j in seq(1:(nrow(.global.var.name)- 2))) {
if (!is.na(.global.var.name[paste("df",as.character(j),sep=""),i])) {
if (j>=2) { cat(.format.df.separator) }
cat(.global.var.name[paste("df",as.character(j),sep=""),i],sep="")
}
}
cat(.format.df.right,sep="")
}
}
# p-values
if ((report.p.value == TRUE) && (!is.na(.global.var.name["p-value",i]))) {
cat(" ",.format.p.value.left,sep="")
if (!is.na(.global.var.name[paste("df",as.character(j),sep=""),i])) {
cat(.iround(.global.var.name["p-value",i],.format.round.digits, round.up.positive=TRUE),sep="")
}
cat(.format.p.value.right,sep="")
}
if (.format.dec.mark.align==TRUE) {
cat("}")
}
else {
cat("")
}
}
else { cat(" & ", sep="") }
}
cat(" \\\\ \n")
.table.part.published[part.number] <<- TRUE
}
}
else if ((is.matrix(.global.var.name) == TRUE) && (type.se == TRUE)) { # for statistics that have a standard error
if (sum(!is.na(as.vector(.global.var.name["statistic",])))!=0) {
# write down the line
cat (.format.var.name)
# now, go column by column
for (i in seq(1:length(.global.models))) {
if (!is.na(.global.var.name["statistic",i])) {
if (.format.dec.mark.align == TRUE) {
cat(" & \\multicolumn{1}{c}{",.iround(.global.var.name["statistic",i], decimal.digits), sep="")
}
else {
cat(" & ",.iround(.global.var.name["statistic",i], decimal.digits), sep="")
}
# significance stars
if ((significance.stars == TRUE) && (!is.na(.global.var.name["p-value",i]))) { .enter.significance.stars(.global.var.name["p-value",i], force.math) }
# standard errors
if ((report.se == TRUE) && (!is.na(.global.var.name["se",i]))) { cat(" ",.format.se.left,.iround(.global.var.name["se",i], decimal.digits),.format.se.right,sep="") }
# t-statistics
if ((report.tstat == TRUE) && (!is.na(.global.var.name["tstat",i]))) { cat(" ",.format.tstat.left, .iround(.global.var.name["tstat",i], decimal.digits),.format.tstat.right,sep="") }
# p-values
if ((report.p.value == TRUE) && (!is.na(.global.var.name["p-value",i]))) { cat(" ",.format.p.value.left,.iround(.global.var.name["p-value",i], decimal.digits),.format.p.value.right,sep="") }
if (.format.dec.mark.align == TRUE) {
cat("}")
}
else {
cat("")
}
}
else { cat(" & ", sep="") }
}
cat(" \\\\ \n")
.table.part.published[part.number] <<- TRUE
}
}
}
.publish.table <-
function() {
.table.info.comment()
# table header
.table.header()
.table.insert.space()
.table.part.published <<- as.vector(rep(NA, times=length(.format.table.parts))) # to keep track what has been published (to deal intelligently with horizontal lines)
.publish.horizontal.line <<- TRUE # should non-compulsory horizontal lines be published? (yes, if something else published since the previous line)
if (length(.format.table.parts)>=1) {
for (i in seq(1:length(.format.table.parts))) {
.publish.table.part(part=.format.table.parts[i], which.part.number=i)
if (.table.part.published[i]==TRUE) { .publish.horizontal.line <<- TRUE }
if ((.format.table.parts[i]=="-") || (.format.table.parts[i]=="-!") || (.format.table.parts[i]=="=") || (.format.table.parts[i]=="=!")) { .publish.horizontal.line <<- FALSE }
}
}
cat("\\end{tabular} \n")
if (.format.floating == TRUE) { cat("\\end{", .format.floating.environment,"} \n", sep="") }
else if (!is.null(.format.font.size)) {
cat("\\endgroup \n",sep="")
}
}
.publish.table.part <-
function(part, which.part.number) {
.table.part.published[which.part.number] <<- FALSE
# dependent variable label line
if (part=="dependent variable label") {
if (.format.dependent.variable.text.on == TRUE) {
cat(" & \\multicolumn{",length(.global.models),"}{c}{",.format.dependent.variable.text, "} \\\\ \n", sep="")
if (.format.dependent.variable.text.underline == TRUE) { cat("\\cline{2-",length(.global.models)+1,"} \n", sep="") }
}
.table.part.published[which.part.number] <<- TRUE
}
# dependent variables
else if (part=="dependent variables") {
.table.insert.space()
cat(.format.dependent.variables.text)
how.many.columns <- 0
label.counter <- 0
for (i in seq(1:length(.global.models))) {
if (is.null(.format.dep.var.labels)) { .format.dep.var.labels <<- NA }
how.many.columns <- how.many.columns + 1
# write down if next column has different dependent variable, or if end of columns
different.dependent.variable <- FALSE
if (i == length(.global.models)) {different.dependent.variable <- TRUE}
else if ((as.character(.global.dependent.variables[i])) != (as.character(.global.dependent.variables[i+1]))) {different.dependent.variable <- TRUE}
if (.format.multicolumn==FALSE) { different.dependent.variable <- TRUE }
if (different.dependent.variable == TRUE) {
label.counter <- label.counter + 1
if (how.many.columns == 1) {
if (.format.dec.mark.align==TRUE) {
if (is.na(.format.dep.var.labels[label.counter])) {
if (.format.dependent.variables.capitalize == TRUE) { cat(" & \\multicolumn{1}{c}{",.format.dependent.variables.left,toupper(as.character(.global.dependent.variables.written[i])),.format.dependent.variables.right,"}", sep="") }
else { cat(" & \\multicolumn{1}{c}{",.format.dependent.variables.left,as.character(.global.dependent.variables.written[i]),.format.dependent.variables.right,"}", sep="") }
}
else { cat(" & \\multicolumn{1}{c}{",.format.dependent.variables.left,.format.dep.var.labels[label.counter],.format.dependent.variables.right,"}", sep="") }
}
else {
if (is.na(.format.dep.var.labels[label.counter])) {
if (.format.dependent.variables.capitalize == TRUE) { cat(" & ",.format.dependent.variables.left,toupper(as.character(.global.dependent.variables.written[i])),.format.dependent.variables.right, sep="") }
else { cat(" & ",.format.dependent.variables.left,as.character(.global.dependent.variables.written[i]),.format.dependent.variables.right, sep="") }
}
else { cat(" & ",.format.dependent.variables.left,.format.dep.var.labels[label.counter],.format.dependent.variables.right, sep="") }
}
}
else {
if (is.na(.format.dep.var.labels[label.counter])) {
if (.format.dependent.variables.capitalize == TRUE) {cat(" & \\multicolumn{",how.many.columns,"}{c}{",.format.dependent.variables.left,toupper(as.character(.global.dependent.variables.written[i])),.format.dependent.variables.right,"}", sep="")}
else {cat(" & \\multicolumn{",how.many.columns,"}{c}{",.format.dependent.variables.left,as.character(.global.dependent.variables.written[i]),.format.dependent.variables.right,"}", sep="")}
}
else {cat(" & \\multicolumn{",how.many.columns,"}{c}{",.format.dependent.variables.left,.format.dep.var.labels[label.counter],.format.dependent.variables.right,"}", sep="")}
}
how.many.columns <- 0
}
}
cat(" \\\\ \n")
.table.part.published[which.part.number] <<- TRUE
}
# models
else if (part=="models") {
if ((.format.model.names.include==TRUE) && ((.format.models.skip.if.one == FALSE) || ((.format.models.skip.if.one == TRUE) && (length(unique(.global.models))>=2)))) {
.table.insert.space()
cat(.format.models.text)
# rename models based on .formatting preferences
renamed.global.models <- as.matrix(rbind(.global.models, rep("", times=length(.global.models))))
for (i in seq(1:length(.global.models))) {
for (j in seq(1:ncol(.format.model.names))) {
model.strsplit <- unlist(strsplit(.global.models[i], split="#"))
if (.global.models[i]==.format.model.names[1,j]) {
renamed.global.models[1,i] <- .format.model.names[2,j]
renamed.global.models[2,i] <- .format.model.names[3,j]
}
else if ((model.strsplit[1]=="glm()") || (model.strsplit[1]=="svyglm()") || (model.strsplit[1]=="gee()") || (model.strsplit[1]=="gam()")) {
if ( .format.model.function == TRUE ) { renamed.global.models[1,i] <- paste(substr(model.strsplit[1],1,nchar(model.strsplit[1])-2),": ", .format.model.family, model.strsplit[2], sep="") }
else { renamed.global.models[1,i] <- paste(.format.model.family, model.strsplit[2], sep="")}
renamed.global.models[2,i] <- paste(.format.model.link, model.strsplit[3], sep="")
}
else if ((model.strsplit[1]=="survreg()") || (model.strsplit[1]=="polr()")) {
if ( .format.model.function == TRUE ) { renamed.global.models[1,i] <- paste(substr(model.strsplit[1],1,nchar(model.strsplit[1])-2),": ", .format.model.dist, model.strsplit[2], sep="") }
else { renamed.global.models[1,i] <- paste(.format.model.dist, model.strsplit[2], sep="")}
renamed.global.models[2,i] <- ""
}
}
}
if (sum(renamed.global.models[2,]==rep("", times=length(.global.models)))==length(.global.models)) { how.many.model.rows <- 1}
else { how.many.model.rows <- 2 }
for (row in seq(from=1, to=how.many.model.rows)) {
how.many.columns <- 0
for (i in seq(1:length(.global.models))) {
how.many.columns <- how.many.columns + 1
# write down if next column has different dependent variable, or if end of columns
different.model <- FALSE
if (i == length(.global.models)) {different.model <- TRUE}
else if ((as.character(.global.models[i])) != (as.character(.global.models[i+1]))) {different.model <- TRUE}
else if ((as.character(.global.dependent.variables[i])) != (as.character(.global.dependent.variables[i+1]))) {different.model <- TRUE} # subsume models under dependent variables
if (.format.multicolumn==FALSE) { different.model <- TRUE }
if (different.model == TRUE) {
if (how.many.columns == 1) {
if (.format.dec.mark.align == TRUE) {
cat(" & \\multicolumn{1}{c}{",.format.models.left,as.character(renamed.global.models[row,i]),.format.models.right,"}", sep="")
}
else {
cat(" & ",.format.models.left,as.character(renamed.global.models[row,i]),.format.models.right, sep="")
}
}
else {cat(" & \\multicolumn{",how.many.columns,"}{c}{",.format.models.left,as.character(renamed.global.models[row,i]),.format.models.right,"}", sep="")}
how.many.columns <- 0
}
}
cat(" \\\\ \n")
}
# underline models
if (.format.underline.models == TRUE) {
how.many.columns <- 0
for (i in seq(1:length(.global.models))) {
how.many.columns <- how.many.columns + 1
# underline if next column has different dependent variable, or if end of columns
different.model <- FALSE
if (i == length(.global.models)) {different.model <- TRUE}
else if ((as.character(.global.models[i])) != (as.character(.global.models[i+1]))) {different.model <- TRUE}
else if ((as.character(.global.dependent.variables[i])) != (as.character(.global.dependent.variables[i+1]))) {different.model <- TRUE} # subsume models under dependent variables
if (different.model== TRUE) {
cat("\\cline{",(i-how.many.columns+1)+1,"-",i+1,"} ",sep="")
how.many.columns <- 0
}
}
cat("\n")
}
.table.part.published[which.part.number] <<- TRUE
}
}
# column labels
else if (part=="columns") {
if (!is.null(.format.column.labels)) {
if (is.null(.format.column.separate)) { .format.column.separate <- 1 }
# adjust column.separate to have the same number of columns as the table
models.in.table <- length(.global.models)
models.in.col <- 0
for (i in seq(1:length(.format.column.separate))) { # count up how many models in column.separate
models.in.col <- models.in.col + .format.column.separate[i]
}
excess <- models.in.table - models.in.col
# if too few column labels, add ones to column.separate
if (excess > 0) {
last.index <- length(.format.column.separate)
for (i in seq(1:excess)) {
.format.column.separate[last.index + i] <- 1
}
}
# if too many column labels, then cut down
if (excess < 0) {
col.total <- 0
new.format.column.separate <- NULL
for(i in seq(1:length(.format.column.separate))) {
col.total <- col.total + .format.column.separate[i]
if (col.total > models.in.table) {
new.format.column.separate[i] <- .format.column.separate[i] - (col.total - models.in.table)
if (new.format.column.separate[i] == 0) { new.format.column.separate <- new.format.column.separate[-i] }
break
}
else {
new.format.column.separate[i] <- .format.column.separate[i]
}
}
.format.column.separate <- new.format.column.separate
}
# output column labels
col.position <- 1
for (i in seq(1:length(.format.column.separate))) {
if (is.null(.format.column.labels[col.position])) { .format.column.labels[col.position] <- "" }
if (is.na(.format.column.labels[col.position])) { .format.column.labels[col.position] <- "" }
if (.format.column.separate[i]==1) {
if (.format.dec.mark.align==TRUE) {
cat(" & \\multicolumn{1}{c}{",.format.column.left,.format.column.labels[col.position],.format.column.right,"}", sep="")
}
else {
cat(" & ",.format.column.left,.format.column.labels[col.position],.format.column.right, sep="")
}
}
else {
cat(" & \\multicolumn{",.format.column.separate[i],"}{c}{",.format.column.left,.format.column.labels[col.position],.format.column.right,"}", sep="")
}
col.position <- col.position + 1
}
cat(" \\\\ \n")
}
}
# numbers
else if (part=="numbers") {
if ((.format.model.numbers == TRUE) && (length(.global.models)>1)) {
.table.insert.space()
cat(.format.numbers.text)
for (i in seq(1:length(.global.models))) {
if (.format.dec.mark.align==TRUE) {
if (.format.numbers.roman == TRUE) { cat(" & \\multicolumn{1}{c}{",.format.numbers.left,.roman.numeral(i),.format.numbers.right,"}", sep="") }
else { cat(" & \\multicolumn{1}{c}{",.format.numbers.left,i,.format.numbers.right,"}", sep="") }
}
else {
if (.format.numbers.roman == TRUE) { cat(" & ",.format.numbers.left,.roman.numeral(i),.format.numbers.right, sep="") }
else { cat(" & ",.format.numbers.left,i,.format.numbers.right, sep="") }
}
}
cat("\\\\ \n")
.table.part.published[which.part.number] <<- TRUE
}
}
# numbers
else if (part=="objects") {
if (.format.object.names == TRUE) {
.table.insert.space()
for (i in seq(1:length(.global.models))) {
if (.format.dec.mark.align==TRUE) {
cat(" & \\multicolumn{1}{c}{",.global.object.names[i],"}", sep="")
}
else {
cat(" & ",.global.object.names[i], sep="")
}
}
cat("\\\\ \n")
.table.part.published[which.part.number] <<- TRUE
}
}
## coefficients
else if (part=="coefficients") {
.which.variable.label <<- 0
if (is.null(.format.covariate.labels)) { .format.covariate.labels <<- NA }
# then, enter the coefficients
for (i in seq(1:length(.global.coefficient.variables))) { .table.enter.coefficients(i) }
.table.part.published[which.part.number] <<- TRUE
}
# number of observations
else if (part=="N") { .print.table.statistic(.global.var.name=.global.N, .format.var.name=.format.N, decimal.digits=0, part.number=which.part.number) }
# fixed effects table
else if (part=="omit") {
if ((!is.null(.format.omit.regexp)) && (!is.null(.format.omit.labels))) {
.format.omit.table <<- matrix(.format.omit.no, nrow=length(.format.omit.regexp), ncol=length(.global.models))
for (i in seq(1:length(.global.models))) {
for (j in seq(1:length(.format.omit.regexp))) {
for (k in seq(1:length(.global.coef.vars.by.model[,i]))) {
relevant.coef.var <- .global.coef.vars.by.model[k,i]
if (length(grep(.format.omit.regexp[j], relevant.coef.var, perl=.format.perl, fixed=FALSE))!=0) {
.format.omit.table[j,i] <<- .format.omit.yes
}
}
}
}
for (i in seq(1:length(.format.omit.regexp))) {
cat (.format.omit.labels[i])
for (j in seq(1:length(.global.models))) {
if (.format.dec.mark.align == TRUE) {
cat(" & \\multicolumn{1}{c}{",.format.omit.table[i,j],"}", sep="")
}
else {
cat(" & ",.format.omit.table[i,j], sep="")
}
}
cat(" \\\\ \n")
}
.table.part.published[which.part.number] <<- TRUE
}
}
# R-squared
else if (part=="R-squared") { .print.table.statistic(.global.var.name=.global.R2, .format.var.name=.format.R2, part.number=which.part.number) }
# max R-squared
else if (part=="max R-squared") { .print.table.statistic(.global.var.name=.global.max.R2, .format.var.name=.format.max.R2, part.number=which.part.number) }
# adjusted R-squared
else if (part=="adjusted R-squared") { .print.table.statistic(.global.var.name=.global.adj.R2, .format.var.name=.format.adj.R2, part.number=which.part.number) }
# log likelihood
else if (part=="log likelihood") { .print.table.statistic(.global.var.name=.global.LL, .format.var.name=.format.LL, part.number=which.part.number) }
# Akaike Information Criterion (AIC)
else if (part=="AIC") { .print.table.statistic(.global.var.name=.global.AIC, .format.var.name=.format.AIC, part.number=which.part.number) }
# Bayesian Information Criterion (BIC)
else if (part=="BIC") { .print.table.statistic(.global.var.name=.global.BIC, .format.var.name=.format.BIC, part.number=which.part.number) }
# Scale Parameter
else if (part=="scale") { .print.table.statistic(.global.var.name=.global.scale, .format.var.name=.format.scale, part.number=which.part.number) }
# UBRE
else if (part=="UBRE") { .print.table.statistic(.global.var.name=.global.UBRE, .format.var.name=.format.UBRE, part.number=which.part.number) }
# sigma2
else if (part=="sigma2") { .print.table.statistic(.global.var.name=.global.sigma2, .format.var.name=.format.sigma2, part.number=which.part.number) }
## with degrees of freedom
# residual standard error (sigma); standard error of the regression
else if (substr(part,1,nchar("SER"))=="SER") { .print.table.statistic(.global.var.name=.global.SER, .format.var.name=.format.SER, part.string=part, part.number=which.part.number) }
# F-statistic
else if (substr(part,1,nchar("F statistic"))=="F statistic") { .print.table.statistic(.global.var.name=.global.F.stat, .format.var.name=.format.F.stat, part.string=part, part.number=which.part.number) }
# theta
else if (substr(part,1,nchar("theta"))=="theta") { .print.table.statistic(.global.var.name=.global.theta, .format.var.name=.format.theta, part.string=part, part.number=which.part.number, type.se=TRUE) }
# rho
else if (substr(part,1,nchar("rho"))=="rho") { .print.table.statistic(.global.var.name=.global.rho, .format.var.name=.format.rho, part.string=part, part.number=which.part.number, type.se=TRUE) }
# Inverse Mills ratio
else if (substr(part,1,nchar("Mills"))=="Mills") { .print.table.statistic(.global.var.name=.global.mills, .format.var.name=.format.mills, part.string=part, part.number=which.part.number, type.se=TRUE) }
# Chi-squared
else if (substr(part,1,nchar("chi2"))=="chi2") { .print.table.statistic(.global.var.name=.global.chi.stat, .format.var.name=.format.chi.stat, part.string=part, part.number=which.part.number) }
# Wald Test
else if (substr(part,1,nchar("Wald"))=="Wald") { .print.table.statistic(.global.var.name=.global.wald.stat, .format.var.name=.format.wald.stat, part.string=part, part.number=which.part.number) }
# LR Test
else if (substr(part,1,nchar("LR"))=="LR") { .print.table.statistic(.global.var.name=.global.lr.stat, .format.var.name=.format.lr.stat, part.string=part, part.number=which.part.number) }
# Score (Logrank) Test
else if (substr(part,1,nchar("logrank"))=="logrank") { .print.table.statistic(.global.var.name=.global.logrank.stat, .format.var.name=.format.logrank.stat, part.string=part, part.number=which.part.number) }
# null deviance
else if (substr(part,1,nchar("null deviance"))=="null deviance") { .print.table.statistic(.global.var.name=.global.null.deviance, .format.var.name=.format.null.deviance, part.string=part, part.number=which.part.number) }
# residual deviance
else if (substr(part,1,nchar("residual deviance"))=="residual deviance") { .print.table.statistic(.global.var.name=.global.residual.deviance, .format.var.name=.format.residual.deviance, part.string=part, part.number=which.part.number) }
##
# single horizontal line, no matter what
else if (part=="-!") {
cat("\\hline ")
.table.insert.space()
cat(" \n")
.table.part.published[which.part.number] <<- TRUE
}
# single horizontal line, optional
else if (part=="-") {
if (.publish.horizontal.line==TRUE) {
cat("\\hline ")
.table.insert.space()
cat(" \n")
.table.part.published[which.part.number] <<- TRUE
}
}
# double horizontal line, no matter what
else if (part=="=!") {
cat("\\hline \n")
cat("\\hline ")
.table.insert.space()
cat(" \n")
.table.part.published[which.part.number] <<- TRUE
}
# double horizontal line
else if (part=="=") {
if (.publish.horizontal.line==TRUE) {
cat("\\hline \n")
cat("\\hline ")
.table.insert.space()
cat(" \n")
.table.part.published[which.part.number] <<- TRUE
}
}
# notes
else if (part=="notes") {
if (.format.note != "") { cat(.format.note) }
for (i in seq(1:length(.format.note.content))) {
# print individual notes
if (.format.note == "") { cat("\\multicolumn{",length(.global.models)+1,"}{",.format.note.alignment,"}{",.format.note.content[i],"} \\\\ \n", sep="") }
else { cat(" & \\multicolumn{",length(.global.models),"}{",.format.note.alignment,"}{",.format.note.content[i],"} \\\\ \n", sep="") }
}
.table.part.published[which.part.number] <<- TRUE
}
# empty line
else if (part==" ") {
.table.empty.line();
.table.part.published[which.part.number] <<- TRUE
}
# additional lines
else if (part=="additional") { .print.additional.lines(part.number=which.part.number) }
}
.r.squared <-
function(object.name) {
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH","Arima","maBina","coeftest","nlmer", "glmer", "lmer","Gls","Arima"))) {
if (model.name %in% c("heckit")) {
return(.summary.object$rSquared$R2)
}
if (model.name %in% c("felm")) {
return(.summary.object$r2)
}
if (model.name %in% c("mlogit")) {
return(.summary.object$mfR2[1])
}
if (model.name %in% c("plm")) {
return(as.vector(.summary.object$r.squared["rsq"]))
}
else if (model.name %in% c("betareg")) {
return(as.vector(.summary.object$pseudo.r.squared))
}
else if (!is.null(.summary.object$r.squared)) {
return(as.vector(.summary.object$r.squared))
}
else if (model.name %in% c("coxph", "clogit")) {
return(as.vector(.summary.object$rsq[1]))
}
else if (model.name %in% c("pmg")) {
return(as.vector(.summary.object$rsqr))
}
else if (model.name %in% c("cph","lrm","ols","psm")) {
return(as.vector(object.name$stats["R2"]))
}
}
return(NA)
}
.remove.special.chars <-
function(s) {
if (!is.character(s)) { s.out <- as.character(s) }
else { s.out <- s }
# this has to go first
s.out <- gsub("\\","\\textbackslash ",s.out,fixed=TRUE)
# basic special characters
s.out <- gsub("_","\\_",s.out,fixed=TRUE)
s.out <- gsub("#","\\#",s.out,fixed=TRUE)
s.out <- gsub("~","\\textasciitilde",s.out,fixed=TRUE)
s.out <- gsub("{","\\{",s.out,fixed=TRUE)
s.out <- gsub("}","\\}",s.out,fixed=TRUE)
s.out <- gsub("%","\\%",s.out,fixed=TRUE)
s.out <- gsub("$","\\$",s.out,fixed=TRUE)
# pre-defined text-mode commands (add more?)
s.out <- gsub("*","\\textasteriskcentered ",s.out,fixed=TRUE)
s.out <- gsub("|","\\textbar ",s.out,fixed=TRUE)
s.out <- gsub(">","\\textgreater ",s.out,fixed=TRUE)
s.out <- gsub("<","\\textless ",s.out,fixed=TRUE)
# more substitutions
s.out <- gsub("^","$\\hat{\\mkern6mu}$",s.out,fixed=TRUE)
return(s.out)
}
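# Illustrative sketch (not part of the table-building logic): hand-made inputs
# for .remove.special.chars, guarded by if (FALSE) so nothing executes when
# this file is sourced. Expected outputs follow from the substitutions above.
if (FALSE) {
  .remove.special.chars("income_log (50%)")   # "income\\_log (50\\%)"
  .remove.special.chars(42)                   # coerced to "42" first
}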
.residual.deviance <-
function(object.name) {
residual.deviance.output <- as.vector(rep(NA,times=3))
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH","Arima","coeftest", "Gls","multinom","lmer","glmer","nlmer"))) {
if (model.name %in% c("rem.dyad")) {
residual.deviance.value <- object.name$residual.deviance
residual.deviance.output <- as.vector(c(residual.deviance.value, NA, NA))
}
else if (model.name %in% c("mclogit")) {
residual.deviance.value <- object.name$deviance
residual.deviance.output <- as.vector(c(residual.deviance.value, NA, NA))
}
else if (model.name %in% c("maBina")) {
residual.deviance.value <- object.name$w$deviance
df.value <- object.name$w$df.residual
residual.deviance.output <- as.vector(c(residual.deviance.value, df.value, NA))
}
else if (!is.null(.summary.object$deviance)) {
residual.deviance.value <- suppressMessages(.summary.object$deviance)
df.value <- object.name$df.residual
residual.deviance.output <- as.vector(c(residual.deviance.value, df.value, NA))
}
else if (!is.null(object.name$deviance)) {
residual.deviance.value <- object.name$deviance
df.value <- object.name$df.residual
residual.deviance.output <- as.vector(c(residual.deviance.value, df.value, NA))
}
}
names(residual.deviance.output) <- c("statistic","df1","p-value")
return(cbind(residual.deviance.output))
}
.roman.numeral <-
function(regular.number) {
# unique representation only for integers between 1 and 3899
if ((regular.number < 1) || (regular.number > 3899)) {
return(NULL)
}
else {
roman.output <- ""
number.remaining <- regular.number
while (number.remaining > 999) {
roman.output <- paste(roman.output, "M", sep="")
number.remaining <- number.remaining - 1000
}
if (number.remaining > 899) {
roman.output <- paste(roman.output, "CM", sep="")
number.remaining <- number.remaining - 900
}
if (number.remaining > 499) {
roman.output <- paste(roman.output, "D", sep="")
number.remaining <- number.remaining - 500
}
if (number.remaining > 399) {
roman.output <- paste(roman.output, "CD", sep="")
number.remaining <- number.remaining - 400
}
while (number.remaining > 99) {
roman.output <- paste(roman.output, "C", sep="")
number.remaining <- number.remaining - 100
}
if (number.remaining > 89) {
roman.output <- paste(roman.output, "XC", sep="")
number.remaining <- number.remaining - 90
}
if (number.remaining > 49) {
roman.output <- paste(roman.output, "L", sep="")
number.remaining <- number.remaining - 50
}
if (number.remaining > 39) {
roman.output <- paste(roman.output, "XL", sep="")
number.remaining <- number.remaining - 40
}
while (number.remaining > 9) {
roman.output <- paste(roman.output, "X", sep="")
number.remaining <- number.remaining - 10
}
if (number.remaining > 8) {
roman.output <- paste(roman.output, "IX", sep="")
number.remaining <- number.remaining - 9
}
if (number.remaining > 4) {
roman.output <- paste(roman.output, "V", sep="")
number.remaining <- number.remaining - 5
}
if (number.remaining > 3) {
roman.output <- paste(roman.output, "IV", sep="")
number.remaining <- number.remaining - 4
}
while (number.remaining > 0) {
roman.output <- paste(roman.output, "I", sep="")
number.remaining <- number.remaining - 1
}
return(roman.output)
}
}
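# Quick hand-worked checks for .roman.numeral (illustrative only; the
# if (FALSE) guard keeps them from running when this file is sourced).
if (FALSE) {
  .roman.numeral(1987)   # "MCMLXXXVII"
  .roman.numeral(49)     # "XLIX"
  .roman.numeral(4000)   # NULL -- outside the supported 1-3899 range
}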
.SER <-
function(object.name) {
SER.output <- as.vector(rep(NA,times=3))
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","lme","nlme","fGARCH","Arima","maBina","coeftest","lmer","glmer","nlmer","gls","Gls"))) {
if (model.name %in% c("felm")) {
SER.output <- as.vector(c(.summary.object$rse, .summary.object$rdf, NA))
}
else if (!is.null(suppressMessages(.summary.object$sigma))) {
sigma.value <-suppressMessages(.summary.object$sigma)
if (model.name %in% c("rlm")) {
df.residual.value <- .summary.object$df[2]
}
else {
df.residual.value <- object.name$df.residual
}
SER.output <- as.vector(c(sigma.value, df.residual.value, NA))
}
}
names(SER.output) <- c("statistic","df1","p-value")
return(cbind(SER.output))
}
.stargazer.reg.table <-
function(...) {
list.of.models <- as.list(list(...))
how.many.models <- length(list.of.models)
# find how many models user wants to customize
# max.user <- max(length(coef),length(se),length(t),length(p),length(ci.custom))
length(coef) <<- length(se) <<- length(t) <<- length(p) <<- length(ci.custom) <<- how.many.models
if (how.many.models >= 1) {
suppressMessages(.new.table(list.of.models[[1]], user.coef=coef[[1]], user.se=se[[1]], user.t=t[[1]], user.p=p[[1]], auto.t=t.auto, auto.p=p.auto, user.ci.lb=ci.custom[[1]][,1], user.ci.rb=ci.custom[[1]][,2]))
if (how.many.models >= 2) {
for (i in seq(from = 2,to = how.many.models)) {
#if (i <= max.user) {
suppressMessages(.add.model(list.of.models[[i]], user.coef=coef[[i]], user.se=se[[i]], user.t=t[[i]], user.p=p[[i]], auto.t=t.auto, auto.p=p.auto, user.ci.lb=ci.custom[[i]][,1], user.ci.rb=ci.custom[[i]][,2]))
#}
#else {
# suppressMessages(.add.model(list.of.models[[i]], user.coef=NULL, user.se=NULL, user.t=NULL, user.p=NULL, auto.t=t.auto, auto.p=p.auto, user.ci.lb=NULL, user.ci.rb=NULL))
#}
}
}
.apply(auto.t=t.auto, auto.p=p.auto)
.order.reg.table(order)
suppressMessages(.publish.table())
}
}
.set.font.size <-
function() {
if (!is.null(.format.font.size)) {
cat("\\", .format.font.size," \n", sep="")
}
}
.floating.header <-
function() {
if (.format.floating==TRUE) {
cat("\\begin{", .format.floating.environment,"}[", .format.table.placement,"] \\centering \n",sep="")
cat(" \\caption{", .format.title, "} \n",sep="")
cat(" \\label{", .format.label, "} \n",sep="")
.set.font.size()
}
else if (!is.null(.format.font.size)) { # set font size using begingroup
cat("\\begingroup \n", sep="")
.set.font.size()
}
}
.data.frame.table.header <-
function(object) {
.floating.header()
.formatting.alignment <- paste("@{\\extracolsep{",.format.column.sep.width,"}} ", sep="")
for (i in seq(1:(length(names(object))))) {
if (.format.dec.mark.align == FALSE) {
.formatting.alignment <- paste(.formatting.alignment, "c", sep="")
}
else {
.formatting.alignment <- paste(.formatting.alignment, "D{", .format.decimal.character,"}{", .format.decimal.character,"}{-", .format.s.round.digits,"} ", sep="")
}
}
#
cat("\\begin{tabular}{",.formatting.alignment,"} \n",sep="")
}
.stargazer.data.frame.table <-
function(object) {
# flip objects
if (.format.flip == TRUE) {
# keep row- and column names
obj.rownames <- rownames(object)
obj.colnames <- colnames(object)
object <- as.data.frame(t(object))
colnames(object) <- obj.rownames
rownames(object) <- obj.colnames
}
if ((nrow(object) < 1) || (ncol(object) < 1)) {
cat("% Error: Data frame must have at least one row and one column.\n")
}
else {
object <- .order.data.frame(object, order)
.table.info.comment()
#create table header
.data.frame.table.header(object)
.table.insert.space()
.table.part.published <<- as.vector(rep(NA, times=length(.format.s.stat.parts))) # to keep track what has been published (to deal intelligently with horizontal lines)
.publish.horizontal.line <<- TRUE # should non-compulsory horizontal lines be published? (yes, if something else published since the previous line)
if (length(.format.s.stat.parts)>=1) {
for (i in seq(1:length(.format.s.stat.parts))) {
.data.frame.table.part(object,.format.s.stat.parts[i], which.part.number = i)
if (.table.part.published[i]==TRUE) { .publish.horizontal.line <<- TRUE }
if ((.format.s.stat.parts[i]=="-") || (.format.s.stat.parts[i]=="-!") || (.format.s.stat.parts[i]=="=") || (.format.s.stat.parts[i]=="=!")) { .publish.horizontal.line <<- FALSE }
}
}
cat("\\end{tabular} \n")
if (.format.floating == TRUE) { cat("\\end{", .format.floating.environment,"} \n", sep="") }
else if (!is.null(.format.font.size)) {
cat("\\endgroup \n",sep="")
}
}
}
.data.frame.table.part <-
function(object, part, which.part.number) {
.table.part.published[which.part.number] <<- FALSE
if ((part=="stat names") && (.format.colnames==TRUE)) {
x.which <- 0
if (is.null(.format.covariate.labels)) { .format.covariate.labels <<- NA }
for (x in seq(1:length(names(object)))) {
omitted <- FALSE
if (!is.null(.format.omit.regexp)) {
for (j in seq(1:length(.format.omit.regexp))) {
if (length(grep(.format.omit.regexp[j], names(object)[x], perl=.format.perl, fixed=FALSE))!=0) { omitted <- TRUE }
}
}
if (!is.null(.format.keep.regexp)) {
omitted <- TRUE
for (j in seq(1:length(.format.keep.regexp))) {
if (length(grep(.format.keep.regexp[j], names(object)[x], perl=.format.perl, fixed=FALSE))!=0) { omitted <- FALSE }
}
}
if (!is.null(.format.omit.index)) {
for (j in seq(1:length(.format.omit.index))) {
if (.format.omit.index[j] == x) { omitted <- TRUE }
}
}
if (!is.null(.format.keep.index)) {
omitted <- TRUE
for (j in seq(1:length(.format.keep.index))) {
if (.format.keep.index[j] == x) { omitted <- FALSE }
}
}
if (omitted == FALSE) {
x.which <- x.which + 1
if (x >= 2) { cat(" & ", sep="")}
# if underscore or ^ in variable name, then insert an escape \ before it
name.printed <- .remove.special.chars(names(object)[x])
if (is.na(.format.covariate.labels[x.which])) {
if (.format.coefficient.variables.capitalize == TRUE) { name.printed <- toupper(name.printed) }
}
else { name.printed <- .format.covariate.labels[x.which] }
if (.format.dec.mark.align==TRUE) {
cat("\\multicolumn{1}{c}{",.format.s.coefficient.variables.left, name.printed,.format.s.coefficient.variables.right,"}", sep="")
}
else {
cat(.format.s.coefficient.variables.left, name.printed,.format.s.coefficient.variables.right, sep="")
}
}
}
cat(" \\\\ \n")
.table.part.published[which.part.number] <<- TRUE
}
if (substr(part,1,10)=="statistics") {
for (y in seq(1:nrow(object))) {
for (x in seq(1:length(names(object)))) {
omitted <- FALSE
if (!is.null(.format.omit.regexp)) {
for (j in seq(1:length(.format.omit.regexp))) {
if (length(grep(.format.omit.regexp[j], names(object)[x], perl=.format.perl, fixed=FALSE))!=0) { omitted <- TRUE }
}
}
if (!is.null(.format.keep.regexp)) {
omitted <- TRUE
for (j in seq(1:length(.format.keep.regexp))) {
if (length(grep(.format.keep.regexp[j], names(object)[x], perl=.format.perl, fixed=FALSE))!=0) { omitted <- FALSE }
}
}
if (!is.null(.format.omit.index)) {
for (j in seq(1:length(.format.omit.index))) {
if (.format.omit.index[j] == x) { omitted <- TRUE }
}
}
if (!is.null(.format.keep.index)) {
omitted <- TRUE
for (j in seq(1:length(.format.keep.index))) {
if (.format.keep.index[j] == x) { omitted <- FALSE }
}
}
if (omitted == FALSE) {
if (x >= 2) { cat(" & ", sep="") }
.how.much.to.round <- .format.round.digits
if (is.numeric(object[y,x])) {
if (.is.all.integers(object[y,x])) { .how.much.to.round <- 0 }
rounded.object <- .iround(object[y,x], .how.much.to.round)
if (.format.dec.mark.align==TRUE) {
cat(rounded.object, sep="")
}
else {
cat("$", rounded.object, "$",sep="")
}
}
else {
adjusted.object <- .remove.special.chars(object[y, x])
if (is.na(adjusted.object)) { adjusted.object <- "" }
if (.format.dec.mark.align==TRUE) {
cat("\\multicolumn{1}{c}{", adjusted.object, "}", sep="")
}
else {
cat(adjusted.object, sep="")
}
}
}
}
# add empty lines
how.many.empty.lines <- as.numeric(substr(part,11,nchar(part)))
if (is.na(how.many.empty.lines)) { how.many.empty.lines <- 1 }
for (j in seq(1:how.many.empty.lines)) {
cat(" \\\\ \n")
}
}
.table.part.published[which.part.number] <<- TRUE
}
# notes
else if ((part=="notes") && (!is.null(.format.s.note.content))) {
if (.format.s.note != "") cat(.format.s.note)
for (i in seq(1:length(.format.s.note.content))) {
if (.format.s.note == "") { cat("\\multicolumn{",length(names(object)),"}{",.format.s.note.alignment,"}{",.format.s.note.content[i],"} \\\\ \n", sep="") }
else { cat(" & \\multicolumn{",length(names(object)),"}{",.format.s.note.alignment,"}{",.format.s.note.content[i],"} \\\\ \n", sep="") }
}
.table.part.published[which.part.number] <<- TRUE
}
# empty line
else if (part==" ") {
.table.empty.line()
.table.part.published[which.part.number] <<- TRUE
}
# horizontal line
else if (part=="-!") {
cat("\\hline ")
.table.insert.space()
cat(" \n")
.table.part.published[which.part.number] <<- TRUE
}
else if (part=="-") {
if (.publish.horizontal.line==TRUE) {
cat("\\hline ")
.table.insert.space()
cat(" \n")
.table.part.published[which.part.number] <<- TRUE
}
}
# double horizontal line
else if (part=="=!") {
cat("\\hline \n")
cat("\\hline ")
.table.insert.space()
cat(" \n")
.table.part.published[which.part.number] <<- TRUE
}
else if (part=="=") {
if (.publish.horizontal.line==TRUE) {
cat("\\hline \n")
cat("\\hline ")
.table.insert.space()
cat(" \n")
.table.part.published[which.part.number] <<- TRUE
}
}
}
.stargazer.summ.stat.table <-
function(object) {
if (length(names(object)) < 1) {
cat("% Error: Data frame columns do not have any names.\n")
}
else if ((nrow(object) < 1) || (ncol(object) < 1)) {
cat("% Error: Data frame must have at least one row and one column.\n")
}
else {
object <- .order.data.frame(object, order, summary=T)
.table.info.comment()
# create table header
.summ.stat.table.header(object)
.table.insert.space()
for (i in seq(1:length(.format.s.stat.parts))) {
.summ.stat.table.part(object,.format.s.stat.parts[i])
}
cat("\\end{tabular} \n")
if (.format.floating == TRUE) { cat("\\end{", .format.floating.environment,"} \n", sep="") }
else if (!is.null(.format.font.size)) {
cat("\\endgroup \n",sep="")
}
}
}
.summ.stat.publish.statistic <-
function(object, which.variable, which.statistic) {
if ((is.numeric(object[,which.variable]) == TRUE) || ((is.logical(object[,which.variable])) && (.format.summ.logical==TRUE))) {
if ((is.logical(object[,which.variable])) && (.format.summ.logical==TRUE)) {
      temp.var <- rep(NA, times=length(object[,which.variable]))
temp.var[object[,which.variable]==TRUE] <- 1
temp.var[object[,which.variable]==FALSE] <- 0
}
else {
temp.var <- object[,which.variable]
}
which.statistic <- tolower(which.statistic)
if (which.statistic == "n") {
return(.iround(sum(!is.na(temp.var)), 0))
}
else if (which.statistic == "nmiss") {
return(.iround(sum(is.na(temp.var)), 0))
}
else if (which.statistic == "mean") {
return(.iround(mean(temp.var, na.rm=TRUE), .format.s.round.digits))
}
else if (which.statistic == "median") {
median.value <- median(temp.var, na.rm=TRUE)
if (.is.all.integers(temp.var) == FALSE) { how.much.to.round <- .format.s.round.digits }
else {
if (.is.all.integers(median.value) == TRUE) { how.much.to.round <- 0 }
else { how.much.to.round <- 1 }
}
return(.iround(median.value, how.much.to.round))
}
else if (which.statistic == "sd") {
return(.iround(sd(temp.var, na.rm=TRUE), .format.s.round.digits))
}
else if (which.statistic == "min") {
if (.is.all.integers(temp.var) == FALSE) { how.much.to.round <- .format.s.round.digits }
else { how.much.to.round <- 0 }
return(.iround(min(temp.var, na.rm=TRUE), how.much.to.round))
}
else if (which.statistic == "max") {
if (.is.all.integers(temp.var) == FALSE) { how.much.to.round <- .format.s.round.digits }
else { how.much.to.round <- 0 }
return(.iround(max(temp.var, na.rm=TRUE), how.much.to.round))
}
else if (which.statistic == "mad") {
return(.iround(mad(temp.var, na.rm=TRUE), .format.s.round.digits))
}
else if (substr(which.statistic,1,1) == "p") {
percentile.value <- quantile(temp.var, as.numeric(substr(which.statistic,2,nchar(which.statistic))) / 100, na.rm=TRUE)
if (.is.all.integers(temp.var) == FALSE) { how.much.to.round <- .format.s.round.digits }
else {
if (.is.all.integers(percentile.value) == TRUE) { how.much.to.round <- 0 }
else { how.much.to.round <- 1 }
}
return(.iround(percentile.value, how.much.to.round))
}
}
else { return(NA) }
}
.summ.stat.table.header <-
function(object) {
.floating.header()
#
.formatting.alignment <- paste("@{\\extracolsep{",.format.column.sep.width,"}}l", sep="")
if (.format.flip == FALSE) { width <- length(.format.s.statistics.list) }
else { width <- length(.summ.stat.included(object)) }
for (i in seq(1:width)) {
if (.format.dec.mark.align == FALSE) {
.formatting.alignment <- paste(.formatting.alignment, "c", sep="")
}
else {
.formatting.alignment <- paste(.formatting.alignment, "D{", .format.decimal.character,"}{", .format.decimal.character,"}{-", .format.s.round.digits,"} ", sep="")
}
}
#
cat("\\begin{tabular}{",.formatting.alignment,"} \n",sep="")
}
# figure out which variables are included --> returns indices of included variables
.summ.stat.included <-
function(object) {
included <- NULL
for (i in seq(1:length(names(object)))) {
# skip all of this if omitted based on regular expression
omitted <- FALSE
if ((is.numeric(object[,i]) == TRUE) || (is.logical(object[,i]) && (.format.summ.logical==TRUE))) {
# also omit if all missing values
if (!any(!is.na(object[,i]))) { omitted <- TRUE }
if (!is.null(.format.omit.regexp)) {
for (j in seq(1:length(.format.omit.regexp))) {
if (length(grep(.format.omit.regexp[j], names(object)[i], perl=.format.perl, fixed=FALSE))!=0) { omitted <- TRUE }
}
}
if (!is.null(.format.keep.regexp)) {
omitted <- TRUE
for (j in seq(1:length(.format.keep.regexp))) {
if (length(grep(.format.keep.regexp[j], names(object)[i], perl=.format.perl, fixed=FALSE))!=0) { omitted <- FALSE }
}
}
if (!is.null(.format.omit.index)) {
for (j in seq(1:length(.format.omit.index))) {
if (.format.omit.index[j] == i) { omitted <- TRUE }
}
}
if (!is.null(.format.keep.index)) {
omitted <- TRUE
for (j in seq(1:length(.format.keep.index))) {
if (.format.keep.index[j] == i) { omitted <- FALSE }
}
}
}
else { omitted <- TRUE }
if (omitted == FALSE) { included <- c(included, i) }
}
return(included)
}
.summ.stat.table.part <-
function(object, part) {
included <- .summ.stat.included(object)
# with summary statistics, always publish horizontal line
.publish.horizontal.line <<- TRUE
if (part=="stat names") {
cat(.format.s.statistics.names.label, sep="")
if (.format.flip == FALSE) {
if (length(.format.s.statistics.list)>=1) {
for (i in seq(1:length(.format.s.statistics.list))) {
for (j in seq(1:ncol(.format.s.statistics.names))) {
if ((substr(.format.s.statistics.list[i],1,1)=="p") && (substr(.format.s.statistics.list[i],1,1)==.format.s.statistics.names[1,j])) {
cat(" & \\multicolumn{1}{c}{", .format.s.statistics.names.left, sub("!", substr(.format.s.statistics.list[i],2,nchar(.format.s.statistics.list[i])), .format.s.statistics.names[2,j], ignore.case =FALSE, fixed=TRUE), .format.s.statistics.names.right,"}", sep="")
}
else if (.format.s.statistics.list[i]==.format.s.statistics.names[1,j]) {
cat(" & \\multicolumn{1}{c}{", .format.s.statistics.names.left, .format.s.statistics.names[2,j], .format.s.statistics.names.right, "}", sep="")
}
}
}
}
}
else { # flipped summary statistic table
if (is.null(.format.covariate.labels)) { .format.covariate.labels <<- NA }
i.label <- 0
for (i in included) {
i.label <- i.label + 1
# if underscore in variable name, then insert an escape \ before it
name.printed <- .remove.special.chars(names(object)[i])
cat(" & ")
if (is.na(.format.covariate.labels[i.label])) {
if ( .format.s.coefficient.variables.capitalize == TRUE) { cat(.format.s.coefficient.variables.left, toupper(name.printed), .format.s.coefficient.variables.right, sep="") }
else { cat(.format.s.coefficient.variables.left, name.printed, .format.s.coefficient.variables.right, sep="") }
}
else { cat(.format.s.coefficient.variables.left, .format.covariate.labels[i.label], .format.s.coefficient.variables.right, sep="") }
}
}
cat(" \\\\ \n")
}
if (substr(part,1,10)=="statistics") {
if (is.null(.format.covariate.labels)) { .format.covariate.labels <<- NA }
if (.format.flip == FALSE) {
i.label <- 0
for (i in included) {
i.label <- i.label + 1
# if underscore in variable name, then insert an escape \ before it
name.printed <- .remove.special.chars(names(object)[i])
if (is.na(.format.covariate.labels[i.label])) {
if ( .format.s.coefficient.variables.capitalize == TRUE) { cat(.format.s.coefficient.variables.left, toupper(name.printed), .format.s.coefficient.variables.right, sep="") }
else { cat(.format.s.coefficient.variables.left, name.printed, .format.s.coefficient.variables.right, sep="") }
}
else { cat(.format.s.coefficient.variables.left, .format.covariate.labels[i.label], .format.s.coefficient.variables.right, sep="") }
if (length(.format.s.statistics.list)>=1) {
for (j in seq(1:length(.format.s.statistics.list))) {
# if aligning decimal marks, need to use multicolumn for anything w/o decimal mark
if (.format.dec.mark.align == FALSE) { # not aligning
cat(" & ", .summ.stat.publish.statistic(object, i, .format.s.statistics.list[j]), sep="")
}
else { # aligning
if (.is.all.integers(.summ.stat.publish.statistic(object, i, .format.s.statistics.list[j]))) {
cat(" & \\multicolumn{1}{c}{", .summ.stat.publish.statistic(object, i, .format.s.statistics.list[j]),"}", sep="")
}
else {
cat(" & ", .summ.stat.publish.statistic(object, i, .format.s.statistics.list[j]), sep="")
}
}
}
}
# add empty lines
how.many.empty.lines <- as.numeric(substr(part,11,nchar(part)))
if (is.na(how.many.empty.lines)) { how.many.empty.lines <- 1 }
for (j in seq(1:how.many.empty.lines)) {
cat(" \\\\ \n")
}
}
}
else { # flipped
if (length(.format.s.statistics.list)>=1) {
for (i in seq(1:length(.format.s.statistics.list))) {
for (j in seq(1:ncol(.format.s.statistics.names))) {
if ((substr(.format.s.statistics.list[i],1,1)=="p") && (substr(.format.s.statistics.list[i],1,1)==.format.s.statistics.names[1,j])) {
cat(.format.s.statistics.names.left, sub("!", substr(.format.s.statistics.list[i],2,nchar(.format.s.statistics.list[i])), .format.s.statistics.names[2,j], ignore.case =FALSE, fixed=TRUE), .format.s.statistics.names.right, sep="")
}
else if (.format.s.statistics.list[i]==.format.s.statistics.names[1,j]) {
cat(.format.s.statistics.names.left, .format.s.statistics.names[2,j], .format.s.statistics.names.right, sep="")
}
}
for (j in included) {
# if aligning decimal marks, need to use multicolumn for anything w/o decimal mark
if (.format.dec.mark.align == FALSE) { # not aligning
cat(" & ", .summ.stat.publish.statistic(object, j, .format.s.statistics.list[i]), sep="")
}
else { # aligning
if (.is.all.integers(.summ.stat.publish.statistic(object, j, .format.s.statistics.list[i]))) {
cat(" & \\multicolumn{1}{c}{", .summ.stat.publish.statistic(object, j, .format.s.statistics.list[i]),"}", sep="")
}
else {
cat(" & ", .summ.stat.publish.statistic(object, j, .format.s.statistics.list[i]), sep="")
}
}
}
# add empty lines
how.many.empty.lines <- as.numeric(substr(part,11,nchar(part)))
if (is.na(how.many.empty.lines)) { how.many.empty.lines <- 1 }
for (k in seq(1:how.many.empty.lines)) {
cat(" \\\\ \n")
}
}
}
}
}
# notes
else if ((part=="notes") && (!is.null(.format.s.note.content))) {
if (.format.s.note != "") cat(.format.s.note)
if (.format.s.note=="") { offset <- 1 }
else { offset <- 0 }
if (.format.flip == FALSE) { width <- length(.format.s.statistics.list)+ offset }
else { width <- length(included) + offset }
for (i in seq(1:length(.format.s.note.content))) {
if (.format.s.note == "") { cat("\\multicolumn{",width,"}{",.format.s.note.alignment,"}{",.format.s.note.content[i],"} \\\\ \n", sep="") }
else { cat(" & \\multicolumn{",width,"}{",.format.s.note.alignment,"}{",.format.s.note.content[i],"} \\\\ \n", sep="") }
}
}
# empty line
else if (part==" ") {
.table.empty.line()
}
# horizontal line
else if (part=="-!") {
cat("\\hline ")
.table.insert.space()
cat(" \n")
}
else if (part=="-") {
if (.publish.horizontal.line==TRUE) {
cat("\\hline ")
.table.insert.space()
cat(" \n")
}
}
# double horizontal line
else if (part=="=!") {
cat("\\hline \n")
cat("\\hline ")
.table.insert.space()
cat(" \n")
}
else if (part=="=") {
if (.publish.horizontal.line==TRUE) {
cat("\\hline \n")
cat("\\hline ")
.table.insert.space()
cat(" \n")
}
}
}
.table.empty.line <-
function() {
if (.format.no.space == FALSE) {
cat(" ")
for (i in seq(1:length(.global.models))) {
cat("& ")
}
cat("\\\\ \n")
}
}
.table.enter.coefficients <-
function(which.variable) {
if (which.variable > length(.global.coefficients)) {
return();
}
local.coefficient.var.name <- .global.coefficient.variables[which.variable]
#skip all of this if omitted based on regular expression
omitted <- FALSE
if (!is.null(.format.omit.regexp)) {
for (i in seq(1:length(.format.omit.regexp))) {
if (length(grep(.format.omit.regexp[i], local.coefficient.var.name, perl=.format.perl, fixed=FALSE))!=0) { omitted <- TRUE }
}
}
if (!is.null(.format.keep.regexp)) {
omitted <- TRUE
for (i in seq(1:length(.format.keep.regexp))) {
if (length(grep(.format.keep.regexp[i], local.coefficient.var.name, perl=.format.perl, fixed=FALSE))!=0) { omitted <- FALSE }
}
}
if (!is.null(.format.omit.index)) {
for (i in seq(1:length(.format.omit.index))) {
if (.format.omit.index[i] == which.variable) { omitted <- TRUE }
}
}
if (!is.null(.format.keep.index)) {
omitted <- TRUE
for (i in seq(1:length(.format.keep.index))) {
if (.format.keep.index[i] == which.variable) { omitted <- FALSE }
}
}
if (omitted == FALSE) {
.which.variable.label <<- .which.variable.label + 1
# remove final -TRUE (added by Zelig) from dummy variables
if (substr(local.coefficient.var.name, nchar(local.coefficient.var.name)-3, nchar(local.coefficient.var.name)) == "TRUE") {
### only remove TRUE if added by Zelig, rather than pre-existing in the formula name
if (length(grep(local.coefficient.var.name, .global.formulas.rhs,fixed=TRUE))==0) {
local.coefficient.var.name <- substr(local.coefficient.var.name, 1, nchar(local.coefficient.var.name)-4)
}
}
    # remove everything before and including the last dollar sign from the variable name
temp <- strsplit(local.coefficient.var.name,"$",fixed=TRUE)
local.coefficient.var.name <- temp[[1]][length(temp[[1]])]
# if underscore or ^ in variable name, then insert an escape \ before it
local.coefficient.var.name <- .remove.special.chars(local.coefficient.var.name)
if (length(.format.coefficient.table.parts)>=1) {
for (i in seq(1:length(.format.coefficient.table.parts))) {
.coefficient.table.part(part=.format.coefficient.table.parts[i], which.variable, variable.name=local.coefficient.var.name)
}
}
}
}
.table.header <-
function() {
.floating.header()
#
.formatting.alignment <- paste("@{\\extracolsep{",.format.column.sep.width,"}}l", sep="")
for (i in seq(1:length(.global.models))) {
if (.format.dec.mark.align==FALSE) {
.formatting.alignment <- paste(.formatting.alignment, "c", sep="")
}
else {
.formatting.alignment <- paste(.formatting.alignment, "D{", .format.decimal.character,"}{", .format.decimal.character,"}{-", .format.round.digits,"} ", sep="")
}
}
#
cat("\\begin{tabular}{",.formatting.alignment,"} \n",sep="")
}
.table.info.comment <-
function() {
cat("\n")
if (.format.header==TRUE) {
cat("% Table created by ", .global.package.name, " v.", .global.package.version, " by ", .global.package.author.name, ", ", .global.package.author.affiliation, ". E-mail: ", .global.package.author.email, "\n", sep="")
cat("% Date and time:", format(Sys.time(), "%a, %b %d, %Y - %X"))
cat("\n")
required.latex.packages <- NULL
if (.format.dec.mark.align==TRUE) { required.latex.packages <- c(required.latex.packages, "dcolumn") }
if (.format.floating.environment=="sidewaystable") { required.latex.packages <- c(required.latex.packages, "rotating") }
if (!is.null(required.latex.packages)) {
cat("% Requires LaTeX packages: ")
for (i in 1:length(required.latex.packages)){
cat(required.latex.packages[i]," ", sep="")
}
cat("\n")
}
}
}
.table.insert.space <-
function() {
cat("\\\\[",.format.space.size,"]",sep="")
}
.trim <-
function (x) gsub("^\\s+|\\s+$", "", x)
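# Illustrative call for .trim (if (FALSE) guard; never runs on source).
if (FALSE) {
  .trim("  two  words \t ")   # "two  words" -- only leading/trailing whitespace is stripped
}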
.wald.stat <-
function(object.name) {
wald.output <- as.vector(rep(NA,times=3))
model.name <- .get.model.name(object.name)
if (!(model.name %in% c("arima","fGARCH","Arima","maBina","coeftest", "Gls", "ivreg","lmer","glmer","nlmer"))) {
if (!is.null(.summary.object$waldtest)) {
wald.value <- suppressMessages(.summary.object$waldtest[1])
df.value <- suppressMessages(.summary.object$waldtest[2])
wald.p.value <- suppressMessages(.summary.object$waldtest[3])
wald.output <- as.vector(c(wald.value, df.value, wald.p.value))
}
else if (model.name %in% c("tobit(AER)")) {
wald.value <- .summary.object$wald
df.value <- .summary.object$df - .summary.object$idf
wald.p.value <- pchisq(wald.value, df.value, lower.tail=FALSE)
wald.output <- as.vector(c(wald.value, df.value, wald.p.value))
}
else if (model.name %in% c("lagsarlm", "errorsarlm")) {
wald.value <- as.vector(.summary.object$Wald1$statistic)
df.value <- as.vector(.summary.object$Wald1$parameter)
wald.p.value <- as.vector(.summary.object$Wald1$p.value)
wald.output <- as.vector(c(wald.value, df.value, wald.p.value))
}
}
names(wald.output) <- c("statistic","df1","p-value")
return(cbind(wald.output))
}
.get.coefficients.1 <-
function(object.name, user.given=NULL, model.num=1) {
if (!is.null(user.given)) {
if (.model.identify(object.name) == "multinom") {
if (!is.null(nrow(user.given))) { user.given <- as.vector(user.given[model.num,]) }
}
return(user.given)
}
model.name <- .get.model.name(object.name)
if (model.name %in% c("ls", "normal", "logit", "probit", "relogit", "poisson", "negbin", "normal.survey", "poisson.survey", "probit.survey", "logit.survey", "gamma", "gamma.survey",
"cloglog.net", "gamma.net", "logit.net", "probit.net", "brglm", "glm()", "Glm()", "svyglm()", "plm", "pgmm", "ivreg", "lmrob", "glmrob", "dynlm", "gmm", "mclogit")) {
return(.summary.object$coefficients[,"Estimate"])
}
if (model.name %in% c("Arima")) {
return(object.name$coef)
}
if (model.name %in% c("censReg")) {
return(.summary.object$estimate[,1])
}
if (model.name %in% c("mnlogit")) {
return(.summary.object$CoefTable[,1])
}
if (model.name %in% c("fGARCH")) {
return(object.name@fit$matcoef[,1])
}
if (model.name %in% c("lme","nlme")) {
return(.summary.object$tTable[,1])
}
if (model.name %in% c("maBina")) {
return(as.vector(object.name$out[,1]))
}
if (model.name %in% c("mlogit")) {
return(as.vector(.summary.object$CoefTable[,1]))
}
if (model.name %in% c("coeftest")) {
return(as.vector(object.name[,1]))
}
if (model.name %in% c("selection", "heckit")) {
if (!.global.sel.equation) {
indices <- .summary.object$param$index$betaO ### outcome equation
}
else {
indices <- .summary.object$param$index$betaS ### selection equation
}
return(as.vector(.summary.object$estimate[indices,1]))
}
if (model.name %in% c("probit.ss", "binaryChoice")) {
return(as.vector(.summary.object$estimate[,1]))
}
if (model.name %in% c("hetglm")) {
return(as.vector(.summary.object$coefficients$mean[,1]))
}
if (model.name %in% c("lmer","glmer","nlmer")) {
coefs <- .summary.object$coefficients[,1]
return(coefs)
}
if (model.name %in% c("ergm")) {
return(.summary.object$coefs[,1])
}
if (model.name %in% c("lagsarlm", "errorsarlm")) {
return(.summary.object$Coef[,1])
}
if (model.name %in% c("rq","felm")) {
return(.summary.object$coefficients[,1])
}
if (model.name %in% c("clm")) {
if (.format.ordered.intercepts == FALSE) {
return(.summary.object$coefficients[(length(object.name$alpha)+1):(length(object.name$coefficients)),1])
}
else {
return(.summary.object$coefficients[,1])
}
}
else if (model.name %in% c("pmg")) {
return(.summary.object$coefficients)
}
else if (model.name %in% c("zeroinfl", "hurdle")) {
if (.global.zero.component==FALSE) {
return(.summary.object$coefficients$count[,"Estimate"])
}
else {
return(.summary.object$coefficients$zero[,"Estimate"])
}
}
else if (model.name %in% c("normal.gee", "logit.gee", "probit.gee", "poisson.gee", "gamma.gee", "gee()")) {
return(.summary.object$coefficients[,"Estimate"])
}
else if (model.name %in% c("normal.gam", "logit.gam", "probit.gam", "poisson.gam", "gam()")) {
return(.summary.object$p.coeff)
}
else if (model.name %in% c("coxph", "clogit")) {
return(.summary.object$coef[,"coef"])
}
else if (model.name %in% c("exp","lognorm","weibull","tobit","survreg()")) {
return(.summary.object$table[,"Value"])
}
else if (model.name %in% c("rlm")) {
return(suppressMessages(.summary.object$coefficients[,"Value"]))
}
else if (model.name %in% c("ologit", "oprobit", "polr()")) {
coef.temp <- suppressMessages(.summary.object$coefficients[,"Value"])
if (.format.ordered.intercepts == FALSE) { return(coef.temp[seq(from=1, to=length(coef.temp)-(length(suppressMessages(.summary.object$lev))-1))]) }
else { return(coef.temp) }
}
else if (model.name %in% c("arima", "rem.dyad")) {
return( object.name$coef )
}
else if (model.name %in% c("tobit(AER)")){
return(.summary.object$coefficients[,"Estimate"])
}
else if (model.name %in% c("multinom")){
if (is.null(nrow(.summary.object$coefficients))) {
coef.temp <- .summary.object$coefficients
}
else {
coef.temp <- .summary.object$coefficients[model.num,]
}
return(coef.temp)
}
else if (model.name %in% c("betareg")){
return(.summary.object$coefficients$mean[,"Estimate"])
}
else if (model.name %in% c("gls")) {
coef.temp <- object.name$coefficients
return(coef.temp)
}
else if (model.name %in% c("weibreg", "coxreg", "phreg", "aftreg", "bj", "cph", "Gls", "lrm", "ols", "psm", "Rq")) {
return( object.name$coefficients )
}
else { return(NULL) }
}
.get.coefficients <-
function(object.name, user.given=NULL, model.num=1) {
out <- .get.coefficients.1(object.name, user.given, model.num)
coef.vars <- .coefficient.variables(object.name)
if (is.null(names(out))) {
if (length(out) < length(coef.vars)) {
out.temp <- rep(NA, times=length(coef.vars)-length(out))
out <- c(out, out.temp)
}
else if (length(out) > length(coef.vars)) {
out <- out[1:length(coef.vars)]
}
names(out) <- coef.vars
}
else {
out.temp <- rep(NA, times = length(coef.vars))
names(out.temp) <- coef.vars
for (i in 1:length(out)) {
name <- names(out)[i]
if (name %in% coef.vars) {
out.temp[name] <- out[i]
}
}
out <- out.temp
}
return(out)
}
.turn.into.list <-
function(x) {
if (is.vector(x) || is.matrix(x)) {
if (!is.list(x)) { return(as.list(x)) }
}
return(x)
}
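# Illustrative behavior of .turn.into.list (if (FALSE) guard; hand-made inputs).
if (FALSE) {
  .turn.into.list(c(a = 1, b = 2))   # list(a = 1, b = 2)
  .turn.into.list(list(1, 2))        # returned unchanged -- already a list
}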
.is.list.numeric <-
function(x) {
# tolerate NA or NULL
if (is.null(x)) { return(TRUE) }
if (!is.list(x)) { return(FALSE) }
for (i in 1:length(x)) {
elem <- x[[i]]
if (!is.null(elem)) {
      if (length(elem) != length(elem[is.numeric(elem) | is.na(elem)])) { return(FALSE) }   # elementwise |; scalar || errors on length > 1 conditions in recent R
}
}
return(TRUE)
}
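# Hand-worked checks for .is.list.numeric (illustrative; if (FALSE) guard).
if (FALSE) {
  .is.list.numeric(NULL)                 # TRUE  -- NULL is tolerated
  .is.list.numeric(list(1:3, NA, NULL))  # TRUE  -- numbers, NAs and NULLs pass
  .is.list.numeric(list(1, "a"))         # FALSE -- a character element fails
}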
.is.list.numeric.matrix <-
function(x) {
# tolerate NA or NULL
if (is.null(x)) { return(TRUE) }
if (!is.list(x)) { return(FALSE) }
for (i in 1:length(x)) {
elem <- as.matrix(x[[i]])
if (!is.null(elem)) {
      if (length(elem) != length(elem[is.numeric(elem) | is.na(elem)])) { return(FALSE) }   # elementwise |, as above
}
}
return(TRUE)
}
.get.file.extension <-
function (path) {
split <- strsplit(path, "\\.")[[1]]
return( tolower(split[length(split)]) )
}
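# Illustrative calls for .get.file.extension (if (FALSE) guard).
if (FALSE) {
  .get.file.extension("results/table.TEX")   # "tex" -- lower-cased text after the last dot
  .get.file.extension("report.html")         # "html"
}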
############## TEXT AND HTML MODE ##############
.split.line <- # split line of a LaTeX table into constituent parts separated by &
function(s) {
# remove the "\\\\"
s <- gsub("\\\\", "", s, fixed=TRUE)
s <- paste(" ",s," ", sep="")
return(.trim(strsplit(s, " &", fixed=TRUE)[[1]]))
}
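# Illustrative call for .split.line on a hand-made LaTeX table row
# (if (FALSE) guard; the expected output is worked out from the code above).
if (FALSE) {
  .split.line("x & 1.23 & 4.56 \\\\ ")   # c("x", "1.23", "4.56")
}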
.remove.extra.spaces <-
function(s) {
new.s <- ""
space <- FALSE
for (i in 1:nchar(s)) {
s.i <- substr(s,i,i)
if (s.i == " ") {
if (space == FALSE) {
space <- TRUE
new.s <- paste(new.s, s.i, sep="")
}
}
else {
space <- FALSE
new.s <- paste(new.s, s.i, sep="")
}
}
return(new.s)
}
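# Illustrative call for .remove.extra.spaces (if (FALSE) guard).
if (FALSE) {
  .remove.extra.spaces("a    b  c")   # "a b c" -- runs of spaces collapse to one
}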
strpos <-
function(x, s) {
return( regexpr(x, s, fixed=TRUE)[1] )
}
is.alphanumeric <-
function(s) {
alphanum <- FALSE
numbers <- grepl("^[[:digit:]]+$", s)
letters <- grepl("^[[:alpha:]]+$", s)
both <- grepl("^[[:digit:][:alpha:]]+$", s)
if ((numbers == TRUE) || (letters == TRUE) || (both == TRUE)) {
alphanum <- TRUE
}
return(alphanum)
}
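# Hand-worked checks for the two small helpers above (if (FALSE) guard).
if (FALSE) {
  strpos("&", "a & b")      # 3 -- position of the first literal match, or -1 if absent
  is.alphanumeric("beta2")  # TRUE
  is.alphanumeric("beta 2") # FALSE -- the space disqualifies the string
}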
.replace.latex.symbols <-
function (s) {
latex.replace <- NULL
latex.replace <- cbind(latex.replace, c("\\textbackslash","\\"), c("\\_","_"), c("\\#","#"), c("\\textasciitilde","~"), c("\\{","{"), c("\\}","}"), c("\\%","%"))
latex.replace <- cbind(latex.replace, c("\\textasteriskcentered","*"), c("\\textbar","|"), c("\\textgreater",">"), c("\\textless","<"), c("$\\hat{\\mkern6mu}$","^"))
# Greek letters
latex.replace <- cbind(latex.replace, c("\\alpha","alpha"), c("\\beta","beta"), c("\\gamma","gamma"), c("\\delta","delta"), c("\\epsilon","epsilon"), c("\\varepsilon","epsilon"), c("\\zeta","zeta"))
latex.replace <- cbind(latex.replace, c("\\eta","eta"), c("\\theta","theta"), c("\\vartheta","theta"), c("\\iota","iota"), c("\\kappa","kappa"), c("\\lambda","lambda"), c("\\mu","mu"))
latex.replace <- cbind(latex.replace, c("\\nu","nu"), c("\\xi","xi"), c("\\pi","pi"), c("\\varpi","pi"), c("\\rho","rho"), c("\\varrho","rho"), c("\\sigma","sigma"))
latex.replace <- cbind(latex.replace, c("\\varsigma","sigma"), c("\\tau","tau"), c("\\upsilon","upsilon"), c("\\phi","phi"), c("\\varphi","phi"), c("\\chi","chi"), c("\\psi","psi"))
latex.replace <- cbind(latex.replace, c("\\omega","omega"), c("\\Gamma","gamma"), c("\\Delta","delta"), c("\\Theta","theta"), c("\\Lambda","lambda"), c("\\Xi","xi"), c("\\Pi","pi"))
latex.replace <- cbind(latex.replace, c("\\Sigma","sigma"), c("\\Upsilon","upsilon"), c("\\Phi","phi"), c("\\Psi","psi"), c("\\Omega","omega"))
s.out <- s
for (item in 1:ncol(latex.replace)) {
symbol <- latex.replace[1, item]
replacement <- latex.replace[2, item]
# quick check if any latex characters
symbol.regexp <- gsub("\\","\\\\",symbol,fixed=TRUE)
symbol.regexp <- gsub("{","\\{",symbol.regexp,fixed=TRUE)
symbol.regexp <- gsub("}","\\}",symbol.regexp,fixed=TRUE)
symbol.regexp <- gsub("$","\\$",symbol.regexp,fixed=TRUE)
symbol.regexp <- paste(symbol.regexp, "[^[:alnum:]_]+", sep="")
pos <- 1
while (pos <= nchar(s.out)) {
if (length(grep(symbol.regexp, s.out))==0) { break }
s.pre <- substr(s.out, 1, pos-1)
s.pos.char <- substr(s.out, pos, pos)
s.post <- substr(s.out, pos + nchar(symbol), nchar(s.out))
if (substr(s.out, pos, pos+nchar(symbol)-1) == symbol) {
if (!is.alphanumeric(substr(s.post, 1, 1))) {
s.out <- paste(s.pre, replacement, s.post, sep="")
            pos <- pos + nchar(replacement) - 1   # skip past the inserted replacement
}
}
pos <- pos + 1
}
}
return(s.out)
}
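# Illustrative calls for .replace.latex.symbols on hand-made inputs
# (if (FALSE) guard; expected outputs follow from the replacement table above).
if (FALSE) {
  .replace.latex.symbols("\\beta ")      # "beta "
  .replace.latex.symbols("\\textbar ")   # "| "
}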
.remove.control.sequences <-
function (s, type="text") {
s <- paste(" ",s, " ", sep="")
# replace latex symbols
s <- .replace.latex.symbols(s)
# remove dollar signs and underscores [ what about text-related starts ]
s <- gsub("\\$", "", s)
# remove extra spaces
s <- .remove.extra.spaces(s)
# add: replace some sequences with corresponding letters
# walk through the string
i <- 1
new.s <- ""
control.sequence <- ""
while (i <= nchar(s)) {
s.i0 <- substr(s, i-1, i)
s.i <- substr(s, i, i)
s.i2 <- substr(s, i, i+1)
if ((s.i %in% c("\\", "_", "^")) && (!(s.i2 %in% c("\\_","\\^"))) && (!(s.i0 %in% c("\\_","\\^"))) ) {
remainder.s <- substr(s, i+1, nchar(s)) # if control character not followed by curly brace
if ((strpos(" ", remainder.s) < strpos("{", remainder.s)) || (strpos("{", remainder.s)==-1)) {
i <- i + strpos(" ", remainder.s) + 1
}
else { # control character followed by curly brace
control.sequence <- substr(s, i, i+strpos("{", remainder.s)-1)
if (type=="html") {
if (control.sequence == "\\textit") { new.s <- paste(new.s,"<em>",sep="") }
if (control.sequence == "\\textbf") { new.s <- paste(new.s,"<strong>",sep="") }
if (control.sequence == "_") { new.s <- paste(new.s,"<sub>",sep="") }
if (control.sequence == "^") { new.s <- paste(new.s,"<sup>",sep="") }
}
if (type=="mmd") {
if (control.sequence == "\\textit") { new.s <- paste(new.s,"*",sep="") }
if (control.sequence == "\\textbf") { new.s <- paste(new.s,"**",sep="") }
if (control.sequence == "~") { new.s <- paste(new.s,"~",sep="") }
if (control.sequence == "^") { new.s <- paste(new.s,"^",sep="") }
}
s.sub <- substr(remainder.s, strpos("{", remainder.s), nchar(remainder.s))
open.brackets <- 0
bracket.start <- bracket.end <- strpos("{", s.sub)
for (j in 1:nchar(s.sub)) {
s.sub.j <- substr(s.sub, j, j)
if (s.sub.j == "{") {
open.brackets <- open.brackets + 1
if (open.brackets == 1) { bracket.start <- j + 1 }
}
if (s.sub.j == "}") {
open.brackets <- open.brackets - 1
if (open.brackets == 0) { bracket.end <- j - 1 }
}
if (!(s.sub.j %in% c("{","}"))) {
if (open.brackets == 0) { break }
}
}
if (bracket.end < bracket.start) {
examine.substring <- ""
}
else {
examine.substring <- substr(s.sub, bracket.start, bracket.end)
}
new.s <- paste(new.s, .remove.control.sequences(examine.substring, type=type), sep="")
if (type=="html") {
if (control.sequence == "\\textit") { new.s <- paste(new.s,"</em>",sep="") }
if (control.sequence == "\\textbf") { new.s <- paste(new.s,"</strong>",sep="") }
if (control.sequence == "_") { new.s <- paste(new.s,"</sub>",sep="") }
if (control.sequence == "^") { new.s <- paste(new.s,"</sup>",sep="") }
}
if (type=="mmd") {
if (control.sequence == "\\textit") { new.s <- paste(new.s,"*",sep="") }
if (control.sequence == "\\textbf") { new.s <- paste(new.s,"**",sep="") }
if (control.sequence == "~") { new.s <- paste(new.s,"~",sep="") }
if (control.sequence == "^") { new.s <- paste(new.s,"^",sep="") }
}
i <- i + strpos("{", remainder.s) + bracket.end + 1
}
}
else { # not inside a control sequence
new.s <- paste(new.s, s.i, sep="")
i <- i + 1
}
}
# replace underscores, etc.
new.s <- gsub("\\_", "_", new.s, fixed=T)
new.s <- gsub("\\^", "^", new.s, fixed=T)
return(.trim(new.s))
}
.text.cline <-
function (cline, max.length, line.char="-") {
for (i in 1:length(cline)) {
if ((cline[i]==0) && (sum(cline[i:length(cline)]) != 0)) {
.repeat.char(" ", rep=max.length[i]+1, new.line=FALSE)
}
else if (cline[i]>=1) {
underline.len <- 0
for (j in i:(i+cline[i]-1)) {
underline.len <- underline.len + max.length[j] + 1
}
underline.len <- underline.len - 1
.repeat.char(line.char, rep=underline.len, new.line=FALSE)
if ((sum(cline[i:length(cline)]) != cline[i])) { cat(" ") }
}
}
cat("\n")
}
.html.cline <-
function (cline) {
cat("<tr>")
for (i in 1:length(cline)) {
if ((cline[i]==0) && (sum(cline[i:length(cline)]) != 0)) {
cat("<td></td>")
}
else if (cline[i]>=1) {
cat("<td colspan=\"",cline[i],"\" style=\"border-bottom: 1px solid black\"></td>",sep="")
}
}
cat("</tr>\n")
}
.mmd.cline <-
function (cline) {
# no support for cline in MMD as far as I am aware
}
.text.horizontal.line <-
function (line.char="-", max.length) {
horizontal.length <- 0
for (i in 1:length(max.length)) {
horizontal.length <- horizontal.length + max.length[i] + 1
}
  horizontal.length <- horizontal.length - 1
.repeat.char(line.char, rep=horizontal.length, new.line=TRUE)
}
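# Illustrative call: a rule spanning two columns of widths 5 and 3, plus the
# single separator space between them (if (FALSE) guard).
if (FALSE) {
  .text.horizontal.line("-", c(5, 3))   # prints 9 dashes: (5 + 1) + (3 + 1) - 1
}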
.html.horizontal.line <-
function (how.many.columns) {
cat("<tr><td colspan=\"",how.many.columns,"\" style=\"border-bottom: 1px solid black\"></td></tr>",sep="")
}
.mmd.horizontal.line <-
function (how.many.columns) {
# no support for hline in MMD as far as I am aware
}
.text.output <-
function(all.latex.code) {
how.many.tables <- 0
start.lines <- NULL
for (i in 1:length(all.latex.code)) {
if (all.latex.code[i] %in% c("")) {
how.many.tables <- how.many.tables + 1
start.lines <- c(start.lines, i)
}
}
for (table.number in 1:how.many.tables) {
if (table.number < how.many.tables) {
latex.code <- all.latex.code[start.lines[table.number]:start.lines[table.number+1]]
}
else {
latex.code <- all.latex.code[start.lines[table.number]:length(all.latex.code)]
}
how.many.columns <- .get.number.of.columns(latex.code)
r <- 0
matrices <- .matrices(latex.code, how.many.columns)
t <- matrices[[1]]
c <- matrices[[2]]
j <- matrices[[3]]
max.l <- .text.column.width(t, c)
w <- .width.matrix(c, max.l)
cat("\n")
for (row in 1:length(latex.code)) {
line <- latex.code[row]
if (substr(line, nchar(line)-2, nchar(line)) == "\\\\ ") {
r <- r + 1
.text.output.line(t, r, w, c, j)
}
else if (strpos("\\caption{", line) != -1) {
inside.caption <- substr(.trim(line), 10, nchar(.trim(line))-1)
text.title <- .trim(.remove.control.sequences(inside.caption))
if (text.title != "") { cat(.remove.control.sequences(inside.caption),"\n", sep="") }
}
else if (strpos("\\cline{", line) != -1) {
s <- paste(" ", line, " ", sep="")
cline <- rep(0, times=how.many.columns)
while (strpos("\\cline{", s) != -1) {
from <- strpos("\\cline{", s) + 7
to <- strpos("}", s) - 1
underline.columns <- substr(s, from, to)
split.columns <- strsplit(underline.columns,"-", fixed=TRUE)[[1]]
col.underline.begin <- as.numeric(split.columns[1])
col.underline.number <- as.numeric(split.columns[2]) - col.underline.begin + 1
cline[col.underline.begin] <- col.underline.number
s <- substr(s, to+1, nchar(s))
          }
          .text.cline(cline, max.l)   # emit once, after all \cline spans on the row are collected
}
else if (strpos("\\hline",line) != -1) {
if (!(is.na(latex.code[row+1]))) {
if (strpos("\\hline", latex.code[row+1]) != -1) {
.text.horizontal.line("=", max.l)
}
else {
if (strpos("\\hline", latex.code[row-1]) == -1) {
.text.horizontal.line("-", max.l)
}
}
}
else {
if (strpos("\\hline", latex.code[row-1]) == -1) {
.text.horizontal.line("-", max.l)
}
}
}
}
}
}
.html.output <-
function(all.latex.code) {
how.many.tables <- 0
start.lines <- NULL
for (i in 1:length(all.latex.code)) {
if (all.latex.code[i] %in% c("")) {
how.many.tables <- how.many.tables + 1
start.lines <- c(start.lines, i)
}
}
for (table.number in 1:how.many.tables) {
if (table.number < how.many.tables) {
latex.code <- all.latex.code[start.lines[table.number]:start.lines[table.number+1]]
}
else {
latex.code <- all.latex.code[start.lines[table.number]:length(all.latex.code)]
}
how.many.columns <- .get.number.of.columns(latex.code)
r <- 0
matrices <- .matrices(latex.code, how.many.columns, type="html")
t <- matrices[[1]]
c <- matrices[[2]]
j <- matrices[[3]]
max.l <- .text.column.width(t, c)
w <- .width.matrix(c, max.l)
cat("\n")
cat("<table style=\"text-align:center\">")
for (row in 1:length(latex.code)) {
line <- latex.code[row]
if (substr(line, nchar(line)-2, nchar(line)) == "\\\\ ") {
r <- r + 1
.html.output.line(t, r, w, c, j)
}
else if (strpos("\\caption{", line) != -1) {
inside.caption <- substr(.trim(line), 10, nchar(.trim(line))-1)
text.title <- .trim(.remove.control.sequences(inside.caption, type="html"))
if (text.title != "") { cat("<caption><strong>",.remove.control.sequences(inside.caption, type="html"),"</strong></caption>\n", sep="") }
}
else if (strpos("\\cline{", line) != -1) {
s <- paste(" ", line, " ", sep="")
cline <- rep(0, times=how.many.columns)
while (strpos("\\cline{", s) != -1) {
from <- strpos("\\cline{", s) + 7
to <- strpos("}", s) - 1
underline.columns <- substr(s, from, to)
split.columns <- strsplit(underline.columns,"-", fixed=TRUE)[[1]]
col.underline.begin <- as.numeric(split.columns[1])
col.underline.number <- as.numeric(split.columns[2]) - col.underline.begin + 1
cline[col.underline.begin] <- col.underline.number
s <- substr(s, to+1, nchar(s))
            }
            .html.cline(cline)   # emit once, after all \cline spans on the row are collected
}
else if (strpos("\\hline",line) != -1) {
if (!(is.na(latex.code[row+1]))) {
if (strpos("\\hline", latex.code[row+1]) != -1) {
.html.horizontal.line(how.many.columns)
}
else {
if (strpos("\\hline", latex.code[row-1]) == -1) {
.html.horizontal.line(how.many.columns)
}
}
}
else {
if (strpos("\\hline", latex.code[row-1]) == -1) {
.html.horizontal.line(how.many.columns)
}
}
}
}
cat("</table>\n")
}
}
.mmd.output <-
function(all.latex.code) {
how.many.tables <- 0
start.lines <- NULL
for (i in 1:length(all.latex.code)) {
if (all.latex.code[i] %in% c("")) {
how.many.tables <- how.many.tables + 1
start.lines <- c(start.lines, i)
}
}
for (table.number in 1:how.many.tables) {
if (table.number < how.many.tables) {
latex.code <- all.latex.code[start.lines[table.number]:start.lines[table.number+1]]
}
else {
latex.code <- all.latex.code[start.lines[table.number]:length(all.latex.code)]
}
how.many.columns <- .get.number.of.columns(latex.code)
r <- 0
matrices <- .matrices(latex.code, how.many.columns, type="mmd")
t <- matrices[[1]]
c <- matrices[[2]]
j <- matrices[[3]]
max.l <- .text.column.width(t, c)
w <- .width.matrix(c, max.l)
cat("\n")
cat("<table style=\"text-align:center\">")
for (row in 1:length(latex.code)) {
line <- latex.code[row]
if (substr(line, nchar(line)-2, nchar(line)) == "\\\\ ") {
r <- r + 1
.mmd.output.line(t, r, w, c, j)
}
else if (strpos("\\caption{", line) != -1) {
inside.caption <- substr(.trim(line), 10, nchar(.trim(line))-1)
text.title <- .trim(.remove.control.sequences(inside.caption, type="mmd"))
if (text.title != "") { cat("**",.remove.control.sequences(inside.caption, type="mmd"),"***\n", sep="") }
### ADD THE REQUISITE NUMBER OF |s
}
else if (strpos("\\cline{", line) != -1) {
s <- paste(" ", line, " ", sep="")
cline <- rep(0, times=how.many.columns)
while (strpos("\\cline{", s) != -1) {
from <- strpos("\\cline{", s) + 7
to <- strpos("}", s) - 1
underline.columns <- substr(s, from, to)
split.columns <- strsplit(underline.columns,"-", fixed=TRUE)[[1]]
col.underline.begin <- as.numeric(split.columns[1])
col.underline.number <- as.numeric(split.columns[2]) - col.underline.begin + 1
cline[col.underline.begin] <- col.underline.number
s <- substr(s, to+1, nchar(s))
            }
            .mmd.cline(cline)
}
else if (strpos("\\hline",line) != -1) {
if (!(is.na(latex.code[row+1]))) {
if (strpos("\\hline", latex.code[row+1]) != -1) {
.mmd.horizontal.line(how.many.columns)
}
else {
if (strpos("\\hline", latex.code[row-1]) == -1) {
.mmd.horizontal.line(how.many.columns)
}
}
}
else {
if (strpos("\\hline", latex.code[row-1]) == -1) {
.mmd.horizontal.line(how.many.columns)
}
}
}
}
cat("</table>\n")
}
}
.text.output.line <-
function(text.matrix, row, width.matrix, column.matrix, justification.matrix) {
real.c <- 0 # "real" column position
for (c in 1:ncol(text.matrix)) {
real.c <- real.c + column.matrix[row,c]
justify <- justification.matrix[row, c]
if (!(is.na(text.matrix[row,c]))) {
.just.cat(text.matrix[row, c], width=width.matrix[row, c], justify=justify)
if (real.c < ncol(text.matrix)) { cat(" ",sep="")}
}
}
cat("\n")
}
.html.output.line <-
function(text.matrix, row, width.matrix, column.matrix, justification.matrix) {
real.c <- 0 # "real" column position
cat("<tr>")
for (c in 1:ncol(text.matrix)) {
cm <- column.matrix[row,c]
real.c <- real.c + cm
justify <- justification.matrix[row, c]
if (!(is.na(text.matrix[row,c]))) {
cat("<td")
if (cm > 1) { cat(" colspan=\"",cm,"\"", sep="") }
if (justify == "l") { cat(" style=\"text-align:left\"", sep="") }
if (justify == "r") { cat(" style=\"text-align:right\"", sep="") }
cat(">")
.just.cat(text.matrix[row, c], width=width.matrix[row, c], justify="n")
cat("</td>")
}
}
cat("</tr>\n")
}
.mmd.output.line <-
function(text.matrix, row, width.matrix, column.matrix, justification.matrix) {
real.c <- 0 # "real" column position
for (c in 1:ncol(text.matrix)) {
cm <- column.matrix[row,c]
real.c <- real.c + cm
justify <- justification.matrix[row, c]
if (!(is.na(text.matrix[row,c]))) {
.just.cat(text.matrix[row, c], width=width.matrix[row, c], justify=justify)
for (i in 1:cm) { cat("|") }
}
}
cat("\n")
}
.width.matrix <-
function(column.matrix, max.length) {
w.matrix <- matrix(NA, nrow = nrow(column.matrix), ncol = ncol(column.matrix))
# enter single widths first
for (r in 1:nrow(column.matrix)) {
for (c in 1:ncol(column.matrix)) {
w.matrix[r,c] <- max.length[c]
}
}
# think about multicolumns
for (r in 1:nrow(column.matrix)) {
from.c <- 0 # from which column do I start hoovering up widths?
for (c in 1:ncol(column.matrix)) {
from.c <- from.c+1
if (column.matrix[r,c] >= 2) {
total.width <- 0
for (i in from.c:(from.c+column.matrix[r,c]-1)) {
total.width <- total.width + max.length[i] + 1
if (i > from.c) {
for (j in i:ncol(column.matrix)) {
if ((j+1) <= ncol(column.matrix)) {
w.matrix[r,j] <- w.matrix[r, j+1]
w.matrix[r,j+1] <- NA
}
else {
w.matrix[r,j] <- NA
}
}
}
}
w.matrix[r,c] <- total.width - 1
from.c <- from.c + column.matrix[r,c] - 1
}
}
}
return(w.matrix)
}
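# Hand-worked example for .width.matrix: one row whose first cell is a
# two-column multicolumn, with single-column maxima 3, 4 and 5
# (if (FALSE) guard; the result is traced from the code above).
if (FALSE) {
  .width.matrix(matrix(c(2, 1, 1), nrow = 1), c(3, 4, 5))
  # matrix(c(8, 5, NA), nrow = 1): 8 = 3 + 1 + 4 spans the first two columns
}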
.text.column.width <-
function(text.matrix, column.matrix) {
max.length = rep(1, times=ncol(column.matrix))
temp.text.matrix <- text.matrix
# first, get the maximum width of single columns
for (r in 1:nrow(text.matrix)) {
for (c in 1:ncol(text.matrix)) {
real.c <- 0 # 'real' column number, adjusted for multicolumn
for (i in 1:c) {
real.c <- real.c + column.matrix[r, i]
}
if (real.c <= ncol(text.matrix)) {
if (column.matrix[r,c] == 1) { # only look at singles here
if (nchar(text.matrix[r,c]) > max.length[real.c]) { max.length[real.c] <- nchar(text.matrix[r,c]) }
}
}
}
}
# think about multicolumns
for (r in 1:nrow(text.matrix)) {
for (c in 1:ncol(text.matrix)) {
if (!is.na(column.matrix[r,c])) {
if (column.matrix[r,c] >= 2) { # only look at multicolumns
total.width <- 0
for (i in c:(c+column.matrix[r,c]-1)) {
total.width <- total.width + max.length[i]
}
while (total.width < nchar(text.matrix[r,c])) { # if does not fit into single columns, widen the maxima
relevant.maxima <- NULL
for (i in c:(c+column.matrix[r,c]-1)) {
relevant.maxima <- c(relevant.maxima, max.length[i])
if (max.length[i] == min(relevant.maxima)) {
total.width <- 0
for (j in c:(c+column.matrix[r,c]-1)) {
total.width <- total.width + max.length[j]
}
if (total.width < nchar(text.matrix[r,c])) { max.length[i] <- max.length[i] + 1 }
}
}
}
}
}
}
}
return(max.length)
}
.text.table.rows <-
function(latex.code) {
# figure out how many columns
rows <- 0
for (i in 1:length(latex.code)) {
line <- latex.code[i]
if (substr(line, nchar(line)-2, nchar(line)) == "\\\\ ") {
rows <- rows + 1
}
}
return(rows)
}
.get.number.of.columns <-
function(latex.code) {
formatting.string <- ""
for (i in 1:length(latex.code)) {
line <- latex.code[i]
if ((substr(line, 1, 7) == "\\begin{") && (regexpr("}}",line,fixed=TRUE)[[1]] != -1)) {
formatting.string <- substr(line, regexpr("}}",line,fixed=TRUE)[[1]]+2, nchar(line)-1)
}
}
columns <- 0
for (i in 1:nchar(formatting.string)) {
if (substring(formatting.string, i, i) %in% c("l", "c", "r", "D")) { columns <- columns + 1 }
}
return(columns)
}
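# Illustrative call for .get.number.of.columns; the input imitates a header
# line as produced by .table.header (if (FALSE) guard).
if (FALSE) {
  .get.number.of.columns("\\begin{tabular}{@{\\extracolsep{5pt}}lcc} ")   # 3
}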
.matrices <-
function(latex.code, how.many.columns, type="text") {
rows <- .text.table.rows(latex.code)
t.matrix <- matrix(NA, nrow = rows, ncol = how.many.columns)
c.matrix <- matrix(1, nrow = rows, ncol = how.many.columns)
j.matrix <- matrix(NA, nrow = rows, ncol = how.many.columns)
line.content.j <- rep("c", how.many.columns)
# put strings into matrix
row <- 0
for (i in 1:length(latex.code)) {
line <- latex.code[i]
if (substr(line, nchar(line)-2, nchar(line)) == "\\\\ ") {
row <- row + 1
line.content <- .split.line(.remove.control.sequences(line, type=type))
length(line.content) <- how.many.columns
t.matrix[row,] <- line.content
line.content.j[1] <- "l"
line.content.j[2:how.many.columns] <- "c"
line.split <- .split.line(line)
# add in column widths
line.column <- rep(1, how.many.columns)
for (j in 1:length(line.split)) {
no.of.columns <- 0
if (regexpr("\\multicolumn{", line.split[j], fixed=TRUE) != -1) {
# text
multicolumn.no <- substr(line.split[j], regexpr("{", line.split[j], fixed=TRUE)+1, regexpr("}", line.split[j], fixed=TRUE)-1)
no.of.columns <- as.numeric(multicolumn.no)
# justification
from <- regexpr("}{", line.split[j], fixed=TRUE)+2
rest.of.expression <- substr(line.split[j], from, nchar(line.split[j]))
to <- regexpr("}", rest.of.expression, fixed=TRUE) - 1
justification <- substr(rest.of.expression, 1, to)
line.content.j[j] <- justification
}
else {
no.of.columns <- 1
}
line.column[j] <- no.of.columns
}
# column
length(line.column) <- how.many.columns
c.matrix[row,] <- line.column
# justification
length(line.content.j) <- how.many.columns
j.matrix[row,] <- line.content.j
}
}
return(list(t.matrix,c.matrix,j.matrix))
}
.repeat.char <-
function(ch, rep=1, new.line=FALSE) {
if (rep >= 1) {
out.str <- ""
for (i in 1:rep) {
out.str <- paste(out.str, ch, sep="")
}
if (new.line == TRUE) { out.str <- paste(out.str, "\n", sep="")}
cat(out.str)
}
}
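# Illustrative calls for .repeat.char (if (FALSE) guard).
if (FALSE) {
  .repeat.char("=", 4)                   # prints "===="
  .repeat.char("-", 3, new.line = TRUE)  # prints "---" followed by a newline
}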
.just.cat <- # cat that justifies a string within a field of the given width
function(s, width, offset.char=" ", justify="c"){
len <- nchar(s)
if (width <= len) {
cat(s)
}
else {
if (justify == "c") {
offset <- (width - len) %/% 2
.repeat.char(offset.char, offset)
cat(s)
.repeat.char(offset.char, width - len - offset)
}
else if (justify == "l") {
cat(s)
.repeat.char(offset.char, width - len)
}
else if (justify == "r") {
.repeat.char(offset.char, width - len)
cat(s)
}
else if (justify == "n") { # no justification, just output
cat(s)
}
}
}
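# Illustrative padding behavior of .just.cat (if (FALSE) guard).
if (FALSE) {
  .just.cat("ab", width = 6, justify = "c")   # prints "  ab  "
  .just.cat("ab", width = 6, justify = "r")   # prints "    ab"
}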
############## OUTPUT INTO FILE ##############
### !!!! - add packages
.output.tex <-
function (file.out, content, header) {
header.tex <- "\\documentclass{article}\n"
required.latex.packages <- NULL
if (.format.dec.mark.align==TRUE) { required.latex.packages <- c(required.latex.packages, "dcolumn") }
if (.format.floating.environment=="sidewaystable") { required.latex.packages <- c(required.latex.packages, "rotating") }
if (!is.null(required.latex.packages)) {
for (i in 1:length(required.latex.packages)) {
header.tex <- paste(header.tex, "\\usepackage{", required.latex.packages[i], "}\n", sep="")
}
}
if (header == TRUE) {
cat(
header.tex,
"\\begin{document}",
paste(content, collapse="\n"),
"\\end{document}\n",
sep="\n",
file = file.out
)
} else {
cat(
paste(content, collapse="\n"),
sep="\n",
file = file.out
)
}
}
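# Illustrative sketch, not executed: with header = TRUE the table is wrapped in
# a standalone compilable document; with header = FALSE only the table code is
# written, ready to be \input{} into a larger document.
if (FALSE) {
.output.tex("table.tex", latex.code, header = TRUE)
}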
.output.html <-
function (file.out, content, header) {
if (header == TRUE) {
cat(
"<!DOCTYPE html>",
"<html>",
"<body>",
paste(content, collapse="\n"),
"</body>",
"</html>\n",
sep="\n",
file = file.out
)
} else {
cat(
paste(content, collapse="\n"),
sep="\n",
file = file.out
)
}
}
.output.txt <-
function (file.out, content, header) {
cat(
paste(content, collapse="\n"),
sep="\n",
file = file.out
)
}
# !!! - work on this more in a later version
.output.pdf <-
function (file.out, content) {
tex.temp.file <- tempfile("temp", fileext=".tex") # fileext must include the dot
.output.tex(tex.temp.file, content, header=TRUE) # write a standalone document so that pdflatex (required on the system PATH) can compile it
capture.output(system(paste( "pdflatex --interaction=nonstopmode", shQuote(tex.temp.file)), show.output.on.console = FALSE ))
}
.output.file <-
function (out, latex.code, text.out, html.out, type, out.header) {
for (i in 1:length(out)) {
if (.get.file.extension(out[i])=="tex") { .output.tex(out[i], latex.code, out.header) }
# else if (.get.file.extension(out[i])=="pdf") { .output.pdf(out[i], latex.code) }
else if (.get.file.extension(out[i])=="txt") { .output.txt(out[i], text.out, out.header) }
else if ((.get.file.extension(out[i])=="html") || (.get.file.extension(out[i])=="htm")) {
.output.html(out[i], html.out, out.header)
}
else { # if another extension, do latex or text based on 'type'
if (type == "latex") { .output.tex(out[i], latex.code, out.header) }
else if (type == "text") { .output.txt(out[i], text.out, out.header) }
else if (type == "html") { .output.html(out[i], html.out, out.header) }
}
}
}
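# Illustrative sketch, not executed: the format is chosen per output file from
# its extension, falling back to the 'type' argument for other extensions.
if (FALSE) {
.output.file(out = c("results.tex", "results.txt", "results.htm"),
             latex.code, text.out, html.out, type = "latex", out.header = FALSE)
}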
###########################################
.get.objects <-
function(list.of.objects) {
objects <- list()
for (i in 1:length(list.of.objects)) {
current.object <- list.of.objects[[i]]
if (class(current.object)[1] == "list") {
objects <- append(objects, .get.objects(current.object))
}
else {
objects <- append(objects, list(current.object))
}
}
return(objects)
}
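# Illustrative sketch, not executed (fit1, fit2, fit3 are hypothetical model
# objects): arbitrarily nested lists are flattened into one flat list, in order.
if (FALSE) {
flat <- .get.objects(list(fit1, list(fit2, fit3))) # list(fit1, fit2, fit3)
}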
# extract object names from the deparsed '...' string
.get.object.names <- function(s) {
object.names <- NULL
inside <- .inside.bracket(s)
for (i in 1:length(inside)) {
if (substr(inside[i],1,nchar("list("))=="list(") {
object.names <- c(object.names, .get.object.names(inside[i]))
}
else {
object.names <- c(object.names, inside[i])
}
}
return(object.names)
}
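# Illustrative sketch, not executed: names are recovered from the deparsed call,
# mirroring the flattening done by .get.objects() above.
if (FALSE) {
.get.object.names("list(fit1, list(fit2, fit3))") # expected: c("fit1", "fit2", "fit3")
}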
###########################################
## invisible output
invisible.output <- NULL
latex.code <- NULL
text.out <- NULL
## error handling
error.present <- "\n"
# get object names --- !!! CHECK ORDER
object.names.string <- deparse(substitute(list(...))) ### for further processing to extract object names
.global.object.names.all <- .get.object.names(object.names.string)
# get objects
list.of.objects <- list(...)
objects <- as.list(.get.objects(list.of.objects))
how.many.objects <- length(objects)
# should we include a summary statistics table when given a data frame
.global.summary <- rep(TRUE, times=how.many.objects)
## check if argument input is ok
.format.rownames <- TRUE
.format.colnames <- TRUE
# flip the table?
.format.flip <- flip
if (how.many.objects < 1) { error.present <- c(error.present, "% Error: At least one object is required.\n") }
else {
# identify objects
for (i in seq(1:how.many.objects)) {
if (is.data.frame(objects[[i]])) {
obj.rownames <- rownames(objects[[i]])
if (is.null(obj.rownames)) { .format.rownames <- FALSE }
}
else if ((is.matrix(objects[[i]])) && (class(objects[[i]])[1] != "coeftest")) {
.global.summary[i] <- FALSE # content output default for matrices
obj.rownames <- rownames(objects[[i]])
obj.colnames <- colnames(objects[[i]])
if (is.null(obj.rownames)) {
if (.format.flip == FALSE) { .format.rownames <- FALSE }
else { .format.colnames <- FALSE }
obj.rownames <- as.character(c(1:nrow(objects[[i]])))
}
if (is.null(obj.colnames)) {
if (.format.flip == FALSE) { .format.colnames <- FALSE }
else { .format.rownames <- FALSE }
obj.colnames <- as.character(c(1:ncol(objects[[i]])))
}
objects[[i]] <- as.data.frame(objects[[i]])
colnames(objects[[i]]) <- obj.colnames
}
else if (is.vector(objects[[i]])) {
.global.summary[i] <- FALSE # content output default for vectors
obj.names <- names(objects[[i]])
if (is.null(obj.names)) {
.format.colnames <- FALSE
.format.rownames <- FALSE
obj.names <- as.character(c(1:length(objects[[i]])))
}
objects[[i]] <- as.data.frame(t(objects[[i]]))
names(objects[[i]]) <- obj.names
if (.format.flip == TRUE) { .format.colnames <- FALSE }
else { .format.rownames <- FALSE }
}
if (!is.data.frame(objects[[i]])) {
# if the object wraps a zelig result, extract the underlying model automatically
if (class(objects[[i]])[1] %in% c("coeftest","lmerMod","glmerMod","nlmerMod","fGARCH")) { # use this to eliminate lmer, glmer, nlmer
if (.model.identify(objects[[i]])=="unknown") { error.present <- c(error.present, "% Error: Unrecognized object type.\n") }
}
else {
if (!is.null(objects[[i]]$zelig.call)) {
formula <- NULL # initialize, so that the check below does not accidentally pick up stats::formula
if (!is.null(objects[[i]]$formula)) { formula <- objects[[i]]$formula }
objects[[i]] <- objects[[i]]$result
if (!is.null(formula)) { objects[[i]]$formula2 <- formula }
}
###
if (is.atomic(objects[[i]]) && (!is.null(objects[[i]]))) { error.present <- c(error.present, "% Error: Unrecognized object type.\n") }
else if (.model.identify(objects[[i]])=="unknown") { error.present <- c(error.present, "% Error: Unrecognized object type.\n") }
else if (.model.identify(objects[[i]])=="unsupported zelig") { error.present <- c(error.present, "% Error: Unsupported 'zelig' model.\n") }
}
}
}
}
if (!is.character(type)) { error.present <- c(error.present, "% Error: Argument 'type' must be of type 'character.'\n") }
if (length(type) != 1) { error.present <- c(error.present, "% Error: Argument 'type' must be of length 1.'\n") }
if (is.character(type)) {
if (!(tolower(type) %in% c("latex", "text", "html"))) {
error.present <- c(error.present, "% Error: 'type' must be either 'latex' (default), 'html' or 'text.'\n")
}
}
if (!is.character(title)) { error.present <- c(error.present, "% Error: Argument 'title' must be of type 'character.'\n") }
if (!is.character(style)) { error.present <- c(error.present, "% Error: Argument 'style' must be of type 'character.'\n") }
if (length(style) != 1) { error.present <- c(error.present, "% Error: Argument 'style' must be of length 1.'\n") }
if (is.character(style)) {
if (!(tolower(style) %in% c("all","all2","default","commadefault","aer","ajps","ajs","asq","asr","apsr","demography","io","jpam","qje"))) {
error.present <- c(error.present, "% Error: 'style' not recognized'\n")
}
}
if ((!is.logical(summary)) && (!is.null(summary))) { error.present <- c(error.present, "% Error: Argument 'summary' must be NULL, or of type 'logical' (TRUE/FALSE) \n") }
if ((!is.character(out)) && (!is.null(out))) { error.present <- c(error.present, "% Error: Argument 'out' must be NULL (default), or a vector of type 'character.' \n") }
if (!is.logical(out.header)) { error.present <- c(error.present, "% Error: Argument 'out.header' be of type 'logical' (TRUE/FALSE) \n") }
if ((!is.numeric(column.separate)) && (!is.null(column.separate))) { error.present <- c(error.present, "% Error: Argument 'column.separate' must be NULL (default), or a vector of type 'numeric.'\n") }
if ((!is.character(column.labels)) && (!is.null(column.labels))) { error.present <- c(error.present, "% Error: Argument 'column.labels' must be NULL (default), or a vector of type 'character.'\n") }
if ((!is.character(covariate.labels)) && (!is.null(covariate.labels))) { error.present <- c(error.present, "% Error: Argument 'covariate.labels' must be NULL (default), or a vector of type 'character.'\n") }
if ((!is.character(dep.var.labels)) && (!is.null(dep.var.labels))) { error.present <- c(error.present, "% Error: Argument 'dep.var.labels' must be NULL (default), or a vector of type 'character.'\n") }
if ((!is.logical(dep.var.labels.include)) && (!is.null(dep.var.labels.include))) { error.present <- c(error.present, "% Error: Argument 'dep.var.labels.include' must be NULL (default), or of type 'logical' (TRUE/FALSE) \n") }
if ((length(dep.var.labels.include) != 1) && (!is.null(dep.var.labels.include))) { error.present <- c(error.present, "% Error: Argument 'dep.var.labels.include' must be of length 1.'\n") }
if ((!is.character(dep.var.caption)) && (!is.null(dep.var.caption))) { error.present <- c(error.present, "% Error: Argument 'dep.var.caption must be NULL (default), or of type 'character.'\n") }
if ((length(dep.var.caption) != 1) && (!is.null(dep.var.caption))) { error.present <- c(error.present, "% Error: Argument 'dep.var.caption' must be of length 1.'\n") }
coef <- .turn.into.list(coef); se <- .turn.into.list(se)
t <- .turn.into.list(t); p <- .turn.into.list(p)
if ((!.is.list.numeric(coef))) { error.present <- c(error.present, "% Error: Argument 'coef' must be NULL (default), or a list of numeric vectors.\n") }
if ((!.is.list.numeric(se))) { error.present <- c(error.present, "% Error: Argument 'se' must be NULL (default), or a list of numeric vectors.\n") }
if ((!.is.list.numeric(t))) { error.present <- c(error.present, "% Error: Argument 't' must be NULL (default), or a list of numeric vectors.\n") }
if ((!.is.list.numeric(p))) { error.present <- c(error.present, "% Error: Argument 'p' must be NULL (default), or a list of numeric vectors.\n") }
if (!is.logical(t.auto)) { error.present <- c(error.present, "% Error: Argument 't.auto' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(t.auto) != 1) { error.present <- c(error.present, "% Error: Argument 't.auto' must be of length 1.'\n") }
if (!is.logical(p.auto)) { error.present <- c(error.present, "% Error: Argument 'p.auto' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(p.auto) != 1) { error.present <- c(error.present, "% Error: Argument 'p.auto' must be of length 1.'\n") }
if (!is.logical(align)) { error.present <- c(error.present, "% Error: Argument 'align' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(align) != 1) { error.present <- c(error.present, "% Error: Argument 'align' must be of length 1.'\n") }
if (!is.logical(ci)) { error.present <- c(error.present, "% Error: Argument 'ci' must be of type 'logical' (TRUE/FALSE) \n") }
ci.custom <- .turn.into.list(ci.custom)
if ((!.is.list.numeric.matrix(ci.custom))) { error.present <- c(error.present, "% Error: Argument 'ci.custom' must be NULL (default), or a list of numeric matrices. \n") }
else if (!is.null(ci.custom)) {
l <- length(ci.custom)
bad.dimension <- FALSE
for (i in 1:l) {
if (!is.null(ci.custom[[i]])) {
if (ncol(ci.custom[[i]]) != 2 ) { bad.dimension <- TRUE }
}
}
if (bad.dimension) { error.present <- c(error.present, "% Error: The numeric matrix in 'ci.custom' must have two columns (lower bound and upper bound, respectively). \n") }
}
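# Illustrative example (assumed usage): a valid 'ci.custom' entry is a two-column
# numeric matrix of lower and upper bounds, one row per coefficient, e.g.
# ci.custom = list(cbind(fit.lower, fit.upper)) for hypothetical bound vectors.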
if (!is.numeric(ci.level)) { error.present <- c(error.present, "% Error: Argument 'ci.level' must be of type 'numeric.' \n") }
if ((!is.character(ci.separator)) && (!is.null(ci.separator))) { error.present <- c(error.present, "% Error: Argument 'ci.separator' must be NULL (default), or of type 'character.'\n") }
if ((length(ci.separator) != 1) && (!is.null(ci.separator))) { error.present <- c(error.present, "% Error: Argument 'ci.separator' must be of length 1.'\n") }
add.lines <- .turn.into.list(add.lines)
if ((!is.list(add.lines)) && (!is.null(add.lines))) { error.present <- c(error.present, "% Error: Argument 'add.lines' must be NULL (default), or a list of vectors. \n") }
if (!is.null(add.lines)) {
if (length(add.lines) < 1) { error.present <- c(error.present, "% Error: The list in argument 'add.lines' must be of length 1 or more. \n") }
if (!all(unlist(lapply(add.lines, is.vector)))) { error.present <- c(error.present, "% Error: Argument 'add.lines' must be NULL (default), or a list of vectors. \n") }
}
if ((!is.function(apply.coef)) && (!is.null(apply.coef))) { error.present <- c(error.present, "% Error: Argument 'apply.coef' must be NULL (default), or a function.'\n") }
if ((!is.function(apply.se)) && (!is.null(apply.se))) { error.present <- c(error.present, "% Error: Argument 'apply.se' must be NULL (default), or a function.'\n") }
if ((!is.function(apply.t)) && (!is.null(apply.t))) { error.present <- c(error.present, "% Error: Argument 'apply.t' must be NULL (default), or a function.'\n") }
if ((!is.function(apply.p)) && (!is.null(apply.p))) { error.present <- c(error.present, "% Error: Argument 'apply.p' must be NULL (default), or a function.'\n") }
if ((!is.function(apply.ci)) && (!is.null(apply.ci))) { error.present <- c(error.present, "% Error: Argument 'apply.ci' must be NULL (default), or a function.'\n") }
if (!is.character(column.sep.width)) { error.present <- c(error.present, "% Error: Argument 'column.sep.width' must be of type 'character.'\n") }
if (length(column.sep.width) != 1) { error.present <- c(error.present, "% Error: Argument 'column.sep.width' must be of length 1.'\n") }
if ((!is.character(decimal.mark)) && (!is.null(decimal.mark))) { error.present <- c(error.present, "% Error: Argument 'decimal.mark' must be NULL (default), or of type 'character.'\n") }
if ((length(decimal.mark) != 1) && (!is.null(decimal.mark))) { error.present <- c(error.present, "% Error: Argument 'decimal.mark' must be of length 1.'\n") }
if (!is.logical(df)) { error.present <- c(error.present, "% Error: Argument 'df' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(df) != 1) { error.present <- c(error.present, "% Error: Argument 'df' must be of length 1.'\n") }
if ((!is.numeric(digit.separate)) && (!is.null(digit.separate)) && (!is.character(digit.separate))) { error.present <- c(error.present, "% Error: Argument 'digit.separate' must be NULL (default), a vector of type 'numeric,' or of type 'character.' \n") }
if (is.character(digit.separate)) {
if (!(digit.separate %in% c("lakh","japan","china"))) { error.present <- c(error.present, "% Error: If argument 'digit.separate' is of type character, it must be one of \"lakh\"/\"china\"/\"japan\".\n") }
}
if ((!is.character(digit.separator)) && (!is.null(digit.separator))) { error.present <- c(error.present, "% Error: Argument 'digit.separator' must be NULL (default), or of type 'character.'\n") }
if ((length(digit.separator) != 1) && (!is.null(digit.separator))) { error.present <- c(error.present, "% Error: Argument 'digit.separator' must be of length 1.'\n") }
if ((!is.numeric(digits)) && (!is.null(digits))) {
if (!is.na(digits)) { error.present <- c(error.present, "% Error: Argument 'digits' must be NULL (default), or of type 'numeric.'\n") }
}
if ((length(digits) != 1) && (!is.null(digits))) {
if (!is.na(digits)) { error.present <- c(error.present, "% Error: Argument 'digits' must be of length 1.'\n") }
}
if (!is.null(digits)) {
if (!is.na(digits)) {
if ((digits<0) && (is.numeric(digits))) { error.present <- c(error.present, "% Error: Argument 'digits' must be >= 0.'\n") }
}
}
if ((!is.numeric(digits.extra)) && (!is.null(digits.extra))) { error.present <- c(error.present, "% Error: Argument 'digits.extra' must be NULL (default), or of type 'numeric.'\n") }
if ((length(digits.extra) != 1) && (!is.null(digits.extra))) { error.present <- c(error.present, "% Error: Argument 'digits.extra' must be of length 1.'\n") }
if (!is.null(digits.extra)) {
if ((digits.extra<0) && (is.numeric(digits.extra))) { error.present <- c(error.present, "% Error: Argument 'digits.extra' must be >= 0.'\n") }
}
if (!is.logical(flip)) { error.present <- c(error.present, "% Error: Argument 'flip' must be of type 'logical' (TRUE/FALSE) \n") }
if ((length(flip) != 1) && (!is.null(flip))) { error.present <- c(error.present, "% Error: Argument 'flip' must be of length 1.'\n") }
if (!is.logical(float)) { error.present <- c(error.present, "% Error: Argument 'float' must be of type 'logical' (TRUE/FALSE) \n") }
if ((length(float) != 1) && (!is.null(float))) { error.present <- c(error.present, "% Error: Argument 'float' must be of length 1.'\n") }
if (!(float.env %in% c("table","table*","sidewaystable"))) { error.present <- c(error.present, "% Error: Argument 'float.env' must be one of \"table\", \"table*\" or \"sidewaystable\".\n") }
if (length(float.env) != 1) { error.present <- c(error.present, "% Error: Argument 'float.env' must be of length 1.'\n") }
if (!is.null(font.size)) {
if (!(font.size %in% c("tiny","scriptsize","footnotesize","small","normalsize","large","Large","LARGE","huge","Huge"))) { error.present <- c(error.present, "% Error: Argument 'font.size' must be NULL (default), or one of the available font sizes. See documentation.") }
}
if ((length(font.size) != 1) && (!is.null(font.size))) { error.present <- c(error.present, "% Error: Argument 'font.size' must be of length 1.'\n") }
if (!is.logical(header)) { error.present <- c(error.present, "% Error: Argument 'header' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(header) != 1) { error.present <- c(error.present, "% Error: Argument 'header' must be of length 1.'\n") }
if ((!is.logical(initial.zero)) && (!is.null(initial.zero))) { error.present <- c(error.present, "% Error: Argument 'initial.zero' must be NULL (default), or of type 'logical' (TRUE/FALSE) \n") }
if ((length(initial.zero) != 1) && (!is.null(initial.zero))) { error.present <- c(error.present, "% Error: Argument 'initial.zero' must be of length 1.'\n") }
if (!is.logical(intercept.bottom)) { error.present <- c(error.present, "% Error: Argument 'intercept.bottom' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(intercept.bottom) != 1) { error.present <- c(error.present, "% Error: Argument 'intercept.bottom' must be of length 1.'\n") }
if (!is.logical(intercept.top)) { error.present <- c(error.present, "% Error: Argument 'intercept.top' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(intercept.top) != 1) { error.present <- c(error.present, "% Error: Argument 'intercept.top' must be of length 1.'\n") }
if (intercept.top && intercept.bottom) { error.present <- c(error.present, "% Error: Arguments 'intercept.bottom' and 'intercept.top' cannot both be TRUE. \n")}
if ((!is.character(keep)) && (!is.numeric(keep)) && (!is.null(keep))) { error.present <- c(error.present, "% Error: Argument 'keep' must be NULL (default; all variables kept), or a vector of type 'character' or 'numeric.'\n") }
if ((!is.character(keep.stat)) && (!is.null(keep.stat))) { error.present <- c(error.present, "% Error: Argument 'keep.stat' must be NULL (default), or a vector of type 'character.'\n") }
keep.stat.acceptable <- c("all","n","rsq","adj.rsq","max.rsq","ll","aic","bic","scale","ubre","rho","mills","sigma2","ser","f","theta","chi2","wald","lr","logrank","null.dev","res.dev") # list of statistic codes that are acceptable; compared against tolower()-ed user input
if (is.character(keep.stat)) {
is.acceptable <- unique(tolower(keep.stat) %in% keep.stat.acceptable)
if (length(is.acceptable)>1) { is.acceptable <- FALSE }
if (!is.acceptable) { error.present <- c(error.present, "% Error: Unknown statistic in 'keep.stat' argument.\n") }
}
if (!is.character(label)) { error.present <- c(error.present, "% Error: Argument 'label' must be of type 'character.'\n") }
if ((!is.logical(model.names)) && (!is.null(model.names))) { error.present <- c(error.present, "% Error: Argument 'model.names' must be NULL (default), or of type 'logical' (TRUE/FALSE) \n") }
if ((length(model.names) != 1) && (!is.null(model.names))) { error.present <- c(error.present, "% Error: Argument 'model.names' must be of length 1.'\n") }
if ((!is.logical(model.numbers)) && (!is.null(model.numbers))) { error.present <- c(error.present, "% Error: Argument 'model.numbers' must be NULL (default), or of type 'logical' (TRUE/FALSE) \n") }
if ((length(model.numbers) != 1) && (!is.null(model.numbers))) { error.present <- c(error.present, "% Error: Argument 'model.numbers' must be of length 1.'\n") }
if (!is.logical(multicolumn)) { error.present <- c(error.present, "% Error: Argument 'multicolumn' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(multicolumn) != 1) { error.present <- c(error.present, "% Error: Argument 'multicolumn' must be of length 1.'\n") }
if ((!is.logical(no.space)) && (!is.null(no.space))) { error.present <- c(error.present, "% Error: Argument 'no.space' must be NULL (default), or of type 'logical' (TRUE/FALSE) \n") }
if ((length(no.space) != 1) && (!is.null(no.space))) { error.present <- c(error.present, "% Error: Argument 'no.space' must be of length 1.'\n") }
if ((!is.character(notes)) && (!is.null(notes))) { error.present <- c(error.present, "% Error: Argument 'notes' must be NULL (default), or a vector of type 'character.'\n") }
if (!is.null(notes.align)) {
if (!(tolower(notes.align) %in% c("l","c","r"))) { error.present <- c(error.present, "% Error: Argument 'notes.align' must be NULL (default), or \"l\"/\"c\"/\"r\".\n") }
}
if ((length(notes.align) != 1) && (!is.null(notes.align))) { error.present <- c(error.present, "% Error: Argument 'notes.align' must be of length 1.'\n") }
if (!is.logical(notes.append)) { error.present <- c(error.present, "% Error: Argument 'notes.append' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(notes.append) != 1) { error.present <- c(error.present, "% Error: Argument 'notes.append' must be of length 1.'\n") }
if ((!is.character(notes.label)) && (!is.null(notes.label))) { error.present <- c(error.present, "% Error: Argument 'notes.label' must be NULL (default), or of type 'character.'\n") }
if ((length(notes.label) != 1) && (!is.null(notes.label))) { error.present <- c(error.present, "% Error: Argument 'notes.label' must be of length 1.'\n") }
if (!is.logical(object.names)) { error.present <- c(error.present, "% Error: Argument 'object.names' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(object.names) != 1) { error.present <- c(error.present, "% Error: Argument 'object.names' must be of length 1.'\n") }
if ((!is.character(omit)) && (!is.numeric(omit)) && (!is.null(omit))) { error.present <- c(error.present, "% Error: Argument 'omit' must be NULL (default; no omissions), or a vector of type 'character' or 'numeric.'\n") }
if ((!is.character(omit.labels)) && (!is.null(omit.labels))) { error.present <- c(error.present, "% Error: Argument 'omit.labels' must be NULL (default; no labels), or a vector of type 'character.'\n") }
if (!is.null(omit.labels)) {
if (length(omit) != length(omit.labels)) { error.present <- c(error.present, "% Error: Argument 'omit.labels' must be NULL (default), or equal in length to 'omit.'\n") }
}
if ((!is.character(omit.stat)) && (!is.null(omit.stat))) { error.present <- c(error.present, "% Error: Argument 'omit.stat' must be NULL (default), or a vector of type 'character.'\n") }
omit.stat.acceptable <- c("all","n","rsq","adj.rsq","max.rsq","ll","aic","bic","scale","ubre","rho","mills","sigma2","ser","f","theta","chi2","wald","lr","logrank","null.dev","res.dev") # list of statistic codes that are acceptable; compared against tolower()-ed user input
if (is.character(omit.stat)) {
is.acceptable <- unique(tolower(omit.stat) %in% omit.stat.acceptable)
if (length(is.acceptable)>1) { is.acceptable <- FALSE }
if (!is.acceptable) { error.present <- c(error.present, "% Error: Unknown statistic in 'omit.stat' argument.\n") }
}
if ((!is.character(omit.summary.stat)) && (!is.null(omit.summary.stat))) { error.present <- c(error.present, "% Error: Argument 'omit.summary.stat' must be NULL (default), or a vector of type 'character.'\n") }
omit.summary.stat.acceptable <- c("n","mean","sd","min","p25","median","p75","max")
if (is.character(omit.summary.stat)) {
is.acceptable <- unique(tolower(omit.summary.stat) %in% omit.summary.stat.acceptable)
if (length(is.acceptable)>1) { is.acceptable <- FALSE }
if (!is.acceptable) { error.present <- c(error.present, "% Error: Unknown statistic in 'omit.summary.stat' argument.\n") }
}
if ((!is.character(omit.yes.no)) && (!is.null(omit.yes.no))) { error.present <- c(error.present, "% Error: Argument 'omit.yes.no' must be a vector of type 'character.'\n") }
if ((length(omit.yes.no) != 2) && (!is.null(omit.yes.no))) { error.present <- c(error.present, "% Error: Argument 'omit.yes.no' must be of length 2.'\n") }
if ((!is.character(order)) && (!is.numeric(order)) && (!is.null(order))) { error.present <- c(error.present, "% Error: Argument 'order' must be NULL (default; no reordering), or a vector of type 'character' or 'numeric.'\n") }
if (!is.logical(ord.intercepts)) { error.present <- c(error.present, "% Error: Argument 'ord.intercepts' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(ord.intercepts) != 1) { error.present <- c(error.present, "% Error: Argument 'ord.intercepts' must be of length 1.'\n") }
if (!is.logical(perl)) { error.present <- c(error.present, "% Error: Argument 'perl' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(perl) != 1) { error.present <- c(error.present, "% Error: Argument 'perl' must be of length 1.'\n") }
if (!(is.logical(colnames)) && (!is.null(colnames))) { error.present <- c(error.present, "% Error: Argument 'colnames' must be NULL, or of type 'logical' (TRUE/FALSE) \n") }
if ((length(colnames) != 1) && (!is.null(colnames))) { error.present <- c(error.present, "% Error: Argument 'colnames' must be of length 1.'\n") }
if (!(is.logical(rownames)) && (!is.null(rownames))) { error.present <- c(error.present, "% Error: Argument 'rownames' must be NULL, or of type 'logical' (TRUE/FALSE) \n") }
if ((length(rownames) != 1) && (!is.null(rownames))) { error.present <- c(error.present, "% Error: Argument 'rownames' must be of length 1.'\n") }
if (!is.character(rq.se)) { error.present <- c(error.present, "% Error: Argument 'rq.se' must be of type 'character.' \n") }
if (length(rq.se) != 1) { error.present <- c(error.present, "% Error: Argument 'rq.se' must be of length 1.'\n") }
if (is.character(rq.se)) {
if (!(rq.se %in% c("iid", "nid", "ker", "boot"))) { error.present <- c(error.present, "% Error: Argument 'rq.se' must be one of: 'iid', 'nid', 'ker' or 'boot.' \n") }
}
if (!is.logical(selection.equation)) { error.present <- c(error.present, "% Error: Argument 'selection.equation' must be of type 'logical' (TRUE/FALSE) \n") }
if ((length(selection.equation) != 1) && (!is.null(selection.equation))) { error.present <- c(error.present, "% Error: Argument 'selection.equation' must be of length 1.'\n") }
if (!is.logical(single.row)) { error.present <- c(error.present, "% Error: Argument 'single.row' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(single.row) != 1) { error.present <- c(error.present, "% Error: Argument 'single.row' must be of length 1.'\n") }
if ((!is.character(star.char)) && (!is.null(star.char))) { error.present <- c(error.present, "% Error: Argument 'star.char' must be NULL (default), or of type 'character.'\n") }
if ((!(length(star.char) >= 1)) && (!is.null(star.char))) { error.present <- c(error.present, "% Error: Argument 'star.char' must be at least of length 1.'\n") }
if (!is.null(star.cutoffs)) {
if (sum(is.na(star.cutoffs)) != length(star.cutoffs)) {
if (!is.numeric(star.cutoffs)) { error.present <- c(error.present, "% Error: Argument 'star.cutoffs' must be NULL (default), or a vector of type 'numeric.'\n") }
}
if ( !(length(star.cutoffs) >= 1) && (!is.null(star.cutoffs))) { error.present <- c(error.present, "% Error: Argument 'star.cutoffs' must be a vector with at least one element.\n") }
if (sum(star.cutoffs[!is.na(star.cutoffs)] == sort(star.cutoffs, decreasing = TRUE, na.last=NA)) != length(star.cutoffs[!is.na(star.cutoffs)])) { error.present <- c(error.present, "% Error: The elements of 'star.cutoffs' must be in weakly decreasing order.\n") }
}
if ((!is.character(summary.stat)) && (!is.null(summary.stat))) { error.present <- c(error.present, "% Error: Argument 'summary.stat' must be NULL (default), or a vector of type 'character.'\n") }
summary.stat.acceptable <- c("n","mean","sd","min","p25","median","p75","max") # list of statistic codes that are acceptable
if (is.character(summary.stat)) {
is.acceptable <- unique(tolower(summary.stat) %in% summary.stat.acceptable)
if (length(is.acceptable)>1) { is.acceptable <- FALSE }
if (!is.acceptable) { error.present <- c(error.present, "% Error: Unknown statistic in 'summary.stat' argument.\n") }
}
if ((!is.character(table.layout)) && (!is.null(table.layout))) { error.present <- c(error.present, "% Error: Argument 'table.layout' must be of type 'character.'\n") }
if ((length(table.layout) != 1) && (!is.null(table.layout))) { error.present <- c(error.present, "% Error: Argument 'table.layout' must be of length 1.'\n") }
if (is.character(table.layout) && (length(table.layout)==1)) { # test if table.layout only contains allowed letters
layout.error <- FALSE
for (i in 1:nchar(table.layout)) {
ch <- substring(table.layout,i,i)
if (!(ch %in% c("=","-","!","l","d","m","c","#","b","t","o","a","s","n"))) (layout.error <- TRUE)
}
if (layout.error) { error.present <- c(error.present, "% Error: Invalid characters in 'table.layout'. See package documentation. \n") }
}
if ((!is.character(omit.table.layout)) && (!is.null(omit.table.layout))) { error.present <- c(error.present, "% Error: Argument 'omit.table.layout' must be of type 'character.'\n") }
if ((length(omit.table.layout) != 1) && (!is.null(omit.table.layout))) { error.present <- c(error.present, "% Error: Argument 'omit.table.layout' must be of length 1.'\n") }
if (is.character(omit.table.layout) && (length(omit.table.layout)==1)) { # test if omit.table.layout only contains allowed letters
layout.error <- FALSE
for (i in 1:nchar(omit.table.layout)) {
ch <- substring(omit.table.layout,i,i)
if (!(ch %in% c("=","-","!","l","d","m","c","#","b","t","o","a","s","n"))) (layout.error <- TRUE)
}
if (layout.error) { error.present <- c(error.present, "% Error: Invalid characters in 'omit.table.layout'. See package documentation. \n") }
}
if (!is.character(table.placement)) { error.present <- c(error.present, "% Error: Argument 'table.placement' must be of type 'character.'\n") }
if (length(table.placement) != 1) { error.present <- c(error.present, "% Error: Argument 'table.placement' must be of length 1.'\n") }
if (is.character(table.placement) && (length(table.placement)==1)) { # test if table.placement only contains allowed letters
tp.error <- FALSE
for (i in 1:nchar(table.placement)) {
ch <- substring(table.placement,i,i)
if (!(ch %in% c("h","t","b","p","!","H"))) (tp.error <- TRUE)
}
if (tp.error) { error.present <- c(error.present, "% Error: Argument 'table.placement' can only consist of \"h\",\"t\",\"b\",\"p\",\"!\",\"H\".\n") }
}
if ((!is.character(report)) && (!is.null(report))) { error.present <- c(error.present, "% Error: Argument 'report' must be of type 'character.'\n") }
if ((length(report) != 1) && (!is.null(report))) { error.present <- c(error.present, "% Error: Argument 'report' must be of length 1.'\n") }
if (is.character(report) && (length(report)==1)) { # test if report only contains allowed letters
report.error <- FALSE
for (i in 1:nchar(report)) {
ch <- substring(report,i,i)
if (!(ch %in% c("v","c","s","t","p","*"))) (report.error <- TRUE)
}
if (report.error) { error.present <- c(error.present, "% Error: Argument 'report' can only consist of \"v\",\"c\",\"s\",\"t\",\"p\",\"*\".\n") }
}
if (!is.logical(zero.component)) { error.present <- c(error.present, "% Error: Argument 'zero.component' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(zero.component) != 1) { error.present <- c(error.present, "% Error: Argument 'zero.component' must be of length 1.'\n") }
if (!is.logical(summary.logical)) { error.present <- c(error.present, "% Error: Argument 'summary.logical' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(summary.logical) != 1) { error.present <- c(error.present, "% Error: Argument 'summary.logical' must be of length 1.'\n") }
if (!is.logical(nobs)) { error.present <- c(error.present, "% Error: Argument 'nobs' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(nobs) != 1) { error.present <- c(error.present, "% Error: Argument 'nobs' must be of length 1.'\n") }
if (!is.logical(mean.sd)) { error.present <- c(error.present, "% Error: Argument 'mean.sd' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(mean.sd) != 1) { error.present <- c(error.present, "% Error: Argument 'mean.sd' must be of length 1.'\n") }
if (!is.logical(min.max)) { error.present <- c(error.present, "% Error: Argument 'min.max' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(min.max) != 1) { error.present <- c(error.present, "% Error: Argument 'min.max' must be of length 1.'\n") }
if (!is.logical(median)) { error.present <- c(error.present, "% Error: Argument 'median' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(median) != 1) { error.present <- c(error.present, "% Error: Argument 'median' must be of length 1.'\n") }
if (!is.logical(iqr)) { error.present <- c(error.present, "% Error: Argument 'iqr' must be of type 'logical' (TRUE/FALSE) \n") }
if (length(iqr) != 1) { error.present <- c(error.present, "% Error: Argument 'iqr' must be of length 1.'\n") }
## decide what style to use: start with the full default settings, then adjust them based on the desired journal style
# initialize pseudo-global variables at NULL
.summary.object <- NULL
.global.dependent.variables.written <- NULL
.global.coefficients <- NULL
.format.model.left <- NULL
.format.model.right <- NULL
.which.variable.label <- NULL
.return.value <- NULL
.publish.horizontal.line <- NULL
.table.part.published <- NULL
.format.omit.table <- NULL
# info about the package and author
.global.package.name <- "stargazer"
.global.package.version <- "5.2.2"
.global.package.author.name <- "Marek Hlavac"
.global.package.author.affiliation <- "Harvard University"
.global.package.author.email <- "hlavac at fas.harvard.edu"
# statistics (.global variables)
.global.formulas.rhs <- NULL
.global.models <- NULL
.global.dependent.variables <- NULL
.global.coefficient.variables <- NULL
.global.coef.vars.by.model <- NULL ## list of coefficient variables by model - to be used by omit, omit.labels, etc
.global.std.errors <- NULL
.global.ci.lb <- NULL
.global.ci.rb <- NULL
.global.t.stats <- NULL
.global.p.values <- NULL
.global.N <- NULL
.global.LL <- NULL
.global.R2 <- NULL
.global.mills <- NULL
.global.max.R2 <- NULL # maximum possible R2
.global.adj.R2 <- NULL
.global.AIC <- NULL
.global.BIC <- NULL
.global.scale <- NULL # estimated scale parameter (gee)
.global.UBRE <- NULL # UBRE score (GAM)
.global.sigma2 <- NULL # sigma2 from arima
.global.theta <- NULL # theta from negative binomial
.global.rho <- NULL
.global.sel.equation <- NULL # selection equation, as opposed to the default outcome equation, in heckit and selection models
.global.zero.component <- NULL # zero, as opposed to count, component in hurdle and zeroinfl
# with degrees of freedom
.global.SER <- NULL # residual standard error; standard error of the regression
.global.F.stat <- NULL # F-statistic for the regression
.global.chi.stat <- NULL # chi-squared statistic
.global.wald.stat <- NULL # Wald test statistic (for coxph)
.global.lr.stat <- NULL # LR test statistic (for coxph)
.global.logrank.stat <- NULL # Score (logrank) test (for coxph)
.global.null.deviance <- NULL
.global.residual.deviance <- NULL
# intercept strings
.global.intercept.strings <- c("(Intercept)", "(intercept)","Intercept")
# .formatting: Default
.format.space.size <- "-1.8ex"
.format.dependent.variable.text <- "\\textit{Dependent variable:}"
.format.dependent.variable.text.underline <- TRUE
.format.dependent.variable.text.on <- TRUE
.format.dep.var.labels <- NULL
.format.covariate.labels <- NULL
.format.add.lines <- NULL
.format.dependent.variables.text <- ""
.format.underline.dependent.variables <- TRUE
.format.dependent.variables.left <- ""
.format.dependent.variables.right <- ""
.format.dependent.variables.capitalize <- FALSE
.format.ordered.intercepts <- TRUE
# column labels
.format.column.left <- ""
.format.column.right <- ""
# model numbers
.format.model.numbers <- TRUE
# common headers for multiple columns?
.format.multicolumn <- TRUE
# names for models
.format.model.names.include <- TRUE
.format.model.names <- NULL
.format.model.names <- cbind(c("aov","ANOVA",""), c("arima","ARIMA",""), c("Arima","ARIMA",""), c("blogit","bivariate","logistic"))
.format.model.names <- cbind(.format.model.names, c("bprobit","bivariate","probit"), c("betareg", "beta",""), c("chopit","compound hierarchical","ordered probit"))
.format.model.names <- cbind(.format.model.names, c("clm","cumulative","link"), c("censReg", "censored", "regression"), c("cloglog.net","network compl.","log log"), c("clogit","conditional","logistic"), c("coxph","Cox","prop. hazards"))
.format.model.names <- cbind(.format.model.names, c("dynlm","dynamic","linear"), c("lagsarlm","spatial","autoregressive"), c("errorsarlm","spatial","error"))
.format.model.names <- cbind(.format.model.names, c("ei.dynamic","Quinn dynamic","ecological inference"), c("ei.hier","$2 \times 2$ hierarchical","ecological inference"))
.format.model.names <- cbind(.format.model.names, c("ei.RxC","hierarchical multinominal-Dirichlet","ecological inference"), c("exp","exponential",""), c("ergm","exponential family","random graph"))
.format.model.names <- cbind(.format.model.names, c("factor.bayes","Bayesian","factor analysis"), c("factor.mix","mixed data","factor analysis"))
.format.model.names <- cbind(.format.model.names, c("factor.ord","ordinal data","factor analysis"), c("fGARCH","GARCH",""), c("gamma","gamma",""))
.format.model.names <- cbind(.format.model.names, c("gamma.gee","gamma generalized","estimating equation"), c("gamma.mixed","mixed effects","gamma"))
.format.model.names <- cbind(.format.model.names, c("gamma.net","network","gamma"), c("gamma.survey","survey-weighted","gamma"), c("glmrob","robust","GLM"), c("gls","generalized","least squares"))
.format.model.names <- cbind(.format.model.names, c("gmm","GMM",""), c("rem.dyad", "relational", "event (dyadic)"))
.format.model.names <- cbind(.format.model.names, c("irt1d","IRT","(1-dim.)"), c("irtkd","IRT","(k-dim.)"))
.format.model.names <- cbind(.format.model.names, c("logit","logistic",""), c("logit.bayes","Bayesian","logistic"))
.format.model.names <- cbind(.format.model.names, c("logit.gam","GAM","(logistic)"), c("logit.gee","logistic generalized","estimating equation"))
.format.model.names <- cbind(.format.model.names, c("logit.mixed","mixed effects","logistic"), c("logit.net","network","logistic"))
.format.model.names <- cbind(.format.model.names, c("logit.survey","survey-weighted","logistic"), c("lognorm","log-normal",""))
.format.model.names <- cbind(.format.model.names, c("lmer","linear","mixed-effects"), c("glmer","generalized linear","mixed-effects"), c("nlmer","non-linear","mixed-effects"))
.format.model.names <- cbind(.format.model.names, c("ls","OLS",""), c("ls.mixed","mixed effect","linear"), c("lme","linear","mixed effects"), c("lmrob","MM-type","linear"))
.format.model.names <- cbind(.format.model.names, c("ls.net","network","least squares"), c("mlogit","multinomial","logistic"), c("mnlogit","multinomial","logit"))
.format.model.names <- cbind(.format.model.names, c("mlogit.bayes","Bayesian","multinomial logistic"), c("negbin","negative","binomial"), c("normal","normal",""))
.format.model.names <- cbind(.format.model.names, c("multinom","multinomial log-linear","(neural networks)"), c("nlme","non-linear","mixed effects"))
.format.model.names <- cbind(.format.model.names, c("normal.bayes","Bayesian","normal"), c("normal.gam","GAM","(continuous)"))
.format.model.names <- cbind(.format.model.names, c("normal.gee","normal generalized","estimating equation"), c("normal.net","network","normal"))
.format.model.names <- cbind(.format.model.names, c("normal.survey","survey-weighted","normal"), c("ologit","ordered","logistic"))
.format.model.names <- cbind(.format.model.names, c("oprobit","ordered","probit"), c("oprobit.bayes","Bayesian","ordered probit"))
.format.model.names <- cbind(.format.model.names, c("pmg","mean","groups"), c("poisson","Poisson",""), c("poisson.bayes","Bayesian","Poisson"))
.format.model.names <- cbind(.format.model.names, c("poisson.gam","GAM","(count)"), c("poisson.mixed","mixed effects","Poisson"))
.format.model.names <- cbind(.format.model.names, c("poisson.survey","survey-weighted","Poisson"), c("poisson.gee","Poisson generalized","estimation equation"))
.format.model.names <- cbind(.format.model.names, c("probit","probit",""), c("probit.bayes","Bayesian","probit"))
.format.model.names <- cbind(.format.model.names, c("probit.gam","GAM","(probit)"), c("probit.gee","probit generalized","estimating equation"))
.format.model.names <- cbind(.format.model.names, c("probit.mixed","mixed effects","probit"), c("probit.net","network","probit"))
.format.model.names <- cbind(.format.model.names, c("probit.survey","survey-weighted","probit"), c("relogit","rare events","logistic"))
.format.model.names <- cbind(.format.model.names, c("rq","quantile","regression"))
.format.model.names <- cbind(.format.model.names, c("rlm","robust","linear"), c("sur","SUR",""), c("threesls","3SLS",""))
.format.model.names <- cbind(.format.model.names, c("tobit","Tobit",""), c("tobit(AER)","Tobit",""), c("tobit.bayes","Bayesian","Tobit"))
.format.model.names <- cbind(.format.model.names, c("twosls","2SLS",""), c("weibull","Weibull",""))
.format.model.names <- cbind(.format.model.names, c("zeroinfl","zero-inflated","count data"), c("hurdle","hurdle",""))
.format.model.names <- cbind(.format.model.names, c("plm","panel","linear"), c("pgmm","panel","GMM"), c("ivreg","instrumental","variable"))
.format.model.names <- cbind(.format.model.names, c("coxreg","Cox",""), c("mlreg","ML","prop. hazards"), c("weibreg","Weibull",""))
.format.model.names <- cbind(.format.model.names, c("aftreg","accelerated"," failure time"), c("phreg","parametric","prop. hazards"))
.format.model.names <- cbind(.format.model.names, c("bj","Buckley-James",""), c("cph","Cox",""), c("Gls","generalized","least squares"), c("lrm","logistic",""))
.format.model.names <- cbind(.format.model.names, c("ols","OLS",""), c("psm","parametric","survival"), c("Rq","quantile","regression"))
.format.model.names <- cbind(.format.model.names, c("hetglm","heteroskedastic","GLM"), c("coeftest","coefficient","test"))
.format.model.names <- cbind(.format.model.names, c("heckit","Heckman","selection"), c("selection","selection",""))
.format.model.names <- cbind(.format.model.names, c("probit.ss","probit",""), c("binaryChoice","binary","choice"))
.format.model.names <- cbind(.format.model.names, c("brglm","GLM","(bias reduction)"), c("maBina","binary model","(marginal effect)"))
.format.model.names <- cbind(.format.model.names, c("mclogit","mixed","conditional logit"))
# if, say, a glm() does not correspond to one of the pre-defined models, report its family and link instead
.format.model.function <- TRUE
.format.model.family <- ""
.format.model.dist <- ""
.format.model.link <- "link = "
## names for journal/output styles
# economics
.journal.style.names <- cbind(c("aer","American Economic Review"), c("qje","Quarterly Journal of Economics"), c("econometrica","Econometrica"))
.journal.style.names <- cbind(.journal.style.names, c("jpe","Journal of Political Economy"), c("jel","Journal of Economic Literature"))
.journal.style.names <- cbind(.journal.style.names, c("jep","Journal of Economic Perspestives"))
.format.coefficient.variables.capitalize <- FALSE
.format.coefficient.variables.left <- ""
.format.coefficient.variables.right <- ""
.format.coefficient.table.parts <- c("variable name","coefficient*","standard error"," ")
## .formatting of numeric output
# keep initial zeros?
.format.initial.zero <- TRUE
# if all zeros, keep going until you find a non-zero digit
.format.until.nonzero.digit <- TRUE
.format.max.extra.digits <- 2
## thresholds for the stars
.format.stars <- "*"
.format.cutoffs <- c(0.1, 0.05, 0.01)
.format.std.errors.left <- "("
.format.std.errors.right <- ")"
.format.p.values.left <- "["
.format.p.values.right <- "]"
.format.t.stats.left <- "t = "
.format.t.stats.right <- ""
.format.models.text <- ""
.format.models.left <- "\\textit{"
.format.models.right <- "}"
.format.underline.models <- FALSE
.format.models.skip.if.one <- TRUE # skip models section if only one model in table?
.format.object.names <- FALSE
.format.numbers.text <- ""
.format.numbers.left <- "("
.format.numbers.right <- ")"
.format.numbers.roman <- FALSE
.format.digit.separator.where <- c(3) # how 'often' to separate digits (e.g., thousands separator = 3)
.format.digit.separator <- ","
.format.ci.separator <- ", "
.format.round.digits <- 3
# for decimal comma use: .format.decimal.character <- "{,}"
.format.decimal.character <- "."
.format.dec.mark.align <- FALSE
# degrees of freedom - report or not?
.format.df <- TRUE
.format.table.parts <- c("=!","dependent variable label","dependent variables","models","colums","numbers","objects","-","coefficients","-","omit","-","additional","N","R-squared","adjusted R-squared","max R-squared","log likelihood","scale","sigma2","theta(se)*", "AIC","BIC","UBRE","rho(se)*","Mills(se)*", "SER(df)","F statistic(df)*(p)","chi2(df)*(p)","Wald(df)*(p)","LR(df)*(p)","logrank(df)*(p)","null deviance(df)","residual deviance(df)","=!","notes")
.format.omit.regexp <- NULL
.format.omit.labels <- NULL
.format.omit.yes <- "Yes"
.format.omit.no <- "No"
.format.keep.regexp <- NULL
.format.N <- "Observations"
.format.LL <- "Log Likelihood"
.format.R2 <- "R$^{2}$"
.format.max.R2 <- "Max. Possible R$^{2}$"
.format.adj.R2 <- "Adjusted R$^{2}$"
.format.scale <- "Scale Parameter"
.format.UBRE <- "UBRE"
.format.rho <- "$\\rho$"
.format.mills <- "Inverse Mills Ratio"
.format.AIC <- "Akaike Inf. Crit."
.format.BIC <- "Bayesian Inf. Crit."
.format.sigma2 <- "$\\sigma^{2}$"
.format.theta <- "$\\theta$"
.format.SER <- "Residual Std. Error"
.format.F.stat <- "F Statistic"
.format.chi.stat <- "$\\chi^{2}$"
.format.wald.stat <- "Wald Test"
.format.lr.stat <- "LR Test"
.format.logrank.stat <- "Score (Logrank) Test"
.format.null.deviance <- "Null Deviance"
.format.residual.deviance <- "Residual Deviance"
.format.df.left <- "(df = "
.format.df.right <- ")"
.format.df.separator <- "; "
.format.intelligent.df <- TRUE
# delimiters for se, t-stats and p-values attached to statistics at the bottom of the table
.format.se.left <- " ("
.format.se.right <- ")"
.format.tstat.left <- " (z = "
.format.tstat.right <- ")"
.format.p.value.left <- "["
.format.p.value.right <- "]"
.format.intercept.name <- "Constant"
.format.intercept.bottom <- TRUE
.format.note <- "\\textit{Note:} "
.format.note.alignment <- "r"
.format.note.content <- c("$^{*}$p$<$[0.*]; $^{**}$p$<$[0.**]; $^{***}$p$<$[0.***]")
#### summary statistic table
.format.s.statistics.names <- cbind(c("n","N"), c("nmiss","missing"), c("mean","Mean"), c("sd","St. Dev."), c("median","Median"), c("min","Min"), c("max","Max"), c("mad","Median Abs. Dev."), c("p","Pctl(!)"))
.format.s.stat.parts <- c("=!","stat names","-","statistics1","-!","notes")
.format.s.statistics.list <- c("n","mean","sd","min","p25","median","p75","max")
.format.s.statistics.names.left <- ""
.format.s.statistics.names.right <- ""
.format.s.statistics.names.label <- "Statistic"
.format.s.coefficient.variables.capitalize <- FALSE
.format.s.coefficient.variables.left <- ""
.format.s.coefficient.variables.right <- ""
.format.s.round.digits <- 3
.format.s.note <- ""
.format.s.note.alignment <- "l"
.format.s.note.content <- NULL
####
.adjust.settings.style(style)
# continue only if no errors
if (length(error.present) == 1) {
# summary statistic table or regular table of data frame contents
if (!is.null(summary)) {
# make sure summary is as long as the number of objects
if (length(summary) > how.many.objects) { summary <- summary[1:how.many.objects] }
if (length(summary) < how.many.objects) { length(summary) <- how.many.objects }
# fill in values of summary; if NA, keep the default
for (i in 1:how.many.objects) {
if (!is.na(summary[i])) {
.global.summary[i] <- summary[i]
}
else if (i > 1) { # if NA, fill in the previous (already resolved) value
.global.summary[i] <- .global.summary[i-1]
}
}
}
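# Illustrative example: with three objects and summary = c(TRUE, NA), the vector
# is first padded with NA to length 3 and each NA then inherits the previous
# resolved value, so .global.summary becomes c(TRUE, TRUE, TRUE).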
## use formatting arguments
# header with name, version, etc.
.format.header <- header
# no empty lines? single row for coefficient and std.error/CI?
.format.single.row <- single.row
if (.format.single.row == TRUE) { .format.no.space <- TRUE }
else { .format.no.space <- FALSE }
if (!is.null(no.space)) { .format.no.space <- no.space }
# font size
.format.font.size <- font.size
# floating, floating environment, etc.
.format.floating <- float
.format.floating.environment <- float.env
.format.table.placement <- table.placement
.format.column.sep.width <- column.sep.width
# if not case-sensitive, transfer to lower case
if (!is.null(digit.separate)) { digit.separate <- tolower(digit.separate) }
# report df?
.format.df <- df
if (.format.df == FALSE) {
.format.table.parts <- gsub("(df)", "", .format.table.parts, fixed=TRUE)
}
# column, dependent variable and covariate labels
.format.column.labels <- column.labels
.format.column.separate <- column.separate
.format.covariate.labels <- covariate.labels
.format.dep.var.labels <- dep.var.labels
.format.add.lines <- add.lines
if (dep.var.labels.include == FALSE) {
.format.table.parts <- .format.table.parts[.format.table.parts!="dependent variables"]
}
if (!is.null(dep.var.caption)) {
if (dep.var.caption == "") {
.format.table.parts <- .format.table.parts[.format.table.parts!="dependent variable label"]
}
else {
.format.dependent.variable.text <- dep.var.caption
}
}
# confidence intervals
.format.ci <- ci
.format.ci.level <- ci.level
if (!is.null(ci.separator)) { .format.ci.separator <- ci.separator }
if (!is.null(ci.custom)) { .format.ci <- TRUE }
# omit
.format.omit.regexp <- omit
.format.omit.index <- omit
if (is.character(omit)) { .format.omit.index <- NULL }
if (is.numeric(omit)) { .format.omit.regexp <- NULL }
.format.omit.labels <- omit.labels
if (!is.null(omit.yes.no)) {
.format.omit.yes <- omit.yes.no[1]
.format.omit.no <- omit.yes.no[2]
}
# keep
.format.keep.regexp <- keep
.format.keep.index <- keep
if (is.character(keep)) { .format.keep.index <- NULL }
if (is.numeric(keep)) { .format.keep.regexp <- NULL }
# remove omitted statistics from table parts
if (!is.null(omit.stat)) {
.lower.omit.stat <- tolower(omit.stat) # make it all lower-case
if ("all" %in% .lower.omit.stat) { .lower.omit.stat <- omit.stat.acceptable }
if ("n" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="N"] }
if ("rsq" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="R-squared"] }
if ("adj.rsq" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="adjusted R-squared"] }
if ("max.rsq" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="max R-squared"] }
if ("ll" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="log likelihood"] }
if ("scale" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="scale"] }
if ("sigma2" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="sigma2"] }
if ("theta" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,5)!="theta"] }
if ("aic" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="AIC"] }
if ("bic" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="BIC"] }
if ("ubre" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="UBRE"] }
if ("rho" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,3)!="rho"] }
if ("mills" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,5)!="Mills"] }
if ("ser" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,3)!="SER"] }
if ("f" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,11)!="F statistic"] }
if ("chi2" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,4)!="chi2"] }
if ("wald" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,4)!="Wald"] }
if ("lr" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,2)!="LR"] }
if ("logrank" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,7)!="logrank"] }
if ("null.dev" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,13)!="null deviance"] }
if ("res.dev" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,17)!="residual deviance"] }
}
# keep statistics in the table
if (!is.null(keep.stat)) {
.lower.keep.stat <- tolower(keep.stat) # make it all lower-case
# do this by omitting everything except what you keep
.lower.omit.stat <- c("n","rsq","adj.rsq","max.rsq","ll","aic","bic","scale","ubre","rho","Mills","sigma2","ser","f","theta","chi2","wald","lr","logrank","null.dev","res.dev")
.lower.omit.stat <- .lower.omit.stat[!(.lower.omit.stat %in% .lower.keep.stat) ]
if ("n" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="N"] }
if ("rsq" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="R-squared"] }
if ("adj.rsq" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="adjusted R-squared"] }
if ("max.rsq" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="max R-squared"] }
if ("ll" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="log likelihood"] }
if ("scale" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="scale"] }
if ("sigma2" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="sigma2"] }
if ("theta" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,5)!="theta"] }
if ("aic" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="AIC"] }
if ("bic" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="BIC"] }
if ("ubre" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[.format.table.parts!="UBRE"] }
if ("rho" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,3)!="rho"] }
if ("mills" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,5)!="Mills"] }
if ("ser" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,3)!="SER"] }
if ("f" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,11)!="F statistic"] }
if ("chi2" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,4)!="chi2"] }
if ("wald" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,4)!="Wald"] }
if ("lr" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,2)!="LR"] }
if ("logrank" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,7)!="logrank"] }
if ("null.dev" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,13)!="null deviance"] }
if ("res.dev" %in% .lower.omit.stat) { .format.table.parts <- .format.table.parts[substr(.format.table.parts,1,17)!="residual deviance"] }
}
# keep statistics in table parts
if (!is.null(keep.stat)) {
.lower.keep.stat <- tolower(keep.stat) # make it all lower-case
keep.stat.acceptable <- c("all","n","rsq","adj.rsq","max.rsq","ll","aic","bic","scale","ubre","rho","mills","sigma2","ser","f","theta","chi2","wald","lr","logrank","null.dev","res.dev") # list of statistic codes that are acceptable
remove.stats <- keep.stat.acceptable[!(keep.stat.acceptable %in% .lower.keep.stat)]
.format.table.parts <- .format.table.parts[!(.format.table.parts %in% remove.stats)]
}
# digits, initial.zeros, decimal characters
if (!is.null(decimal.mark)) { .format.decimal.character <- decimal.mark }
if (!is.null(align)) { .format.dec.mark.align <- align }
if (!is.null(digit.separator)) { .format.digit.separator <- digit.separator }
if (!is.null(initial.zero)) { .format.initial.zero <- initial.zero }
if (!is.null(digit.separate)) {
if (digit.separate=="lakh") { .format.digit.separator.where <- c(3,2) } # lakhs
else if ((digit.separate=="china") || (digit.separate=="japan")) { .format.digit.separator.where <- 4 }
else { .format.digit.separator.where <- digit.separate}
}
if (!is.null(digits)) {
.format.round.digits <- digits
.format.s.round.digits <- digits
}
if (!is.null(digits.extra)) {
.format.max.extra.digits <- digits.extra
if (digits.extra>=1) { .format.until.nonzero.digit <- TRUE }
    else { .format.until.nonzero.digit <- FALSE }
}
# intercept top and bottom
if (!is.null(intercept.top)) { .format.intercept.top <- intercept.top }
if (!is.null(intercept.bottom)) { .format.intercept.bottom <- intercept.bottom }
# model names, numbers and multicolumn
if (!is.null(model.names)) {
.format.model.names.include <- model.names
if (model.names == TRUE) { .format.models.skip.if.one <- FALSE }
}
if (!is.null(model.numbers)) { .format.model.numbers <- model.numbers }
.format.multicolumn <- multicolumn
# object names
.format.object.names <- object.names
# report coefs, std errs, t, p?
if (!is.null(report)) {
.format.coefficient.table.parts <- NULL
for (i in 1:nchar(report)) {
component.letter <- substr(report, i, i)
if (component.letter == "v") { .format.coefficient.table.parts <- append(.format.coefficient.table.parts, "variable name") }
if (component.letter == "c") { .format.coefficient.table.parts <- append(.format.coefficient.table.parts, "coefficient") }
if (component.letter == "s") { .format.coefficient.table.parts <- append(.format.coefficient.table.parts, "standard error") }
if (component.letter == "t") { .format.coefficient.table.parts <- append(.format.coefficient.table.parts, "t-stat") }
if (component.letter == "p") { .format.coefficient.table.parts <- append(.format.coefficient.table.parts, "p-value") }
if ((component.letter == "*") && (i > 1)) {
l <- length(.format.coefficient.table.parts)
if ((.format.coefficient.table.parts[l] != "variable name") && (substr(report,i-1,i-1) != "*")) {
.format.coefficient.table.parts[l] <- paste(.format.coefficient.table.parts[l],"*",sep="")
}
}
}
.format.coefficient.table.parts <- append(.format.coefficient.table.parts, " ")
}
# significance stars
if (!is.null(star.cutoffs)) {
# assign cutoff values
.format.cutoffs <- star.cutoffs
}
if (!is.null(star.char)) {
.format.stars <- star.char
}
for (i in 1:length(.format.cutoffs)) {
if (is.na(.format.stars[i])) {
.format.stars[i] <- paste(rep(.format.stars[1], i), sep="", collapse="")
}
}
.format.stars <- .format.stars[1:length(.format.cutoffs)]
# selection equation
.global.sel.equation <- selection.equation
# colnames and rownames
if (!is.null(rownames)) { .format.rownames <- rownames }
if (!is.null(colnames)) { .format.colnames <- colnames }
# zero vs. count component
.global.zero.component <- zero.component
# notes
replace.dec.mark <- function(s) { return (gsub(".", .format.decimal.character, s, fixed=TRUE))}
# replace star cutoffs in the notes section
for (i in 1:length(.format.cutoffs)) {
if (!is.na(.format.stars[i])) {
star.string <- paste(rep("*", i), sep="", collapse="")
.format.note.content <- gsub(paste("[.",star.string,"]",sep=""), replace.dec.mark(gsub("^[0]+", "",.format.cutoffs[i])), .format.note.content, fixed=TRUE)
.format.note.content <- gsub(paste("[0.",star.string,"]",sep=""), replace.dec.mark(.format.cutoffs[i]), .format.note.content, fixed=TRUE)
.format.note.content <- gsub(paste("[",star.string,"]",sep=""), replace.dec.mark(.format.cutoffs[i]*100), .format.note.content, fixed=TRUE)
}
}
if (!is.null(notes)) {
if (notes.append == TRUE) {
.format.note.content <- c(.format.note.content, notes)
.format.s.note.content <- c(.format.s.note.content, notes)
}
else {
.format.note.content <- notes
.format.s.note.content <- notes
}
}
if (!is.null(notes.align)) {
.format.note.alignment <- notes.align
.format.s.note.alignment <- notes.align
}
if (!is.null(notes.label)) {
.format.note <- notes.label
.format.s.note <- notes.label
}
# ordered probit/logit, etc. - report intercepts?
.format.ordered.intercepts <- ord.intercepts
# perl-compatible regular expressions?
.format.perl <- perl
# standard error for quantile regression
.format.rq.se <- rq.se
# report logical variables in summary statistics tables?
.format.summ.logical <- summary.logical
# summary statistics - what statistics to report - !!! this needs to come before summary.stat and omit.summary.stat
if (!nobs) { .format.s.statistics.list <- .format.s.statistics.list[.format.s.statistics.list!="n"] }
if (!mean.sd) { .format.s.statistics.list <- .format.s.statistics.list[(.format.s.statistics.list!="mean")&(.format.s.statistics.list!="sd")]}   # '&' (vectorized), not '&&'
if (!min.max) { .format.s.statistics.list <- .format.s.statistics.list[(.format.s.statistics.list!="min")&(.format.s.statistics.list!="max")]}
if (!median) { .format.s.statistics.list <- .format.s.statistics.list[.format.s.statistics.list!="median"] }
if (!iqr) { .format.s.statistics.list <- .format.s.statistics.list[(.format.s.statistics.list!="p25")&(.format.s.statistics.list!="p75")]}
# keep summary statistics
if (!is.null(summary.stat)) {
.lower.keep.summary.stat <- tolower(summary.stat) # make it all lower-case
.format.s.statistics.list <- .lower.keep.summary.stat
}
# remove omitted statistics from table parts
if (!is.null(omit.summary.stat)) {
.lower.omit.summary.stat <- tolower(omit.summary.stat) # make it all lower-case
.format.s.statistics.list <- .format.s.statistics.list[!(.format.s.statistics.list %in% .lower.omit.summary.stat)]
}
# table layout
.format.table.parts.nonstat <- c("=","-","-!","=!","dependent variable label",
"dependent variables","models","columns","numbers",
"objects","coefficients","omit","additional","notes")
# these are the non-model statistics parts of the table
if (!is.null(table.layout)) {
.format.table.parts.new <- NULL
for (i in 1:nchar(table.layout)) {
component.letter <- substr(table.layout, i, i)
if (component.letter == "=") { .format.table.parts.new <- append(.format.table.parts.new, "=") }
if (component.letter == "-") { .format.table.parts.new <- append(.format.table.parts.new, "-") }
if ((component.letter == "!") && (i > 1)) {
if (.format.table.parts.new[i-1] %in% c("-","=")) {
.format.table.parts.new[i-1] <- paste(.format.table.parts.new[i-1], "!", sep="")
}
}
if (component.letter == "l") { .format.table.parts.new <- append(.format.table.parts.new, "dependent variable label") }
if (component.letter == "d") { .format.table.parts.new <- append(.format.table.parts.new, "dependent variables") }
if (component.letter == "m") {
.format.table.parts.new <- append(.format.table.parts.new, "models")
.format.model.names.include <- TRUE
}
if (component.letter == "c") { .format.table.parts.new <- append(.format.table.parts.new, "columns") }
if (component.letter == "#") {
.format.table.parts.new <- append(.format.table.parts.new, "numbers")
.format.model.numbers <- TRUE
}
if (component.letter == "b") {
.format.table.parts.new <- append(.format.table.parts.new, "objects")
.format.object.names <- TRUE
}
if (component.letter == "t") { .format.table.parts.new <- append(.format.table.parts.new, "coefficients") }
if (component.letter == "o") { .format.table.parts.new <- append(.format.table.parts.new, "omit") }
if (component.letter == "a") { .format.table.parts.new <- append(.format.table.parts.new, "additional") }
if (component.letter == "n") { .format.table.parts.new <- append(.format.table.parts.new, "notes") }
if (component.letter == "s") {
.format.table.parts.new <- append(.format.table.parts.new,
.format.table.parts[!(.format.table.parts %in% .format.table.parts.nonstat)])
}
}
.format.table.parts <- .format.table.parts.new
}
# now omit table parts
if (!is.null(omit.table.layout)) {
for (i in 1:nchar(omit.table.layout)) {
component.letter <- substr(omit.table.layout, i, i)
if (component.letter == "=") { .format.table.parts <- .format.table.parts[.format.table.parts!="="] }
if (component.letter == "-") { .format.table.parts <- .format.table.parts[.format.table.parts!="-"] }
if ((component.letter == "!") && (i > 1)) {
if (substr(omit.table.layout, i-1, i-1) == "=") { .format.table.parts <- .format.table.parts[.format.table.parts!="=!"] }
if (substr(omit.table.layout, i-1, i-1) == "-") { .format.table.parts <- .format.table.parts[.format.table.parts!="-!"] }
}
if (component.letter == "l") { .format.table.parts <- .format.table.parts[.format.table.parts!="dependent variable label"] }
if (component.letter == "d") { .format.table.parts <- .format.table.parts[.format.table.parts!="dependent variables"] }
if (component.letter == "m") { .format.table.parts <- .format.table.parts[.format.table.parts!="models"] }
if (component.letter == "c") { .format.table.parts <- .format.table.parts[.format.table.parts!="columns"] }
if (component.letter == "#") { .format.table.parts <- .format.table.parts[.format.table.parts!="numbers"] }
if (component.letter == "b") { .format.table.parts <- .format.table.parts[.format.table.parts!="objects"] }
if (component.letter == "t") { .format.table.parts <- .format.table.parts[.format.table.parts!="coefficients"] }
if (component.letter == "o") { .format.table.parts <- .format.table.parts[.format.table.parts!="omit"] }
if (component.letter == "a") { .format.table.parts <- .format.table.parts[.format.table.parts!="additional"] }
if (component.letter == "n") { .format.table.parts <- .format.table.parts[.format.table.parts!="notes"] }
if (component.letter == "s") { .format.table.parts <- .format.table.parts[.format.table.parts %in% .format.table.parts.nonstat] }
}
}
# intelligent division of regression tables vs. summary statistics tables
regression.table.objects <- NULL
number.of.table <- 0
title.table <- NULL
label.table <- NULL
for (i in seq(1:how.many.objects)) {
if (is.data.frame(objects[[i]])==TRUE) {
if (!is.null(regression.table.objects)) {
number.of.table <- number.of.table + 1 # allows for multiple table titles and labels
if (!is.na(title[number.of.table])) { .format.title <- title[number.of.table] }
else { .format.title <- title[length(title)] }
if (!is.na(label[number.of.table])) { .format.label <- label[number.of.table] }
else { .format.label <- label[length(label)] }
if (type == "latex") {
do.call(.stargazer.reg.table, as.list(objects[regression.table.objects]))
invisible.output <- latex.code <- c(invisible.output, invisible(capture.output(do.call(.stargazer.reg.table, as.list(objects[regression.table.objects])),file=NULL)) )
}
else if ((type == "text") || (type == "html") || (type == "mmd") ) {
latex.code <- c(latex.code, invisible(capture.output(do.call(.stargazer.reg.table, as.list(objects[regression.table.objects])),file=NULL)) )
}
}
number.of.table <- number.of.table + 1
if (!is.na(title[number.of.table])) { .format.title <- title[number.of.table] }
else { .format.title <- title[length(title)] }
if (!is.na(label[number.of.table])) { .format.label <- label[number.of.table] }
else { .format.label <- label[length(label)] }
if (.global.summary[i]==TRUE) {
if (type == "latex") {
.stargazer.summ.stat.table(objects[[i]])
invisible.output <- latex.code <- c(invisible.output, invisible(capture.output(.stargazer.summ.stat.table(objects[[i]]),file=NULL)) )
}
else if ((type == "text") || (type == "html") || (type == "mmd")) {
latex.code <- c(latex.code, invisible(capture.output(.stargazer.summ.stat.table(objects[[i]]),file=NULL)) )
}
}
else {
if (type == "latex") {
.stargazer.data.frame.table(objects[[i]])
invisible.output <- latex.code <- c(invisible.output, invisible(capture.output(.stargazer.data.frame.table(objects[[i]]),file=NULL)) )
}
else if ((type == "text") || (type == "html") || (type == "mmd")) {
latex.code <- c(latex.code, invisible(capture.output(.stargazer.data.frame.table(objects[[i]]),file=NULL)) )
}
}
regression.table.objects <- NULL
}
else {
regression.table.objects <- c(regression.table.objects, i)
.global.object.names <- .global.object.names.all[regression.table.objects]
}
}
if (!is.null(regression.table.objects)) {
number.of.table <- number.of.table + 1
if (!is.na(title[number.of.table])) { .format.title <- title[number.of.table] }
else { .format.title <- title[length(title)] }
if (!is.na(label[number.of.table])) { .format.label <- label[number.of.table] }
else { .format.label <- label[length(label)] }
if (type == "latex") {
do.call(.stargazer.reg.table, as.list(objects[regression.table.objects]))
invisible.output <- latex.code <- c(invisible.output, invisible(capture.output(do.call(.stargazer.reg.table, as.list(objects[regression.table.objects])),file=NULL)) )
}
else if ((type == "text") || (type == "html") || (type == "mmd")) {
latex.code <- c(latex.code, invisible(capture.output(do.call(.stargazer.reg.table, as.list(objects[regression.table.objects])),file=NULL)) )
}
}
# don't do text output or file outputs if there are errors
if (type == "text") {
.text.output(latex.code)
invisible.output <- invisible(capture.output(.text.output(latex.code)))
}
else if (type == "html") {
.html.output(latex.code)
invisible.output <- invisible(capture.output(.html.output(latex.code)))
}
else if (type == "mmd") {
.mmd.output(latex.code)
invisible.output <- invisible(capture.output(.mmd.output(latex.code)))
}
if (length(out) >= 1) {
text.out <- invisible(capture.output(.text.output(latex.code)))
html.out <- invisible(capture.output(.html.output(latex.code)))
.output.file(out, latex.code, text.out, html.out, type, out.header)
}
}
else {
if (suppress.errors == FALSE) {
cat(error.present, sep="")
invisible.output <- latex.code <- error.present
}
else {
invisible.output <- latex.code <- ""
}
}
options(warn=warn)
return(invisible(invisible.output))
}
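# Illustrative call exercising the options parsed above (a sketch, not part of
# the package; models and option values are hypothetical):
# m1 <- lm(mpg ~ wt, data = mtcars)
# m2 <- lm(mpg ~ wt + hp, data = mtcars)
# stargazer(m1, m2, type = "text",
#           keep.stat = c("n", "rsq"),     # handled by the keep.stat branch
#           report = "vcs*",               # variable, coefficient, starred std. error
#           table.layout = "=ldm#-t-s-n")  # handled by the table.layout parser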
|
library(ape)
testtree <- read.tree("1646_4.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="1646_4_unrooted.txt")
|
/codeml_files/newick_trees_processed/1646_4/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false | false | 135 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spatial_visuals.R
\name{plotUMAP}
\alias{plotUMAP}
\title{plotUMAP}
\usage{
plotUMAP(gobject, dim_reduction_name = "umap", default_save_name = "UMAP", ...)
}
\arguments{
\item{gobject}{giotto object}
\item{dim_reduction_name}{dimension reduction name}
\item{default_save_name}{default save name; do not change this directly, set save_name in save_param instead}
\item{group_by}{create multiple plots based on cell annotation column}
\item{group_by_subset}{subset the group_by factor column}
\item{dim1_to_use}{dimension to use on x-axis}
\item{dim2_to_use}{dimension to use on y-axis}
\item{spat_enr_names}{names of spatial enrichment results to include}
\item{show_NN_network}{show underlying NN network}
\item{nn_network_to_use}{type of NN network to use (kNN vs sNN)}
\item{network_name}{name of NN network to use, if show_NN_network = TRUE}
\item{cell_color}{color for cells (see details)}
\item{color_as_factor}{convert color column to factor}
\item{cell_color_code}{named vector with colors}
\item{cell_color_gradient}{vector with 3 colors for numeric data}
\item{gradient_midpoint}{midpoint for color gradient}
\item{gradient_limits}{vector with lower and upper limits}
\item{select_cell_groups}{select subset of cells/clusters based on cell_color parameter}
\item{select_cells}{select subset of cells based on cell IDs}
\item{show_other_cells}{display not selected cells}
\item{other_cell_color}{color of not selected cells}
\item{other_point_size}{size of not selected cells}
\item{show_cluster_center}{plot center of selected clusters}
\item{show_center_label}{plot label of selected clusters}
\item{center_point_size}{size of center points}
\item{label_size}{size of labels}
\item{label_fontface}{font of labels}
\item{edge_alpha}{column to use for alpha of the edges}
\item{point_shape}{point with border or not (border or no_border)}
\item{point_size}{size of point (cell)}
\item{point_border_col}{color of border around points}
\item{point_border_stroke}{stroke size of border around points}
\item{title}{title for plot, defaults to cell_color parameter}
\item{show_legend}{show legend}
\item{legend_text}{size of legend text}
\item{legend_symbol_size}{size of legend symbols}
\item{background_color}{color of plot background}
\item{axis_text}{size of axis text}
\item{axis_title}{size of axis title}
\item{cow_n_col}{cowplot param: how many columns}
\item{cow_rel_h}{cowplot param: relative height}
\item{cow_rel_w}{cowplot param: relative width}
\item{cow_align}{cowplot param: how to align}
\item{show_plot}{show plot}
\item{return_plot}{return ggplot object}
\item{save_plot}{directly save the plot [boolean]}
\item{save_param}{list of saving parameters from \code{\link{all_plots_save_function}}}
}
\value{
ggplot
}
\description{
Short wrapper for UMAP visualization
}
\details{
Description of parameters, see \code{\link{dimPlot2D}}. For 3D plots see \code{\link{plotUMAP_3D}}
}
\examples{
plotUMAP(gobject)
}
|
/doc/plotUMAP.Rd
|
no_license
|
bernard2012/spatialgiotto_web
|
R
| false | true | 3,049 |
rd
|
#' Akaike information criterion
#'
#' @description Akaike information criterion for model selection.
#'
#' @param model An object of class \code{lm}.
#'
#' @param method A character vector; specify the method to compute AIC. Valid
#' options include R, STATA and SAS.
#'
#' @details
#' AIC provides a means for model selection. Given a collection of models for
#' the data, AIC estimates the quality of each model, relative to each of the
#' other models. R and STATA use loglikelihood to compute AIC. SAS uses residual
#' sum of squares. Below is the formula in each case:
#'
#' \emph{R & STATA}
#' \deqn{AIC = -2(loglikelihood) + 2p}
#'
#' \emph{SAS}
#' \deqn{AIC = n * ln(SSE / n) + 2p}
#'
#' where \emph{n} is the sample size and \emph{p} is the number of model parameters including intercept.
#'
#' @return Akaike information criterion of the model.
#'
#' @references
#' Akaike, H. (1969). “Fitting Autoregressive Models for Prediction.” Annals of the Institute of Statistical
#' Mathematics 21:243–247.
#'
#' Judge, G. G., Griffiths, W. E., Hill, R. C., and Lee, T.-C. (1980). The Theory and Practice of Econometrics.
#' New York: John Wiley & Sons.
#'
#' @examples
#' # using R computation method
#' model <- lm(mpg ~ disp + hp + wt + qsec, data = mtcars)
#' ols_aic(model)
#'
#' # using STATA computation method
#' model <- lm(mpg ~ disp + hp + wt + qsec, data = mtcars)
#' ols_aic(model, method = 'STATA')
#'
#' # using SAS computation method
#' model <- lm(mpg ~ disp + hp + wt + qsec, data = mtcars)
#' ols_aic(model, method = 'SAS')
#'
#' @family model selection criteria
#'
#' @importFrom stats logLik
#'
#' @export
#'
ols_aic <- function(model, method = c("R", "STATA", "SAS")) {
check_model(model)
method <- match.arg(method)
n <- model_rows(model)
p <- model_n_coeffs(model)
if (method == "R") {
lk <- logLik(model)
-2 * lk[1] + 2 * (p + 1)
} else if (method == "STATA") {
lk <- logLik(model)
-2 * lk[1] + 2 * p
} else if (method == "SAS") {
sse <- model_rss(model)
n * log(sse / n) + 2 * p
} else {
message("Please specify a valid method.")
}
}
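# Cross-check (a sketch; assumes the helpers above are loaded): for lm objects
# the "R" method reproduces stats::AIC(), since logLik() counts the p
# coefficients plus sigma as estimated parameters:
# model <- lm(mpg ~ disp + hp, data = mtcars)
# all.equal(ols_aic(model), AIC(model))   # TRUE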
#' Bayesian information criterion
#'
#' Bayesian information criterion for model selection.
#'
#' @param model An object of class \code{lm}.
#' @param method A character vector; specify the method to compute BIC. Valid
#' options include R, STATA and SAS.
#'
#' @details
#' SBC provides a means for model selection. Given a collection of models for
#' the data, SBC estimates the quality of each model, relative to each of the
#' other models. R and STATA use loglikelihood to compute SBC. SAS uses residual
#' sum of squares. Below is the formula in each case:
#'
#' \emph{R & STATA}
#' \deqn{SBC = -2(loglikelihood) + ln(n) * p}
#'
#' \emph{SAS}
#' \deqn{SBC = n * ln(SSE / n) + p * ln(n)}
#'
#' where \emph{n} is the sample size and \emph{p} is the number of model parameters including intercept.
#'
#' @return The bayesian information criterion of the model.
#'
#' @references
#' Schwarz, G. (1978). “Estimating the Dimension of a Model.” Annals of Statistics 6:461–464.
#'
#' Judge, G. G., Griffiths, W. E., Hill, R. C., and Lee, T.-C. (1980). The Theory and Practice of Econometrics.
#' New York: John Wiley & Sons.
#'
#' @examples
#' # using R computation method
#' model <- lm(mpg ~ disp + hp + wt + qsec, data = mtcars)
#' ols_sbc(model)
#'
#' # using STATA computation method
#' model <- lm(mpg ~ disp + hp + wt + qsec, data = mtcars)
#' ols_sbc(model, method = 'STATA')
#'
#' # using SAS computation method
#' model <- lm(mpg ~ disp + hp + wt + qsec, data = mtcars)
#' ols_sbc(model, method = 'SAS')
#'
#' @family model selection criteria
#'
#' @export
#'
ols_sbc <- function(model, method = c("R", "STATA", "SAS")) {
check_model(model)
method <- match.arg(method)
n <- model_rows(model)
p <- model_n_coeffs(model)
if (method == "R") {
lk <- logLik(model)
-2 * lk[1] + log(n) * (p + 1)
} else if (method == "STATA") {
lk <- logLik(model)
-2 * lk[1] + log(n) * p
} else if (method == "SAS") {
sse <- model_rss(model)
n * log(sse / n) + p * log(n)
} else {
message("Please specify a valid method.")
}
}
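# Analogous cross-check (sketch): the "R" method matches stats::BIC():
# model <- lm(mpg ~ disp + hp, data = mtcars)
# all.equal(ols_sbc(model), BIC(model))   # TRUE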
#' Sawa's bayesian information criterion
#'
#' @description Sawa's bayesian information criterion for model selection.
#'
#' @param model An object of class \code{lm}.
#' @param full_model An object of class \code{lm}.
#'
#' @details
#' Sawa (1978) developed a model selection criterion that was derived from a
#' Bayesian modification of the AIC criterion. Sawa's Bayesian Information
#' Criterion (BIC) is a function of the number of observations n, the SSE, the
#' pure error variance fitting the full model, and the number of independent
#' variables including the intercept.
#'
#' \deqn{SBIC = n * ln(SSE / n) + 2(p + 2)q - 2(q^2)}
#'
#' where \eqn{q = n(\sigma^2)/SSE}, \emph{n} is the sample size, \emph{p} is the number of model parameters including intercept,
#' and \emph{SSE} is the residual sum of squares.
#'
#' @return Sawa's Bayesian Information Criterion
#'
#' @references
#' Sawa, T. (1978). “Information Criteria for Discriminating among Alternative Regression Models.” Econometrica
#' 46:1273–1282.
#'
#' Judge, G. G., Griffiths, W. E., Hill, R. C., and Lee, T.-C. (1980). The Theory and Practice of Econometrics.
#' New York: John Wiley & Sons.
#'
#' @examples
#' full_model <- lm(mpg ~ ., data = mtcars)
#' model <- lm(mpg ~ disp + hp + wt + qsec, data = mtcars)
#' ols_sbic(model, full_model)
#'
#' @family model selection criteria
#'
#' @export
#'
ols_sbic <- function(model, full_model) {
check_model(model)
check_model(full_model)
n <- model_rows(model)
p <- anova_coeffs(model)
r <- full_model_coeffs(full_model)
q <- (q1(full_model, r) / (q2(model, p))) * (n)
sbicout(model, n, p, q)
}
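# Worked illustration of the q term (a sketch; object names are hypothetical):
# full <- lm(mpg ~ ., data = mtcars)
# sub  <- lm(mpg ~ wt, data = mtcars)
# sigma2 <- rev(anova(full)[[3]])[1]   # pure-error variance from the full model
# sse    <- sum(residuals(sub)^2)      # SSE of the candidate model
# nrow(mtcars) * sigma2 / sse          # the q that enters ols_sbic(sub, full)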
#' Extract mean square
#'
#' Extracts the mean square from \code{anova()}.
#'
#' @param full_model An object of class \code{lm}.
#' @param r A vector of length 1.
#'
#' @keywords internal
#'
#' @noRd
#'
q1 <- function(full_model, r) {
anova(full_model)[[3]][r]
}
#' Extract sum of squares
#'
#' Extracts the sum of squares from \code{anova()}.
#'
#' @param model An object of class \code{lm}.
#' @param p A vector of length 1.
#'
#' @keywords internal
#'
#' @noRd
#'
q2 <- function(model, p) {
anova(model)[[2]][p]
}
#' SBIC internal
#'
#' Returns sawa's bayesian information criterion.
#'
#' @param model An object of class \code{lm}.
#' @param n A vector of length 1.
#' @param p A vector of length 1.
#' @param q A vector of length 1.
#'
#' @keywords internal
#'
#' @noRd
#'
sbicout <- function(model, n, p, q) {
a <- (2 * (p + 2) * q)
b <- (2 * (q ^ 2))
(log(model_rss(model) / n) * n) + a - b
}
#' Mallow's Cp
#'
#' @description Mallow's Cp.
#'
#' @param model An object of class \code{lm}.
#' @param fullmodel An object of class \code{lm}.
#'
#' @details
#' Mallows' Cp statistic estimates the size of the bias that is introduced into
#' the predicted responses by having an underspecified model. Use Mallows' Cp
#' to choose between multiple regression models. Look for models where
#' Mallows' Cp is small and close to the number of predictors in the model plus
#' the constant (p).
#'
#' @return Mallow's Cp of the model.
#'
#' @references
#' Hocking, R. R. (1976). “The Analysis and Selection of Variables in a Linear Regression.” Biometrics
#' 32:1–50.
#'
#' Mallows, C. L. (1973). “Some Comments on Cp.” Technometrics 15:661–675.
#'
#' @examples
#' full_model <- lm(mpg ~ ., data = mtcars)
#' model <- lm(mpg ~ disp + hp + wt + qsec, data = mtcars)
#' ols_mallows_cp(model, full_model)
#'
#' @family model selection criteria
#'
#' @export
#'
ols_mallows_cp <- function(model, fullmodel) {
check_model(model)
check_model(fullmodel)
n <- model_rows(model)
p <- anova_coeffs(model)
q <- full_model_coeffs(fullmodel)
mcpout(model, fullmodel, n, p, q)
}
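# Sanity property (sketch): for the full model itself SSE/MSE = n - p, so Cp
# collapses to p, the parameter count including the intercept:
# full <- lm(mpg ~ ., data = mtcars)
# ols_mallows_cp(full, full)   # 11 for mtcars (10 predictors + intercept)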
#' Mallow's Cp internal
#'
#' Computes Mallow's Cp.
#'
#' @param model An object of class \code{lm}.
#' @param fullmodel An object of class \code{lm}.
#' @param n A numeric vector of length 1.
#' @param p A numeric vector of length 1.
#' @param q A numeric vector of length 1.
#'
#' @keywords internal
#'
#' @noRd
#'
mcpout <- function(model, fullmodel, n, p, q) {
sse <- model_rss(model)
sec <- (n - (2 * p))
mse <- rev(anova(fullmodel)[[3]])[1]
(sse / mse) - sec
}
#' MSEP
#'
#' Estimated error of prediction, assuming multivariate normality.
#'
#' @param model An object of class \code{lm}.
#'
#' @details
#' Computes the estimated mean square error of prediction assuming that both
#' independent and dependent variables are multivariate normal.
#'
#' \deqn{MSE(n + 1)(n - 2) / n(n - p - 1)}
#'
#' where \eqn{MSE = SSE / (n - p)}, n is the sample size and p is the number of
#' predictors including the intercept
#'
#' @return Estimated error of prediction of the model.
#'
#' @references
#' Stein, C. (1960). “Multiple Regression.” In Contributions to Probability and Statistics: Essays in Honor
#' of Harold Hotelling, edited by I. Olkin, S. G. Ghurye, W. Hoeffding, W. G. Madow, and H. B. Mann,
#' 264–305. Stanford, CA: Stanford University Press.
#'
#' Darlington, R. B. (1968). “Multiple Regression in Psychological Research and Practice.” Psychological
#' Bulletin 69:161–182.
#'
#' @examples
#' model <- lm(mpg ~ disp + hp + wt + qsec, data = mtcars)
#' ols_msep(model)
#'
#' @family model selection criteria
#'
#' @export
#'
ols_msep <- function(model) {
check_model(model)
sepout(model)
}
#' MSEP internal
#'
#' Computes the estimated mean square error of prediction.
#'
#' @param model An object of class \code{lm}.
#'
#' @keywords internal
#'
#' @noRd
#'
sepout <- function(model) {
n <- model_rows(model)
p <- anova_coeffs(model)
mse <- anova(model)[[2]][p]
num <- ((n + 1) * (n - 2)) * mse
den <- n * (n - p - 1)
num / den
}
#' Final prediction error
#'
#' @description Estimated mean square error of prediction.
#'
#' @param model An object of class \code{lm}.
#'
#' @details
#' Computes the estimated mean square error of prediction for each model
#' selected assuming that the values of the regressors are fixed and that the
#' model is correct.
#'
#' \deqn{MSE((n + p) / n)}
#'
#' where \eqn{MSE = SSE / (n - p)}, n is the sample size and p is the number of predictors including the intercept
#'
#' @return Final prediction error of the model.
#'
#' @references
#' Akaike, H. (1969). “Fitting Autoregressive Models for Prediction.” Annals of the Institute of Statistical
#' Mathematics 21:243–247.
#'
#' Judge, G. G., Griffiths, W. E., Hill, R. C., and Lee, T.-C. (1980). The Theory and Practice of Econometrics.
#' New York: John Wiley & Sons.
#'
#' @examples
#' model <- lm(mpg ~ disp + hp + wt + qsec, data = mtcars)
#' ols_fpe(model)
#'
#' @family model selection criteria
#'
#' @export
#'
ols_fpe <- function(model) {
check_model(model)
jpout(model)
}
#' Final prediction error internal
#'
#' Computes the final prediction error.
#'
#' @param model An object of class \code{lm}.
#'
#' @keywords internal
#'
#' @noRd
#'
jpout <- function(model) {
n <- model_rows(model)
p <- anova_coeffs(model)
mse <- anova(model)[[3]][p]
((n + p) / n) * mse
}
#' @title Amemiya's prediction criterion
#'
#' @description Amemiya's prediction error.
#'
#' @param model An object of class \code{lm}.
#'
#' @details
#' Amemiya's Prediction Criterion penalizes R-squared more heavily than does
#' adjusted R-squared for each addition degree of freedom used on the
#' right-hand-side of the equation. The lower the better for this criterion.
#'
#' \deqn{((n + p) / (n - p))(1 - (R^2))}
#'
#' where \emph{n} is the sample size, \emph{p} is the number of predictors including the intercept and
#' \emph{R^2} is the coefficient of determination.
#'
#' @return Amemiya's prediction error of the model.
#'
#' @references
#' Amemiya, T. (1976). Selection of Regressors. Technical Report 225, Stanford University, Stanford, CA.
#'
#' Judge, G. G., Griffiths, W. E., Hill, R. C., and Lee, T.-C. (1980). The Theory and Practice of Econometrics.
#' New York: John Wiley & Sons.
#'
#' @examples
#' model <- lm(mpg ~ disp + hp + wt + qsec, data = mtcars)
#' ols_apc(model)
#'
#' @family model selection criteria
#'
#' @export
#'
ols_apc <- function(model) {
check_model(model)
pcout(model)
}
#' Amemiya internal
#'
#' Computes the Amemiya prediction error.
#'
#' @param model An object of class \code{lm}.
#'
#' @keywords internal
#'
#' @noRd
#'
pcout <- function(model) {
n <- model_rows(model)
p <- anova_coeffs(model)
  rsq <- summary(model)$r.squared   # extract by name rather than by position [[8]]
  ((n + p) / (n - p)) * (1 - rsq)
}
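# Hand check of the formula (sketch):
# m <- lm(mpg ~ wt + hp, data = mtcars)                # n = 32, p = 3
# ((32 + 3) / (32 - 3)) * (1 - summary(m)$r.squared)   # equals ols_apc(m)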
#' @title Hocking's Sp
#'
#' @description Average prediction mean squared error.
#'
#' @param model An object of class \code{lm}.
#'
#' @details Hocking's Sp criterion is an adjustment of the residual sum of
#' Squares. Minimize this criterion.
#'
#' \deqn{MSE / (n - p - 1)}
#'
#' where \eqn{MSE = SSE / (n - p)}, n is the sample size and p is the number of predictors including the intercept
#'
#' @return Hocking's Sp of the model.
#'
#' @references
#' Hocking, R. R. (1976). “The Analysis and Selection of Variables in a Linear Regression.” Biometrics
#' 32:1–50.
#'
#' @examples
#' model <- lm(mpg ~ disp + hp + wt + qsec, data = mtcars)
#' ols_hsp(model)
#'
#' @family model selection criteria
#'
#' @export
#'
ols_hsp <- function(model) {
check_model(model)
spout(model)
}
#' Hocking's internal
#'
#' Computes the Hocking's Sp statistic.
#'
#' @param model An object of class \code{lm}.
#'
#' @keywords internal
#'
#' @noRd
#'
spout <- function(model) {
n <- model_rows(model)
p <- anova_coeffs(model)
mse <- anova(model)[[3]][p]
mse / (n - p - 1)
}
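# Hand check (sketch): Sp is the residual mean square over (n - p - 1):
# m <- lm(mpg ~ wt + hp, data = mtcars)     # n = 32, p = 3
# anova(m)[["Mean Sq"]][3] / (32 - 3 - 1)   # equals ols_hsp(m)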
#' Model data rows
#'
#' Returns the number of rows in the data used in the model.
#'
#' @param model An object of class \code{lm}.
#'
#' @noRd
#'
model_rows <- function(model) {
nrow(model.frame(model))
}
#' Model Coefficients
#'
#' Returns the number of coefficients in the model.
#'
#' @param model An object of class \code{lm}.
#'
#' @noRd
#'
model_n_coeffs <- function(model) {
length(model$coefficients)
}
#' Residual sum of squares
#'
#' Returns the residual sum of squares.
#'
#' @param model An object of class \code{lm}.
#'
#' @noRd
#'
model_rss <- function(model) {
sum(residuals(model) ^ 2)
}
#' Coefficients
#'
#' Returns the number of rows in the \code{anova()} table,
#' i.e. the model terms plus the residual row.
#'
#' @param model An object of class \code{lm}.
#'
#' @noRd
#'
anova_coeffs <- function(model) {
length(anova(model)[[1]])
}
#' Number of columns
#'
#' Returns the number of columns in the data.
#'
#' @param model An object of class \code{lm}.
#'
#' @noRd
#'
full_model_coeffs <- function(model) {
length(model.frame(model))
}
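# The helpers at a glance (illustrative values for a small model):
# m <- lm(mpg ~ wt + hp, data = mtcars)
# model_rows(m)         # 32 observations
# model_n_coeffs(m)     # 3 coefficients (intercept, wt, hp)
# model_rss(m)          # residual sum of squares, same as deviance(m)
# anova_coeffs(m)       # 3 anova rows (wt, hp, Residuals)
# full_model_coeffs(m)  # 3 model-frame columns (mpg, wt, hp)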
|
/R/ols-information-criteria.R
|
no_license
|
kaushikmanikonda/olsrr
|
R
| false | false | 14,925 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/body.R
\name{body_get_wb_cols}
\alias{body_get_wb_cols}
\title{Compute the columns of the workbook which are occupied by the body}
\usage{
body_get_wb_cols(tab)
}
\arguments{
\item{tab}{The core tab object}
}
\description{
Compute the columns of the workbook which are occupied by the body
}
|
/man/body_get_wb_cols.Rd
|
no_license
|
harryprince/xltabr
|
R
| false | true | 370 |
rd
|
testlist <- list(a = 0L, b = 0L, x = c(16722175L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610131026-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 119 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/neuro_surface.R
\docType{methods}
\name{[[,NeuroSurfaceVector,numeric-method}
\alias{[[,NeuroSurfaceVector,numeric-method}
\title{extractor}
\usage{
\S4method{[[}{NeuroSurfaceVector,numeric}(x, i)
}
\arguments{
\item{x}{the object}
\item{i}{first index}
}
\description{
extractor
}
|
/man/sub-sub-NeuroSurfaceVector-numeric-method.Rd
|
no_license
|
bbuchsbaum/neurosurf
|
R
| false | true | 361 |
rd
|
#### Lukas Beinhauer 08/12/20 ####
## Making various functions ##
### Attention: 'sep.objects' is not returning separate objects at the moment. If desired,
### you can use 'list' and cycle through the elements of the list in a for loop AFTER
### calling the function and storing the list in an object.
###
### ex.: x <- MultDataObjects_sheets(5, filepath="C:/Users/beinhaul/Documents/Git/ManyLabs1/MLforR.xlsx",
###                                  basename = "data", col=NULL, output="list")
###      for(i in 1:5){
###        assign(paste0("data", i), x[[i]])
###      }
MultDataObjects_sheets <- function(k, filepath, basename = "data", col = NA,
output = c("list", "sep.objects", "data.frame")){
if (!require(readxl)) {
stop("readxl not installed")
}
spaces <- ceiling(k/10)
if(output == "sep.objects"){
for(i in 1:k){
      assign(sprintf(paste0(basename, "%0", spaces, "d"), i),
             read_excel(filepath, sheet=i))
}
}else{
if(output == "list"){
dat <- list()
for(i in 1:k){
      dat[[i]] <- assign(sprintf(paste0(basename, "%0", spaces, "d"), i),
                         read_excel(filepath, sheet=i))
}
return(dat)
}else{
if(output == "data.frame"){
for(i in 1:k){
        temp.dat <- assign(sprintf(paste0(basename, "%0", spaces, "d"), i),
                           read_excel(filepath, sheet=i))
if(i == 1){
df <- data.frame(matrix(NA, nrow=nrow(temp.dat), ncol=k))
}
if(is.na(col)){
stop("Please specify a column index")
}
df[,i] <- temp.dat[,col]
}
return(df)
}else{
stop("Specify output as either 'list', 'sep.objects' or 'data.frame'")
}
}
}
}
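# Hypothetical call (file path, sheet layout and column index are assumptions,
# not taken from any real workbook):
# df <- MultDataObjects_sheets(3, filepath = "MLforR.xlsx",
#                              col = 2, output = "data.frame")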
getPPV <- function(kstudies=1000, alpha=.05, Power=.8, ratio.true.hypotheses=.3){
  Pb <- ratio.true.hypotheses # the ratio of "true" hypotheses to all hypotheses (or P(B), if we refer to Bayes' Theorem, with A
  # being the event of a statistically significant test and B being the event of a genuine effect present)
noeffstudies <- (1-Pb)*kstudies # nr of studies assessing *no* genuine "true" effect
effstudies <- Pb*kstudies # nr of studies assessing a genuine "true" effect
ksig <- Power*effstudies + alpha*noeffstudies # nr of studies with a statistically significant test
knonsig <- (1-Power)*effstudies + (1-alpha)*noeffstudies # nr of studies with *no* statistically significant test
falsepositives <- noeffstudies*alpha # nr of studies falsely identifying an effect (Type-I-error)
falsenegatives <- effstudies*(1-Power) # nr of studies falsely missing to identify an effect (type-II-error)
truepositives <- effstudies*(Power) # nr of studies correctly identifying an effect
truenegatives <- noeffstudies*(1-alpha) # nr of studies correctly missing to identify an effect
FDR <- falsepositives/ksig # False Discovery Rate
PPV <- truepositives/ksig # Posterior Predictive Value (Probability of dealing with a genuine "true" effect, when a statistically
# significant test is achieved.)
return(list(FDR = FDR,
PPV = PPV))
}
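# Illustrative call (numbers hypothetical, results worked out by hand): with
# 30% true hypotheses, alpha = .05 and power = .8, 275 of 1000 studies come
# out significant, of which 240 are genuine:
# getPPV(kstudies = 1000, alpha = .05, Power = .8, ratio.true.hypotheses = .3)
# # $FDR = 35/275 ~ 0.127, $PPV = 240/275 ~ 0.873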
|
/Functions.R
|
no_license
|
LBeinhauer/VariousFunctions
|
R
| false | false | 3,374 |
r
|
pollutantmean <- function(directory, pollutant, id = 1:332) {
append_df = data.frame()
for(i in id){
    file = read.csv(file.path(directory, list.files(directory)[i])) # prepend the directory so the path resolves from any working directory
append_df = rbind(append_df, file)
}
  return(mean(append_df[, pollutant], na.rm = TRUE))
}
#Instructions can be found on the Coursera website, Assignment 1: Air Pollution.
#Line 2: create a new empty dataframe where we will append the dataframes
#Lines 4-7: loop through each id number, read it in, then rbind it to append_df
#Line 9: return the mean given the pollutant chosen.
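# Usage sketch (assumes the assignment's "specdata" folder of monitor CSVs is
# available relative to the working directory):
# pollutantmean("specdata", "sulfate", 1:10)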
|
/pollutantmean.R
|
no_license
|
oyafusoz/RProgrammingCoursera
|
R
| false | false | 549 |
r
|
library(cluster) # clara() comes from the cluster package; this import was missing
x1 = rnorm(100, mean = 200, sd = 60)
y1 = rnorm(100, mean = 10, sd = 10)
x2 = rnorm(100, mean = 10, sd = 20)
y2 = rnorm(100, mean = 400, sd = 200)
x3 = rnorm(100, mean = 200, sd = 50)
y3 = rnorm(100, mean = 800, sd = 10)
x = as.matrix(c(x1,x2, x3))
y = as.matrix(c(y1,y2, y3))
A <- cbind(x,y)
par(mfrow = c(1,4))
cl <- clara(A, 3, metric = c("manhattan"), stand = TRUE )
plot(A, col = cl$clustering, xlab = "x", ylab = "y")
title("manhattan, TRUE")
cl <- clara(A, 3, metric = c("manhattan"), stand = FALSE)
plot(A, col = cl$clustering, xlab = "x", ylab = "y")
title("manhattan, FALSE")
cl <- clara(A, 3, metric = c("euclidean"), stand = TRUE)
plot(A, col = cl$clustering, xlab = "x", ylab = "y")
title("euclidean, TRUE")
cl <- clara(A, 3, metric = c("euclidean"), stand = FALSE)
plot(A, col = cl$clustering, xlab = "x", ylab = "y")
title("euclidean, FALSE")
|
/MACHINE-LEARNING/LABS-progs/ann/All_Labs/7Lab_cluster/2.r
|
no_license
|
BC30138/Sem10
|
R
| false | false | 894 |
r
|
#' Inset small plot within figure
#'
#' Inset plot with margins, background and border
#'
#' @return parameters of small plot, invisible.
#' @section Warning: setting mai etc does not work!
#' @author Berry Boessenkool, \email{berry-b@@gmx.de}, 2014
#' @seealso \code{\link{colPointsHist}} for an example of usage, \code{\link[TeachingDemos]{subplot}} and \code{\link[ade4]{add.scatter}} for alternative solutions to this problem that do not set margins.
#' @keywords hplot
#' @export
#' @examples
#'
#' # Basic usage:
#' plot(1:10)
#' smallPlot(plot(5:1) )
#' smallPlot(plot(5:1), x=c(30,80), y=30:60, bg="yellow", yaxt="n")
#' # if R warns "figure margins too large", try dragging the plot viewer bigger
#'
#' # select focus for further add-on's:
#' points(3, 2, pch="+", cex=2)
#' smallPlot( plot(5:1), bg="blue", resetfocus=FALSE )
#' points(3, 2, pch="+", cex=2)
#'
#' # More par settings:
#' smallPlot( plot(50:1), bg=6, mai=c(0.2, 0.3, 0.1, 0.1))
#' # If you find any more that screw things up, please let me know!
#' smallPlot( plot(5:1), bg=8, ann=FALSE)
#' smallPlot(plot(10:50)) # with default bg ("transparent"), old plot is kept
#' smallPlot(plot(10:50))
#'
#' # complex graphics in code chunks:
#' plot(1:10)
#' smallPlot( {plot(5:1, ylab="Blubber"); lines(c(2,4,3));
#' legend("topright", "BerryRocks!", lwd=3) }, bg="white" )
#'
#' # in par multiple figure, things now work as well if resetfocus=TRUE:
#' op <- par("plt")
#' par(mfrow=c(2,3))
#' for(i in 1:2) plot(cumsum(rnorm(50)))
#' smallPlot( plot(50:1), bg=6)
#' plot(3:9) # opens new window
#' smallPlot( plot(50:1), bg=6, resetfocus=FALSE)
#' points(3, 2, pch="+", cex=2)
#' plot(3:9) # plot in next window, but it is still small
#' par(plt=op)
#' plot(3:9) # margins, las and mgp are still changed
#'
#'
#' @param expr expression creating a plot. Can be code within {braces}.
#' @param x,y Position of small plot, relative to current figure region (0:100). max and min from vector are taken. DEFAULT: 5-70, 50-100
#' @param x1,y1,x2,y2 Positions of topleft and bottomright corner. Replaced with x,y; kept here for backward compatibility.
#' @param mar Margin vector in relative units (0:100), thus behaves differently than \code{\link{par}(mar)}. DEFAULT: c(12, 14, 3, 3)
#' @param mgp MarGinPlacement: distance of xlab/ylab, numbers and line from plot margin, as in \code{\link{par}}, but with different defaults. DEFAULT: c(1.8, 0.8, 0)
#' @param bg Background. DEFAULT: par("bg")
#' @param border Border around inset plot. DEFAULT: par("fg")
#' @param las LabelAxisStyle. DEFAULT: 1
#' @param resetfocus reset focus to original plot? Specifies where further low level plot commands are directed to. DEFAULT: TRUE
#' @param \dots further arguments passed to \code{\link{par}}. \code{new=FALSE} removes the old plot. May mess things up - please tell me for which arguments!
#'
smallPlot <- function(
expr,
x=c(5,70),
y=c(50,100),
x1,y1,x2,y2,
mar=c(12, 14, 3, 3),
mgp=c(1.8, 0.8, 0),
bg=par("bg"),
border=par("fg"),
las=1,
resetfocus=TRUE,
...)
{ # ------------
# Input check: # y1 | P1 |
if(missing(x1)) x1 <- min(x, na.rm=TRUE) # | |
if(missing(x2)) x2 <- max(x, na.rm=TRUE) # y2 | P2 |
if(missing(y1)) y1 <- max(y, na.rm=TRUE) # ------------
if(missing(y2)) y2 <- min(y, na.rm=TRUE) # x1 x2
# catch outside plot:
if(x1<0) {x1 <- 0; warning("x (",x1,") set to 0.")}
if(y2<0) {y2 <- 0; warning("y (",y2,") set to 0.")}
if(x2>100){x2 <- 100; warning("x (",x2,") set to 100.")}
if(y1>100){y1 <- 100; warning("y (",y1,") set to 100.")}
# control for 0:1 input:
if(diff(range(x, na.rm=TRUE)) < 1 | diff(range(y, na.rm=TRUE)) < 1 )
stop("x or y was probably given as coodinates between 0 and 1. They must be between 0 and 100.")
# old parameters to be restored at exit:
op <- par(no.readonly=TRUE)
# inset plot: background, border
par(plt=c(x1, x2, y2, y1)/100, new=TRUE, mgp=mgp) # plt / fig
plot.new() # code line from ade4::add.scatter
u <- par("usr")
rect(u[1], u[3], u[2], u[4], col=bg, border=border)
# inset plot: margins
par(plt=c(x1+mar[2], x2-mar[4], y2+mar[1], y1-mar[3])/100, new=TRUE, las=las, ...)
# Actual plot:
expr
# par of small plot:
sp <- par(no.readonly=TRUE)
# par reset
if(resetfocus)
{
if( par("mfrow")[1]==1 & par("mfrow")[2]==1 ) par(op) # ruins multiple figure plots, so:
else par(plt=op$plt, new=op$new, mgp=op$mgp, las=op$las)
}
return(invisible(sp))
}
|
/berryFunctions/R/smallPlot.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 4,623 |
r
|
#' Ancestor-Descendant Relationships for Macroperforate Foraminifera, from Aze et al. (2011)
#'
#' An example dataset of ancestor-descendent relationships and first and last appearance dates for
#' a set of macroperforate Foramanifera, taken from the supplemental materials of Aze et al. (2011).
#' This dataset is included here primarily for testing functions \code{parentChild2taxonTree}
#' and \code{taxa2phylo}.
#' @name macroperforateForam
#' @rdname macroperforateForam
#' @aliases foramAL foramAM foramALb foramAMb
#' @details
#' This example dataset is composed of four tables, each containing information
#' on the ancestor-descendant relationships and first and last appearances of
#' species of macroperforate foraminifera from the fossil record.
#' Each of the four tables is for the same set of taxa, but divides and
#' concatenates the included foram species in four different ways, relating to
#' the use of morphospecies versus combined anagenetic lineages (see Ezard et
#' al., 2012), and whether taxa are retained as units related by budding cladogenesis
#' or are split at branching points to create a fully 'bifurcating' set
#' of relationships, independent of ancestral morphotaxon persistence through branching
#' events. See the examples section for more details.
#' @format
#' The 'foramAM' and 'foramAL' tables include budding taxon units
#' for morphospecies and lineages respectively, with four columns:
#' taxon name, ancestral taxon's name, first appearance date and last appearance
#' date (note that column headings vary). The 'foramAMb' and 'foramALb' tables are
#' composed of data for the same taxon units as the previous
#" set, except parent taxa that persist through
#' branching events are split so that the relationships are fully 'bifurcating', rather
#' than 'budding'. As this obscures taxonomic identity, taxon identification labels
#' are included in an additional, fifth column in these tables.
#' See the examples section for more details.
#' @source
#' This dataset is obtained from the supplementary materials (specifically
#' 'Appendix S5') of:
#'
#' Aze, T., T. H. G. Ezard, A. Purvis, H. K. Coxall, D. R. M. Stewart,
#' B. S. Wade, and P. N. Pearson. 2011. A phylogeny of Cenozoic macroperforate
#' planktonic foraminifera from fossil data. \emph{Biological Reviews} 86(4):900-927.
#' @references
#'
#' This dataset has been used or referenced in a number of works, including:
#'
#' Aze, T., T. H. G. Ezard, A. Purvis, H. K. Coxall, D. R. M. Stewart, B. S. Wade, and P. N. Pearson. 2013.
#' Identifying anagenesis and cladogenesis in the fossil record.
#' \emph{Proceedings of the National Academy of Sciences} 110(32):E2946-E2946.
#'
#' Ezard, T. H. G., T. Aze, P. N. Pearson, and A. Purvis. 2011. Interplay Between Changing Climate and Species'
#' Ecology Drives Macroevolutionary Dynamics. \emph{Science} 332(6027):349-351.
#'
#' Ezard, T. H. G., P. N. Pearson, T. Aze, and A. Purvis. 2012. The meaning of birth and death (in
#' macroevolutionary birth-death models). \emph{Biology Letters} 8(1):139-142.
#'
#' Ezard, T. H. G., G. H. Thomas, and A. Purvis. 2013. Inclusion of a near-complete fossil record reveals
#' speciation-related molecular evolution. \emph{Methods in Ecology and Evolution} 4(8):745-753.
#'
#' Strotz, L. C., and A. P. Allen. 2013. Assessing the role of cladogenesis in macroevolution by integrating
#' fossil and molecular evidence. \emph{Proceedings of the National Academy of Sciences} 110(8):2904-2909.
#'
#' Strotz, L. C., and A. P. Allen. 2013. Reply to Aze et al.: Distinguishing speciation modes based on
#' multiple lines of evidence. \emph{Proceedings of the National Academy of Sciences} 110(32):E2947-E2947.
#'
#' @keywords datasets
#' @docType data
#' @examples
#'
#' # Following Text Reproduced from Aze et al. 2011's Supplemental Material
#' # Appendix S5
#' #
#' # 'Data required to produce all of the phylogenies included in the manuscript
#' # using paleoPhylo (Ezard & Purvis, 2009) a free software package to draw
#' # paleobiological phylogenies in R.'
#' #
#' # 'The four tabs hold different versions of our phylogeny:
#' # aMb: fully bifurcating morphospecies phylogeny
#' # aM: budding/bifurcating morphospecies phylogeny
#' # aLb: fully bifurcating lineage phylogeny
#' # aL: budding/bifurcating lineage phylogeny
#' #
#' # 'Start Date gives the first occurence of the species according
#' # to the particular phylogeny; End Date gives the last occurence
#' # according to the particular phylogeny.'
#'
#' \dontrun{
#'
#' #load the data
#' #given in supplemental as XLS sheets
#' #converted to separate tab-delimited text files
#'
#' # aM: budding/bifurcating morphospecies phylogeny
#' foramAM<-read.table(file.choose(),stringsAsFactors=FALSE,header=TRUE)
#' # aL: budding/bifurcating lineage phylogeny
#' foramAL<-read.table(file.choose(),stringsAsFactors=FALSE,header=TRUE)
#' # aMb: fully bifurcating morphospecies phylogeny
#' foramAMb<-read.table(file.choose(),stringsAsFactors=FALSE,header=TRUE)
#' # aLb: fully bifurcating lineage phylogeny
#' foramALb<-read.table(file.choose(),stringsAsFactors=FALSE,header=TRUE)
#'
#' save.image("macroperforateForam.rdata")
#'
#' }
#'
#' #instead, we'll just load the data directly
#' data(macroperforateForam)
#'
#' #Two distinctions among the four datasets:
#' #(1): morphospecies vs morphospecies combined into sequences of anagenetic
#' # morphospecies referred to as 'lineages'. There are thus far more morphospecies
#' # than lineages. The names of lineages are given as the sequence of
#' # their respective component morphospecies.
#' #(2): Datasets where taxon units (morphospecies or lineages) are broken up
#' # at 'budding' branching events (where the ancestral taxon persists)
#' # so that final dataset is 'fully bifurcating', presumably
#' # to make comparison easier to extant-taxon only datasets.
#' # (This isn't a limitation for paleotree, though!).
#' # This division of taxon units requires abstracting the taxon IDs,
#' # requiring another column for Species Name.
#'
#' dim(foramAM)
#' dim(foramAL)
#' dim(foramAMb)
#' dim(foramALb)
#'
#' #Need to convert these to same format as simFossilTaxa output.
#' #simFossilTaxa output has 6 columns...
#' #taxon.id ancestor.id orig.time ext.time still.alive looks.like
#'
#' #for the purposes of this, we'll make taxon.id=looks.like
#' # (That's only for simulating cryptic speciation anyway)
#' #still.alive should be TRUE (1) if ext.time=0
#'
#' #a function to convert Aze et al's suppmat to paleotree-readable format
#'
#' createTaxaData<-function(table){
#' #reorder table by first appearance time
#' table<-table[order(-as.numeric(table[,3])),]
#' ID<-1:nrow(table)
#' anc<-sapply(table[,2],function(x)
#' if(!is.na(x)){
#' which(x==table[,1])
#' }else{ NA })
#' stillAlive<-as.numeric(table[,4]==0)
#' ages<-cbind(as.numeric(table[,3]),as.numeric(table[,4]))
#' res<-cbind(ID,anc,ages,stillAlive,ID)
#' colnames(res)<-c('taxon.id','ancestor.id','orig.time',
#' 'ext.time','still.alive','looks.like')
#' rownames(res)<-table[,1]
#' return(res)
#' }
#'
#' taxaAM<-createTaxaData(foramAM)
#' taxaAMb<-createTaxaData(foramAMb)
#' taxaAL<-createTaxaData(foramAL)
#' taxaALb<-createTaxaData(foramALb)
#'
#' ##################################
#'
#' #Checking Ancestor-Descendant Relationships for Irregularities
#'
#' #For each of these, there should only be a single taxon
#' # without a parent listed (essentially, the root ancestor)
#'
#' countParentsWithoutMatch<-function(table){
#' parentMatch<-match(unique(table[,2]),table[,1])
#' sum(is.na(parentMatch))
#' }
#'
#' #test this on the provided ancestor-descendant relationships
#' countParentsWithoutMatch(foramAM)
#' countParentsWithoutMatch(foramAL)
#' countParentsWithoutMatch(foramAMb)
#' countParentsWithoutMatch(foramALb)
#'
#' #and on the converted datasets
#' countParentsWithoutMatch(taxaAM)
#' countParentsWithoutMatch(taxaAL)
#' countParentsWithoutMatch(taxaAMb)
#' countParentsWithoutMatch(taxaALb)
#'
#' \donttest{
#'
#' #can construct the parentChild2taxonTree
#' #using the ancestor-descendant relationships
#'
#' #can be very slow...
#'
#' treeAM<-parentChild2taxonTree(foramAM[,2:1])
#' treeAL<-parentChild2taxonTree(foramAL[,2:1])
#' treeAMb<-parentChild2taxonTree(foramAMb[,2:1])
#' treeALb<-parentChild2taxonTree(foramALb[,2:1])
#'
#' layout(matrix(1:4,2,2))
#' plot(treeAM,main='treeAM',show.tip.label=FALSE)
#' plot(treeAL,main='treeAL',show.tip.label=FALSE)
#' plot(treeAMb,main='treeAMb',show.tip.label=FALSE)
#' plot(treeALb,main='treeALb',show.tip.label=FALSE)
#'
#' # FYI
#' # in case you were wondering
#' # you would *not* time-scale these Frankenstein monsters
#'
#' }
#'
#' ###########################################
#'
#' # Checking stratigraphic ranges
#'
#' # do all first occurrence dates occur before last occurrence dates?
#' # we'll check the original datasets here
#'
#' checkFoLo<-function(data){
#' diffDate<-data[,3]-data[,4] #subtract LO from FO
#' isGood<-all(diffDate>=0) #is it good
#' return(isGood)
#' }
#'
#' checkFoLo(foramAM)
#' checkFoLo(foramAL)
#' checkFoLo(foramAMb)
#' checkFoLo(foramALb)
#'
#' #cool, but do all ancestors appear before their descendants?
#' # easier to check unified simFossilTaxa format here
#'
#' checkAncOrder<-function(taxa){
#' #get ancestor's first occurrence
#' ancFO<-taxa[taxa[,2],3]
#' #get descendant's first occurrence
#' descFO<-taxa[,3]
#' diffDate<-ancFO-descFO #subtract descFO from ancFO
#' #remove NAs due to root taxon
#' diffDate<-diffDate[!is.na(diffDate)]
#' isGood<-all(diffDate>=0) #is it all good
#' return(isGood)
#' }
#'
#' checkAncOrder(taxaAM)
#' checkAncOrder(taxaAL)
#' checkAncOrder(taxaAMb)
#' checkAncOrder(taxaALb)
#'
#' #now, are there gaps between the last occurrence of ancestors
#' # and the first occurrence of descendants?
#' # (shall we call these 'stratophenetic ghost branches'?!)
#' # These shouldn't be problematic, but do they occur in this data?
#' # After all, simFossilTaxa output was designed for fully observed
#' #simulated fossil records with no gaps.
#'
#' sumAncDescGap<-function(taxa){
#' #get ancestor's last occurrence
#' ancLO<-taxa[taxa[,2],4]
#' #get descendant's first occurrence
#' descFO<-taxa[,3]
#' diffDate<-ancLO-descFO #subtract descFO from ancFO
#' #remove NAs due to root taxon
#' diffDate<-diffDate[!is.na(diffDate)]
#' #should be negative or zero, positive values are gaps
#' gaps<-c(0,diffDate[diffDate>0])
#' sumGap<-sum(gaps)
#' return(sumGap)
#' }
#'
#' #get the total gap between ancestor LO and child FO
#' sumAncDescGap(taxaAM)
#' sumAncDescGap(taxaAL)
#' sumAncDescGap(taxaAMb)
#' sumAncDescGap(taxaALb)
#'
#' #It appears there are *no* gaps between ancestors and their descendants
#' #in the Aze et al. foram dataset... wow!
#'
#' ###############
#'
#' \donttest{
#'
#' # Creating time-scaled phylogenies from the Aze et al. data
#'
#' # Aze et al. (2011) defines anagenesis such that taxa may overlap
#' # in time during a transitional period (see Ezard et al. 2012
#' # for discussion of this definition). Thus, we would expect that
#' # paleotree obtains very different trees for morphospecies versus
#' # lineages, but very similar phylogenies for datasets where budding
#' # taxa are retained or arbitrarily broken into bifurcating units.
#'
#' # We can use the function taxa2phylo to directly create
#' # time-scaled phylogenies from the Aze et al. stratophenetic data
#'
#' timetreeAM<-taxa2phylo(taxaAM)
#' timetreeAL<-taxa2phylo(taxaAL)
#' timetreeAMb<-taxa2phylo(taxaAMb)
#' timetreeALb<-taxa2phylo(taxaALb)
#'
#' layout(matrix(1:4,2,2))
#' plot(timetreeAM,main='timetreeAM',show.tip.label=FALSE)
#' axisPhylo()
#' plot(timetreeAL,main='timetreeAL',show.tip.label=FALSE)
#' axisPhylo()
#' plot(timetreeAMb,main='timetreeAMb',show.tip.label=FALSE)
#' axisPhylo()
#' plot(timetreeALb,main='timetreeALb',show.tip.label=FALSE)
#' axisPhylo()
#'
#' #visually compare the two pairs we expect to be close to identical
#'
#' #morpospecies
#' layout(1:2)
#' plot(timetreeAM,main='timetreeAM',show.tip.label=FALSE)
#' axisPhylo()
#' plot(timetreeAMb,main='timetreeAMb',show.tip.label=FALSE)
#' axisPhylo()
#'
#' #lineages
#' layout(1:2)
#' plot(timetreeAL,main='timetreeAL',show.tip.label=FALSE)
#' axisPhylo()
#' plot(timetreeALb,main='timetreeALb',show.tip.label=FALSE)
#' axisPhylo()
#'
#' layout(1)
#'
#' #compare the summary statistics of the trees
#' Ntip(timetreeAM)
#' Ntip(timetreeAMb)
#' Ntip(timetreeAL)
#' Ntip(timetreeALb)
#' # very different!
#'
#' # after dropping anagenetic zero-length-terminal-edge ancestors
#' # we would expect morphospecies and lineage phylogenies to be very similar
#'
#' #morphospecies
#' Ntip(dropZLB(timetreeAM))
#' Ntip(dropZLB(timetreeAMb))
#' #identical!
#'
#' #lineages
#' Ntip(dropZLB(timetreeAL))
#' Ntip(dropZLB(timetreeALb))
#' # ah, very close, off by a single tip
#' # ...probably a very short ZLB outside tolerance
#'
#' #we can create some diversity plots to compare
#'
#' multiDiv(data=list(timetreeAM,timetreeAMb),
#' plotMultCurves=TRUE)
#'
#' multiDiv(data=list(timetreeAL,timetreeALb),
#' plotMultCurves=TRUE)
#'
#' # we can see that the morphospecies datasets are identical
#' # that's why we can only see one line
#' # some very slight disagreement between the lineage datasets
#' # around ~30-20 Ma
#'
#' #can also compare morphospecies and lineages diversity curves
#'
#' multiDiv(data=list(timetreeAM,timetreeAL),
#' plotMultCurves=TRUE)
#'
#' #they are similar, but some peaks are missing from lineages
#' # particularly around ~20-10 Ma
#'
#'
#' }
#'
#'
#'
NULL
|
/R/macroperforateForam.R
|
permissive
|
pnovack-gottshall/paleotree
|
R
| false | false | 13,742 |
r
|
###############################
#### Multivariate analysis ####
###############################
library(raster)
library(RStoolbox)
setwd("/Users/federicotossani/lab/")
p224r63_2011<-brick("p224r63_2011_masked.grd")
# brick() is used to load a multi-layer dataset!
# raster(), by contrast, loads one layer at a time!!
p224r63_2011
#plot(p224r63_2011$B1_sre, p224r63_2011$B2_sre)
# the order of the 2 bands depends on the order written in the function call.
# to make the plot nicer we can give it a colour, change the point character and increase the point size
plot(p224r63_2011$B1_sre, p224r63_2011$B2_sre, col="blue", pch=13, cex=2)
# after plotting, a warning says the plot uses 2.2% of the cells: the image has more than 4 million pixels, so they cannot all be drawn
# in statistics this situation is called multicollinearity: the variables are correlated with each other.
# this kind of correlation is easily misread as causal!!! Be careful when correlating two phenomena - e.g. the storks-and-babies example in Germany.
# to plot every possible correlation between all the variables in the dataset, use the pairs function
pairs(p224r63_2011)
# the lower triangle of the matrix holds the scatterplots of all the correlations; the upper triangle holds the Pearson correlation coefficients.
# a positive correlation pushes the coefficient towards 1, a negative one towards -1.
# thanks to pairs we can see how strongly many of the bands are correlated with one another.
# we proceed with the PCA, but first we shrink our data to speed up the computation.
# we use the aggregate function; this process is called resampling
p224r63_2011_res<-aggregate(p224r63_2011, fact=10)
# this step took us from a 30m pixel to a 300m pixel: enlarging the pixel lowers the resolution and the file size.
# to appreciate the difference we can build a panel comparing the two images
par(mfrow=c(2,1))
plotRGB(p224r63_2011,4,3,2, stretch="lin")
plotRGB(p224r63_2011_res,4,3,2, stretch="lin")
# now we apply rasterPCA, which in short takes our stack of data and compresses it into a smaller number of bands
p224r63_2011_pca<-rasterPCA(p224r63_2011_res)
plot(p224r63_2011_pca$map)
# in the plot the map is accessed from the object with $ because, besides the map, rasterPCA also created the model and other components.
summary(p224r63_2011_pca$model)
# summary is a base-R function that produces a summary of the model (in this case)
# the output shows that the first component carries 0.998 of the total variance, so almost all the variability is contained in a single component.
# to get 100% of the variability we would obviously need all the bands, but that is not our goal: we want maximum variability with the minimum number of components.
# the image plots tell the same story! in component 1 we can see everything - forest, agricultural areas, etc. - while component 7 is basically just noise, an image in which the features are hard to distinguish.
p224r63_2011_pca
# what is inside the object?
# the first element is the call, attached to the generated object with $; it is the function call.
# then there is the model, the one inspected with summary.
# then the map, a RasterBrick with a given resolution and the various components.
# now let's make an RGB plot with the 3 principal components
plotRGB(p224r63_2011_pca$map, 1,2,3, stretch="lin")
##################################
# with PCA we generated new components that reduce the initial strong correlation between the bands, and with fewer components we can explain the whole original image.
# multivariate analysis matters when, for example, we fit a linear model (species distribution): temperature variables (min, max, etc.) are all correlated with one another.
# many linear models assume the predictors are uncorrelated. When they are correlated, the model's apparent power inflates, risking good-looking results that are in fact artefacts.
# resume from the 28/04 lecture at min 1:11:00
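# A quick decorrelation check, mirroring the pairs() call on the original
# bands: the principal components should now show near-zero pairwise
# correlations.
pairs(p224r63_2011_pca$map)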
|
/R_code_PCA.r
|
no_license
|
FedericoTossani/Telerilevamento_2021
|
R
| false | false | 4,224 |
r
|
# testplot <- function(meansdf, xvar = "condition", yvar = "means",
# fillvar = "condition") {
# p <- ggplot(meansdf,
# aes_string(x = xvar, y= yvar, fill = fillvar)) +
# geom_bar(position="dodge", stat="identity")
# }
#
#
#
# testplot <- function(meansdf)
# {
# scale <- 0.5
# p <- ggplot(meansdf,
# aes(fill = condition,
# y = means * scale,
# x = condition),
# environment = environment()) # This is the KEY line
# p + geom_bar(position = "dodge", stat = "identity")
# }
#
# ## Now, the following works
# testplot(means)
|
/R/scrub_code.R
|
no_license
|
moj-analytical-services/las_rap_code_library
|
R
| false | false | 642 |
r
|
# testplot <- function(meansdf, xvar = "condition", yvar = "means",
# fillvar = "condition") {
# p <- ggplot(meansdf,
# aes_string(x = xvar, y= yvar, fill = fillvar)) +
# geom_bar(position="dodge", stat="identity")
# }
#
#
#
# testplot <- function(meansdf)
# {
# scale <- 0.5
# p <- ggplot(meansdf,
# aes(fill = condition,
# y = means * scale,
# x = condition),
# environment = environment()) # This is the KEY line
# p + geom_bar(position = "dodge", stat = "identity")
# }
#
# ## Now, the following works
# testplot(means)
|
# This file focuses on PCA and persistent homology for the iris data
library(devtools)
library(TDA)
library(TDAmapper)
library(ggplot2)
library("FactoMineR")
library("factoextra")
# Read data
data <- read.table("data/Iris.csv", header=TRUE, sep=",", dec=".", quote = "\"", na.strings = "NA")
data <- data[, 2:5] # Pick numerical data
# PCA With normalized data
res.pca <- PCA(data, scale.unit = TRUE, ncp = 2, graph = TRUE)
# Visualization
fviz_eig(res.pca, addlabels = TRUE, ylim = c(0, 80))
fviz_pca_ind(res.pca,
geom.ind = "point",
col.ind = iris$Species,
palette = c("#00AFBB", "#E7B800", "#FC4E07"),
addEllipses = TRUE,
legend.title = "Groups")
################################################
# Persistent homology over a grid + different distances
Xlim <- c(min(res.pca$ind$coord[,1]), max(res.pca$ind$coord[,1]))
Ylim <- c(min(res.pca$ind$coord[,2]), max(res.pca$ind$coord[,2]))
by <- 0.05
Xseq <- seq(from = Xlim[1], to = Xlim[2], by = by)
Yseq <- seq(from = Ylim[1], to = Ylim[2], by = by)
Grid <- expand.grid(Xseq, Yseq)
# Distance functions and density estimators
distance <- distFct(X = res.pca$ind$coord, Grid = Grid)
DTM <- dtm(X = res.pca$ind$coord, Grid = Grid, m0 = 0.1)
kNN <- knnDE(X = res.pca$ind$coord, Grid = Grid, k = 60)
KDE <- kde(X = res.pca$ind$coord, Grid = Grid, h = 0.3)
Kdist <- kernelDist(X = res.pca$ind$coord, Grid = Grid, h = 0.3)
#####
# ! # We must change FUN argument depending on selected distance
#####
# Persistent homology example
Diag <- gridDiag(X = res.pca$ind$coord, FUN = kde, lim = cbind(Xlim, Ylim), by = by,
sublevel = FALSE, library = "Dionysus", printProgress = FALSE, h = 0.3)
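# A sketch of the same grid diagram using the distance-to-measure (DTM)
# instead of kde: dtm is small near the data, so we filter by sublevel sets
# (sublevel = TRUE) and pass its smoothing parameter m0 rather than h.
DiagDTM <- gridDiag(X = res.pca$ind$coord, FUN = dtm, lim = cbind(Xlim, Ylim),
                    by = by, sublevel = TRUE, library = "Dionysus",
                    printProgress = FALSE, m0 = 0.1)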
# Bootstrap for band
band <- bootstrapBand(X = res.pca$ind$coord, FUN = kde, Grid = Grid, B = 100,
parallel = FALSE, alpha = 0.1, h = 0.3)
# Plotting persistence diagram
par(mfrow = c(1,3))
plot(res.pca$ind$coord, main = "Iris Sample X")
persp(x = Xseq, y = Yseq,
z = matrix(KDE, nrow = length(Xseq), ncol = length(Yseq)),
xlab = "", ylab = "", zlab = "", theta = -20, phi = 35, scale = FALSE,
expand = 3, col = "red", border = NA, ltheta = 50, shade = 0.9,
main = "Distance Selected : ")
plot(x = Diag[["diagram"]], band = 2 * band[["width"]], main = "KDE Diagram")
|
/R/iris_pca.R
|
no_license
|
bmagnette/AstraZeneca_TDA
|
R
| false | false | 2,337 |
r
|
\name{nonparboot-methods}
\docType{methods}
\alias{nonparboot}
\alias{nonparboot-methods}
\alias{nonparboot,unmarkedFit-method}
\alias{nonparboot,unmarkedFitColExt-method}
\alias{nonparboot,unmarkedFitDS-method}
\alias{nonparboot,unmarkedFitMPois-method}
\alias{nonparboot,unmarkedFitOccu-method}
\alias{nonparboot,unmarkedFitOccuRN-method}
\alias{nonparboot,unmarkedFitPCount-method}
\title{ Nonparametric bootstrapping in unmarked }
\description{
Call \code{nonparboot} on an unmarkedFit to obtain non-parametric
bootstrap samples. These can then be used by \code{vcov} in order to
get bootstrap estimates of standard errors.
}
\section{Methods}{
\describe{
\item{\code{signature(object = "unmarkedFit")}}{ Obtain nonparametric
bootstrap samples for a general unmarkedFit. }
\item{\code{signature(object = "unmarkedFitColExt")}}{ Obtain nonparametric
bootstrap samples for colext fits. }
\item{\code{signature(object = "unmarkedFitDS")}}{ Obtain nonparametric
bootstrap samples for a distsamp fits. }
\item{\code{signature(object = "unmarkedFitMPois")}}{ Obtain nonparametric
bootstrap samples for a distsamp fits. }
\item{\code{signature(object = "unmarkedFitOccu")}}{ Obtain nonparametric
bootstrap samples for a occu fits. }
\item{\code{signature(object = "unmarkedFitOccuRN")}}{ Obtain nonparametric
bootstrap samples for a occuRN fits. }
\item{\code{signature(object = "unmarkedFitPCount")}}{ Obtain nonparametric
bootstrap samples for a pcount fits. }
}}
\details{
Calling \code{nonparboot} on an unmarkedFit returns the original
unmarkedFit, with the bootstrap samples added on. Then subsequent
calls to \code{\link{vcov}} with the argument
\code{method="nonparboot"} will use these bootstrap samples.
Additionally, standard errors of derived estimates from either
\code{\link{linearComb}} or \code{\link{backTransform}} can be
instructed to use bootstrap samples by providing the argument
\code{method = "nonparboot"}.
}
\examples{
data(ovendata)
ovenFrame <- unmarkedFrameMPois(ovendata.list$data,
siteCovs=as.data.frame(scale(ovendata.list$covariates[,-1])), type = "removal")
(fm <- multinomPois(~ 1 ~ ufp + trba, ovenFrame))
fm <- nonparboot(fm, B = 20) # should use larger B in real life.
vcov(fm, method = "hessian")
vcov(fm, method = "nonparboot")
avg.abundance <- backTransform(linearComb(fm, type = "state", coefficients = c(1, 0, 0)))
## Bootstrap sample information propagates through to derived quantities.
vcov(avg.abundance, method = "hessian")
vcov(avg.abundance, method = "nonparboot")
SE(avg.abundance, method = "nonparboot")
}
\keyword{methods}
|
/man/nonparboot-methods.Rd
|
no_license
|
buco/unmarked
|
R
| false | false | 2,621 |
rd
|
# Read in files
rawdata<- read.csv("MERGED2016_17_PP.csv",stringsAsFactors = F)
dim(rawdata)
rankingdata<- read.csv("ForbesRank.csv")
dim(rankingdata)
variables<- c("UNITID","OPEID","OPEID6","INSTNM","CITY","STABBR","ZIP","INSTURL","LATITUDE",
"LONGITUDE","ADM_RATE","SATVR25","SATVR75","SATMT25","SATMT75",
"SATVRMID","SATMTMID","ACTCM25","ACTCM75","ACTEN25",
"ACTEN75","ACTMT25","ACTMT75","ACTCMMID","ACTENMID",
"ACTMTMID","DISTANCEONLY", "CURROPER", "SAT_AVG","PCIP01","PCIP03","PCIP04","PCIP05",
"PCIP09","PCIP10","PCIP11","PCIP12","PCIP13","PCIP14" ,"PCIP15",
"PCIP16","PCIP19","PCIP22","PCIP23","PCIP24","PCIP25","PCIP26","PCIP27",
"PCIP29","PCIP30","PCIP31","PCIP38","PCIP39","PCIP40","PCIP41","PCIP42",
"PCIP43","PCIP44","PCIP45","PCIP46","PCIP47","PCIP48","PCIP49","PCIP50",
"PCIP51","PCIP52","PCIP54","COSTT4_A","UGDS_MEN","UGDS_WOMEN",
"UGDS_WHITE", "UGDS_BLACK","UGDS_HISP", "UGDS_ASIAN","UGDS_AIAN","UGDS_NHPI",
"UGDS","TUITIONFEE_IN","TUITIONFEE_OUT","CONTROL","LOCALE","HIGHDEG")
rawdata1<- rawdata[,variables]
dim(rawdata1)
# Merge with ranking data
merged<- merge(rawdata1,rankingdata,by.x = "INSTNM",by.y = "Name")
dim(merged)
# replace "NULL" strings with NA (vectorized; equivalent to a cell-by-cell
# double loop but far faster, and safe when a cell is already NA)
merged[merged == "NULL"] <- NA
# Remove NA values
merged_cleaned<- na.omit(merged)
dim(merged_cleaned)
clean<- merged_cleaned
# Add Citytype, Schooltype, Highest degree
clean$Citytype<- NA
for (i in 1:nrow(clean)) {
if (clean$LOCALE[i]==11 | clean$LOCALE[i]== 12 | clean$LOCALE[i]==13) {
clean$Citytype[i]<-"City"
}
else if (clean$LOCALE[i]==21 | clean$LOCALE[i]== 22 | clean$LOCALE[i]==23) {
clean$Citytype[i]<- "Suburb"
}
else if (clean$LOCALE[i]==31 | clean$LOCALE[i]== 32 | clean$LOCALE[i]==33) {
clean$Citytype[i]<- "Town"
}
else if (clean$LOCALE[i]==41 | clean$LOCALE[i]== 42 | clean$LOCALE[i]==43){
clean$Citytype[i]<- "Rural"
}
}
clean$Schooltype<- NA
for (i in 1:nrow(clean)) {
if (clean$CONTROL[i]==1){
clean$Schooltype[i]<-"Public"}
else if (clean$CONTROL[i]==2){
clean$Schooltype[i]<- "Private for-profit"}
else if (clean$CONTROL[i]==3){
clean$Schooltype[i]<-"Private nonprofit"}
}
clean$Highestdegree<- NA
for (i in 1:nrow(clean)) {
if (clean$HIGHDEG[i]==4){
clean$Highestdegree[i]<- "Graduate degree"
}
else if (clean$HIGHDEG[i]==3){
clean$Highestdegree[i]<- "Bachelor’s degree"
}
else if (clean$HIGHDEG[i]==2){
clean$Highestdegree[i]<- "Associate’s degree"
}
else if (clean$HIGHDEG[i]==1){
clean$Highestdegree[i]<- "Certificate"
}
}
###Complete the URLs
clean$url<- NA
for(i in 1:nrow(clean)){
if (grepl('http',clean$INSTURL[i])==FALSE){
clean$url[i]<- paste("https://",clean$INSTURL[i],sep="")
}
else{
clean$url[i]<- clean$INSTURL[i]
}
}
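# the loop above can be written as one vectorized ifelse(); a sketch that
# also verifies it matches the loop's result:
url_vec <- ifelse(grepl('http', clean$INSTURL), clean$INSTURL,
                  paste0("https://", clean$INSTURL))
stopifnot(identical(url_vec, clean$url))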
# Write out new csv file
write.csv(clean, file = "cleanedData.csv")
|
/R Shiny-College Recommendation System/doc/Interactive map/DataCleaning.R
|
no_license
|
stella-chen28/My-repository
|
R
| false | false | 3,041 |
r
|
##############################################
#
# General description:
#
# The following script loads standard packages and functions to be used in
# the L1polymORF project
# Input: .R files in the GeneralRFunctions and Functions folders
#
# Output: the functions defined there, sourced into the global environment
#
##############################################
########################################
# #
# Source packages and set parameters #
# #
########################################
# Load packages
# library(seqinr)
# library(ape)
# Source all functions from GeneralRFunctions folder
AllFunctions <- list.files(path = "D:/OneDrive - American University of Beirut/GeneralRFunctions/",
pattern = "\\.[rR]", full.names = T)
sapply(AllFunctions, source)
# Source all functions from Functions folder
AllFunctions <- list.files(path = "D:/L1polymORFgit/Functions/",
pattern = "\\.[rR]", full.names = T)
sapply(AllFunctions, source)
|
/Scripts/_Start_L1polymORF.R
|
no_license
|
hdohna/L1polymORFgit
|
R
| false | false | 1,014 |
r
|
library(spatstat)
### Name: lohboot
### Title: Bootstrap Confidence Bands for Summary Function
### Aliases: lohboot
### Keywords: spatial nonparametric
### ** Examples
p <- lohboot(simdat, stoyan=0.5)
plot(p)
|
/data/genthat_extracted_code/spatstat/examples/lohboot.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 220 |
r
|
# Load the packages we plan to use
library(magrittr)
library(dplyr)
library(tidyr)
library(reshape2)
library(ggplot2)
library(shiny)
# Read the data
read.csv("CSV data/K12headcount_salary.csv", header=TRUE, strip.white = TRUE) -> staffing
# Checking the numbers in K12headcount_salary.csv vs. K12headcount_school.csv
# They are *close*, but not exactly the same
staffing %>%
filter(SUName == "Burlington SD") %>%
group_by(SY) %>%
summarise(FTESum=sum(TOTFTE),SALSum=sum(SAL),BenSum=sum(BEN),SubClass="Total") %>%
mutate(maxFTE=max(FTESum),FTERatio=FTESum/maxFTE) -> TotStaff
print(TotStaff)
read.csv("CSV data/K12headcount_school.csv", header=TRUE, strip.white = TRUE) %>%
filter(SUName == "Burlington SD") %>%
group_by(Fiscal.Year) %>%
summarise(EnrollmentSum=sum(Enrollment),
K12FTESum=sum(K12FTE),
TotStaffFTESum=sum(TotStaffFTE))
# Total staff
staffing %>%
filter(SUName == "Burlington SD") %>%
filter(SubClass!="#N/A") %>%
filter(SubClass!="Other") %>%
group_by(SY,SubClass) %>%
summarise(FTESum=sum(TOTFTE),SALSum=sum(SAL),BenSum=sum(BEN))%>%
mutate(maxFTE=max(FTESum),FTERatio=FTESum/maxFTE) %>%
ggplot(aes(x=SY, y=FTESum, color=SubClass)) + geom_line() +
labs(x="School Year",y="FTE Staff") + scale_x_continuous(breaks = seq(2008,2017,2))
# Staff ratio to maximum year
staffing %>%
filter(SUName == "Burlington SD") %>%
filter(SubClass!="#N/A") %>%
filter(SubClass!="Other") %>%
group_by(SubClass,SY) %>%
summarise(FTESum=sum(TOTFTE),SALSum=sum(SAL),BenSum=sum(BEN))%>%
mutate(maxFTE=max(FTESum),FTERatio=FTESum/maxFTE)%>%
bind_rows(TotStaff) %>%
ggplot(aes(x=SY, y=FTERatio, color=SubClass)) + geom_line() +
labs(x="School Year",y="FTE Staff") + scale_x_continuous(breaks = seq(2008,2017,2))
# Salary
staffing %>%
filter(SUName == "Burlington SD") %>%
filter(SubClass!="#N/A") %>%
filter(SubClass!="Other") %>%
group_by(SubClass,SY) %>%
summarise(FTESum=sum(TOTFTE),SalSum=sum(SAL),BenSum=sum(BEN))%>%
mutate(maxFTE=max(FTESum),FTERatio=FTESum/maxFTE,SalRatio=SalSum/FTESum)%>%
ggplot(aes(x=SY, y=SalSum, color=SubClass)) + geom_line() +
labs(x="School Year",y="Salary") + scale_x_continuous(breaks = seq(2008,2017,2))
# Salary per FTE
staffing %>%
filter(SUName == "Burlington SD") %>%
filter(SubClass!="#N/A") %>%
filter(SubClass!="Other") %>%
group_by(SubClass,SY) %>%
summarise(FTESum=sum(TOTFTE),SalSum=sum(SAL),BenSum=sum(BEN))%>%
mutate(maxFTE=max(FTESum),FTERatio=FTESum/maxFTE,SalRatio=SalSum/FTESum)%>%
ggplot(aes(x=SY, y=SalRatio, color=SubClass)) + geom_line() +
labs(x="School Year",y="Salary per FTE") + scale_x_continuous(breaks = seq(2008,2017,2))
# Benefits
staffing %>%
filter(SUName == "Burlington SD") %>%
filter(SubClass!="#N/A") %>%
filter(SubClass!="Other") %>%
group_by(SubClass,SY) %>%
summarise(FTESum=sum(TOTFTE),SalSum=sum(SAL),BenSum=sum(BEN))%>%
mutate(FTERatio=FTESum/max(FTESum),SalRatio=SalSum/FTESum,BenRatio=BenSum/FTESum)%>%
ggplot(aes(x=SY, y=BenSum, color=SubClass)) + geom_line() +
labs(x="School Year",y="Benefits") + scale_x_continuous(breaks = seq(2008,2017,2))
# Benefits per FTE
staffing %>%
filter(SUName == "Burlington SD") %>%
filter(SubClass!="#N/A") %>%
filter(SubClass!="Other") %>%
group_by(SubClass,SY) %>%
summarise(FTESum=sum(TOTFTE),SalSum=sum(SAL),BenSum=sum(BEN))%>%
mutate(FTERatio=FTESum/max(FTESum),SalRatio=SalSum/FTESum,BenRatio=BenSum/FTESum)%>%
ggplot(aes(x=SY, y=BenRatio, color=SubClass)) + geom_line() +
labs(x="School Year",y="Benefits per FTE") + scale_x_continuous(breaks = seq(2008,2017,2))
|
/staffing.r
|
no_license
|
AlexReutter/VT-Schools
|
R
| false | false | 3,634 |
r
|
setwd("U:/gwdata")
rm(list=ls())
library(xlsx)
library(sandwich)
source('R/goos.r')
source('R/feval.r')
source('R/supfun.r')
#############################################################
############## Goyal-Welch 2008 Data #######################
#############################################################
# sheet 1: monthly data, 1704 obs, 18 col
# sheet 2: quarterly data, 568 obs, 22 col
# sheet 3: annual data, 142 obs, 21 col
# This exercise follows Rapach, Strauss and Guo 2010 RFS results
gw <- read.xlsx2("Data/gw.xlsx", sheetIndex=2,colClasses=rep('numeric',22))
# RSG uses quarterly data from 1947:Q1 to 2005:Q4, they use 3 OOS periods
# OOS-1: P = 164, 1965:Q1 - 2005:Q4
# OOS-2: P = 120, 1976:Q1 - 2005:Q4
# OOS-3: P = 24, 2000:Q1 - 2005:Q4
dta <- gw[305:540,] #1947.1 - 2005.4, Zhou and Rapach Sample
#dta <- gw[305:568,] #1947.1 - 2012.4, New Sample
n <- nrow(dta) # 236 obs
P<-164
dta.ret <- ts(gw[304:540,])
# dta.ret <- ts(gw[304:568,]) # New Sample
ret <- (dta.ret[,'Index'] + dta.ret[,'D12'])/lag(dta.ret[,'Index'],k=-1)-1
e.ret <- log1p(ret)-log1p(dta$Rfree) # excess return
dy <- log(dta.ret[,'D12'])-log(lag(dta.ret[,'Index'],-1))
dp<-(log(dta$D12)-log(dta$Index)) # log dividend price ratio
ep<-(log(dta$E12)-log(dta$Index)) # log earnings price ratio
de<-(log(dta$D12)-log(dta$E12)) #log dividend payout ratio
svar<-dta$svar # stock variance
csp<-dta$csp # cross sectional premium, NaN end of sample
bm<-dta$b.m # book to market ratio
ntis<-dta$ntis # net equity expansion
tbl<-dta$tbl # short term treasury bills
ltr<-dta$ltr # long term government bond return
lty<-dta$lty # long term government bond yield
tms<-(dta$lty-dta$tbl) # term spread or slope of yield curve
dfy<-(dta$AAA-dta$BAA) # Default Yield Spread or Default premium
dfr<-(dta$corpr-dta$lty) # default return spread
infl<-dta$infl # inflation
elag1<-e.ret[-n]
e.ret <- e.ret[-1]
dy<-dy[-n]
dp<-dp[-n]
ep<-ep[-n]
de<-de[-n]
svar<-svar[-n]
csp<-csp[-n]
bm<-bm[-n]
ntis<-ntis[-n]
tbl<-tbl[-n]
ltr<-ltr[-n]
lty<-lty[-n]
tms<-tms[-n]
dfy<-dfy[-n]
dfr<-dfr[-n]
infl<-infl[-n]
theta <- 1 # Stock-Watson combination discount factor
feval(y=e.ret,X=dy,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=dp,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=ep,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=de,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=svar,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=bm,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=ntis,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=tbl,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=ltr,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=lty,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=tms,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=dfy,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=dfr,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=infl,P=P, theta=theta,Window='recursive')$mat
##### Individual Cumulative Square Forecast Error Comparison relative to stable historical average benchmark #####
method <- 'CV'
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=dy,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('dy',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=dp,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('dp',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=ep,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('ep',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=de,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('de',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=svar,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('svar',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=bm,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('bm',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=ntis,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('ntis',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=tbl,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('tbl',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=ltr,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('ltr',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=lty,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('lty',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=tms,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('tms',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=dfy,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('dfy',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=dfr,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('dfr',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=infl,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('infl',method));abline(h=0,col=2,lty=2)
##### Double Combination (equal weight across models) Cumulative Square Forecast Error Comparison relative to stable historical average benchmark #####
method <- 'Cp'
m <- 14
w <- 1/m
m1 <- feval(y=e.ret,X=dy,P=P)$forecast[,method]*w
m2 <- feval(y=e.ret,X=dp,P=P)$forecast[,method]*w
m3 <- feval(y=e.ret,X=ep,P=P)$forecast[,method]*w
m4 <- feval(y=e.ret,X=de,P=P)$forecast[,method]*w
m5 <- feval(y=e.ret,X=svar,P=P)$forecast[,method]*w
m6 <- feval(y=e.ret,X=bm,P=P)$forecast[,method]*w
m7 <- feval(y=e.ret,X=ntis,P=P)$forecast[,method]*w
m8 <- feval(y=e.ret,X=tbl,P=P)$forecast[,method]*w
m9 <- feval(y=e.ret,X=ltr,P=P)$forecast[,method]*w
m10 <- feval(y=e.ret,X=lty,P=P)$forecast[,method]*w
m11 <- feval(y=e.ret,X=tms,P=P)$forecast[,method]*w
m12 <- feval(y=e.ret,X=dfy,P=P)$forecast[,method]*w
m13 <- feval(y=e.ret,X=dfr,P=P)$forecast[,method]*w
m14 <- feval(y=e.ret,X=infl,P=P)$forecast[,method]*w
f.combo <- m1+m2+m3+m4+m5+m6+m7+m8+m9+m10+m11+m12+m13+m14
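# The fourteen m1..m14 lines above can be collapsed; a sketch of an
# equivalent construction (assumes feval() is deterministic for fixed inputs):
preds <- list(dy, dp, ep, de, svar, bm, ntis, tbl, ltr, lty, tms, dfy, dfr, infl)
f.combo.alt <- Reduce(`+`, lapply(preds, function(v)
  feval(y = e.ret, X = v, P = P)$forecast[, method])) / length(preds)
stopifnot(isTRUE(all.equal(f.combo, f.combo.alt)))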
T <- length(e.ret)
R <- T - P
yp <- e.ret[(R+1):T]
sfe.combo <- (yp - f.combo)^2
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(sfe.combo),col=4,ylab="CSFE Diff",main=paste('D-Combo',method));abline(h=0,col=2,lty=2)
CT.R2 <- 100*(1 - sum(sfe.combo)/sum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])) # CV is better than Cp
CT.R2
|
/R/forecast excess returns.R
|
permissive
|
anwenyin/ooscombo
|
R
| false | false | 6,821 |
r
|
setwd("U:/gwdata")
rm(list=ls())
library(xlsx)
library(sandwich)
source('R/goos.r')
source('R/feval.r')
source('R/supfun.r')
#############################################################
############## Goyal-Welch 2008 Data #######################
#############################################################
# sheet 1: monthly data, 1704 obs, 18 col
# sheet 2: quarterly data, 568 obs, 22 col
# sheet 3: annual data, 142 obs, 21 col
# This exercise follows Rapach, Strauss and Guo 2010 RFS results
gw <- read.xlsx2("Data/gw.xlsx", sheetIndex=2,colClasses=rep('numeric',22))
# RSG uses quarterly data from 1947:Q1 to 2005:Q4, they use 3 OOS periods
# OOS-1: P = 164, 1965:Q1 - 2005:Q4
# OOS-2: P = 120, 1976:Q1 - 2005:Q4
# OOS-3: P = 24, 2000:Q1 - 2005:Q4
dta <- gw[305:540,] #1947.1 - 2005.4, Zhou and Rapach Sample
#dta <- gw[305:568,] #1947.1 - 2012.4, New Sample
n <- nrow(dta) # 236 obs
P<-164
dta.ret <- ts(gw[304:540,])
# dta.ret <- ts(gw[304:568,]) # New Sample
ret <- (dta.ret[,'Index'] + dta.ret[,'D12'])/lag(dta.ret[,'Index'],k=-1)-1
e.ret <- log1p(ret)-log1p(dta$Rfree) # excess return
dy <- log(dta.ret[,'D12'])-log(lag(dta.ret[,'Index'],-1))
dp<-(log(dta$D12)-log(dta$Index)) # log divident price ratio
ep<-(log(dta$E12)-log(dta$Index)) # log earnings price ratio
de<-(log(dta$D12)-log(dta$E12)) #log dividend payout ratio
svar<-dta$svar # stock variance
csp<-dta$csp # cross sectional premium, NaN end of sample
bm<-dta$b.m # book to market ratio
ntis<-dta$ntis # net equity expansiongw
tbl<-dta$tbl # short term treasury bills
ltr<-dta$ltr
lty<-dta$lty # long term government bond yield
tms<-(dta$lty-dta$tbl) # term spread or slope of yield curve
dfy<-(dta$AAA-dta$BAA) # Default Yield Spread or Default premium
dfr<-(dta$corpr-dta$lty) # default return spread
infl<-dta$infl # inflation
elag1<-e.ret[-n]
e.ret <- e.ret[-1]
dy<-dy[-n]
dp<-dp[-n]
ep<-ep[-n]
de<-de[-n]
svar<-svar[-n]
csp<-csp[-n]
bm<-bm[-n]
ntis<-ntis[-n]
tbl<-tbl[-n]
ltr<-ltr[-n]
lty<-lty[-n]
tms<-tms[-n]
dfy<-dfy[-n]
dfr<-dfr[-n]
infl<-infl[-n]
theta <- 1 # Stock-Watson combination discount factor
feval(y=e.ret,X=dy,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=dp,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=ep,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=de,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=svar,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=bm,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=ntis,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=tbl,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=ltr,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=lty,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=tms,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=dfy,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=dfr,P=P, theta=theta,Window='recursive')$mat
feval(y=e.ret,X=infl,P=P, theta=theta,Window='recursive')$mat
##### Indivisual Cumulative Square Forecast Error Comparison relative to stable historical average benchmark #####
method <- 'CV'
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=dy,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('dy',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=dp,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('dp',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=ep,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('ep',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=de,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('de',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=svar,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('svar',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=bm,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('bm',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=ntis,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('ntis',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=tbl,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('tbl',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=ltr,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('ltr',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=lty,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('lty',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=tms,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('tms',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=dfy,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('dfy',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=dfr,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('dfr',method));abline(h=0,col=2,lty=2)
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(feval(y=e.ret,X=infl,P=P)$sfe[,method]),col=4,ylab="CSFE Diff",main=paste('infl',method));abline(h=0,col=2,lty=2)
##### Double Combination (equal weight across models) Cumulative Square Forecast Error Comparison relative to stable historical average benchmark #####
method <- 'Cp'
m <- 14
w <- 1/m
m1 <- feval(y=e.ret,X=dy,P=P)$forecast[,method]*w
m2 <- feval(y=e.ret,X=dp,P=P)$forecast[,method]*w
m3 <- feval(y=e.ret,X=ep,P=P)$forecast[,method]*w
m4 <- feval(y=e.ret,X=de,P=P)$forecast[,method]*w
m5 <- feval(y=e.ret,X=svar,P=P)$forecast[,method]*w
m6 <- feval(y=e.ret,X=bm,P=P)$forecast[,method]*w
m7 <- feval(y=e.ret,X=ntis,P=P)$forecast[,method]*w
m8 <- feval(y=e.ret,X=tbl,P=P)$forecast[,method]*w
m9 <- feval(y=e.ret,X=ltr,P=P)$forecast[,method]*w
m10 <- feval(y=e.ret,X=lty,P=P)$forecast[,method]*w
m11 <- feval(y=e.ret,X=tms,P=P)$forecast[,method]*w
m12 <- feval(y=e.ret,X=dfy,P=P)$forecast[,method]*w
m13 <- feval(y=e.ret,X=dfr,P=P)$forecast[,method]*w
m14 <- feval(y=e.ret,X=infl,P=P)$forecast[,method]*w
f.combo <- m1+m2+m3+m4+m5+m6+m7+m8+m9+m10+m11+m12+m13+m14
T <- length(e.ret)
R <- T - P
yp <- e.ret[(R+1):T]
sfe.combo <- (yp - f.combo)^2
plot.ts(cumsum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])-cumsum(sfe.combo),col=4,ylab="CSFE Diff",main=paste('D-Combo',method));abline(h=0,col=2,lty=2)
CT.R2 <- 100*(1 - sum(sfe.combo)/sum(feval(y=e.ret,X=0,P=P)$sfe[,'Stable'])) # CV is better than Cp
CT.R2
|
#' Load a stock's options file from S3 into Redshift
#'
#' Creates the per-stock options table if it does not exist, truncates it,
#' and copies the corresponding CSV from S3 into the table.
#' @param stock Character ticker symbol; also used as the Redshift table name.
#' @export
load_redshift <- function(stock) {
rs_conn <- tastytrade::redshift_connect("TASTYTRADE")
if (!RJDBC::dbExistsTable(rs_conn, stock)) {
RJDBC::dbSendUpdate(rs_conn,
paste0("create table if not exists ", stock, "(
symbol varchar(5) not null,
quotedate date,
calliv float(4),
putiv float(4),
meaniv float(4),
callvol float(4),
putvol float(4),
calloi float(4),
putoi float(4),
open_price float(4),
high_price float(4),
low_price float(4),
close_price float(4),
volume float(4),
type varchar(4),
expiration date,
strike float(4),
last float(4),
bid float(4),
ask float(4),
option_volume float(4),
open_interest float(4),
iv_strike float(4),
delta_strike float(4),
gamma float(4),
theta float(4),
vega float(4),
dte float(4),
exp_type varchar(8),
mid float(4))
distkey (symbol)
sortkey (symbol, quotedate);"))
}
tastytrade::truncate_redshift(rs_conn, stock)
# Use Manage IAM roles on cluster to add the redshift role prior to copy
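  # For reference, a COPY statement of the shape such a helper typically
  # issues (a sketch; the actual SQL lives inside tastytrade::copy_S3_redshift):
  #   COPY <stock> FROM 's3://rds-options-files/<stock>_options.csv'
  #   IAM_ROLE 'arn:aws:iam::<account-id>:role/<redshift-role>'
  #   FORMAT AS CSV;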
tastytrade::copy_S3_redshift(env = "TASTYTRADE", connection = rs_conn,
table_name = stock,
bucket_path = paste0("s3://rds-options-files/", stock, "_options.csv"))
}
|
/R/load_redshift.R
|
no_license
|
IanMadlenya/tastytrade
|
R
| false | false | 1,397 |
r
|
library(tm) ; library(wordcloud2)
library(dplyr)
rmwords <- c("a", "the", "A", "The", "and","but","And", "But",
"is", "are","Is","Are","was", "were","Was", "Were",
"of", "in","this","that", "I", )
# lower-case a character vector and drop the stop words in rmwords
# (renamed from `sub` to avoid masking base::sub)
clean_words <- function(words) {
  words <- tolower(words)
  words[!words %in% tolower(rmwords)]
}
nixon <- read.csv("test_nixon.csv", header = TRUE, stringsAsFactors = FALSE) %>%
  filter(!word %in% rmwords) %>%
  arrange(desc(count)) %>%
  head(200)
clean_words(nixon$word)
obama <- read.csv("test_obama.csv", header = T,stringsAsFactors = F) %>%
filter(!word %in% rmwords) %>%
arrange(desc(count)) %>%
head(200)
roosevelt <- read.csv("test_roosevelt.csv", header = T,stringsAsFactors = F)%>%
filter(!word %in% rmwords) %>%
arrange(desc(count)) %>%
head(200)
tonyb <- read.csv("test_tony.csv",header = T, stringsAsFactors = F) %>%
filter(!word %in% rmwords) %>%
arrange(desc(count)) %>%
head(200)
trump <- read.csv("test_trump.csv", header = T,stringsAsFactors = F)%>%
filter(!word %in% rmwords) %>%
arrange(desc(count)) %>%
head(200)
dfs <- list(nixon, obama, roosevelt, tonyb, trump)
wcs <- c()
# wordcloud2 returns an htmlwidget, so base-graphics settings such as
# par(mfrow = c(3, 2)) do not apply to it
wordcloud2(nixon, size = 1.6)
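# filling the wcs stub above (a sketch): collect one widget per speaker in a
# list, since wordcloud2 widgets must be printed individually
wcs <- lapply(dfs, wordcloud2, size = 1.6)
wcs[[1]]  # render the first cloud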
|
/Linux_Hadoop/Hadoop/unfinished_RWordcloud.R
|
no_license
|
psm9619/SQL-Linux-Hadoop
|
R
| false | false | 1,285 |
r
|
# Moments of a distribution are of interest for data
# Do the data fit my distribution?
# Exercise 1 ####################################################################
X <- rnorm(10000, mean = 1, sd = sqrt(2))
U <- runif(10000, -1 + X, 1 + X)
Y <- X + U
# mean
emp.mean1 <- mean(Y)
emp.mean2 <- mean(X) + mean(U)
print(c(emp.mean1, emp.mean2))
# var
var.y1 <- var(Y)
var.y2 <- var(X) + var(U)
print(c(var.y1, var.y2))
# Exercise 2 ####################################################################
# Poisson distribution: infinite support on the non-negative integers
# Binomial: coin flipped n times; number of successes; support 0 to n
# Bernoulli: 0, 1
# Check for negative probabilities
#
esample <-
function (value,
prob = (rep(1 / (length(value)), length(value))),
g = function(x)
x) {
    if (length(value) != length(prob)){
      stop("Not every value can be matched to a probability")
    }
    # all.equal() returns TRUE or a character string, so wrap it in isTRUE()
    if (!isTRUE(all.equal(sum(prob), 1))) {
      stop("The probabilities must sum to 1")
}
expectation.value <- 0
for (i in 1:length(value)) {
expectation.value <- expectation.value + prob[i] * g(value[i])
      # could vectorize as prob * g(value), but we don't know whether g is a vectorized function
}
return (expectation.value)
}
n <- 10
esample(1:n)
esample(0:n, prob = dpois(0:n, 1))
esample(
0:n,
g = function(x)
x ^ 2
)
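# sanity check (a sketch; assumes the Poisson(1) tail beyond n = 10 is
# negligible): the expectation of Poisson(1) is 1, so the truncated sum
# computed by esample should be within about 1e-7 of it
stopifnot(isTRUE(all.equal(esample(0:n, prob = dpois(0:n, 1)), 1,
                           tolerance = 1e-6)))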
# Exercise 3 ####################################################################
rtnorm <- function (n, mean, sd, lower, upper) {
samp <- rnorm(n = 2 * n, sd = sd, mean = mean)
samp <- samp[samp > lower & samp < upper]
if (length(samp) < n) {
stop("too less observations")
}
return(samp[1:n])
}
set.seed(1234)
test <- rtnorm(10000, 0, 1, 0, Inf)
mean(test)
# Exercise 4 #################################################################################
test.rtnorm <- function(m, n) {
  success <- 0
  for (i in 1:m) {
    # count a run as a success only if rtnorm() did not throw an error
    res <- try(rtnorm(n, 0, 1, 0, Inf), silent = TRUE)
    if (!inherits(res, "try-error")) {
      success <- success + 1
    }
  }
  return (success / m)
}
test.rtnorm(200, 10)
test.rtnorm(2000, 100)
test.rtnorm(20000, 100)
test.rtnorm(200000, 10)
|
/ue8.R
|
no_license
|
rgr02/Programmieren-mit-Statistischer-Software
|
R
| false | false | 2,150 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mergeTerms_Function.R
\name{mergeTerms}
\alias{mergeTerms}
\title{Transform, merge, separate, or delete terms from a corpus}
\usage{
mergeTerms(data.td, term, term.replacement)
}
\arguments{
\item{data.td}{A tidy dataset}
\item{term}{A user identified term}
\item{term.replacement}{A user specified term to replace the original term found in the text}
}
\value{
Permanently alters the corpus by replacing all instances of \code{term} with \code{term.replacement}
}
\description{
Manipulate the content of text that will be analyzed. Merge like terms into a single term, replace names
found in text, and remove content deemed irrelevant.
}
\details{
Supplied a search term and a replacement term, this function seeks
to identify each instance of the search term within the corpus and replace
it with the replacement term. Should the user elect to delete a term, instead of replacing it, the
replacement term should be identified as "" within the function. **Warning:
Once a term is deleted from the corpus it cannot be replaced. To return
the term to the analysis, the corpus must be rebuilt to its previous state.
}
\examples{
\donttest{
Articles <- mergeTerms(Articles,"Affordable Care Act","ACA")
Articles <- mergeTerms(Articles,"White House", "White_House")
Articles <- mergeTerms(Articles, "Fox News", "")
}
}
|
/man/mergeTerms.Rd
|
no_license
|
JSmith146/CoRpEx
|
R
| false | true | 1,411 |
rd
|
library(tidyverse)
library(pgirmess)
almost_sas <- function(aov.results){
aov_residuals <- residuals(aov.results)
par(mfrow=c(2,2))
plot(aov.results, which=1)
hist(aov_residuals)
plot(aov.results, which=2)
plot(density(aov_residuals))
}
HDL <- c(54, 43, 38, 30, 61, 53, 35, 34, 39, 46, 50, 35,
61, 41, 44, 47, 33, 29, 59, 35, 34, 74, 50, 65,
44, 65, 62, 53, 51, 49, 49, 42, 35, 44, 37, 38)
age <- c(rep("20 to 29", 12),
rep("40 to 49", 12),
rep("60 to 69", 12))
data <- tibble(age, HDL)
# find ranks
data$rank <- rank(data$HDL, ties.method = "average")
# sum of ranks
data %>%
group_by(age) %>%
summarize_at(vars(rank),
list(name = sum))
# check ANOVA assumptions
almost_sas(lm(HDL ~ age, data = data))
# Kruskal-Wallis
kruskal.test(HDL ~ age, data = data)
# Kruskal-Wallis posthoc
kruskalmc(HDL ~ age, data = data)
|
/15-7 Kruskal-Wallis.R
|
no_license
|
kaylinregan/STA4173
|
R
| false | false | 895 |
r
|
#' Engines of other languages
#'
#' This object controls how to execute the code from languages other than R
#' (when the chunk option \code{engine} is not \code{'R'}). Each component in
#' this object is a function that takes a list of current chunk options
#' (including the source code) and returns a character string to be written into
#' the output.
#' @export
#' @references Usage: \url{http://yihui.name/knitr/objects}
#' @examples knit_engines$get('python'); knit_engines$get('awk')
knit_engines = new_defaults()
wrap_fmt = function(x, lang = '') {
fmt = opts_knit$get('out.format')
tpl = if (fmt %in% c('latex', 'listings', 'sweave')) {
'\\begin{verbatim}\n%s\\end{verbatim}'
} else switch(fmt, html = '<pre class="knitr">%s</pre>',
markdown = str_c('```', lang, '\n%s\n```'),
rst = str_c('::\n\n', indent_block(x), '\n'),
jekyll = str_c('{%% highlight ', if (lang == '') 'text' else lang,
' %%}\n%s\n{%% endhighlight %%}'),
'%s')
sprintf(tpl, str_c(x, collapse = '\n'))
}
## Python
eng_python = function(options) {
code = str_c(options$code, collapse = '\n')
cmd = sprintf('python -c %s', shQuote(code))
out = system(cmd, intern = TRUE)
str_c(wrap_fmt(code, 'python'), '\n', wrap_fmt(out))
}
## Awk: file is the file to read in; awk.opts are other options to pass to awk
eng_awk = function(options) {
code = str_c(options$code, collapse = '\n')
cmd = paste(options$engine, shQuote(code), shQuote(options$file), options$awk.opts)
out = system(cmd, intern = TRUE)
str_c(wrap_fmt(code, 'awk'), '\n', wrap_fmt(out))
}
## C
## Java
knit_engines$set(python = eng_python, awk = eng_awk, gawk = eng_awk)
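## A sketch of how a further engine could be registered through the same API
## (hypothetical 'bash' engine, mirroring eng_python above):
eng_bash = function(options) {
  code = str_c(options$code, collapse = '\n')
  out = system(str_c('bash -c ', shQuote(code)), intern = TRUE)
  str_c(wrap_fmt(code, 'bash'), '\n', wrap_fmt(out))
}
knit_engines$set(bash = eng_bash)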
|
/R/engine.R
|
no_license
|
Honglongwu/knitr
|
R
| false | false | 1,735 |
r
|
source('/datascience/projects/statisticallyfit/github/learningstatistics/RStatistics/StatsFormulas.R')
critique <- matrix(c(24,8,10,8,13,9,13,11,64),nrow=3,
dimnames=list("Siskel"=c("con","mixed","pro"),
"Ebert"=c("con","mixed","pro")))
critique
LikelihoodRatioTest(critique)
ChiSquareIndependence(critique)
CohenKappaTest(critique)
|
/UniPennState_GeneralizedLinearModels/TwoWayTable/Agreement_Kappa_Movie.R
|
no_license
|
statisticallyfit/RStatistics
|
R
| false | false | 388 |
r
|
source('/datascience/projects/statisticallyfit/github/learningstatistics/RStatistics/StatsFormulas.R')
critique <- matrix(c(24,8,10,8,13,9,13,11,64),nrow=3,
dimnames=list("Siskel"=c("con","mixed","pro"),
"Ebert"=c("con","mixed","pro")))
critique
LikelihoodRatioTest(critique)
ChiSquareIndependence(critique)
CohenKappaTest(critique)
|
#' @title Search Author Content on SCOPUS
#'
#' @description Searches SCOPUS to get information about documents
#' on an author.
#' Note, \code{author_list} returns a list of the entries from
#' \code{author_search},
#' but allows you to put in a name.
#' @param au_id Author ID number. Overrides any first/last name argument
#' @param last_name last name of author
#' @param api_key Elsevier API key
#' @param first_name first name of author
#' @param verbose Print diagnostic messages
#' @param all_author_info Should info for all authors be recorded instead of
#' just that of the author given
#' @param http Address for scopus api
#' @param view type of view to give, see
#' \url{https://api.elsevier.com/documentation/ScopusSearchAPI.wadl}
#' @param count number of records to retrieve (below 25, see
#' \url{https://dev.elsevier.com/api_key_settings.html})
#' @param general Should \code{\link{gen_entries_to_df}} be used instead of
#' the behavior from before version 0.5.10.9001
#' @param scrub Should `scrub_identifier` be run on the identifier?
#' @param ... Arguments to be passed to \code{\link{author_search}}
#' @param headers Headers passed to \code{\link{add_headers}},
#' passed to \code{\link{GET}}
#'
#' @export
#' @seealso \code{\link{get_author_info}}
#' @return List of entries from SCOPUS
#' @examples
#' if (have_api_key()) {
#' res = author_df(last_name = "Muschelli", first_name = "John",
#' verbose = FALSE)
#' }
#' @note The \code{author_data} command will return the list of all
#' entries as well as
#' the \code{data.frame}.
author_df = function(
au_id = NULL, last_name = NULL,
first_name = NULL,
api_key = NULL,
verbose = TRUE,
all_author_info = FALSE,
http = "https://api.elsevier.com/content/search/scopus",
view = "COMPLETE",
count = 25,
general = TRUE,
scrub = FALSE,
headers = NULL,
...){
L = author_data(au_id = au_id,
last_name = last_name,
first_name = first_name,
api_key = api_key,
verbose = verbose,
all_author_info = all_author_info,
http = http,
view = view,
count = count,
general = general,
scrub = scrub,
headers = headers,
... = ...)
df = L$df
return(df)
}
#' @rdname author_df
#' @export
author_df_orig = function(..., general = FALSE) {
author_df(..., general = general)
}
#' @rdname author_df
#' @export
author_list = function(au_id = NULL, last_name = NULL,
first_name = NULL,
api_key = NULL,
verbose = TRUE,
http = "https://api.elsevier.com/content/search/scopus",
view = "COMPLETE",
count = 25,
headers = NULL,
...){
api_key = get_api_key(api_key)
L = process_author_name(au_id = au_id,
first_name = first_name,
last_name = last_name,
api_key = api_key,
verbose = verbose,
headers = headers)
first_name = L$first_name
last_name = L$last_name
au_id = L$au_id
### Getting author information
entries = author_search(au_id = au_id,
api_key = api_key,
verbose = verbose,
view = view,
http = http,
count = count,
headers = headers,
...)
entries$au_id = au_id
entries$first_name = first_name
entries$last_name = last_name
return(entries)
}
#' @rdname author_df
#' @export
author_data = function(...,
verbose = TRUE,
all_author_info = FALSE,
general = TRUE,
scrub = FALSE){
entries = author_list(..., verbose = verbose)
au_id = entries$au_id
first_name = entries$first_name
last_name = entries$last_name
entries = entries$entries
if (general) {
xdf = gen_entries_to_df(entries, scrub = scrub)
df = xdf$df
} else {
if ( all_author_info ) {
# df$indexer = seq(nrow(df))
df = entries_to_df(entries = entries,
au_id = NULL,
verbose = verbose)
# df = merge(df, df2, sort = FALSE, all.x = TRUE)
# df = df[ order(df$indexer), ]
# df$indexer = NULL
} else {
df = entries_to_df(entries = entries,
au_id = au_id,
verbose = verbose)
}
xdf = NULL
}
# df$n_affiliations = n_affils
df$first_name = first_name
df$last_name = last_name
df$au_id = au_id
L = list(entries = entries,
df = df)
L$first_name = first_name
L$last_name = last_name
L$au_id = au_id
L$full_data = xdf
return(L)
}
#' @title Process Author Name
#' @description Process author ID and names for generic use
#' @param au_id Author ID number. Overrides any first/last name argument
#' @param last_name last name of author
#' @param first_name first name of author
#' @param api_key Elsevier API key
#' @param affil_id ID of affiliation (optional)
#' @param verbose Print diagnostic messages
#' @param headers Headers passed to \code{\link{add_headers}},
#' passed to \code{\link{GET}}
#'
#' @return List of first/last name and author ID
#' @note This function exists mainly to avoid code duplication
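#' @examples
#' \donttest{
#' # a sketch (assumes a valid Elsevier API key is configured):
#' process_author_name(last_name = "Muschelli", first_name = "John")
#' }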
#' @export
process_author_name = function(
au_id = NULL, last_name = NULL,
first_name = NULL,
affil_id = NULL,
api_key = NULL,
verbose = TRUE,
headers = NULL) {
if (is.null(last_name) &
is.null(first_name) &
is.null(au_id)) {
stop("au_id or names must be specified!")
}
# Getting AU-ID
if (
(!is.null(last_name) | !is.null(first_name) ) &
!is.null(au_id)) {
warning("AU-ID overriding first/last name combination")
}
if (is.null(au_id)) {
last_name = replace_non_ascii(last_name)
first_name = replace_non_ascii(first_name)
if (length(first_name) == 0) {
first_name = NULL
} else if (first_name %in% c("", NA)) {
first_name = NULL
}
if (length(last_name) == 0) {
last_name = NULL
} else if (last_name %in% c("", NA)) {
last_name = NULL
}
author_name = get_author_info(
last_name = last_name,
first_name = first_name,
api_key = api_key, verbose = verbose,
affil_id = affil_id,
headers = headers)
if (NROW(author_name) == 0) {
stop("No author name found")
}
if (all(is.na(author_name$au_id))) {
stop("No author name found")
}
if (verbose) {
message("Authors found:")
print(author_name[1,])
}
au_id = author_name$au_id[1]
}
if (is.na(au_id) | is.null(au_id)) {
stop("AU-ID not found, must be specified - names didn't work")
}
au_id = as.character(au_id)
au_id = gsub("AUTHOR_ID:", "", au_id, fixed = TRUE)
L = list(au_id = au_id)
L$first_name = first_name
L$last_name = last_name
return(L)
}
|
/R/author_df.R
|
no_license
|
cran/rscopus
|
R
| false | false | 7,166 |
r
|
#' @title Search Author Content on SCOPUS
#'
#' @description Searches SCOPUS to get information about documents
#' on an author.
#' Note, \code{author_list} returns a list of the entries from
#' \code{author_search},
#' but allows you to put in a name.
#' @param au_id Author ID number. Overrides any first/last name argument
#' @param last_name last name of author
#' @param api_key Elsevier API key
#' @param first_name first name of author
#' @param verbose Print diagnostic messages
#' @param all_author_info Should all author info be recorded instead of
#' that just to the
#' author given
#' @param http Address for scopus api
#' @param view type of view to give, see
#' \url{https://api.elsevier.com/documentation/ScopusSearchAPI.wadl}
#' @param count number of records to retrieve (below 25, see
#' \url{https://dev.elsevier.com/api_key_settings.html})
#' @param general Should \code{\link{gen_entries_to_df}} instead of the
#' way before version 0.5.10.9001
#' @param scrub Should `scrub_identifier` be run on the identifier?
#' @param ... Arguments to be passed to \code{\link{author_search}}
#' @param headers Headers passed to \code{\link{add_headers}},
#' passed to \code{\link{GET}}
#'
#' @export
#' @seealso \code{\link{get_author_info}}
#' @return List of entries from SCOPUS
#' @examples
#' if (have_api_key()) {
#' res = author_df(last_name = "Muschelli", first_name = "John",
#' verbose = FALSE)
#' }
#' @note The \code{author_data} command will return the list of all
#' entries as well as
#' the \code{data.frame}.
author_df = function(
au_id = NULL, last_name = NULL,
first_name = NULL,
api_key = NULL,
verbose = TRUE,
all_author_info = FALSE,
http = "https://api.elsevier.com/content/search/scopus",
view = "COMPLETE",
count = 25,
general = TRUE,
scrub = FALSE,
headers = NULL,
...){
L = author_data(au_id = au_id,
last_name = last_name,
first_name = first_name,
api_key = api_key,
verbose = verbose,
all_author_info = all_author_info,
http = http,
view = view,
count = count,
general = general,
scrub = scrub,
headers = headers,
... = ...)
df = L$df
return(df)
}
#' @rdname author_df
#' @export
author_df_orig = function(..., general = FALSE) {
author_df(..., general = general)
}
#' @rdname author_df
#' @export
author_list = function(au_id = NULL, last_name = NULL,
first_name = NULL,
api_key = NULL,
verbose = TRUE,
http = "https://api.elsevier.com/content/search/scopus",
view = "COMPLETE",
count = 25,
headers = NULL,
...){
api_key = get_api_key(api_key)
L = process_author_name(au_id = au_id,
first_name = first_name,
last_name = last_name,
api_key = api_key,
verbose = verbose,
headers = headers)
first_name = L$first_name
last_name = L$last_name
au_id = L$au_id
### Getting author information
entries = author_search(au_id = au_id,
api_key = api_key,
verbose = verbose,
view = view,
http = http,
count = count,
headers = headers,
...)
entries$au_id = au_id
entries$first_name = first_name
entries$last_name = last_name
return(entries)
}
#' @rdname author_df
#' @export
author_data = function(...,
verbose = TRUE,
all_author_info = FALSE,
general = TRUE,
scrub = FALSE){
entries = author_list(..., verbose = verbose)
au_id = entries$au_id
first_name = entries$first_name
last_name = entries$last_name
entries = entries$entries
if (general) {
xdf = gen_entries_to_df(entries, scrub = scrub)
df = xdf$df
} else {
if ( all_author_info ) {
# df$indexer = seq(nrow(df))
df = entries_to_df(entries = entries,
au_id = NULL,
verbose = verbose)
# df = merge(df, df2, sort = FALSE, all.x = TRUE)
# df = df[ order(df$indexer), ]
# df$indexer = NULL
} else {
df = entries_to_df(entries = entries,
au_id = au_id,
verbose = verbose)
}
xdf = NULL
}
# df$n_affiliations = n_affils
df$first_name = first_name
df$last_name = last_name
df$au_id = au_id
L = list(entries = entries,
df = df)
L$first_name = first_name
L$last_name = last_name
L$au_id = au_id
L$full_data = xdf
return(L)
}
#' @title Process Author Name
#' @description Process author ID and names for generic use
#' @param au_id Author ID number. Overrides any first/last name argument
#' @param last_name last name of author
#' @param first_name first name of author
#' @param api_key Elsevier API key
#' @param affil_id ID of affiliation (optional)
#' @param verbose Print diagnostic messages
#' @param headers Headers passed to \code{\link{add_headers}},
#' passed to \code{\link{GET}}
#'
#' @return List of first/last name and author ID
#' @note This function is really to avoid duplication
#' @export
process_author_name = function(
au_id = NULL, last_name = NULL,
first_name = NULL,
affil_id = NULL,
api_key = NULL,
verbose = TRUE,
headers = NULL) {
if (is.null(last_name) &
is.null(first_name) &
is.null(au_id)) {
stop("au_id or names must be specified!")
}
# Getting AU-ID
if (
(!is.null(last_name) | !is.null(first_name) ) &
!is.null(au_id)) {
warning("AU-ID overriding first/last name combination")
}
if (is.null(au_id)) {
last_name = replace_non_ascii(last_name)
first_name = replace_non_ascii(first_name)
if (length(first_name) == 0) {
first_name = NULL
} else if (first_name %in% c("", NA)) {
first_name = NULL
}
if (length(last_name) == 0) {
last_name = NULL
} else if (last_name %in% c("", NA)) {
last_name = NULL
}
author_name = get_author_info(
last_name = last_name,
first_name = first_name,
api_key = api_key, verbose = verbose,
affil_id = affil_id,
headers = headers)
if (NROW(author_name) == 0) {
stop("No author name found")
}
    if (all(is.na(author_name$au_id))) {
      stop("No author ID found for the matched name")
    }
if (verbose) {
message("Authors found:")
print(author_name[1,])
}
au_id = author_name$au_id[1]
}
  if (is.null(au_id) || is.na(au_id)) { # test is.null first: is.na(NULL) has length zero
stop("AU-ID not found, must be specified - names didn't work")
}
au_id = as.character(au_id)
au_id = gsub("AUTHOR_ID:", "", au_id, fixed = TRUE)
L = list(au_id = au_id)
L$first_name = first_name
L$last_name = last_name
return(L)
}
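# --- Hedged usage sketch (an addition, not part of the package file) --------
# How process_author_name() feeds author_df(): resolve the AU-ID once, then
# query by ID. The example name and the environment variable are assumptions.
if (FALSE) {
  info <- process_author_name(last_name = "Muschelli", first_name = "John",
                              api_key = Sys.getenv("ELSEVIER_API_KEY"))
  papers <- author_df(au_id = info$au_id, verbose = FALSE)
  head(papers)
}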
|
ApparrentCindex <- pec::cindex(list("Cox X1"=cox1,
"Cox X2"=cox2,
"Cox X1+X2"=cox12,
"RSF"=rsf1),
formula=Surv(time,status)~X1+X2,
data=dat,
eval.times=seq(5,500,50))
library(rms)
Cindex <- purrr::map(.x = pred_KM,
.f = ~pec::cindex(object = .x,
formula = Surv(os_months, os_deceased) ~ )
print(ApparrentCindex)
plot(ApparrentCindex)
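# Hedged extension: the apparent C-index above is computed on the training
# data and is optimistic. pec::cindex() can bootstrap-cross-validate via its
# splitMethod/B arguments; the values below are illustrative assumptions.
BootCindex <- pec::cindex(list("Cox X1+X2" = cox12, "RSF" = rsf1),
                          formula = Surv(time, status) ~ X1 + X2,
                          data = dat,
                          eval.times = seq(5, 500, 50),
                          splitMethod = "bootcv",  # bootstrap cross-validation
                          B = 100)                 # number of bootstrap samples
plot(BootCindex)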
|
/cindex.R
|
no_license
|
csetraynor/Code_BYM_Warwick
|
R
| false | false | 597 |
r
|
ApparrentCindex <- pec::cindex(list("Cox X1"=cox1,
"Cox X2"=cox2,
"Cox X1+X2"=cox12,
"RSF"=rsf1),
formula=Surv(time,status)~X1+X2,
data=dat,
eval.times=seq(5,500,50))
library(rms)
Cindex <- purrr::map(.x = pred_KM,
.f = ~pec::cindex(object = .x,
formula = Surv(os_months, os_deceased) ~ )
print(ApparrentCindex)
plot(ApparrentCindex)
|
library(ape)
# Read the Newick tree, remove its root, and write the unrooted tree back out.
testtree <- read.tree("4910_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="4910_0_unrooted.txt")
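# Hedged sanity check (not in the original pipeline): ape::is.rooted() should
# report FALSE for the tree that was just unrooted.
stopifnot(!is.rooted(unrooted_tr))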
|
/codeml_files/newick_trees_processed/4910_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false | false | 135 |
r
|
|
##########################################################################################
# Designed and developed by Tinniam V Ganesh
# Date : 30 Mar 2016
# Function: saveAllMatchesBetweenTeams
# This function saves all matches between 2 teams as a single dataframe
##################################################################################
#' @title
#' Saves all matches between 2 teams as dataframe
#'
#' @description
#' This function saves all matches between 2 teams as a single dataframe in the
#' current directory
#'
#' @usage
#' saveAllMatchesBetweenTeams()
#'
#' @return None
#' @references
#' \url{http://cricsheet.org/}\cr
#' \url{https://gigadom.wordpress.com/}\cr
#' \url{https://github.com/tvganesh/yorkrData}
#'
#'
#' @author
#' Tinniam V Ganesh
#' @note
#' Maintainer: Tinniam V Ganesh \email{tvganesh.85@gmail.com}
#'
#' @examples
#' \dontrun{
#' saveAllMatchesBetweenTeams()
#' }
#' @seealso
#' \code{\link{batsmanDismissals}}\cr
#' \code{\link{batsmanRunsVsDeliveries}}\cr
#' \code{\link{batsmanRunsVsStrikeRate}}\cr
#' \code{\link{getAllMatchesAllOpposition}}\cr
#' \code{\link{getAllMatchesBetweenTeams}}\cr
#'
#' @export
#'
saveAllMatchesBetweenTeams <- function(){
teams <-c("Australia","India","Pakistan","West Indies", 'Sri Lanka',
"England", "Bangladesh","Netherlands","Scotland", "Afghanistan",
"Zimbabwe","Ireland","New Zealand","South Africa","Canada",
"Bermuda","Kenya")
matches <- NULL
#Create all combinations of teams
for(i in seq_along(teams)){
for(j in seq_along(teams)){
if(teams[i] != teams[j]){
cat("Team1=",teams[i],"Team2=",teams[j],"\n")
tryCatch(matches <- getAllMatchesBetweenTeams(teams[i],teams[j],dir="../data",save=TRUE),
error = function(e) {
print("No matches")
}
)
}
}
matches <- NULL
}
}
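# Design note (hedged): the nested loops above visit every ordered pair, so
# each matchup is handled twice -- once as (A,B) and once as (B,A). If both
# orderings save the same file, combn() halves the work. A sketch only, not a
# replacement for the exported function:
saveAllMatchesBetweenTeamsOnce <- function(teams, dir = "../data"){
    pairs <- combn(teams, 2)  # every unordered pair exactly once
    for (k in seq_len(ncol(pairs))) {
        tryCatch(getAllMatchesBetweenTeams(pairs[1, k], pairs[2, k],
                                           dir = dir, save = TRUE),
                 error = function(e) {
                     print("No matches")
                 }
        )
    }
}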
|
/R/saveAllMatchesBetweenTeams.R
|
no_license
|
bcdunbar/yorkr
|
R
| false | false | 1,990 |
r
|
|
## Reading Dataset in R
power <- read.table('C:/Users/swastikaa/Documents/DataScience/Course4/household_power_consumption.txt', header=TRUE, sep=";", na.strings = "?", colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'))
## Format date to Type Date
power$Date <- as.Date(power$Date, "%d/%m/%Y")
## Filter data set from Feb. 1, 2007 to Feb. 2, 2007
power <- subset(power,Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
## Remove incomplete observation
power <- power[complete.cases(power),]
## Combine Date and Time column
dateTime <- paste(power$Date, power$Time)
## Name the vector
dateTime <- setNames(dateTime, "DateTime")
## Remove Date and Time column
power <- power[ ,!(names(power) %in% c("Date","Time"))]
## Add DateTime column
power <- cbind(dateTime, power)
## Format dateTime Column
power$dateTime <- as.POSIXct(dateTime)
##Plot 4 - Sub Metering Stats by days
png("plot4.png",width=480, height=480) ## Open PNG device; create 'plot1.png' in my working directory
## Create plot and send to a file (no plot appears on screen)
par(mar = c(4,4,2,1), mfrow = c(2,2))
plot(power$Global_active_power~power$dateTime, type="l", ylab="Global Active Power", xlab="")
plot(power$Voltage~power$dateTime, type="l", ylab="Voltage", xlab="datetime")
plot(power$Sub_metering_1~power$dateTime, type="l",
ylab="Energy sub metering", xlab="")
lines(power$Sub_metering_2~power$dateTime,col='Red')
lines(power$Sub_metering_3~power$dateTime,col='Blue')
legend("topright", col=c("black", "red", "blue"), lwd=c(1,1,1),
c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(power$Global_reactive_power~power$dateTime, type="l", ylab="Global_reactive_power", xlab="datetime")
## Annotate plot; still nothing on screen
dev.off() ## Close the PNG file device
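## Hedged sanity check (not part of the original script): confirm the subset
## spans only the two target days before trusting the four panels.
print(range(power$dateTime))
stopifnot(as.Date(range(power$dateTime)) >= as.Date("2007-02-01"),
          as.Date(range(power$dateTime)) <= as.Date("2007-02-02"))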
|
/course1_week1_project_plot4.R
|
no_license
|
swastikaacharya1/ds_course4_week1_project
|
R
| false | false | 1,882 |
r
|
|
#' A function for analyzing two-sample problems
#'
#' The rank.two.samples() function
#'
#' @param formula A model \code{\link{formula}} object. The left hand side
#' contains the response variable and the right hand side contains the factor
#' variables of interest. An interaction term must be specified.
#' @param data A data.frame, list or environment containing the variables in
#' \code{formula}. The default option is \code{NULL}.
#' @param conf.level A number specifying the confidence level; the default is 0.95.
#' @param alternative Which alternative is considered? One of "two.sided", "less", "greater".
#' @param rounds Value specifying the number of digits the results are rounded to.
#' @param method specifying the method used for calculation of the confidence intervals.
#' One of "logit", "probit", "normal", "t.app" and "permu".
#' @param plot.simci Logical, indicating whether or not confidence intervals
#' should be plotted
#' @param info Logical. info = FALSE suppresses the output of additional information
#' concerning e.g. the interpretation of the test results.
#' @param wilcoxon asymptotic or exact calculation of Wilcoxon test.
#' @param shift.int Logical, indicating whether or not shift effects should be considered.
#' @param nperm Number of permutations used, default is 10000.
#'
#'
#' @examples
#' data(Muco)
#' Muco2 <- subset(Muco, Disease != "OAD")
#' twosample <- rank.two.samples(HalfTime ~ Disease, data = Muco2,
#' alternative = "greater", method = "probit", wilcoxon="exact")
#'
#'
#' @export
rank.two.samples <- function (formula, data, conf.level = 0.95,
alternative = c("two.sided",
"less", "greater"), rounds = 3, method = c("logit", "probit",
"normal", "t.app", "permu"), plot.simci = FALSE, info = TRUE,
wilcoxon=c("asymptotic","exact"),shift.int=TRUE,
nperm = 10000)
{
alpha <- 1 - conf.level
if (alpha >= 1 || alpha <= 0) {
stop("The confidence level must be between 0 and 1!")
if (is.null(alternative)) {
stop("Please declare the alternative! (two.sided, less, greater)")
}
}
alternative <- match.arg(alternative)
method <- match.arg(method)
wilcoxon <- match.arg(wilcoxon)
if (length(formula) != 3) {
stop("You can only analyse one-way layouts!")
}
dat <- model.frame(formula, droplevels(data))
if (ncol(dat) != 2) {
stop("Specify one response and only one class variable in the formula")
}
if (is.numeric(dat[, 1]) == FALSE) {
stop("Response variable must be numeric")
}
response <- dat[, 1]
factorx <- as.factor(dat[, 2])
fl <- levels(factorx)
a <- nlevels(factorx)
if (a > 2) {
stop("You want to perform a contrast test (the factor variable has more than two levels). Please use the function mctp!")
}
samples <- split(response, factorx)
n <- sapply(samples, length)
n1 <- n[1]
n2 <- n[2]
if (any(n == 1)) {
warn <- paste("The factor level", fl[n == 1], "has got only one observation!")
stop(warn)
}
N <- sum(n)
cmpid <- paste("p(", fl[1], ",", fl[2], ")", sep = "")
plotz <- 1
rxy <- rank(c(samples[[1]], samples[[2]]))
rx <- rank(c(samples[[1]]))
ry <- rank(c(samples[[2]]))
pl1 <- 1/n2 * (rxy[1:n1] - rx)
pl2 <- 1/n1 * (rxy[(n1 + 1):N] - ry)
pd <- mean(pl2)
pd1 <- (pd == 1)
pd0 <- (pd == 0)
pd[pd1] <- 0.999
pd[pd0] <- 0.001
s1 <- var(pl1)/n1
s2 <- var(pl2)/n2
V <- N * (s1 + s2)
singular.bf <- (V == 0)
V[singular.bf] <- N/(2 * n1 * n2)
switch(method, normal = {
AsyMethod <- "Normal - Approximation"
T <- sqrt(N) * (pd - 1/2)/sqrt(V)
switch(alternative, two.sided = {
text.Output <- paste("True relative effect p is less or equal than 1/2")
p.Value <- min(2 - 2 * pnorm(T), 2 * pnorm(T))
crit <- qnorm(1 - alpha/2)
Lower <- pd - crit/sqrt(N) * sqrt(V)
Upper <- pd + crit/sqrt(N) * sqrt(V)
}, less = {
text.Output <- paste("True relative effect p is less than 1/2")
p.Value <- pnorm(T)
crit <- qnorm(1 - alpha)
Lower <- 0
Upper <- pd + crit/sqrt(N) * sqrt(V)
}, greater = {
text.Output <- paste("True relative effect p is greater than 1/2")
p.Value <- 1 - pnorm(T)
crit <- qnorm(1 - alpha)
Lower <- pd - crit/sqrt(N) * sqrt(V)
Upper <- 1
})
data.info <- data.frame(Sample = fl, Size = n)
Analysis <- data.frame(Effect = cmpid, Estimator = round(pd,
rounds), Lower = round(Lower, rounds), Upper = round(Upper,
rounds), T = round(T, rounds), p.Value = round(p.Value,
rounds))
rownames(Analysis) <- 1
}, t.app = {
T <- sqrt(N) * (pd - 1/2)/sqrt(V)
df.sw <- (s1 + s2)^2/(s1^2/(n1 - 1) + s2^2/(n2 - 1))
df.sw[is.nan(df.sw)] <- 1000
AsyMethod <- paste("Brunner - Munzel - T - Approx with",
round(df.sw, rounds), "DF")
switch(alternative, two.sided = {
text.Output <- paste("True relative effect p is less or equal than 1/2")
p.Value <- min(2 - 2 * pt(T, df = df.sw), 2 * pt(T,
df = df.sw))
crit <- qt(1 - alpha/2, df = df.sw)
Lower <- pd - crit/sqrt(N) * sqrt(V)
Upper <- pd + crit/sqrt(N) * sqrt(V)
}, less = {
text.Output <- paste("True relative effect p is less than 1/2")
p.Value <- pt(T, df = df.sw)
crit <- qt(1 - alpha, df = df.sw)
Lower <- 0
Upper <- pd + crit/sqrt(N) * sqrt(V)
}, greater = {
text.Output <- paste("True relative effect p is greater than 1/2")
p.Value <- 1 - pt(T, df = df.sw)
crit <- qt(1 - alpha, df = df.sw)
Lower <- pd - crit/sqrt(N) * sqrt(V)
Upper <- 1
})
data.info <- data.frame(Sample = fl, Size = n)
Analysis <- data.frame(Effect = cmpid, Estimator = round(pd,
rounds), Lower = round(Lower, rounds), Upper = round(Upper,
rounds), T = round(T, rounds), p.Value = round(p.Value,
rounds))
rownames(Analysis) <- 1
result <- list(Info = data.info, Analysis = Analysis)
}, logit = {
AsyMethod <- "Logit - Transformation"
logitf <- function(p) {
log(p/(1 - p))
}
expit <- function(G) {
exp(G)/(1 + exp(G))
}
logit.pd <- logitf(pd)
logit.dev <- 1/(pd * (1 - pd))
vd.logit <- logit.dev^2 * V
T <- (logit.pd) * sqrt(N/vd.logit)
switch(alternative, two.sided = {
text.Output <- paste("True relative effect p is less or equal than 1/2")
p.Value <- min(2 - 2 * pnorm(T), 2 * pnorm(T))
crit <- qnorm(1 - alpha/2)
Lower <- expit(logit.pd - crit/sqrt(N) * sqrt(vd.logit))
Upper <- expit(logit.pd + crit/sqrt(N) * sqrt(vd.logit))
}, less = {
text.Output <- paste("True relative effect p is less than 1/2")
p.Value <- pnorm(T)
crit <- qnorm(1 - alpha)
Lower <- 0
Upper <- expit(logit.pd + crit/sqrt(N) * sqrt(vd.logit))
}, greater = {
text.Output <- paste("True relative effect p is greater than 1/2")
p.Value <- 1 - pnorm(T)
crit <- qnorm(1 - alpha)
Lower <- expit(logit.pd - crit/sqrt(N) * sqrt(vd.logit))
Upper <- 1
})
data.info <- data.frame(Sample = fl, Size = n)
Analysis <- data.frame(Effect = cmpid, Estimator = round(pd,
rounds), Lower = round(Lower, rounds), Upper = round(Upper,
rounds), T = round(T, rounds), p.Value = round(p.Value,
rounds))
rownames(Analysis) <- 1
result <- list(Info = data.info, Analysis = Analysis)
}, probit = {
AsyMethod <- "Probit - Transformation"
probit.pd <- qnorm(pd)
probit.dev <- sqrt(2 * pi)/(exp(-0.5 * qnorm(pd) * qnorm(pd)))
vd.probit <- probit.dev^2 * V
T <- (probit.pd) * sqrt(N/vd.probit)
switch(alternative, two.sided = {
text.Output <- paste("True relative effect p is less or equal than 1/2")
p.Value <- min(2 - 2 * pnorm(T), 2 * pnorm(T))
crit <- qnorm(1 - alpha/2)
Lower <- pnorm(probit.pd - crit/sqrt(N) * sqrt(vd.probit))
Upper <- pnorm(probit.pd + crit/sqrt(N) * sqrt(vd.probit))
}, less = {
text.Output <- paste("True relative effect p is less than 1/2")
p.Value <- pnorm(T)
crit <- qnorm(1 - alpha)
Lower <- 0
Upper <- pnorm(probit.pd + crit/sqrt(N) * sqrt(vd.probit))
}, greater = {
text.Output <- paste("True relative effect p is greater than 1/2")
p.Value <- 1 - pnorm(T)
crit <- qnorm(1 - alpha)
Lower <- pnorm(probit.pd - crit/sqrt(N) * sqrt(vd.probit))
Upper <- 1
})
data.info <- data.frame(Sample = fl, Size = n)
Analysis <- data.frame(Effect = cmpid, Estimator = round(pd,
rounds), Lower = round(Lower, rounds), Upper = round(Upper,
rounds), T = round(T, rounds), p.Value = round(p.Value,
rounds))
rownames(Analysis) <- 1
result <- list(Info = data.info, Analysis = Analysis)
}, permu = {
    # logit() and expit() are used below but were only defined locally in the
    # "logit" branch; define them here so the permutation branch runs
    logit <- function(p) log(p/(1 - p))
    expit <- function(G) exp(G)/(1 + exp(G))
    Tperm=Tlogitperm=Tprobitperm=c()
ausgang = BMstat(samples[[1]],samples[[2]],n1,n2)
for(h in 1:nperm){
respperm=sample(response)
phelp=BMstat(respperm[1:n1],respperm[(n1+1):N],n1,n2)
Tperm[h] = phelp$T
Tlogitperm[h] = phelp$Logit
Tprobitperm[h] = phelp$Probit
}
p.PERM1 = mean(ausgang$T >= Tperm)
p.PERMLogit1 = mean(ausgang$Logit >= Tlogitperm)
p.PERMProbit1 = mean(ausgang$Probit >= Tprobitperm)
c1 = quantile(Tperm,(1-conf.level)/2)
c2 = quantile(Tperm,1-(1-conf.level)/2)
c1LOGIT = quantile(Tlogitperm,(1-conf.level)/2)
c2LOGIT = quantile(Tlogitperm,1-(1-conf.level)/2)
c1PROBIT = quantile(Tprobitperm,(1-conf.level)/2)
c2PROBIT = quantile(Tprobitperm,1-(1-conf.level)/2)
c1lower = quantile(Tperm,(1-conf.level))
c2upper = quantile(Tperm,1-(1-conf.level))
c1LOGITlower = quantile(Tlogitperm,(1-conf.level))
c2LOGITupper = quantile(Tlogitperm,1-(1-conf.level))
c1PROBITlower = quantile(Tprobitperm,(1-conf.level))
c2PROBITupper = quantile(Tprobitperm,1-(1-conf.level))
switch(alternative, two.sided = {
text.Output <- paste("True relative effect p is less or equal than 1/2")
p.PERM <- min(2 - 2 * p.PERM1, 2 * p.PERM1)
p.LOGIT <- min(2 - 2 * p.PERMLogit1, 2 * p.PERMLogit1)
p.PROBIT <- min(2 - 2 * p.PERMProbit1, 2 *p.PERMProbit1)
UntenRS <- pd - sqrt(ausgang$sdx/N) * c2
ObenRS <- pd - sqrt(ausgang$sdx/N) * c1
ULogitRS <- logit(pd) - ausgang$slogit/sqrt(N) * c2LOGIT
OLogitRS <- logit(pd) - ausgang$slogit/sqrt(N) * c1LOGIT
UntenLogitRS <- expit(ULogitRS)
ObenLogitRS <- expit(OLogitRS)
UProbitRS <- qnorm(pd) - ausgang$sprobit/sqrt(N) * c2PROBIT
OProbitRS <- qnorm(pd) - ausgang$sprobit/sqrt(N) * c1PROBIT
UntenProbitRS <- pnorm(UProbitRS)
ObenProbitRS <- pnorm(OProbitRS)
Statistic <- round(c(ausgang$T, ausgang$Logit, ausgang$Probit), rounds)
Estimator <- round(rep(pd, 3), rounds)
Lower <- round(c(UntenRS, UntenLogitRS, UntenProbitRS),
rounds)
Upper <- round(c(ObenRS, ObenLogitRS, ObenProbitRS),
rounds)
p.value <- c(p.PERM, p.LOGIT, p.PROBIT)
Analysis <- data.frame(Estimator, Statistic, Lower,
Upper, p.value, row.names = c("id", "logit", "probit"))
}, less = {
text.Output <- paste("True relative effect p is less than 1/2")
p.PERM = p.PERM1
p.LOGIT <- p.PERMLogit1
p.PROBIT <- p.PERMProbit1
UntenRS <- 0
ObenRS <- pd - sqrt(ausgang$sdx/N) * c1lower
OLogitRS <- logit(pd) - ausgang$slogit/sqrt(N) * c1LOGITlower
UntenLogitRS <-0
ObenLogitRS <- expit(OLogitRS)
OProbitRS <- qnorm(pd) - ausgang$sprobit/sqrt(N) * c1PROBITlower
UntenProbitRS <- 0
ObenProbitRS <- pnorm(OProbitRS)
Statistic <- round(c(ausgang$T, ausgang$Logit, ausgang$Probit), rounds)
Estimator <- round(rep(pd, 3), rounds)
Lower <- round(c(UntenRS, UntenLogitRS, UntenProbitRS),
rounds)
Upper <- round(c(ObenRS, ObenLogitRS, ObenProbitRS),
rounds)
p.value <- c(p.PERM, p.LOGIT, p.PROBIT)
Analysis <- data.frame(Estimator, Statistic, Lower,
Upper, p.value, row.names = c("id", "logit", "probit"))
}, greater = {
text.Output <- paste("True relative effect p is greater than 1/2")
p.PERM = 1-p.PERM1
p.LOGIT <-1- p.PERMLogit1
p.PROBIT <- 1-p.PERMProbit1
UntenRS <- pd - sqrt(ausgang$sdx/N) * c2upper
ObenRS <- 1
ULogitRS <- logit(pd) - ausgang$slogit/sqrt(N) * c2LOGITupper
UntenLogitRS <-expit(ULogitRS)
ObenLogitRS <- 1
UProbitRS <- qnorm(pd) - ausgang$sprobit/sqrt(N) * c2PROBITupper
UntenProbitRS <- pnorm(UProbitRS)
ObenProbitRS <- 1
Statistic <- round(c(ausgang$T, ausgang$Logit, ausgang$Probit), rounds)
Estimator <- round(rep(pd, 3), rounds)
Lower <- round(c(UntenRS, UntenLogitRS, UntenProbitRS),
rounds)
Upper <- round(c(ObenRS, ObenLogitRS, ObenProbitRS),
rounds)
p.value <- round(c(p.PERM, p.LOGIT, p.PROBIT), rounds)
Analysis <- data.frame(Estimator, Statistic, Lower,
Upper, p.value, row.names = c("id", "logit", "probit"))
})
AsyMethod <- "Studentized Permutation Test (+ delta-method)"
#cmpid <- c("id", "logit", "probit")
data.info <- data.frame(Sample = fl, Size = n)
result <- list(Info = data.info, Analysis = Analysis)
})
#------------SHIFT EFFECTS--------------------------------#
HL.help=expand.grid(samples[[1]],samples[[2]])
HL=median(HL.help[,2]-HL.help[,1])
switch(wilcoxon,asymptotic={
# wilcox_test(), pvalue(), statistic() and confint() below come from the 'coin' package
Wilcox = wilcox_test(response~factorx,distribution="asymptotic",
alternative=alternative,
conf.int=TRUE,conf.level=(1 - alpha))
p.wilcox=pvalue(Wilcox)
Z.wilcox=statistic(Wilcox)
if(shift.int==TRUE){
shiftint=sort(-1*c(confint(Wilcox)$conf.int))
Lower.Shift=shiftint[1]
Upper.Shift=shiftint[2]
}
},
exact={
Wilcox = wilcox_test(response~factorx,distribution="exact",
alternative=alternative,conf.int=TRUE,conf.level=(1 - alpha))
p.wilcox=pvalue(Wilcox)
Z.wilcox=sum(ry)
if(shift.int==TRUE){
shiftint=sort(-1*c(confint(Wilcox)$conf.int))
Lower.Shift=shiftint[1]
Upper.Shift=shiftint[2]
}
})
if(shift.int==FALSE){
Lower.Shift = NA
Upper.Shift= NA
HL = NA
}
cmpidWilcoxon <- paste("delta","(",fl[2], "-", fl[1], ")", sep = "")
Wilcoxon.Test=data.frame(Effect = cmpid,Estimator=pd,
Statistic=Z.wilcox,p.Value=p.wilcox,Shift=cmpidWilcoxon, Hodges.Lehmann=HL,Lower=Lower.Shift,Upper=Upper.Shift)
result <- list(Info = data.info, Analysis = Analysis, Wilcoxon=Wilcoxon.Test)
if (plot.simci == TRUE) {
text.Ci <- paste((1 - alpha) * 100, "%", "Confidence Interval for p")
Lowerp <- "|"
plot(rep(pd, plotz), 1:plotz, xlim = c(0, 1), pch = 15,
axes = FALSE, xlab = "", ylab = "")
points(Lower, 1:plotz, pch = Lowerp, font = 2, cex = 2)
points(Upper, 1:plotz, pch = Lowerp, font = 2, cex = 2)
abline(v = 0.5, lty = 3, lwd = 2)
for (ss in 1:plotz) {
polygon(x = c(Lower[ss], Upper[ss]), y = c(ss, ss),
lwd = 2)
}
axis(1, at = seq(0, 1, 0.1))
axis(2, at = 1:plotz, labels = cmpid, font = 2)
box()
title(main = c(text.Ci, paste("Method:", AsyMethod)))
}
if (info == TRUE) {
cat("\n", "#------Nonparametric Test Procedures and Confidence Intervals for relative effects-----#",
"\n", "\n", "-", "Alternative Hypothesis: ", text.Output,
"\n", "-", "Confidence level:", (1 - alpha) * 100,
"%", "\n", "-", "Method", "=", AsyMethod, "\n", "\n",
"#---------------------------Interpretation----------------------------------#",
"\n", "p(a,b)", ">", "1/2", ":", "b tends to be larger than a",
"\n", "#---------------------------------------------------------------------------#",
"\n", "\n")
}
#result$input <- input.list
#result$text.Output <- text.Output
#result$cmpid <- cmpid
#result$AsyMethod <- AsyMethod
#class(result) <- "ranktwosamples"
return(result)
}
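# Hedged usage sketch (an addition, not from the package docs): comparing the
# t-approximation against the studentized permutation answer on the same data.
# nperm is kept small only to make the sketch fast; the default 10000 is the
# sensible choice in practice.
if (FALSE) {
  data(Muco)
  Muco2 <- subset(Muco, Disease != "OAD")
  fit.t    <- rank.two.samples(HalfTime ~ Disease, data = Muco2,
                               method = "t.app")
  fit.perm <- rank.two.samples(HalfTime ~ Disease, data = Muco2,
                               method = "permu", nperm = 1000)
  fit.t$Analysis
  fit.perm$Analysis
}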
|
/rankFD/R/ranktwosamples.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 16,893 |
r
|
|
########## Gets Data Organized To Go Into Models
bframe1 <-data.frame(read.table("TaggedSdl.txt",header=T))
######################### Tree plots
#####Standard plots
allplots <- c('YOHOPIPO','CCRPIPO','CRCRPIPO','FFS7CONTROL','FFS6BURN','FFS5BURN','FFS2BURN','LOTHAR','UPTHAR','LOLOG','UPLOG','LOGPIJE','SFTRABMA','WTABMA','POFLABMA','PGABMA','BBBPIPO','FRPIJE','LMCC','LOGSEGI','EMSLOPE','EMRIDGE')
nplots <- length(allplots)
####"Extra" plots (the 3 that are next to each other)
extplots <- c('SUPILA','SURIP','SUABCO')
neplots <- length(extplots)
plt.sdl <- numeric(0)
for(p in 1:nplots){
plt.sdl <- c(plt.sdl,which(bframe1[,1]==allplots[p]))
}
for(p in 1:neplots){
plt.sdl <- c(plt.sdl,which(bframe1[,1]==extplots[p]))
}
##########Only seedlings in selected plots
Seedlings <- bframe1[plt.sdl,]
#######Size classes = <10,10-25,25-50,50-75,75-100,<137 cm
SizeClass <- c(10,25,50,75,100,137)
N.Cls <- length(SizeClass)
Year <- seq(1999,2008,by=1)
NYr <- length(Year)
NSdl <- nrow(Seedlings)
max.col <- ncol(Seedlings)
Species <- c('ABCO','ABMA','ABPS','ABXX','CADE','PICO','PIJE','PILA','PIMO','PIPO','PIXX','PSME','QUCH', 'QUKE','SEGI')
NSpp <- length(Species)
#Make a survival matrix and size matrix. -33 indicates not yet tagged, -22 that seedling is dead
Surv.Mat <- matrix(-33,NSdl,NYr); Size.Mat <- matrix(-33,NSdl,NYr)
#Create species indicators
Spp.Ind <- matrix(0,NSdl,NSpp)
#Create plot indicators
Plt.Ind <- matrix(0,NSdl,nplots+neplots)
for(i in 1:NSdl){
Spp.Ind[i,which(Species == Seedlings[i,'SPPCODE'])] <- 1
Plt.Ind[i,which(c(allplots,extplots) == Seedlings[i,'PLOT_NAME'])] <- 1
for(t in 1:NYr){
if(Seedlings[i,7+t]>0){ #measured this year?
Size.Mat[i,t] <- Seedlings[i,7+t]
Surv.Mat[i,t] <- 1
}
if(Seedlings[i,7+t]==-77){ #transition to adult size
Size.Mat[i,t] <- 150
Surv.Mat[i,t] <- 1
}
if(Seedlings[i,7+t]==-66){ #known dead
Size.Mat[i,t] <- 0
Surv.Mat[i,t] <- 0
}
if(Seedlings[i,7+t]==-88){ #tag recovered, likely dead
Size.Mat[i,t] <- 0
Surv.Mat[i,t] <- 0
}
if(Seedlings[i,7+t]==-99){ #missing
if(length(which(Seedlings[i,(7+t):max.col]>0))>=1){ #recorded in future
if(length(which(Seedlings[i,(7+1):(7+t)]>0))>=1){ #recorded in past
Surv.Mat[i,t] <- 1
aa <- which(Seedlings[i,8:(7+t)]>0)
if(length(aa)>0) Size.Mat[i,t] <- Seedlings[i,7+max(aa)] #last recorded height
}}
else { #not recorded in future
Size.Mat[i,t] <- 0
Surv.Mat[i,t] <- 0
}
}#end "missing" loop
if(Seedlings[i,t+7]==-22 & Seedlings[i,(t+7)-1]>0) { #marked previously dead but clearly not
Surv.Mat[i,t] <- 0
Size.Mat[i,t] <- 0
}
if(Seedlings[i,t+7]==-22 & Seedlings[i,(t+7)-1]==-22) { #marked previously dead this year and last
Surv.Mat[i,t] <- -22
Size.Mat[i,t] <- -22
}
if(Seedlings[i,t+7]==-22 & Seedlings[i,(t+7)-1]< -60) { #marked previously dead this year and dead or likely dead earlier
Surv.Mat[i,t] <- -22
Size.Mat[i,t] <- -22
}
if(t>1){
if(Surv.Mat[i,t]==0 & Surv.Mat[i,t-1]==0) {
Surv.Mat[i,t] <- -22
Size.Mat[i,t] <- -22 #previously dead
}
if(Surv.Mat[i,t]==0 & Surv.Mat[i,t-1]==-22) {
Surv.Mat[i,t] <- -22
Size.Mat[i,t] <- -22 #previously dead
}}
}#end year loop
print(i)
}#end seedling loop
####Basal area and trees near seedling
bframe2 <-data.frame(read.table("PlotInfo2.txt",header=T))
bframe3 <- data.frame(read.table("treeyears.txt",header=T))
bframe4 <-read.csv("tree2.csv",header=T)
bframe5 <-read.csv("quadrat_precise.csv",header=T)
# to calculate distances
distmat <- function(x1,y1,x2,y2){
xd <- outer(x1,x2,function(x1,x2) (x1 - x2)^2)
yd <- outer(y1,y2,function(y1,y2) (y1 - y2)^2)
d <- t(sqrt(xd + yd))
return(d)
}
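# Hedged mini-example (not in the original script): rows of the returned
# matrix index the second point set, so the single point (0,0) against the
# points (3,4) and (0,1) gives distances 5 and 1.
stopifnot(all.equal(as.numeric(distmat(0, 0, c(3, 0), c(4, 1))), c(5, 1)))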
BA.Mat <- numeric(0); TNum.Mat <- numeric(0)
for (p in 1:nplots){
Quad <-bframe5[which(bframe5[,1]==allplots[p]),] #set of quadrat locations for plot
nquad <- nrow(Quad)
trees <- bframe4[bframe4$PLOT==allplots[p],] #Trees in plot
ntree <- nrow(trees)
treex <- trees$newX
treey <- trees$newY
#distances between trees and quadrats
TQdist <- distmat(treex,treey,Quad[,6],Quad[,7])
####calculate basal areas
treeyears<-bframe3[which(bframe3[,'plot']==allplots[p]),2:8]
#which census periods match sdl census?
rel.census <- which(treeyears>=1999 & treeyears<=2008)
tree.sdl.yrs <- treeyears[rel.census]
ba <- matrix(0,ntree,NYr)
for(i in 1:ntree){
for(t in 1:NYr){
if(Year[t]<tree.sdl.yrs[1]) dbh <- trees[i,(7+rel.census[1])]
if(length(tree.sdl.yrs)>1){
if(Year[t]>=tree.sdl.yrs[1] & Year[t]<tree.sdl.yrs[2]) dbh <- trees[i,
(7+rel.census[1])]
if(length(tree.sdl.yrs)>2){
if(Year[t]>=tree.sdl.yrs[2] & Year[t]<tree.sdl.yrs[3]) dbh <- trees[i,
(7+rel.census[2])]
if(Year[t]>=tree.sdl.yrs[3]) dbh <- trees[i,(7+rel.census[3])]
}
else {if(Year[t]>=tree.sdl.yrs[2]) dbh <- trees[i,(7+rel.census[2])]}
}
if(length(which(dbh>=0))>0) ba[i,t]<-pi*((dbh/200)^2) #in meters, not cm
if(length(which(trees[i,"MortalityYear"]>=0))>0 & trees[i,"MortalityYear"]<=Year[t])
ba[i,t]<-0
if(length(which(trees[i,"IngrowthYear"]>=0))>0 & trees[i,"IngrowthYear"]>Year[t])
ba[i,t]<-0
}
}
##calculate tree number and basal area within 10 m of quadrat
ba.quad <- matrix(0,nquad,NYr)
tn.quad <- matrix(0,nquad,NYr)
for (j in 1:nquad){
q <- which(TQdist[j,]<10)
if(length(q)>0) {
tree.sub <- trees[q,]
ba.sub <- ba[q,]
for (t in 1:NYr){
if(length(q)>1){
qq <- which(ba.sub[,t]>0)
tn.quad[j,t] <- length(qq)
ba.quad[j,t] <- sum(ba.sub[,t])
}
if(length(q)==1){
if(ba.sub[t]>0) tn.quad[j,t] <- 1
ba.quad[j,t] <- ba.sub[t]
}
} #end t loop
}} #end if dist and nquad loops
temp.BA <- cbind(rep(p,nquad),Quad[,2:3],Quad[,6:7],ba.quad)
temp.Num <- cbind(rep(p,nquad),Quad[,2:3],Quad[,6:7],tn.quad)
if(p==1) {
BA.Mat <- temp.BA
TNum.Mat <- temp.Num
}
if(p>1){
BA.Mat <- rbind(BA.Mat,temp.BA)
TNum.Mat <- rbind(TNum.Mat,temp.Num)
}
print(p)
} #end plot loop
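# Hedged sanity check (an addition): BA.Mat and TNum.Mat are built row-for-row
# from the same quadrats, so they must stay aligned across plots.
stopifnot(nrow(BA.Mat) == nrow(TNum.Mat))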
##Add on additional plots
####SUPILA first
Quad1 <-bframe5[which(bframe5[,1]==extplots[1]),] #set of quadrat locations for plot
nquad1 <- nrow(Quad1)
trees1 <- bframe4[bframe4$PLOT==extplots[1],] #Trees in plot
ntree1 <- nrow(trees1)
treex1 <- trees1$newX
treey1 <- trees1$newY
Qx1 <- Quad1[,6]
Qy1 <- Quad1[,7]
######Add on SURIP
Quad2 <-bframe5[which(bframe5[,1]==extplots[2]),] #set of quadrat locations for plot
nquad2 <- nrow(Quad2)
trees2 <- bframe4[bframe4$PLOT==extplots[2],]
trees2 <- trees2[c(1:290,292:nrow(trees2)),] #Trees in plot, dropping row 291; nrow(trees2) replaces length(treey2), which was not yet defined here
ntree2 <- nrow(trees2)
treex2 <- trees2$newX
treey2 <- trees2$newY-125
Qx2 <- Quad2[,6]
Qy2 <- Quad2[,7]-125
######Add on SUABCO
Quad3 <-bframe5[which(bframe5[,1]==extplots[3]),] #set of quadrat locations for plot
nquad3 <- nrow(Quad3)
trees3 <- bframe4[bframe4$PLOT==extplots[3],]
#Trees in plot
ntree3 <- nrow(trees3)
treex3 <- trees3$newX+50
treey3 <- trees3$newY-200
Qx3 <- Quad3[,6]+50
Qy3 <- Quad3[,7]-200
treex <- c(treex1,treex2,treex3)
treey <- c(treey1,treey2,treey3)
Qx <- c(Qx1,Qx2,Qx3)
Qy <- c(Qy1,Qy2,Qy3)
ntree <- length(treex)
trees <- rbind(trees1,trees2,trees3)
nquad <- length(Qx)
#distances between trees and quadrats
TQdist <- distmat(treex,treey,Qx,Qy)
####calculate basal areas
treeyears<-rbind(bframe3[which(bframe3[,'plot']==extplots[1]),2:8],bframe3[which(bframe3[,'plot']==extplots[2]),2:8],bframe3[which(bframe3[,'plot']==extplots[3]),2:8])
#which census periods match sdl census?
rel.census <- which(treeyears[1,]>=1999 & treeyears[1,]<=2008)
tree.sdl.yrs <- treeyears[1,rel.census]
ba <- matrix(0,ntree,NYr)
for(i in 1:ntree){
for(t in 1:NYr){
if(Year[t]<tree.sdl.yrs[1]) dbh <- trees[i,(7+rel.census[1])]
if(length(tree.sdl.yrs)>1){
if(Year[t]>=tree.sdl.yrs[1] & Year[t]<tree.sdl.yrs[2]) dbh <- trees[i,
(7+rel.census[1])]
if(length(tree.sdl.yrs)>2){
if(Year[t]>=tree.sdl.yrs[2] & Year[t]<tree.sdl.yrs[3]) dbh <- trees[i,
(7+rel.census[2])]
if(Year[t]>=tree.sdl.yrs[3]) dbh <- trees[i,(7+rel.census[3])]
}
else {if(Year[t]>=tree.sdl.yrs[2]) dbh <- trees[i,(7+rel.census[2])]}
}
    if(length(which(dbh>=0))>0) ba[i,t]<-pi*((dbh/200)^2) #dbh in cm -> basal area in m^2
if(length(which(trees[i,"MortalityYear"]>=0))>0 & trees[i,"MortalityYear"]<=Year[t])
ba[i,t]<-0
if(length(which(trees[i,"IngrowthYear"]>=0))>0 & trees[i,"IngrowthYear"]>Year[t])
ba[i,t]<-0
}
}
##calculate tree number and basal area within 10 m of quadrat
ba.quad <- matrix(0,nquad,NYr)
tn.quad <- matrix(0,nquad,NYr)
for (j in 1:nquad){
q <- which(TQdist[j,]<10)
if(length(q)>0) {
tree.sub <- trees[q,]
ba.sub <- ba[q,]
for (t in 1:NYr){
if(length(q)>1){
qq <- which(ba.sub[,t]>0)
tn.quad[j,t] <- length(qq)
ba.quad[j,t] <- sum(ba.sub[,t])
}
if(length(q)==1){
if(ba.sub[t]>0) tn.quad[j,t] <- 1
ba.quad[j,t] <- ba.sub[t]
}
} #end t loop
}} #end if dist and nquad loops
plot.ind <- c(rep(23,nquad1),rep(24,nquad2),rep(25,nquad3))
Quad.ind <- rbind(Quad1[,2:3],Quad2[,2:3],Quad3[,2:3])
BA.Temp <- cbind(plot.ind,Quad.ind,Qx,Qy,ba.quad)
TNum.Temp <- cbind(plot.ind,Quad.ind,Qx,Qy,tn.quad)
colnames(BA.Mat) <- colnames(BA.Temp)
BA.Mat <- rbind(BA.Mat,BA.Temp)
colnames(TNum.Mat) <- colnames(TNum.Temp)
TNum.Mat <- rbind(TNum.Mat,TNum.Temp)
#### Climate
####Need to get new "basins" set up.
bframe6 <- data.frame(read.table("climate_data_long.txt",header=T))
basin <- c(27,1,3,10,9,8,7,17,25,16,24,14,20,26,19,18,0,11,13,15,6,4,21,21,21)
for(p in 1:(nplots+neplots)){
q <- which(bframe6[,3]==basin[p])
if(length(q)>0){
clim.temp <- bframe6[q,]
Plt <- rep(p,length(Year))
precip <- numeric(0); JulMax <- numeric(0)
JanMin <- numeric(0); AvTemp <- numeric(0)
snow <- numeric(0); CWD <- numeric(0)
for(t in Year){
precip <- c(precip,sum(clim.temp[which(clim.temp[,1]==t),4]))
snow <- c(snow,sum(clim.temp[which(clim.temp[,1]==t),9]))
CWD <- c(CWD,sum(clim.temp[which(clim.temp[,1]==t),15]))
      JulMax <- c(JulMax,clim.temp[which(clim.temp[,1]==t & clim.temp[,2]==10),6]) #month 10 = July, assuming water-year month coding (Oct = 1)
      JanMin <- c(JanMin,clim.temp[which(clim.temp[,1]==t & clim.temp[,2]==4),7]) #month 4 = January under the same convention
AvTemp <- c(AvTemp,mean(clim.temp[which(clim.temp[,1]==t),8]))
}
if(p==1) Climate <- cbind(Plt,Year,precip,JulMax,JanMin,AvTemp,snow,CWD)
if(p>1) Climate <- rbind(Climate,cbind(Plt,Year,precip,JulMax,JanMin,AvTemp,snow,CWD))
}
}
##Average climate by plot
est.yrs <- seq(1976,2013,by=1)
for(p in 1:(nplots+neplots)){
q <- which(bframe6[,3]==basin[p])
if(length(q)>0){
clim.temp <- bframe6[q,]
Plt <- rep(p,length(est.yrs))
precip.all <- numeric(0); JulMax.all <- numeric(0)
JanMin.all <- numeric(0); AvTemp.all <- numeric(0)
snow.all <- numeric(0); CWD.all <- numeric(0)
for(t in est.yrs){
precip.all <- c(precip.all,sum(clim.temp[which(clim.temp[,1]==t),4]))
snow.all <- c(snow.all,sum(clim.temp[which(clim.temp[,1]==t),9]))
CWD.all <- c(CWD.all,sum(clim.temp[which(clim.temp[,1]==t),15]))
JulMax.all <- c(JulMax.all,clim.temp[which(clim.temp[,1]==t & clim.temp[,2]==10),6])
JanMin.all <- c(JanMin.all,clim.temp[which(clim.temp[,1]==t & clim.temp[,2]==4),7])
AvTemp.all <- c(AvTemp.all,mean(clim.temp[which(clim.temp[,1]==t),8]))
}
plot.clim <- cbind(precip.all,JulMax.all,JanMin.all,AvTemp.all,snow.all,CWD.all)
mn.plot.clim <- apply(plot.clim,2,mean)
if(p==1) {
Climate.all <- cbind(Plt,est.yrs,plot.clim)
Climate.mean <- c(p,mn.plot.clim)
}
if(p>1) {
Climate.all <- rbind(Climate.all,cbind(Plt,est.yrs,plot.clim))
Climate.mean <- rbind(Climate.mean,c(p,mn.plot.clim))
}
}
}
##### Setting up x's and y's
Y.surv <- numeric(0) #survival vector
Y.grow <- numeric(0) #growth vector
SPP <- numeric(0) #Species indicator
Elev <- numeric(0) #elevation
PLT <- numeric(0) #Plot indicator
YR <- numeric(0) #Year indicator
SZ <- numeric(0) #Size-class indicator
T.BA <- numeric(0) #tree BA
T.N <- numeric(0) #tree num
P.C <- numeric(0) #precip current yr
JMx.C <- numeric(0) #JulMax current yr
JMn.C <- numeric(0) #JanMin current yr
AT.C <- numeric(0)#AvTemp current yr
S.C <- numeric(0)#snow current yr
CWD.C <-numeric(0) #CWD current yr
P.P <- numeric(0)#precip prev yr
JMx.P <-numeric(0)#JulMax prev yr
JMn.P <-numeric(0)#JanMin prev yr
AT.P <-numeric(0)#AvTemp prev yr
S.P <-numeric(0)#snow prev yr
CWD.P <-numeric(0)#CWD prev yr
P.CD <- numeric(0) #precip current yr rel to mean
JMx.CD <- numeric(0) #JulMax current yr rel to mean
JMn.CD <- numeric(0) #JanMin current yr rel to mean
AT.CD <- numeric(0)#AvTemp current yr rel to mean
S.CD <- numeric(0)#snow current yr rel to mean
CWD.CD <-numeric(0) #CWD current yr rel to mean
P.PD <- numeric(0)#precip prev yr rel to mean
JMx.PD <-numeric(0)#JulMax prev yr rel to mean
JMn.PD <-numeric(0)#JanMin prev yr rel to mean
AT.PD <-numeric(0)#AvTemp prev yr rel to mean
S.PD <-numeric(0)#snow prev yr rel to mean
CWD.PD <-numeric(0)#CWD prev yr rel to mean
for(i in 1:4000){ #subset for a test run; use 1:NSdl for the full set of seedlings
lyr <- which(Surv.Mat[i,]>=0)
for(t in lyr){
if(t<10){
if(Surv.Mat[i,t]==1){
#The survival vector
if(Surv.Mat[i,t+1]==1) Y.surv <- c(Y.surv,1)
if(Surv.Mat[i,t+1]==0) Y.surv <- c(Y.surv,0)
#The growth vector
if(Size.Mat[i,t]>=Size.Mat[i,t+1]) Y.grow <- c(Y.grow,0)
if(Size.Mat[i,t]<Size.Mat[i,t+1]) Y.grow <- c(Y.grow,1)
#The X's
SPP <- rbind(SPP,Spp.Ind[i,]) #species
PLT <- rbind(PLT,Plt.Ind[i,]) #plot
yr <- rep(0,NYr); yr[t+1]<- 1
       Elev <- c(Elev,bframe2[which(bframe2[, 1]==c(allplots,extplots)[which(Plt.Ind[i,]==1)]),2]) #elevation; c(allplots,extplots) so seedlings in the three extra plots also match
YR <- rbind(YR,yr) #year
a <- which(SizeClass==Size.Mat[i,t])
sz<- rep(0,N.Cls); sz[a] <- 1
SZ<- rbind(SZ,sz) #size class
qd <- which(BA.Mat[,1]==which(Plt.Ind[i,]==1) & BA.Mat[,
2]==Seedlings[i,2] & BA.Mat[,3]==Seedlings[i,3])
T.BA <- c(T.BA,BA.Mat[qd,5+t]) #local basal area
T.N <- c(T.N,TNum.Mat[qd,5+t]) #local tree number
clm.c <- which(Climate[,1]==which(Plt.Ind[i,]==1) &
Climate[,2]==Year[t+1])
clm.p <- which(Climate[,1]==which(Plt.Ind[i,]==1) &
Climate[,2]==Year[t])
P.C <- c(P.C,Climate[clm.c,3]); P.P <- c(P.P,Climate[clm.p,3])
JMx.C <- c(JMx.C,Climate[clm.c,4]); JMx.P <- c(JMx.P,Climate[clm.p,4])
JMn.C <- c(JMn.C,Climate[clm.c,5]); JMn.P <- c(JMn.P,Climate[clm.p,5])
AT.C <- c(AT.C,Climate[clm.c,6]); AT.P <- c(AT.P,Climate[clm.p,6])
S.C <- c(S.C,Climate[clm.c,7]); S.P <- c(S.P,Climate[clm.p,7])
CWD.C <- c(CWD.C,Climate[clm.c,8]); CWD.P <- c(CWD.P,Climate[clm.p,8])
P.CD <- c(P.CD,Climate[clm.c,3]-Climate.mean[which(Plt.Ind[i,]==1),2])
P.PD <-c(P.PD,Climate[clm.p,3]-Climate.mean[which(Plt.Ind[i,]==1),2])
JMx.CD <- c(JMx.CD,Climate[clm.c,4]-Climate.mean[which(Plt.Ind[i,]==1),3])
JMx.PD <- c(JMx.PD,Climate[clm.p,4]-Climate.mean[which(Plt.Ind[i,]==1),3])
JMn.CD <- c(JMn.CD,Climate[clm.c,5]-Climate.mean[which(Plt.Ind[i,]==1),4])
JMn.PD <- c(JMn.PD,Climate[clm.p,5]-Climate.mean[which(Plt.Ind[i,]==1),4])
AT.CD <- c(AT.CD,Climate[clm.c,6]-Climate.mean[which(Plt.Ind[i,]==1),5])
AT.PD <- c(AT.PD,Climate[clm.p,6]-Climate.mean[which(Plt.Ind[i,]==1),5])
S.CD <- c(S.CD,Climate[clm.c,7]-Climate.mean[which(Plt.Ind[i,]==1),6])
S.PD <- c(S.PD,Climate[clm.p,7]-Climate.mean[which(Plt.Ind[i,]==1),6])
CWD.CD <- c(CWD.CD,Climate[clm.c,8]-Climate.mean[which(Plt.Ind[i,]==1),7])
CWD.PD <- c(CWD.PD,Climate[clm.p,8]-Climate.mean[which(Plt.Ind[i,]==1),7])
}# end if sdl. alive loop
} #end "if t < 10"
} #end t loop
print(i)
if(length(Y.surv)!=length(T.BA)) break
} #end i loop
#Length Y.surv = 63904
#Fire parameter
N1 <- length(Y.surv)
Fire <- matrix(0,N1,3)#disturbed this year, -1 yr, fire 2-7 years ago
for(j in 1:N1){
if(PLT[j,1]==1){ #YOHOPIPO fire in 2007
if(which(YR[j,]==1)==9) Fire[j,1]<-1
if(which(YR[j,]==1)==10) Fire[j,2]<-1
}
if(PLT[j,5]==1){ #FFS6Burn fire in 2001 fall (so 2002 "current")
if(which(YR[j,]==1)==4) Fire[j,1]<-1
if(which(YR[j,]==1)==5) Fire[j,2]<-1
if(which(YR[j,]==1)==6) Fire[j,3]<-1
if(which(YR[j,]==1)==7) Fire[j,3]<-1
if(which(YR[j,]==1)==8) Fire[j,3]<-1
if(which(YR[j,]==1)==9) Fire[j,3]<-1
if(which(YR[j,]==1)==10) Fire[j,3]<-1
}
if(PLT[j,6]==1){ #FFS5Burn fire in 2001 fall (so 2002 "current")
if(which(YR[j,]==1)==4) Fire[j,1]<-1
if(which(YR[j,]==1)==5) Fire[j,2]<-1
if(which(YR[j,]==1)==6) Fire[j,3]<-1
if(which(YR[j,]==1)==7) Fire[j,3]<-1
if(which(YR[j,]==1)==8) Fire[j,3]<-1
if(which(YR[j,]==1)==9) Fire[j,3]<-1
if(which(YR[j,]==1)==10) Fire[j,3]<-1
}
if(PLT[j,7]==1){ #FFS2Burn fire in 2001 fall (so 2002 "current")
if(which(YR[j,]==1)==4) Fire[j,1]<-1
if(which(YR[j,]==1)==5) Fire[j,2]<-1
if(which(YR[j,]==1)==6) Fire[j,3]<-1
if(which(YR[j,]==1)==7) Fire[j,3]<-1
if(which(YR[j,]==1)==8) Fire[j,3]<-1
if(which(YR[j,]==1)==9) Fire[j,3]<-1
if(which(YR[j,]==1)==10) Fire[j,3]<-1
}
if(PLT[j,8]==1){ #LOTHAR fire in 2004 (so 2005 "current")
if(which(YR[j,]==1)==7) Fire[j,1]<-1
if(which(YR[j,]==1)==8) Fire[j,2]<-1
if(which(YR[j,]==1)==9) Fire[j,3]<-1
if(which(YR[j,]==1)==10) Fire[j,3]<-1
}
if(PLT[j,9]==1){ #UPTHAR fire in 2004 (so 2005 "current")
if(which(YR[j,]==1)==7) Fire[j,1]<-1
if(which(YR[j,]==1)==8) Fire[j,2]<-1
if(which(YR[j,]==1)==9) Fire[j,3]<-1
if(which(YR[j,]==1)==10) Fire[j,3]<-1
}
print(j)
}
save.image(file="TagSdl_Data5.RData")
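# Hedged follow-up (not in the original script): reload the image and spot
# check the objects the downstream models expect.
if (FALSE) {
  load("TagSdl_Data5.RData")
  stopifnot(length(Y.surv) == nrow(SPP), length(Y.surv) == nrow(Fire))
  table(Y.surv) # deaths (0) vs. survivals (1) among seedling-year records
}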
|
/DataOrganization.R
|
no_license
|
emoran5/SeedlingModel
|
R
| false | false | 17,543 |
r
|
########## Gets Data Organized To Go Into Models
bframe1 <-data.frame(read.table("TaggedSdl.txt",header=T))
######################### Tree plots
#####Standard plots
allplots <- c('YOHOPIPO','CCRPIPO','CRCRPIPO','FFS7CONTROL','FFS6BURN','FFS5BURN','FFS2BURN','LOTHAR','UPTHAR','LOLOG','UPLOG','LOGPIJE','SFTRABMA','WTABMA','POFLABMA','PGABMA','BBBPIPO','FRPIJE','LMCC','LOGSEGI','EMSLOPE','EMRIDGE')
nplots <- length(allplots)
####"Extra" plots (the 3 that are next to each other)
extplots <- c('SUPILA','SURIP','SUABCO')
neplots <- length(extplots)
plt.sdl <- numeric(0)
for(p in 1:nplots){
plt.sdl <- c(plt.sdl,which(bframe1[,1]==allplots[p]))
}
for(p in 1:neplots){
plt.sdl <- c(plt.sdl,which(bframe1[,1]==extplots[p]))
}
##########Only seedlings in selected plots
Seedlings <- bframe1[plt.sdl,]
#######Size classes = <10,10-25,25-50,50-75,75-100,100-137 cm
SizeClass <- c(10,25,50,75,100,137)
N.Cls <- length(SizeClass)
Year <- seq(1999,2008,by=1)
NYr <- length(Year)
NSdl <- nrow(Seedlings)
max.col <- ncol(Seedlings)
Species <- c('ABCO','ABMA','ABPS','ABXX','CADE','PICO','PIJE','PILA','PIMO','PIPO','PIXX','PSME','QUCH', 'QUKE','SEGI')
NSpp <- length(Species)
#Make a survival matrix and size matrix. -33 indicates not yet tagged, -22 that seedling is dead
Surv.Mat <- matrix(-33,NSdl,NYr); Size.Mat <- matrix(-33,NSdl,NYr)
#Create species indicators
Spp.Ind <- matrix(0,NSdl,NSpp)
#Create plot indicators
Plt.Ind <- matrix(0,NSdl,nplots+neplots)
for(i in 1:NSdl){
Spp.Ind[i,which(Species == Seedlings[i,'SPPCODE'])] <- 1
Plt.Ind[i,which(c(allplots,extplots) == Seedlings[i,'PLOT_NAME'])] <- 1
for(t in 1:NYr){
if(Seedlings[i,7+t]>0){ #measured this year?
Size.Mat[i,t] <- Seedlings[i,7+t]
Surv.Mat[i,t] <- 1
}
if(Seedlings[i,7+t]==-77){ #transition to adult size
Size.Mat[i,t] <- 150
Surv.Mat[i,t] <- 1
}
if(Seedlings[i,7+t]==-66){ #known dead
Size.Mat[i,t] <- 0
Surv.Mat[i,t] <- 0
}
if(Seedlings[i,7+t]==-88){ #tag recovered, likely dead
Size.Mat[i,t] <- 0
Surv.Mat[i,t] <- 0
}
if(Seedlings[i,7+t]==-99){ #missing
if(length(which(Seedlings[i,(7+t):max.col]>0))>=1){ #recorded in future
if(length(which(Seedlings[i,(7+1):(7+t)]>0))>=1){ #recorded in past
Surv.Mat[i,t] <- 1
aa <- which(Seedlings[i,8:(7+t)]>0)
if(length(aa)>0) Size.Mat[i,t] <- Seedlings[i,7+max(aa)] #last recorded height
}}
else { #not recorded in future
Size.Mat[i,t] <- 0
Surv.Mat[i,t] <- 0
}
}#end "missing" loop
if(Seedlings[i,t+7]==-22 & Seedlings[i,(t+7)-1]>0) { #marked previously dead but clearly not
Surv.Mat[i,t] <- 0
Size.Mat[i,t] <- 0
}
if(Seedlings[i,t+7]==-22 & Seedlings[i,(t+7)-1]==-22) { #marked previously dead this year and last
Surv.Mat[i,t] <- -22
Size.Mat[i,t] <- -22
}
if(Seedlings[i,t+7]==-22 & Seedlings[i,(t+7)-1]< -60) { #marked previously dead this year and dead or likely dead earlier
Surv.Mat[i,t] <- -22
Size.Mat[i,t] <- -22
}
if(t>1){
if(Surv.Mat[i,t]==0 & Surv.Mat[i,t-1]==0) {
Surv.Mat[i,t] <- -22
Size.Mat[i,t] <- -22 #previously dead
}
if(Surv.Mat[i,t]==0 & Surv.Mat[i,t-1]==-22) {
Surv.Mat[i,t] <- -22
Size.Mat[i,t] <- -22 #previously dead
}}
}#end year loop
print(i)
}#end seedling loop
####Basal area and trees near seedling
bframe2 <-data.frame(read.table("PlotInfo2.txt",header=T))
bframe3 <- data.frame(read.table("treeyears.txt",header=T))
bframe4 <-read.csv("tree2.csv",header=T)
bframe5 <-read.csv("quadrat_precise.csv",header=T)
# to calculate distances
distmat <- function(x1,y1,x2,y2){
xd <- outer(x1,x2,function(x1,x2) (x1 - x2)^2)
yd <- outer(y1,y2,function(y1,y2) (y1 - y2)^2)
d <- t(sqrt(xd + yd))
return(d)
}
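## Illustrative check of distmat() (not part of the data pipeline): with a
## single reference point at the origin, distances reduce to sqrt(x^2 + y^2);
## note the transpose, so rows index the second point set.
# distmat(x1 = c(0, 3), y1 = c(0, 4), x2 = 0, y2 = 0)  # 1 x 2 matrix: 0, 5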
BA.Mat <- numeric(0); TNum.Mat <- numeric(0)
for (p in 1:nplots){
Quad <-bframe5[which(bframe5[,1]==allplots[p]),] #set of quadrat locations for plot
nquad <- nrow(Quad)
trees <- bframe4[bframe4$PLOT==allplots[p],] #Trees in plot
ntree <- nrow(trees)
treex <- trees$newX
treey <- trees$newY
#distances between trees and quadrats
TQdist <- distmat(treex,treey,Quad[,6],Quad[,7])
####calculate basal areas
treeyears<-bframe3[which(bframe3[,'plot']==allplots[p]),2:8]
#which census periods match sdl census?
rel.census <- which(treeyears>=1999 & treeyears<=2008)
tree.sdl.yrs <- treeyears[rel.census]
ba <- matrix(0,ntree,NYr)
for(i in 1:ntree){
for(t in 1:NYr){
if(Year[t]<tree.sdl.yrs[1]) dbh <- trees[i,(7+rel.census[1])]
if(length(tree.sdl.yrs)>1){
if(Year[t]>=tree.sdl.yrs[1] & Year[t]<tree.sdl.yrs[2]) dbh <- trees[i,
(7+rel.census[1])]
if(length(tree.sdl.yrs)>2){
if(Year[t]>=tree.sdl.yrs[2] & Year[t]<tree.sdl.yrs[3]) dbh <- trees[i,
(7+rel.census[2])]
if(Year[t]>=tree.sdl.yrs[3]) dbh <- trees[i,(7+rel.census[3])]
}
else {if(Year[t]>=tree.sdl.yrs[2]) dbh <- trees[i,(7+rel.census[2])]}
}
if(length(which(dbh>=0))>0) ba[i,t]<-pi*((dbh/200)^2) #in meters, not cm
if(length(which(trees[i,"MortalityYear"]>=0))>0 & trees[i,"MortalityYear"]<=Year[t])
ba[i,t]<-0
if(length(which(trees[i,"IngrowthYear"]>=0))>0 & trees[i,"IngrowthYear"]>Year[t])
ba[i,t]<-0
}
}
##calculate tree number and basal area within 10 m of quadrat
ba.quad <- matrix(0,nquad,NYr)
tn.quad <- matrix(0,nquad,NYr)
for (j in 1:nquad){
q <- which(TQdist[j,]<10)
if(length(q)>0) {
tree.sub <- trees[q,]
ba.sub <- ba[q,]
for (t in 1:NYr){
if(length(q)>1){
qq <- which(ba.sub[,t]>0)
tn.quad[j,t] <- length(qq)
ba.quad[j,t] <- sum(ba.sub[,t])
}
if(length(q)==1){
if(ba.sub[t]>0) tn.quad[j,t] <- 1
ba.quad[j,t] <- ba.sub[t]
}
} #end t loop
}} #end if dist and nquad loops
temp.BA <- cbind(rep(p,nquad),Quad[,2:3],Quad[,6:7],ba.quad)
temp.Num <- cbind(rep(p,nquad),Quad[,2:3],Quad[,6:7],tn.quad)
if(p==1) {
BA.Mat <- temp.BA
TNum.Mat <- temp.Num
}
if(p>1){
BA.Mat <- rbind(BA.Mat,temp.BA)
TNum.Mat <- rbind(TNum.Mat,temp.Num)
}
print(p)
} #end plot loop
##Add on additional plots
####SUPILA first
Quad1 <-bframe5[which(bframe5[,1]==extplots[1]),] #set of quadrat locations for plot
nquad1 <- nrow(Quad1)
trees1 <- bframe4[bframe4$PLOT==extplots[1],] #Trees in plot
ntree1 <- nrow(trees1)
treex1 <- trees1$newX
treey1 <- trees1$newY
Qx1 <- Quad1[,6]
Qy1 <- Quad1[,7]
######Add on SURIP
Quad2 <-bframe5[which(bframe5[,1]==extplots[2]),] #set of quadrat locations for plot
nquad2 <- nrow(Quad2)
trees2 <- bframe4[bframe4$PLOT==extplots[2],]
trees2 <- trees2[c(1:290,292:nrow(trees2)),] #Trees in plot (row 291 dropped; nrow() used because treey2 is only defined below)
ntree2 <- nrow(trees2)
treex2 <- trees2$newX
treey2 <- trees2$newY-125
Qx2 <- Quad2[,6]
Qy2 <- Quad2[,7]-125
######Add on SUABCO
Quad3 <-bframe5[which(bframe5[,1]==extplots[3]),] #set of quadrat locations for plot
nquad3 <- nrow(Quad3)
trees3 <- bframe4[bframe4$PLOT==extplots[3],]
#Trees in plot
ntree3 <- nrow(trees3)
treex3 <- trees3$newX+50
treey3 <- trees3$newY-200
Qx3 <- Quad3[,6]+50
Qy3 <- Quad3[,7]-200
treex <- c(treex1,treex2,treex3)
treey <- c(treey1,treey2,treey3)
Qx <- c(Qx1,Qx2,Qx3)
Qy <- c(Qy1,Qy2,Qy3)
ntree <- length(treex)
trees <- rbind(trees1,trees2,trees3)
nquad <- length(Qx)
#distances between trees and quadrats
TQdist <- distmat(treex,treey,Qx,Qy)
####calculate basal areas
treeyears<-rbind(bframe3[which(bframe3[,'plot']==extplots[1]),2:8],bframe3[which(bframe3[,'plot']==extplots[2]),2:8],bframe3[which(bframe3[,'plot']==extplots[3]),2:8])
#which census periods match sdl census?
rel.census <- which(treeyears[1,]>=1999 & treeyears[1,]<=2008)
tree.sdl.yrs <- treeyears[1,rel.census]
ba <- matrix(0,ntree,NYr)
for(i in 1:ntree){
for(t in 1:NYr){
if(Year[t]<tree.sdl.yrs[1]) dbh <- trees[i,(7+rel.census[1])]
if(length(tree.sdl.yrs)>1){
if(Year[t]>=tree.sdl.yrs[1] & Year[t]<tree.sdl.yrs[2]) dbh <- trees[i,
(7+rel.census[1])]
if(length(tree.sdl.yrs)>2){
if(Year[t]>=tree.sdl.yrs[2] & Year[t]<tree.sdl.yrs[3]) dbh <- trees[i,
(7+rel.census[2])]
if(Year[t]>=tree.sdl.yrs[3]) dbh <- trees[i,(7+rel.census[3])]
}
else {if(Year[t]>=tree.sdl.yrs[2]) dbh <- trees[i,(7+rel.census[2])]}
}
if(length(which(dbh>=0))>0) ba[i,t]<-pi*((dbh/200)^2) #in meters, not cm
if(length(which(trees[i,"MortalityYear"]>=0))>0 & trees[i,"MortalityYear"]<=Year[t])
ba[i,t]<-0
if(length(which(trees[i,"IngrowthYear"]>=0))>0 & trees[i,"IngrowthYear"]>Year[t])
ba[i,t]<-0
}
}
##calculate tree number and basal area within 10 m of quadrat
ba.quad <- matrix(0,nquad,NYr)
tn.quad <- matrix(0,nquad,NYr)
for (j in 1:nquad){
q <- which(TQdist[j,]<10)
if(length(q)>0) {
tree.sub <- trees[q,]
ba.sub <- ba[q,]
for (t in 1:NYr){
if(length(q)>1){
qq <- which(ba.sub[,t]>0)
tn.quad[j,t] <- length(qq)
ba.quad[j,t] <- sum(ba.sub[,t])
}
if(length(q)==1){
if(ba.sub[t]>0) tn.quad[j,t] <- 1
ba.quad[j,t] <- ba.sub[t]
}
} #end t loop
}} #end if dist and nquad loops
plot.ind <- c(rep(23,nquad1),rep(24,nquad2),rep(25,nquad3))
Quad.ind <- rbind(Quad1[,2:3],Quad2[,2:3],Quad3[,2:3])
BA.Temp <- cbind(plot.ind,Quad.ind,Qx,Qy,ba.quad)
TNum.Temp <- cbind(plot.ind,Quad.ind,Qx,Qy,tn.quad)
colnames(BA.Mat) <- colnames(BA.Temp)
BA.Mat <- rbind(BA.Mat,BA.Temp)
colnames(TNum.Mat) <- colnames(TNum.Temp)
TNum.Mat <- rbind(TNum.Mat,TNum.Temp)
#### Climate
####Need to get new "basins" set up.
bframe6 <- data.frame(read.table("climate_data_long.txt",header=T))
basin <- c(27,1,3,10,9,8,7,17,25,16,24,14,20,26,19,18,0,11,13,15,6,4,21,21,21)
for(p in 1:(nplots+neplots)){
q <- which(bframe6[,3]==basin[p])
if(length(q)>0){
clim.temp <- bframe6[q,]
Plt <- rep(p,length(Year))
precip <- numeric(0); JulMax <- numeric(0)
JanMin <- numeric(0); AvTemp <- numeric(0)
snow <- numeric(0); CWD <- numeric(0)
for(t in Year){
precip <- c(precip,sum(clim.temp[which(clim.temp[,1]==t),4]))
snow <- c(snow,sum(clim.temp[which(clim.temp[,1]==t),9]))
CWD <- c(CWD,sum(clim.temp[which(clim.temp[,1]==t),15]))
JulMax <- c(JulMax,clim.temp[which(clim.temp[,1]==t & clim.temp[,2]==10),6])
JanMin <- c(JanMin,clim.temp[which(clim.temp[,1]==t & clim.temp[,2]==4),7])
AvTemp <- c(AvTemp,mean(clim.temp[which(clim.temp[,1]==t),8]))
}
if(p==1) Climate <- cbind(Plt,Year,precip,JulMax,JanMin,AvTemp,snow,CWD)
if(p>1) Climate <- rbind(Climate,cbind(Plt,Year,precip,JulMax,JanMin,AvTemp,snow,CWD))
}
}
##Average climate by plot
est.yrs <- seq(1976,2013,by=1)
for(p in 1:(nplots+neplots)){
q <- which(bframe6[,3]==basin[p])
if(length(q)>0){
clim.temp <- bframe6[q,]
Plt <- rep(p,length(est.yrs))
precip.all <- numeric(0); JulMax.all <- numeric(0)
JanMin.all <- numeric(0); AvTemp.all <- numeric(0)
snow.all <- numeric(0); CWD.all <- numeric(0)
for(t in est.yrs){
precip.all <- c(precip.all,sum(clim.temp[which(clim.temp[,1]==t),4]))
snow.all <- c(snow.all,sum(clim.temp[which(clim.temp[,1]==t),9]))
CWD.all <- c(CWD.all,sum(clim.temp[which(clim.temp[,1]==t),15]))
JulMax.all <- c(JulMax.all,clim.temp[which(clim.temp[,1]==t & clim.temp[,2]==10),6])
JanMin.all <- c(JanMin.all,clim.temp[which(clim.temp[,1]==t & clim.temp[,2]==4),7])
AvTemp.all <- c(AvTemp.all,mean(clim.temp[which(clim.temp[,1]==t),8]))
}
plot.clim <- cbind(precip.all,JulMax.all,JanMin.all,AvTemp.all,snow.all,CWD.all)
mn.plot.clim <- apply(plot.clim,2,mean)
if(p==1) {
Climate.all <- cbind(Plt,est.yrs,plot.clim)
Climate.mean <- c(p,mn.plot.clim)
}
if(p>1) {
Climate.all <- rbind(Climate.all,cbind(Plt,est.yrs,plot.clim))
Climate.mean <- rbind(Climate.mean,c(p,mn.plot.clim))
}
}
}
##### Setting up x's and y's
Y.surv <- numeric(0) #survival vector
Y.grow <- numeric(0) #growth vector
SPP <- numeric(0) #Species indicator
Elev <- numeric(0) #elevation
PLT <- numeric(0) #Plot indicator
YR <- numeric(0) #Year indicator
SZ <- numeric(0) #Size-class indicator
T.BA <- numeric(0) #tree BA
T.N <- numeric(0) #tree num
P.C <- numeric(0) #precip current yr
JMx.C <- numeric(0) #JulMax current yr
JMn.C <- numeric(0) #JanMin current yr
AT.C <- numeric(0)#AvTemp current yr
S.C <- numeric(0)#snow current yr
CWD.C <-numeric(0) #CWD current yr
P.P <- numeric(0)#precip prev yr
JMx.P <-numeric(0)#JulMax prev yr
JMn.P <-numeric(0)#JanMin prev yr
AT.P <-numeric(0)#AvTemp prev yr
S.P <-numeric(0)#snow prev yr
CWD.P <-numeric(0)#CWD prev yr
P.CD <- numeric(0) #precip current yr rel to mean
JMx.CD <- numeric(0) #JulMax current yr rel to mean
JMn.CD <- numeric(0) #JanMin current yr rel to mean
AT.CD <- numeric(0)#AvTemp current yr rel to mean
S.CD <- numeric(0)#snow current yr rel to mean
CWD.CD <-numeric(0) #CWD current yr rel to mean
P.PD <- numeric(0)#precip prev yr rel to mean
JMx.PD <-numeric(0)#JulMax prev yr rel to mean
JMn.PD <-numeric(0)#JanMin prev yr rel to mean
AT.PD <-numeric(0)#AvTemp prev yr rel to mean
S.PD <-numeric(0)#snow prev yr rel to mean
CWD.PD <-numeric(0)#CWD prev yr rel to mean
for(i in 1:4000){ #1:NSdl
lyr <- which(Surv.Mat[i,]>=0)
for(t in lyr){
if(t<10){
if(Surv.Mat[i,t]==1){
#The survival vector
if(Surv.Mat[i,t+1]==1) Y.surv <- c(Y.surv,1)
if(Surv.Mat[i,t+1]==0) Y.surv <- c(Y.surv,0)
#The growth vector
if(Size.Mat[i,t]>=Size.Mat[i,t+1]) Y.grow <- c(Y.grow,0)
if(Size.Mat[i,t]<Size.Mat[i,t+1]) Y.grow <- c(Y.grow,1)
#The X's
SPP <- rbind(SPP,Spp.Ind[i,]) #species
PLT <- rbind(PLT,Plt.Ind[i,]) #plot
yr <- rep(0,NYr); yr[t+1]<- 1
Elev <- c(Elev,bframe2[which(bframe2[, 1]==c(allplots,extplots)[which(Plt.Ind[i,]==1)]),2]) #elevation (indicator columns cover the 3 extra plots too)
YR <- rbind(YR,yr) #year
a <- which(SizeClass==Size.Mat[i,t])
sz<- rep(0,N.Cls); sz[a] <- 1
SZ<- rbind(SZ,sz) #size class
qd <- which(BA.Mat[,1]==which(Plt.Ind[i,]==1) & BA.Mat[,
2]==Seedlings[i,2] & BA.Mat[,3]==Seedlings[i,3])
T.BA <- c(T.BA,BA.Mat[qd,5+t]) #local basal area
T.N <- c(T.N,TNum.Mat[qd,5+t]) #local tree number
clm.c <- which(Climate[,1]==which(Plt.Ind[i,]==1) &
Climate[,2]==Year[t+1])
clm.p <- which(Climate[,1]==which(Plt.Ind[i,]==1) &
Climate[,2]==Year[t])
P.C <- c(P.C,Climate[clm.c,3]); P.P <- c(P.P,Climate[clm.p,3])
JMx.C <- c(JMx.C,Climate[clm.c,4]); JMx.P <- c(JMx.P,Climate[clm.p,4])
JMn.C <- c(JMn.C,Climate[clm.c,5]); JMn.P <- c(JMn.P,Climate[clm.p,5])
AT.C <- c(AT.C,Climate[clm.c,6]); AT.P <- c(AT.P,Climate[clm.p,6])
S.C <- c(S.C,Climate[clm.c,7]); S.P <- c(S.P,Climate[clm.p,7])
CWD.C <- c(CWD.C,Climate[clm.c,8]); CWD.P <- c(CWD.P,Climate[clm.p,8])
P.CD <- c(P.CD,Climate[clm.c,3]-Climate.mean[which(Plt.Ind[i,]==1),2])
P.PD <-c(P.PD,Climate[clm.p,3]-Climate.mean[which(Plt.Ind[i,]==1),2])
JMx.CD <- c(JMx.CD,Climate[clm.c,4]-Climate.mean[which(Plt.Ind[i,]==1),3])
JMx.PD <- c(JMx.PD,Climate[clm.p,4]-Climate.mean[which(Plt.Ind[i,]==1),3])
JMn.CD <- c(JMn.CD,Climate[clm.c,5]-Climate.mean[which(Plt.Ind[i,]==1),4])
JMn.PD <- c(JMn.PD,Climate[clm.p,5]-Climate.mean[which(Plt.Ind[i,]==1),4])
AT.CD <- c(AT.CD,Climate[clm.c,6]-Climate.mean[which(Plt.Ind[i,]==1),5])
AT.PD <- c(AT.PD,Climate[clm.p,6]-Climate.mean[which(Plt.Ind[i,]==1),5])
S.CD <- c(S.CD,Climate[clm.c,7]-Climate.mean[which(Plt.Ind[i,]==1),6])
S.PD <- c(S.PD,Climate[clm.p,7]-Climate.mean[which(Plt.Ind[i,]==1),6])
CWD.CD <- c(CWD.CD,Climate[clm.c,8]-Climate.mean[which(Plt.Ind[i,]==1),7])
CWD.PD <- c(CWD.PD,Climate[clm.p,8]-Climate.mean[which(Plt.Ind[i,]==1),7])
}# end if sdl. alive loop
} #end "if t < 10"
} #end t loop
print(i)
if(length(Y.surv)!=length(T.BA)) break
} #end i loop
#Length Y.surv = 63904
#Fire parameter
N1 <- length(Y.surv)
Fire <- matrix(0,N1,3)#disturbed this year, -1 yr, fire 2-7 years ago
for(j in 1:N1){
if(PLT[j,1]==1){ #YOHOPIPO fire in 2007
if(which(YR[j,]==1)==9) Fire[j,1]<-1
if(which(YR[j,]==1)==10) Fire[j,2]<-1
}
if(PLT[j,5]==1){ #FFS6Burn fire in 2001 fall (so 2002 "current")
if(which(YR[j,]==1)==4) Fire[j,1]<-1
if(which(YR[j,]==1)==5) Fire[j,2]<-1
if(which(YR[j,]==1)==6) Fire[j,3]<-1
if(which(YR[j,]==1)==7) Fire[j,3]<-1
if(which(YR[j,]==1)==8) Fire[j,3]<-1
if(which(YR[j,]==1)==9) Fire[j,3]<-1
if(which(YR[j,]==1)==10) Fire[j,3]<-1
}
if(PLT[j,6]==1){ #FFS5Burn fire in 2001 fall (so 2002 "current")
if(which(YR[j,]==1)==4) Fire[j,1]<-1
if(which(YR[j,]==1)==5) Fire[j,2]<-1
if(which(YR[j,]==1)==6) Fire[j,3]<-1
if(which(YR[j,]==1)==7) Fire[j,3]<-1
if(which(YR[j,]==1)==8) Fire[j,3]<-1
if(which(YR[j,]==1)==9) Fire[j,3]<-1
if(which(YR[j,]==1)==10) Fire[j,3]<-1
}
if(PLT[j,7]==1){ #FFS2Burn fire in 2001 fall (so 2002 "current")
if(which(YR[j,]==1)==4) Fire[j,1]<-1
if(which(YR[j,]==1)==5) Fire[j,2]<-1
if(which(YR[j,]==1)==6) Fire[j,3]<-1
if(which(YR[j,]==1)==7) Fire[j,3]<-1
if(which(YR[j,]==1)==8) Fire[j,3]<-1
if(which(YR[j,]==1)==9) Fire[j,3]<-1
if(which(YR[j,]==1)==10) Fire[j,3]<-1
}
if(PLT[j,8]==1){ #LOTHAR fire in 2004 (so 2005 "current")
if(which(YR[j,]==1)==7) Fire[j,1]<-1
if(which(YR[j,]==1)==8) Fire[j,2]<-1
if(which(YR[j,]==1)==9) Fire[j,3]<-1
if(which(YR[j,]==1)==10) Fire[j,3]<-1
}
if(PLT[j,9]==1){ #UPTHAR fire in 2004 (so 2005 "current")
if(which(YR[j,]==1)==7) Fire[j,1]<-1
if(which(YR[j,]==1)==8) Fire[j,2]<-1
if(which(YR[j,]==1)==9) Fire[j,3]<-1
if(which(YR[j,]==1)==10) Fire[j,3]<-1
}
print(j)
}
save.image(file="TagSdl_Data5.RData")
|
\name{summary.propagate}
\alias{summary.propagate}
\title{Summary function for 'propagate' objects}
\description{
Provides a printed summary of the results obtained by \code{\link{propagate}}, such as statistics of the first/second-order uncertainty propagation, Monte Carlo simulation, the covariance matrix, symbolic as well as evaluated versions of the Gradient ("sensitivity") and Hessian matrices, relative contributions, the coverage factor and the Welch-Satterthwaite degrees of freedom. If \code{do.sim = TRUE} was set in \code{propagate}, skewness/kurtosis and Shapiro-Wilk/Kolmogorov-Smirnov tests for normality are calculated on the Monte Carlo evaluations.
}
\usage{
\method{summary}{propagate}(object, ...)
}
\arguments{
\item{object}{an object returned from \code{\link{propagate}}.}
\item{...}{other parameters for future methods.}
}
\value{
A printed output with the items listed in 'Description'.
}
\details{
Calculates the "sensitivity"" \eqn{S_i} of each variable \eqn{x_i} to the propagated uncertainty, as defined in the \emph{Expression of the Uncertainty of Measurement in Calibration, Eqn 4.2, page 9} (see 'References'):\cr
\deqn{S_i = \mathrm{eval}\left(\frac{\partial f}{\partial x_i}\right)}\cr
The "contribution" matrix is then \eqn{\mathbf{C} = \mathbf{SS}^T\mathbf{\Sigma}}, where \eqn{\mathbf{\Sigma}} is the covariance matrix. In the implementation here, the "relative contribution" matrix \eqn{\mathbf{C}_{\mathrm{rel}}} is rescaled to sum up to 1.
}
\references{
Expression of the Uncertainty of Measurement in Calibration.\cr
European Cooperation for Accreditation (EA-4/02), 1999.
}
\author{
Andrej-Nikolai Spiess
}
\examples{
EXPR1 <- expression(x^2 * sin(y))
x <- c(5, 0.01)
y <- c(1, 0.01)
DF1 <- cbind(x, y)
RES1 <- propagate(expr = EXPR1, data = DF1, type = "stat",
do.sim = TRUE, verbose = TRUE, nsim = 100000)
summary(RES1)
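## Illustrative check of the sensitivity definition from 'Details' (not part
## of the propagate API): the gradient of EXPR1 evaluated at the mean values.
S <- attr(eval(deriv(EXPR1[[1]], c("x", "y")), list(x = 5, y = 1)), "gradient")
S  ## compare with the "sensitivity" values reported by summary(RES1)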
}
\keyword{models}
\keyword{nonlinear}
|
/man/summary.propagate.Rd
|
no_license
|
ProfessorPeregrine/propagate
|
R
| false | false | 2,016 |
rd
|
\name{summary.propagate}
\alias{summary.propagate}
\title{Summary function for 'propagate' objects}
\description{
Provides a printed summary of the results obtained by \code{\link{propagate}}, such as statistics of the first/second-order uncertainty propagation, Monte Carlo simulation, the covariance matrix, symbolic as well as evaluated versions of the Gradient ("sensitivity") and Hessian matrices, relative contributions, the coverage factor and the Welch-Satterthwaite degrees of freedom. If \code{do.sim = TRUE} was set in \code{propagate}, skewness/kurtosis and Shapiro-Wilk/Kolmogorov-Smirnov tests for normality are calculated on the Monte Carlo evaluations.
}
\usage{
\method{summary}{propagate}(object, ...)
}
\arguments{
\item{object}{an object returned from \code{\link{propagate}}.}
\item{...}{other parameters for future methods.}
}
\value{
A printed output with the items listed in 'Description'.
}
\details{
Calculates the "sensitivity"" \eqn{S_i} of each variable \eqn{x_i} to the propagated uncertainty, as defined in the \emph{Expression of the Uncertainty of Measurement in Calibration, Eqn 4.2, page 9} (see 'References'):\cr
\deqn{S_i = \mathrm{eval}\left(\frac{\partial f}{\partial x_i}\right)}\cr
The "contribution" matrix is then \eqn{\mathbf{C} = \mathbf{SS}^T\mathbf{\Sigma}}, where \eqn{\mathbf{\Sigma}} is the covariance matrix. In the implementation here, the "relative contribution" matrix \eqn{\mathbf{C}_{\mathrm{rel}}} is rescaled to sum up to 1.
}
\references{
Expression of the Uncertainty of Measurement in Calibration.\cr
European Cooperation for Accreditation (EA-4/02), 1999.
}
\author{
Andrej-Nikolai Spiess
}
\examples{
EXPR1 <- expression(x^2 * sin(y))
x <- c(5, 0.01)
y <- c(1, 0.01)
DF1 <- cbind(x, y)
RES1 <- propagate(expr = EXPR1, data = DF1, type = "stat",
do.sim = TRUE, verbose = TRUE, nsim = 100000)
summary(RES1)
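## Illustrative check of the sensitivity definition from 'Details' (not part
## of the propagate API): the gradient of EXPR1 evaluated at the mean values.
S <- attr(eval(deriv(EXPR1[[1]], c("x", "y")), list(x = 5, y = 1)), "gradient")
S  ## compare with the "sensitivity" values reported by summary(RES1)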
}
\keyword{models}
\keyword{nonlinear}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/connection.R
\docType{methods}
\name{dbConnect,PqDriver-method}
\alias{dbConnect,PqDriver-method}
\alias{dbDisconnect,PqConnection-method}
\title{Connect to a PostgreSQL database.}
\usage{
\S4method{dbConnect}{PqDriver}(drv, dbname = NULL, host = NULL,
port = NULL, password = NULL, user = NULL, ...)
\S4method{dbDisconnect}{PqConnection}(conn, ...)
}
\arguments{
\item{drv}{\code{RPostgres::Postgres()}}
\item{dbname}{Database name. If \code{NULL}, defaults to the user name.}
\item{host,port}{Host and port. If \code{NULL}, will be retrieved from
\code{PGHOST} and \code{PGPORT} env vars.}
\item{user,password}{User name and password. If \code{NULL}, will be
retrieved from \code{PGUSER} and \code{PGPASSWORD} env vars, or from the
appropriate line in \code{~/.pgpass}. See
\url{http://www.postgresql.org/docs/9.4/static/libpq-pgpass.html} for
more details.}
\item{...}{Other name-value pairs that describe additional connection
options as described at
\url{http://www.postgresql.org/docs/9.4/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS}}
\item{conn}{Connection to disconnect.}
}
\description{
Note that manually disconnecting a connection is not necessary with RPostgres;
if you delete the object containing the connection, it will be automatically
disconnected during the next GC.
}
\examples{
library(DBI)
con <- dbConnect(RPostgres::Postgres())
dbDisconnect(con)
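# Illustrative only: connecting with explicit parameters instead of relying
# on env vars / .pgpass (placeholder values, not a real server):
# con2 <- dbConnect(RPostgres::Postgres(), dbname = "test",
#                   host = "localhost", port = 5432, user = "me")
# dbDisconnect(con2)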
}
|
/man/dbConnect-PqDriver-method.Rd
|
no_license
|
robertzk/RPostgres
|
R
| false | false | 1,466 |
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/connection.R
\docType{methods}
\name{dbConnect,PqDriver-method}
\alias{dbConnect,PqDriver-method}
\alias{dbDisconnect,PqConnection-method}
\title{Connect to a PostgreSQL database.}
\usage{
\S4method{dbConnect}{PqDriver}(drv, dbname = NULL, host = NULL,
port = NULL, password = NULL, user = NULL, ...)
\S4method{dbDisconnect}{PqConnection}(conn, ...)
}
\arguments{
\item{drv}{\code{RPostgres::Postgres()}}
\item{dbname}{Database name. If \code{NULL}, defaults to the user name.}
\item{host,port}{Host and port. If \code{NULL}, will be retrieved from
\code{PGHOST} and \code{PGPORT} env vars.}
\item{user,password}{User name and password. If \code{NULL}, will be
retrieved from \code{PGUSER} and \code{PGPASSWORD} env vars, or from the
appropriate line in \code{~/.pgpass}. See
\url{http://www.postgresql.org/docs/9.4/static/libpq-pgpass.html} for
more details.}
\item{...}{Other name-value pairs that describe additional connection
options as described at
\url{http://www.postgresql.org/docs/9.4/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS}}
\item{conn}{Connection to disconnect.}
}
\description{
Note that manually disconnecting a connection is not necessary with RPostgres;
if you delete the object containing the connection, it will be automatically
disconnected during the next GC.
}
\examples{
library(DBI)
con <- dbConnect(RPostgres::Postgres())
dbDisconnect(con)
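# Illustrative only: connecting with explicit parameters instead of relying
# on env vars / .pgpass (placeholder values, not a real server):
# con2 <- dbConnect(RPostgres::Postgres(), dbname = "test",
#                   host = "localhost", port = 5432, user = "me")
# dbDisconnect(con2)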
}
|
source('getDataGeneral.R')
getDataCovid19datahubWithoutRecovered('Peru')
|
/getPeru.R
|
permissive
|
proffsg2020/nCovForecast
|
R
| false | false | 74 |
r
|
source('getDataGeneral.R')
getDataCovid19datahubWithoutRecovered('Peru')
|
## Build train/test SQLite tables from per-file CSVs, splitting at the
## molecular-formula level so no formula appears in both splits
## (~0.25% of new formulas are routed to the test set).
library(RSQLite)
conn <- dbConnect(RSQLite::SQLite(), "../perso/smiles_clean.db")
set.seed(1)
n_train = 0
n_test = 0
MolFormulas = data.frame()
for(k in dir('~/perso/process_full/')){
print(k)
data = read.csv(paste0('~/perso/process_full/',k))
colnames(data)[1] = 'id'
data = data[data$orga == 'True',]
MolF = unique(data$MolFormula)
MolF_new = setdiff(MolF, MolFormulas$MolFormulas)
Molf_new_train = sample(c(T,F),length(MolF_new),prob = c(0.9975,0.0025), replace = T)
logp_new = sapply(MolF_new,
function(x){
sample(data$logp[data$MolFormula == x],1)
})
if(length(MolF_new) != 0){
MolFormulas = rbind(MolFormulas,
data.frame(MolFormulas = MolF_new, train = Molf_new_train, logp = logp_new))
}
train = data$MolFormula %in% MolFormulas$MolFormulas[MolFormulas$train]
data_train = data[train,]
data_test = data[!train,]
data_train[,1] = seq(nrow(data_train)) + n_train
n_train = n_train + nrow(data_train)
if(nrow(data_test) != 0){
data_test[,1] = seq(nrow(data_test)) + n_test
n_test = n_test + nrow(data_test)
}
dbWriteTable(conn,"smiles_train", data_train, append = TRUE)
dbWriteTable(conn,"smiles_test", data_test, append = TRUE)
print(length(MolF_new))
print(nrow(data_test))
}
dbWriteTable(conn,"MolFormulas", MolFormulas, append = TRUE)
dbExecute(conn, 'CREATE UNIQUE INDEX id_test ON smiles_test (id);')
dbExecute(conn, 'CREATE UNIQUE INDEX id_train ON smiles_train (id);')
dbDisconnect(conn)
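## Illustrative follow-up (assumes the tables written above; not part of the
## conversion itself): confirm no molecular formula leaks across the splits.
# conn <- dbConnect(RSQLite::SQLite(), "../perso/smiles_clean.db")
# tr <- dbGetQuery(conn, "SELECT DISTINCT MolFormula FROM smiles_train")
# te <- dbGetQuery(conn, "SELECT DISTINCT MolFormula FROM smiles_test")
# stopifnot(length(intersect(tr$MolFormula, te$MolFormula)) == 0)
# dbDisconnect(conn)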
|
/notebook/20201212_convert_db.R
|
no_license
|
DimitriF/gpt-smiles
|
R
| false | false | 1,547 |
r
|
## Build train/test SQLite tables from per-file CSVs, splitting at the
## molecular-formula level so no formula appears in both splits
## (~0.25% of new formulas are routed to the test set).
library(RSQLite)
conn <- dbConnect(RSQLite::SQLite(), "../perso/smiles_clean.db")
set.seed(1)
n_train = 0
n_test = 0
MolFormulas = data.frame()
for(k in dir('~/perso/process_full/')){
print(k)
data = read.csv(paste0('~/perso/process_full/',k))
colnames(data)[1] = 'id'
data = data[data$orga == 'True',]
MolF = unique(data$MolFormula)
MolF_new = setdiff(MolF, MolFormulas$MolFormulas)
Molf_new_train = sample(c(T,F),length(MolF_new),prob = c(0.9975,0.0025), replace = T)
logp_new = sapply(MolF_new,
function(x){
sample(data$logp[data$MolFormula == x],1)
})
if(length(MolF_new) != 0){
MolFormulas = rbind(MolFormulas,
data.frame(MolFormulas = MolF_new, train = Molf_new_train, logp = logp_new))
}
train = data$MolFormula %in% MolFormulas$MolFormulas[MolFormulas$train]
data_train = data[train,]
data_test = data[!train,]
data_train[,1] = seq(nrow(data_train)) + n_train
n_train = n_train + nrow(data_train)
if(nrow(data_test) != 0){
data_test[,1] = seq(nrow(data_test)) + n_test
n_test = n_test + nrow(data_test)
}
dbWriteTable(conn,"smiles_train", data_train, append = TRUE)
dbWriteTable(conn,"smiles_test", data_test, append = TRUE)
print(length(MolF_new))
print(nrow(data_test))
}
dbWriteTable(conn,"MolFormulas", MolFormulas, append = TRUE)
dbExecute(conn, 'CREATE UNIQUE INDEX id_test ON smiles_test (id);')
dbExecute(conn, 'CREATE UNIQUE INDEX id_train ON smiles_train (id);')
dbDisconnect(conn)
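## Illustrative follow-up (assumes the tables written above; not part of the
## conversion itself): confirm no molecular formula leaks across the splits.
# conn <- dbConnect(RSQLite::SQLite(), "../perso/smiles_clean.db")
# tr <- dbGetQuery(conn, "SELECT DISTINCT MolFormula FROM smiles_train")
# te <- dbGetQuery(conn, "SELECT DISTINCT MolFormula FROM smiles_test")
# stopifnot(length(intersect(tr$MolFormula, te$MolFormula)) == 0)
# dbDisconnect(conn)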
|
shinyDataFilter_resourcePath <- function() {
singleton(shiny::addResourcePath(
"shinyDataFilter_shared",
system.file(package = "shinyDataFilter", "www", "shared")))
}
|
/R/singleton_resourcePath.R
|
permissive
|
dgkf/shinyDataFilter
|
R
| false | false | 176 |
r
|
shinyDataFilter_resourcePath <- function() {
singleton(shiny::addResourcePath(
"shinyDataFilter_shared",
system.file(package = "shinyDataFilter", "www", "shared")))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stream_data.R
\name{stream_write_memory}
\alias{stream_write_memory}
\title{Write Memory Stream}
\usage{
stream_write_memory(x, name = random_string("sparklyr_tmp_"),
mode = c("append", "complete", "update"),
trigger = stream_trigger_interval(), checkpoint = file.path("checkpoints",
name, random_string("")), options = list(), ...)
}
\arguments{
\item{x}{A Spark DataFrame or dplyr operation}
\item{name}{The name to assign to the newly generated stream.}
\item{mode}{Specifies how data is written to a streaming sink. Valid values are
\code{"append"}, \code{"complete"} or \code{"update"}.}
\item{trigger}{The trigger for the stream query, defaults to micro-batches running
every 5 seconds. See \code{\link{stream_trigger_interval}} and
\code{\link{stream_trigger_continuous}}.}
\item{checkpoint}{The location where the system will write all the checkpoint
information to guarantee end-to-end fault-tolerance.}
\item{options}{A list of strings with additional options.}
\item{...}{Optional arguments; currently unused.}
}
\description{
Writes a Spark dataframe stream into a memory stream.
}
\examples{
\dontrun{
sc <- spark_connect(master = "local")
dir.create("iris-in")
write.csv(iris, "iris-in/iris.csv", row.names = FALSE)
stream <- stream_read_csv(sc, "iris-in") \%>\% stream_write_memory()
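# Illustrative (assumed signature; the interval is in milliseconds, matching
# the 5-second micro-batch default noted above): an explicit 10 s trigger.
# stream10 <- stream_read_csv(sc, "iris-in") \%>\%
#   stream_write_memory(trigger = stream_trigger_interval(interval = 10000))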
stop_stream(stream)
}
}
\seealso{
Other Spark stream serialization: \code{\link{stream_read_csv}},
\code{\link{stream_read_jdbc}},
\code{\link{stream_read_json}},
\code{\link{stream_read_kafka}},
\code{\link{stream_read_orc}},
\code{\link{stream_read_parquet}},
\code{\link{stream_read_text}},
\code{\link{stream_write_csv}},
\code{\link{stream_write_jdbc}},
\code{\link{stream_write_json}},
\code{\link{stream_write_kafka}},
\code{\link{stream_write_orc}},
\code{\link{stream_write_parquet}},
\code{\link{stream_write_text}}
}
|
/man/stream_write_memory.Rd
|
permissive
|
shabbybanks/sparklyr
|
R
| false | true | 1,950 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stream_data.R
\name{stream_write_memory}
\alias{stream_write_memory}
\title{Write Memory Stream}
\usage{
stream_write_memory(x, name = random_string("sparklyr_tmp_"),
mode = c("append", "complete", "update"),
trigger = stream_trigger_interval(), checkpoint = file.path("checkpoints",
name, random_string("")), options = list(), ...)
}
\arguments{
\item{x}{A Spark DataFrame or dplyr operation}
\item{name}{The name to assign to the newly generated stream.}
\item{mode}{Specifies how data is written to a streaming sink. Valid values are
\code{"append"}, \code{"complete"} or \code{"update"}.}
\item{trigger}{The trigger for the stream query, defaults to micro-batches running
every 5 seconds. See \code{\link{stream_trigger_interval}} and
\code{\link{stream_trigger_continuous}}.}
\item{checkpoint}{The location where the system will write all the checkpoint
information to guarantee end-to-end fault-tolerance.}
\item{options}{A list of strings with additional options.}
\item{...}{Optional arguments; currently unused.}
}
\description{
Writes a Spark dataframe stream into a memory stream.
}
\examples{
\dontrun{
sc <- spark_connect(master = "local")
dir.create("iris-in")
write.csv(iris, "iris-in/iris.csv", row.names = FALSE)
stream <- stream_read_csv(sc, "iris-in") \%>\% stream_write_memory()
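# Illustrative (assumed signature; the interval is in milliseconds, matching
# the 5-second micro-batch default noted above): an explicit 10 s trigger.
# stream10 <- stream_read_csv(sc, "iris-in") \%>\%
#   stream_write_memory(trigger = stream_trigger_interval(interval = 10000))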
stop_stream(stream)
}
}
\seealso{
Other Spark stream serialization: \code{\link{stream_read_csv}},
\code{\link{stream_read_jdbc}},
\code{\link{stream_read_json}},
\code{\link{stream_read_kafka}},
\code{\link{stream_read_orc}},
\code{\link{stream_read_parquet}},
\code{\link{stream_read_text}},
\code{\link{stream_write_csv}},
\code{\link{stream_write_jdbc}},
\code{\link{stream_write_json}},
\code{\link{stream_write_kafka}},
\code{\link{stream_write_orc}},
\code{\link{stream_write_parquet}},
\code{\link{stream_write_text}}
}
|
source("element.R")
library(xgboost)
library(parallel)  # detectCores(); may already be attached by element.R
library(Matrix)    # Matrix(..., sparse = TRUE); may already be attached by element.R
threads=detectCores()
number_of_core=threads/2
time_start<-Sys.time()
source("dlt_sum_L3.R")
m_r_ab_org<-as.matrix(read.csv(file = "l3_ab.csv", header = FALSE))[-1,]
m_r_ab_org<-m_r_ab_org[,-1]
row_result<-dim(m_r_ab_org)[1]-1
m_r_ab<-head(m_r_ab_org,row_result)
result<-tail(dlt,row_result)
m_record_l4<-c(
20075,02,09,09,18,33,04,10,
20076,02,11,19,27,29,02,08,
20077,02,11,23,29,29,02,04,
20078,06,15,17,19,34,04,11,
20079,04,12,24,26,33,03,09,
20080,05,14,17,26,29,08,10,
20081,08,10,21,25,27,03,06,
20082,02,10,16,23,30,06,09,
20083,06,11,15,23,30,05,06,
20084,06,13,16,21,28,04,05,
20085,03,06,18,26,33,04,09,
20086,05,16,19,25,31,08,10,
20087,02,03,19,28,30,06,07,
20088,05,08,21,22,29,03,09,
20089,01,08,13,22,30,03,10,
20090,05,08,16,26,29,08,10,
20091,03,10,18,26,28,05,06,
20092,02,05,12,19,25,03,08,
20093,09,18,22,26,33,04,09,
20094,05,10,18,24,28,01,08,
20095,03,07,12,18,29,03,07,
20096,09,12,20,29,30,04,07,
20097,04,09,21,22,26,04,07,
20098,10,11,21,22,28,06,09
)
trains.T.ab<-Matrix(m_r_ab,sparse=T)
if (threads <= 8) {
bst.a1<-xgboost(data = trains.T.ab,label = result$a1,nrounds = 300,verbose=0,params = list(tree_method = 'hist'))
bst.a2<-xgboost(data = trains.T.ab,label = result$a2,nrounds = 300,verbose=0,params = list(tree_method = 'hist'))
bst.a3<-xgboost(data = trains.T.ab,label = result$a3,nrounds = 300,verbose=0,params = list(tree_method = 'hist'))
bst.a4<-xgboost(data = trains.T.ab,label = result$a4,nrounds = 300,verbose=0,params = list(tree_method = 'hist'))
bst.a5<-xgboost(data = trains.T.ab,label = result$a5,nrounds = 300,verbose=0,params = list(tree_method = 'hist'))
bst.b1<-xgboost(data = trains.T.ab,label = result$b1,nrounds = 300,verbose=0,params = list(tree_method = 'hist'))
bst.b2<-xgboost(data = trains.T.ab,label = result$b2,nrounds = 300,verbose=0,params = list(tree_method = 'hist'))
}else{
bst.a1<-xgboost(data = trains.T.ab,label = result$a1,nrounds = 300,verbose=0,params = list(tree_method = 'hist',nthread=number_of_core))
bst.a2<-xgboost(data = trains.T.ab,label = result$a2,nrounds = 300,verbose=0,params = list(tree_method = 'hist',nthread=number_of_core))
bst.a3<-xgboost(data = trains.T.ab,label = result$a3,nrounds = 300,verbose=0,params = list(tree_method = 'hist',nthread=number_of_core))
bst.a4<-xgboost(data = trains.T.ab,label = result$a4,nrounds = 300,verbose=0,params = list(tree_method = 'hist',nthread=number_of_core))
bst.a5<-xgboost(data = trains.T.ab,label = result$a5,nrounds = 300,verbose=0,params = list(tree_method = 'hist',nthread=number_of_core))
bst.b1<-xgboost(data = trains.T.ab,label = result$b1,nrounds = 300,verbose=0,params = list(tree_method = 'hist',nthread=number_of_core))
bst.b2<-xgboost(data = trains.T.ab,label = result$b2,nrounds = 300,verbose=0,params = list(tree_method = 'hist',nthread=number_of_core))
}
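## Equivalent, more compact formulation (illustrative refactor only; same
## hyperparameters as the single-threaded branch above):
# fit_one <- function(lbl) xgboost(data = trains.T.ab, label = lbl,
#                                  nrounds = 300, verbose = 0,
#                                  params = list(tree_method = 'hist'))
# boosters <- lapply(result[c("a1","a2","a3","a4","a5","b1","b2")], fit_one)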
tests.T.ab<-Matrix(tail(m_r_ab_org,1),sparse=T)
testPredictions.a1 <- predict(object = bst.a1,newdata = tests.T.ab)
testPredictions.a2 <- predict(object = bst.a2,newdata = tests.T.ab)
testPredictions.a3 <- predict(object = bst.a3,newdata = tests.T.ab)
testPredictions.a4 <- predict(object = bst.a4,newdata = tests.T.ab)
testPredictions.a5 <- predict(object = bst.a5,newdata = tests.T.ab)
testPredictions.b1 <- predict(object = bst.b1,newdata = tests.T.ab)
testPredictions.b2 <- predict(object = bst.b2,newdata = tests.T.ab)
sum_l4_dlt<-c(sort(c(round(testPredictions.a1),
round(testPredictions.a2),
round(testPredictions.a3),
round(testPredictions.a4),
round(testPredictions.a5)
)),
sort(c(round(testPredictions.b1),
round(testPredictions.b2))))
time_end<-Sys.time()
time_dur<-time_end-time_start
time_dur
sum_l4_dlt<-c(max(dlt$n)+1,sum_l4_dlt)
sum_l4_dlt
|
/dlt_sum_L4.R
|
no_license
|
dlt-lee/dlt
|
R
| false | false | 3,881 |
r
|
source("element.R")
library(xgboost)
library(parallel)  # detectCores(); may already be attached by element.R
library(Matrix)    # Matrix(..., sparse = TRUE); may already be attached by element.R
threads=detectCores()
number_of_core=threads/2
time_start<-Sys.time()
source("dlt_sum_L3.R")
m_r_ab_org<-as.matrix(read.csv(file = "l3_ab.csv", header = FALSE))[-1,]
m_r_ab_org<-m_r_ab_org[,-1]
row_result<-dim(m_r_ab_org)[1]-1
m_r_ab<-head(m_r_ab_org,row_result)
result<-tail(dlt,row_result)
m_record_l4<-c(
20075,02,09,09,18,33,04,10,
20076,02,11,19,27,29,02,08,
20077,02,11,23,29,29,02,04,
20078,06,15,17,19,34,04,11,
20079,04,12,24,26,33,03,09,
20080,05,14,17,26,29,08,10,
20081,08,10,21,25,27,03,06,
20082,02,10,16,23,30,06,09,
20083,06,11,15,23,30,05,06,
20084,06,13,16,21,28,04,05,
20085,03,06,18,26,33,04,09,
20086,05,16,19,25,31,08,10,
20087,02,03,19,28,30,06,07,
20088,05,08,21,22,29,03,09,
20089,01,08,13,22,30,03,10,
20090,05,08,16,26,29,08,10,
20091,03,10,18,26,28,05,06,
20092,02,05,12,19,25,03,08,
20093,09,18,22,26,33,04,09,
20094,05,10,18,24,28,01,08,
20095,03,07,12,18,29,03,07,
20096,09,12,20,29,30,04,07,
20097,04,09,21,22,26,04,07,
20098,10,11,21,22,28,06,09
)
trains.T.ab<-Matrix(m_r_ab,sparse=T)
if (threads <= 8) {
bst.a1<-xgboost(data = trains.T.ab,label = result$a1,nrounds = 300,verbose=0,params = list(tree_method = 'hist'))
bst.a2<-xgboost(data = trains.T.ab,label = result$a2,nrounds = 300,verbose=0,params = list(tree_method = 'hist'))
bst.a3<-xgboost(data = trains.T.ab,label = result$a3,nrounds = 300,verbose=0,params = list(tree_method = 'hist'))
bst.a4<-xgboost(data = trains.T.ab,label = result$a4,nrounds = 300,verbose=0,params = list(tree_method = 'hist'))
bst.a5<-xgboost(data = trains.T.ab,label = result$a5,nrounds = 300,verbose=0,params = list(tree_method = 'hist'))
bst.b1<-xgboost(data = trains.T.ab,label = result$b1,nrounds = 300,verbose=0,params = list(tree_method = 'hist'))
bst.b2<-xgboost(data = trains.T.ab,label = result$b2,nrounds = 300,verbose=0,params = list(tree_method = 'hist'))
}else{
bst.a1<-xgboost(data = trains.T.ab,label = result$a1,nrounds = 300,verbose=0,params = list(tree_method = 'hist',nthread=number_of_core))
bst.a2<-xgboost(data = trains.T.ab,label = result$a2,nrounds = 300,verbose=0,params = list(tree_method = 'hist',nthread=number_of_core))
bst.a3<-xgboost(data = trains.T.ab,label = result$a3,nrounds = 300,verbose=0,params = list(tree_method = 'hist',nthread=number_of_core))
bst.a4<-xgboost(data = trains.T.ab,label = result$a4,nrounds = 300,verbose=0,params = list(tree_method = 'hist',nthread=number_of_core))
bst.a5<-xgboost(data = trains.T.ab,label = result$a5,nrounds = 300,verbose=0,params = list(tree_method = 'hist',nthread=number_of_core))
bst.b1<-xgboost(data = trains.T.ab,label = result$b1,nrounds = 300,verbose=0,params = list(tree_method = 'hist',nthread=number_of_core))
bst.b2<-xgboost(data = trains.T.ab,label = result$b2,nrounds = 300,verbose=0,params = list(tree_method = 'hist',nthread=number_of_core))
}
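## Equivalent, more compact formulation (illustrative refactor only; same
## hyperparameters as the single-threaded branch above):
# fit_one <- function(lbl) xgboost(data = trains.T.ab, label = lbl,
#                                  nrounds = 300, verbose = 0,
#                                  params = list(tree_method = 'hist'))
# boosters <- lapply(result[c("a1","a2","a3","a4","a5","b1","b2")], fit_one)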
tests.T.ab<-Matrix(tail(m_r_ab_org,1),sparse=T)
testPredictions.a1 <- predict(object = bst.a1,newdata = tests.T.ab)
testPredictions.a2 <- predict(object = bst.a2,newdata = tests.T.ab)
testPredictions.a3 <- predict(object = bst.a3,newdata = tests.T.ab)
testPredictions.a4 <- predict(object = bst.a4,newdata = tests.T.ab)
testPredictions.a5 <- predict(object = bst.a5,newdata = tests.T.ab)
testPredictions.b1 <- predict(object = bst.b1,newdata = tests.T.ab)
testPredictions.b2 <- predict(object = bst.b2,newdata = tests.T.ab)
sum_l4_dlt<-c(sort(c(round(testPredictions.a1),
round(testPredictions.a2),
round(testPredictions.a3),
round(testPredictions.a4),
round(testPredictions.a5)
)),
sort(c(round(testPredictions.b1),
round(testPredictions.b2))))
time_end<-Sys.time()
time_dur<-time_end-time_start
time_dur
sum_l4_dlt<-c(max(dlt$n)+1,sum_l4_dlt)
sum_l4_dlt
|
\name{dnacopy.smoothvec}
\alias{dnacopy.smoothvec}
\title{dnacopy smoothvec}
\description{Smooth a profile using DNAcopy.}
\usage{dnacopy.smoothvec(profile, var, vals, ...)}
\arguments{
\item{profile}{A profile data.frame.}
\item{var}{Smoothness variable.}
\item{vals}{Smoothness values.}
\item{\dots}{Other arguments, passed to segment.}
}
\value{Matrix of smoothed profiles: nparam x nprobes.}
\author{Toby Dylan Hocking}
|
/man/dnacopy.smoothvec.Rd
|
no_license
|
tdhock/bams
|
R
| false | false | 459 |
rd
|
\name{dnacopy.smoothvec}
\alias{dnacopy.smoothvec}
\title{dnacopy smoothvec}
\description{Smooth a profile using DNAcopy.}
\usage{dnacopy.smoothvec(profile, var, vals, ...)}
\arguments{
\item{profile}{A profile data.frame.}
\item{var}{Smoothness variable.}
\item{vals}{Smoothness values.}
\item{\dots}{Other arguments, passed to segment.}
}
\value{Matrix of smoothed profiles: nparam x nprobes.}
\author{Toby Dylan Hocking}
|
x <- tibble::as_tibble(iris[c(1, 2, 51, 52), c(5, 1)])
test_that("as_flextable.data.frame", {
expect_identical(as_flextable(x), flextable::flextable(x))
expect_identical(
as_flextable(x, with_blanks("Species", "Sepal.Length")),
flextable::flextable(
x,
col_keys = c("Species", "..after1", "..before1", "Sepal.Length")
)
)
})
test_that("as.flextable.grouped_df", {
d <- dplyr::group_by(x, .data$Species)
f <- flextable::as_grouped_data(x, "Species")
expect_identical(as_flextable(d), as_flextable(f))
expect_identical(
as_flextable(d, groups_to = "merged"),
x %>%
flextable::flextable() %>%
flextable::merge_v("Species") %>%
flextable::theme_vanilla()
)
expect_identical(
as_flextable(d, groups_to = "merged", col_keys = with_blanks("Species")),
x %>%
flextable::flextable(
col_keys = c("Species", "..after1", "Sepal.Length")
) %>%
flextable::merge_v("Species") %>%
flextable::theme_vanilla()
)
expect_identical(
as_flextable(d, groups_to = "asis"),
flextable::flextable(x)
)
})
|
/tests/testthat/test-as-flextable.R
|
permissive
|
jmbarbone/ftExtra
|
R
| false | false | 1,107 |
r
|
x <- tibble::as_tibble(iris[c(1, 2, 51, 52), c(5, 1)])
test_that("as_flextable.data.frame", {
expect_identical(as_flextable(x), flextable::flextable(x))
expect_identical(
as_flextable(x, with_blanks("Species", "Sepal.Length")),
flextable::flextable(
x,
col_keys = c("Species", "..after1", "..before1", "Sepal.Length")
)
)
})
test_that("as.flextable.grouped_df", {
d <- dplyr::group_by(x, .data$Species)
f <- flextable::as_grouped_data(x, "Species")
expect_identical(as_flextable(d), as_flextable(f))
expect_identical(
as_flextable(d, groups_to = "merged"),
x %>%
flextable::flextable() %>%
flextable::merge_v("Species") %>%
flextable::theme_vanilla()
)
expect_identical(
as_flextable(d, groups_to = "merged", col_keys = with_blanks("Species")),
x %>%
flextable::flextable(
col_keys = c("Species", "..after1", "Sepal.Length")
) %>%
flextable::merge_v("Species") %>%
flextable::theme_vanilla()
)
expect_identical(
as_flextable(d, groups_to = "asis"),
flextable::flextable(x)
)
})
|
##FUNCTIONS
##HW8 Data Analysis
#Separate a string with ","
x<-c("n","a","i","v","y")
stc<- function(x){
str_c(x, collapse=",")
}
stc(x)
#Finding the daily mean grouping by site and date
o3summarize<-function(x){
out<- group_by(x, site=as.factor(site), date)%>%
summarize(o3=mean(obs, na.rm=TRUE))
}
#Summarize data with year and month
o3summarize2<-function(x){
out2<- group_by(x, date=as.factor(date), date)%>%
summarize(meano3=mean(o3))
}
yearl<-daily%>%
map(o3summarize2)
yearl
#Function that shows diurnal pattern
o3summarize3<-function(x){
out3<- group_by(x, start_hour=as.factor(start_hour[0:12|12:24]), date)%>%
summarize(o3d=mean(obs, na.rm=TRUE))
}
diurnal<-o3.filelist%>%
map(o3summarize3)
diurnal
or
diurnal <- o3.filelist %>%
rbind_list() %>%
filter((start_hour > 7) & (start_hour <19 )) %>%
group_by(site, date) %>%
summarize(meanobs = mean(obs, rm.na=TRUE))
diurnal
#Annual daily mean for O3 in Merced, CA
o3.annual.mean<- function(x){
out<-group_by(x, year, `County Name`)%>%
summarize(annual.mean=mean(o3, na.rm=TRUE))
}
groupdaily<-group_by(daily.site, year, `County Name`)
groupdaily<- groupdaily%>%
filter(str_detect(`County Name`, 'Merced'))
o33<- o3.annual.mean(groupdaily)
o33
|
/functions_es207_hw8.R
|
no_license
|
nrodalmorales/es_207_hw8
|
R
| false | false | 1,332 |
r
|
##FUNCTIONS
##HW8 Data Analysis
#Separate a string with ","
x<-c("n","a","i","v","y")
stc<- function(x){
str_c(x, collapse=",")
}
stc(x)
#Finding the daily mean grouping by site and date
o3summarize<-function(x){
out<- group_by(x, site=as.factor(site), date)%>%
summarize(o3=mean(obs, na.rm=TRUE))
}
#Summarize data with year and month
o3summarize2<-function(x){
out2<- group_by(x, date=as.factor(date), date)%>%
summarize(meano3=mean(o3))
}
yearl<-daily%>%
map(o3summarize2)
yearl
#Function that shows diurnal pattern
o3summarize3<-function(x){
out3<- group_by(x, start_hour=as.factor(start_hour[0:12|12:24]), date)%>%
summarize(o3d=mean(obs, na.rm=TRUE))
}
diurnal<-o3.filelist%>%
map(o3summarize3)
diurnal
or
diurnal <- o3.filelist %>%
rbind_list() %>%
filter((start_hour > 7) & (start_hour <19 )) %>%
group_by(site, date) %>%
summarize(meanobs = mean(obs, rm.na=TRUE))
diurnal
#Annual daily mean for O3 in Merced, CA
o3.annual.mean<- function(x){
out<-group_by(x, year, `County Name`)%>%
summarize(annual.mean=mean(o3, na.rm=TRUE))
}
groupdaily<-group_by(daily.site, year, `County Name`)
groupdaily<- groupdaily%>%
filter(str_detect(`County Name`, 'Merced'))
o33<- o3.annual.mean(groupdaily)
o33
|
models <- list()
|
/src/core/learning/models/__AUTOLOAD.R
|
no_license
|
imclab/cai
|
R
| false | false | 17 |
r
|
models <- list()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psut.R
\name{collapse_to_psut}
\alias{collapse_to_psut}
\title{Convert a tidy data frame to PSUT matrices}
\usage{
collapse_to_psut(
.df,
matrix_class = c("matrix", "Matrix"),
country = MWTools::mw_cols$country,
year = MWTools::mw_cols$year,
method = MWTools::mw_cols$method,
energy_type = MWTools::mw_cols$energy_type,
last_stage = MWTools::mw_cols$last_stage,
unit = MWTools::mw_cols$unit,
e_dot = MWTools::mw_cols$e_dot,
matnames = MWTools::mat_meta_cols$matnames,
matvals = MWTools::mat_meta_cols$matvals,
rownames = MWTools::mat_meta_cols$rownames,
colnames = MWTools::mat_meta_cols$colnames,
rowtypes = MWTools::mat_meta_cols$rowtypes,
coltypes = MWTools::mat_meta_cols$coltypes
)
}
\arguments{
\item{.df}{A data frame created by \code{add_row_col_meta()} so that it contains
metadata columns for creating PSUT matrices.}
\item{matrix_class}{The type of matrix to be created, one of "matrix" or "Matrix".
Default is "matrix".}
\item{country, year, method, energy_type, last_stage, unit, e_dot}{See \code{MWTools::mw_cols}.}
\item{matnames, matvals, rownames, colnames, rowtypes, coltypes}{See \code{MWTools::mat_meta_cols}.}
}
\value{
A \code{matsindf}-style, wide-by-matrices data frame of muscle work matrices.
}
\description{
A tidy data frame of muscle work information can be converted to
a \code{matsindf} data frame via this function.
}
\details{
Prior to forming matrices, this function deletes unneeded columns
(columns that are neither metadata nor energy values).
It also aggregates data frame rows that will end up at the same
row, column location in the matrices.
}
\examples{
ilo_working_hours_data <- read.csv(file = MWTools::ilo_working_hours_test_data_path())
ilo_employment_data <- read.csv(file = MWTools::ilo_employment_test_data_path())
hmw_data <- prepareRawILOData(ilo_working_hours_data = ilo_working_hours_data,
ilo_employment_data = ilo_employment_data)
hmw_df <- hmw_data \%>\%
calc_hmw_pfu() \%>\%
# Keep only a few years for speed.
dplyr::filter(Year \%in\% 2000:2002)
amw_df <- amw_test_data_path() \%>\%
read.csv() \%>\%
calc_amw_pfu() \%>\%
# Keep only a few years for speed.
dplyr::filter(Year \%in\% 2000:2002)
specify_energy_type_method(hmw_df, amw_df) \%>\%
specify_product() \%>\%
specify_TJ() \%>\%
MWTools::specify_primary_production() \%>\%
specify_useful_products() \%>\%
specify_fu_machines() \%>\%
specify_last_stages() \%>\%
MWTools::add_row_col_meta() \%>\%
MWTools::collapse_to_psut()
}
|
/man/collapse_to_psut.Rd
|
permissive
|
EnergyEconomyDecoupling/MWTools
|
R
| false | true | 2,610 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psut.R
\name{collapse_to_psut}
\alias{collapse_to_psut}
\title{Convert a tidy data frame to PSUT matrices}
\usage{
collapse_to_psut(
.df,
matrix_class = c("matrix", "Matrix"),
country = MWTools::mw_cols$country,
year = MWTools::mw_cols$year,
method = MWTools::mw_cols$method,
energy_type = MWTools::mw_cols$energy_type,
last_stage = MWTools::mw_cols$last_stage,
unit = MWTools::mw_cols$unit,
e_dot = MWTools::mw_cols$e_dot,
matnames = MWTools::mat_meta_cols$matnames,
matvals = MWTools::mat_meta_cols$matvals,
rownames = MWTools::mat_meta_cols$rownames,
colnames = MWTools::mat_meta_cols$colnames,
rowtypes = MWTools::mat_meta_cols$rowtypes,
coltypes = MWTools::mat_meta_cols$coltypes
)
}
\arguments{
\item{.df}{A data frame created by \code{add_row_col_meta()} so that it contains
metadata columns for creating PSUT matrices.}
\item{matrix_class}{The type of matrix to be created, one of "matrix" or "Matrix".
Default is "matrix".}
\item{country, year, method, energy_type, last_stage, unit, e_dot}{See \code{MWTools::mw_cols}.}
\item{matnames, matvals, rownames, colnames, rowtypes, coltypes}{See \code{MWTools::mat_meta_cols}.}
}
\value{
A \code{matsindf}-style, wide-by-matrices data frame of muscle work matrices.
}
\description{
A tidy data frame of muscle work information can be converted to
a \code{matsindf} data frame via this function.
}
\details{
Prior to forming matrices, this function deletes unneeded columns
(columns that are neither metadata nor energy values).
It also aggregates data frame rows that will end up at the same
row, column location in the matrices.
}
\examples{
ilo_working_hours_data <- read.csv(file = MWTools::ilo_working_hours_test_data_path())
ilo_employment_data <- read.csv(file = MWTools::ilo_employment_test_data_path())
hmw_data <- prepareRawILOData(ilo_working_hours_data = ilo_working_hours_data,
ilo_employment_data = ilo_employment_data)
hmw_df <- hmw_data \%>\%
calc_hmw_pfu() \%>\%
# Keep only a few years for speed.
dplyr::filter(Year \%in\% 2000:2002)
amw_df <- amw_test_data_path() \%>\%
read.csv() \%>\%
calc_amw_pfu() \%>\%
# Keep only a few years for speed.
dplyr::filter(Year \%in\% 2000:2002)
specify_energy_type_method(hmw_df, amw_df) \%>\%
specify_product() \%>\%
specify_TJ() \%>\%
MWTools::specify_primary_production() \%>\%
specify_useful_products() \%>\%
specify_fu_machines() \%>\%
specify_last_stages() \%>\%
MWTools::add_row_col_meta() \%>\%
MWTools::collapse_to_psut()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ttest.R
\name{t_fit}
\alias{t_fit}
\title{hypothesis function}
\usage{
t_fit(mydata)
}
\arguments{
\item{mydata}{...}
}
\value{
}
\description{
hypothesis function
}
|
/man/t_fit.Rd
|
no_license
|
EthanTat/FinalPackage
|
R
| false | true | 245 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ttest.R
\name{t_fit}
\alias{t_fit}
\title{hypothesis function}
\usage{
t_fit(mydata)
}
\arguments{
\item{mydata}{...}
}
\value{
}
\description{
hypothesis function
}
|
## Look at rivaslab data
external_files_path = "/oak/stanford/groups/mrivas/private_data/ukbb/24983/imp/pgen/"
analysis_name = "rivaslab_ukbb_imputed_30k_rand_controls_sex_age"
covariates_path = "/oak/stanford/groups/mrivas/private_data/ukbb/24983/phe_qc/ukb24983_GWAS_covar.phe"
subject_ids_path = "/oak/stanford/groups/mrivas/private_data/ukbb/24983/sqc/population_stratification/ukb24983_white_british.phe"
out_path = "/oak/stanford/groups/euan/projects/fitness_genetics/ukbb/"
mega_data_bim_file = "/oak/stanford/groups/euan/projects/fitness_genetics/illu_processed_plink_data/no_reclustering/MEGA_Consortium_recall/plink_test_mega_consortium_data.bim"
# # Read in sex, age, and PCs
# cov_data = read.delim(covariates_path)
# rownames(cov_data) = cov_data[,1]
# print("covariates loaded, dim: ")
# print(dim(cov_data))
chrs = 1:22
all_files = list.files(external_files_path)
if(analysis_name != ""){
out_path = paste(out_path,analysis_name,"/",sep="")
system(paste("mkdir",out_path))
}
script_file = "/oak/stanford/groups/euan/projects/fitness_genetics/scripts/fitness_genetics/R/gwas_flow_helper_functions.R"
source(script_file)
mega_bim = read.table(mega_data_bim_file,stringsAsFactors = F,header = F)
id_is_loc = grepl(":",mega_bim[,2])
mega_locations = paste(mega_bim[,1],mega_bim[,4],sep=":")
ids = mega_bim[,2]
names(mega_locations) = ids
corrected_locations = sapply(ids[id_is_loc],function(x)strsplit(x,split="-")[[1]][1])
mega_locations[id_is_loc] = corrected_locations
####################################################################################################
####################################################################################################
####################################################################################################
# Create frq files (useful for comparison with other mafs)
for (chr in chrs){
curr_file = all_files[grepl(".bed$",all_files) & grepl(paste("chr",chr,"_",sep=""),all_files)]
curr_file = gsub(pattern = ".bed$",replacement = "",curr_file)
err_path = paste(out_path,"merge_geno.err",sep="")
log_path = paste(out_path,"merge_geno.log",sep="")
curr_cmd = paste("plink --bfile",paste(external_files_path,curr_file,sep=''),
"--keep",subject_ids_path,
"--freq --out",paste(out_path,"chr",chr,sep=""))
curr_sh_file = "merge_geno.sh"
print_sh_file(paste(out_path,curr_sh_file,sep=''),
get_sh_default_prefix(err_path,log_path),curr_cmd)
system(paste("sbatch",paste(out_path,curr_sh_file,sep='')))
}
# Comment this out: compare to our old mafs
# ukbb_hrc_data = read.table("/oak/stanford/groups/euan/projects/fitness_genetics/ukbb/ukbb_imputed_20k_rand_controls_sex_age/FreqPlot-merged_control_geno-HRC.txt",
# stringsAsFactors = F)
# ukbb_frq = read.table("/oak/stanford/groups/euan/projects/fitness_genetics/ukbb/ukbb_imputed_20k_rand_controls_sex_age/merged_control_geno.frq",
# stringsAsFactors = F,header=T)
# inds = which(abs(ukbb_hrc_data[,4]) > 0.05)
# ukbb_hrc_data[inds,]
# Comment this out: compare rivaslab freqs with our imputation
chr = 10
rivaslab_freq = read.table(paste(out_path,"chr",chr,".frq",sep=""),stringsAsFactors = F,header=T)
rownames(rivaslab_freq) = rivaslab_freq[,2]
# from direct imp folder
ashleylab_bim = read.table(paste("/oak/stanford/groups/euan/projects/ukbb/data/genetic_data/v2/plink_small/ukb_imp_chr",chr,"_v2.bim",sep=""),stringsAsFactors = F,
header=F)
ashleylab_freq = read.table(paste("/oak/stanford/groups/euan/projects/ukbb/data/genetic_data/v2/plink_small/freqs/chr",chr,".frq",sep=""),stringsAsFactors = F,
header=T)
# Our subset
ashleylab_bim = read.table("/oak/stanford/groups/euan/projects/fitness_genetics/ukbb/ukbb_imputed_20k_rand_controls_sex_age/merged_control_geno.bim",
stringsAsFactors = F,header=F)
ashleylab_freq = read.table("/oak/stanford/groups/euan/projects/fitness_genetics/ukbb/ukbb_imputed_20k_rand_controls_sex_age/merged_control_geno.frq",
stringsAsFactors = F,header=T)
all(ashleylab_bim[,2]==ashleylab_freq[,2])
|
/archive/rivaslab_imp_data_create_control_set.R
|
no_license
|
david-dd-amar/fitness_genetics
|
R
| false | false | 8,302 |
r
|
## Look at rivaslab data
external_files_path = "/oak/stanford/groups/mrivas/private_data/ukbb/24983/imp/pgen/"
analysis_name = "rivaslab_ukbb_imputed_30k_rand_controls_sex_age"
covariates_path = "/oak/stanford/groups/mrivas/private_data/ukbb/24983/phe_qc/ukb24983_GWAS_covar.phe"
subject_ids_path = "/oak/stanford/groups/mrivas/private_data/ukbb/24983/sqc/population_stratification/ukb24983_white_british.phe"
out_path = "/oak/stanford/groups/euan/projects/fitness_genetics/ukbb/"
mega_data_bim_file = "/oak/stanford/groups/euan/projects/fitness_genetics/illu_processed_plink_data/no_reclustering/MEGA_Consortium_recall/plink_test_mega_consortium_data.bim"
# # Read in sex, age, and PCs
# cov_data = read.delim(covariates_path)
# rownames(cov_data) = cov_data[,1]
# print("covariates loaded, dim: ")
# print(dim(cov_data))
chrs = 1:22
all_files = list.files(external_files_path)
if(analysis_name != ""){
out_path = paste(out_path,analysis_name,"/",sep="")
system(paste("mkdir",out_path))
}
script_file = "/oak/stanford/groups/euan/projects/fitness_genetics/scripts/fitness_genetics/R/gwas_flow_helper_functions.R"
source(script_file)
mega_bim = read.table(mega_data_bim_file,stringsAsFactors = F,header = F)
id_is_loc = grepl(":",mega_bim[,2]) # IDs of the form "chr:pos..." rather than rsids
mega_locations = paste(mega_bim[,1],mega_bim[,4],sep=":")
ids = mega_bim[,2]
names(mega_locations) = ids
# for location-style IDs like "chr:pos-A1-A2", keep only the "chr:pos" part
corrected_locations = sapply(ids[id_is_loc],function(x)strsplit(x,split="-")[[1]][1])
mega_locations[id_is_loc] = corrected_locations
####################################################################################################
####################################################################################################
####################################################################################################
# Create frq files (useful for comparison with other mafs)
for (chr in chrs){
curr_file = all_files[grepl("\\.bed$",all_files) & grepl(paste("chr",chr,"_",sep=""),all_files)]
curr_file = gsub(pattern = "\\.bed$",replacement = "",curr_file)
err_path = paste(out_path,"merge_geno.err",sep="")
log_path = paste(out_path,"merge_geno.log",sep="")
curr_cmd = paste("plink --bfile",paste(external_files_path,curr_file,sep=''),
"--keep",subject_ids_path,
"--freq --out",paste(out_path,"chr",chr,sep=""))
curr_sh_file = "merge_geno.sh"
print_sh_file(paste(out_path,curr_sh_file,sep=''),
get_sh_default_prefix(err_path,log_path),curr_cmd)
system(paste("sbatch",paste(out_path,curr_sh_file,sep='')))
}
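# A small follow-up sketch (not part of the original pipeline): once the
# sbatch jobs above finish, the per-chromosome .frq files can be stacked
# into a single table. The file names follow the out_path/chr<k>.frq
# pattern used in the --out argument above.
read_all_frq = function(out_path, chrs){
  frq_list = lapply(chrs, function(chr){
    read.table(paste(out_path,"chr",chr,".frq",sep=""), stringsAsFactors = F, header = T)
  })
  do.call(rbind, frq_list)
}
# all_frq = read_all_frq(out_path, chrs)  # run only after the jobs complete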
# Ad-hoc check (kept commented out): compare to our old MAFs
# ukbb_hrc_data = read.table("/oak/stanford/groups/euan/projects/fitness_genetics/ukbb/ukbb_imputed_20k_rand_controls_sex_age/FreqPlot-merged_control_geno-HRC.txt",
# stringsAsFactors = F)
# ukbb_frq = read.table("/oak/stanford/groups/euan/projects/fitness_genetics/ukbb/ukbb_imputed_20k_rand_controls_sex_age/merged_control_geno.frq",
# stringsAsFactors = F,header=T)
# inds = which(abs(ukbb_hrc_data[,4]) > 0.05)
# ukbb_hrc_data[inds,]
# Ad-hoc check: compare rivaslab freqs with our imputation
chr = 10
rivaslab_freq = read.table(paste(out_path,"chr",chr,".frq",sep=""),stringsAsFactors = F,header=T)
rownames(rivaslab_freq) = rivaslab_freq[,2]
# from direct imp folder
ashleylab_bim = read.table(paste("/oak/stanford/groups/euan/projects/ukbb/data/genetic_data/v2/plink_small/ukb_imp_chr",chr,"_v2.bim",sep=""),stringsAsFactors = F,
header=F)
ashleylab_freq = read.table(paste("/oak/stanford/groups/euan/projects/ukbb/data/genetic_data/v2/plink_small/freqs/chr",chr,".frq",sep=""),stringsAsFactors = F,
header=T)
# Our subset
ashleylab_bim = read.table("/oak/stanford/groups/euan/projects/fitness_genetics/ukbb/ukbb_imputed_20k_rand_controls_sex_age/merged_control_geno.bim",
stringsAsFactors = F,header=F)
ashleylab_freq = read.table("/oak/stanford/groups/euan/projects/fitness_genetics/ukbb/ukbb_imputed_20k_rand_controls_sex_age/merged_control_geno.frq",
stringsAsFactors = F,header=T)
all(ashleylab_bim[,2]==ashleylab_freq[,2])
ashleylab_ids = paste(paste(ashleylab_bim[,1],ashleylab_bim[,4],sep=":"),ashleylab_bim[,5],ashleylab_bim[,6],sep="_")
rownames(ashleylab_freq) = ashleylab_ids
inds = intersect(ashleylab_ids,rivaslab_freq[,2])
r_mafs = rivaslab_freq[inds,"MAF"]
a_mafs = ashleylab_freq[inds,"MAF"]
cor(r_mafs,a_mafs)
diffs = r_mafs-a_mafs
table(diffs > 0.01)/length(diffs) # fraction of variants whose MAF differs by more than 0.01
# # chr 2 output:
# > cor(r_mafs,a_mafs)
# [1] 0.9985048
# > diffs = r_mafs-a_mafs
# > table(diffs > 0.01)/length(diffs)
# FALSE TRUE
# 0.94054091 0.05945909
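# Optional visual check (a sketch, not in the original analysis): plot the
# two MAF estimates against each other; points far from the identity line
# flag variants whose frequencies disagree between the two call sets.
plot(a_mafs, r_mafs, pch = ".",
     xlab = "ashleylab MAF", ylab = "rivaslab MAF",
     main = paste("chr", chr, "MAF concordance"))
abline(0, 1, col = "red")  # identity line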
# check one of our results: "22:35884154_G_A"
is.element("22:35884154_G_A", ashleylab_ids)
ashleylab_freq["22:35884154_A_G",] # note: the allele order is flipped in the ashleylab ID
rivaslab_freq["22:35884154_G_A",]
# Check: rs143940620, 10:18054131_C_T
# Or 10:105815241_A_C
ashleylab_freq["10:18054131_T_C",]
rivaslab_freq["10:18054131_T_C",]
# > ashleylab_freq["10:18054131_T_C",]
# CHR SNP A1 A2 MAF NCHROBS
# 10:18054131_T_C 10 rs143940620 T C 0.1033 26508
# > rivaslab_freq["10:18054131_T_C",]
# CHR SNP A1 A2 MAF NCHROBS
# 10:18054131_T_C 10 10:18054131_T_C T C 0.1094 444406
# >
# rsids: rs1747677;rs10748861
ashleylab_freq["10:105815241_A_C",]
rivaslab_freq["10:105815241_A_C",]
# > ashleylab_freq["10:105815241_A_C",]
# CHR SNP A1 A2 MAF NCHROBS
# 10:105815241_A_C 10 rs1747677 A C 0.3283 22314
# > rivaslab_freq["10:105815241_A_C",]
# CHR SNP A1 A2 MAF NCHROBS
# 10:105815241_A_C 10 10:105815241_A_C A C 0.3081 375630
# cd /oak/stanford/groups/euan/projects/fitness_genetics/analysis/no_recl_mega_separate_recalls
# less bfile1.bim | grep 105815241
# 10 rs1747677 122.922 105815241 A C
# less bfile1.hwe | grep rs1747677
# 10 rs1747677 ALL(NP) A C 48/414/883 0.3078 0.3073 1
# less bfile1.frq | grep rs1747677
# 10 rs1747677 A C 0.1896 2690
# Check freqs in recalling with genepool
# cd /oak/stanford/groups/euan/projects/fitness_genetics/illu_processed_plink_data/no_reclustering/MEGA_Consortium_recall
# less plink_test_mega_consortium_data.frq | grep rs1747677
# rs1747677 A C 0.3095 6928
# cd /oak/stanford/groups/euan/projects/fitness_genetics/analysis/no_recl_mega_separate_recalls/
# less FreqPlot-merged_mega_data_autosomal-HRC.txt | grep rs1747677
# rs1747677 0.340006 0.1901 0.149906 1
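# The shell greps above can be reproduced in R; a minimal sketch (it assumes
# the files are plink .frq tables with a header and a SNP column, as in the
# reads elsewhere in this script):
lookup_frq = function(frq_file, snp_id){
  frq = read.table(frq_file, stringsAsFactors = F, header = T)
  frq[frq$SNP == snp_id, ]
}
# lookup_frq("bfile1.frq", "rs1747677")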
####################################################################################################
####################################################################################################
####################################################################################################
# Create reduced data files: ukbb whites and mega locations
for (chr in chrs){
curr_file = all_files[grepl("\\.bed$",all_files) & grepl(paste("chr",chr,"_",sep=""),all_files)]
curr_file = gsub(pattern = "\\.bed$",replacement = "",curr_file)
# curr_bim = read.table(paste(external_files_path,curr_file,".bim",sep=""),stringsAsFactors = F,header = F)
# curr_locations = paste(curr_bim[,1],curr_bim[,4],sep=":")
# to_rem = !is.element(curr_locations, mega_locations)
# print(paste("looking at chromosome",chr,"number of locations shared with mega:",sum(!to_rem)))
# print(paste("out of a total of mega snps:",sum(grepl(paste(chr,":",sep=""),mega_locations))))
# curr_excluded = curr_bim[to_rem,2]
curr_excluded_file = paste(out_path,"excluded_chr",chr,".txt",sep="")
# write.table(t(t(curr_excluded)),quote = F,sep="",row.names = F,col.names = F,file=curr_excluded_file)
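# (the excluded_chr*.txt files are assumed to already exist from an earlier
# run in which the write.table call above was active)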
err_path = paste(out_path,"merge_geno.err",sep="")
log_path = paste(out_path,"merge_geno.log",sep="")
curr_cmd = paste("plink --bfile",paste(external_files_path,curr_file,sep=''),
"--keep",subject_ids_path,
"--exclude",curr_excluded_file,
"--freq --make-bed --out",paste(out_path,"mega_snps_chr",chr,sep=""))
curr_sh_file = "merge_geno.sh"
print_sh_file(paste(out_path,curr_sh_file,sep=''),
get_sh_default_prefix(err_path,log_path),curr_cmd)
system(paste("sbatch",paste(out_path,curr_sh_file,sep='')))
}
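# A possible next step (not in the original script): merge the per-chromosome
# subsets into a single fileset with plink's --merge-list. The names below
# match the --out pattern used in the loop above.
merge_list_file = paste(out_path,"mega_snps_merge_list.txt",sep="")
write.table(t(t(paste(out_path,"mega_snps_chr",chrs,sep=""))),
            quote = F, row.names = F, col.names = F, file = merge_list_file)
merge_cmd = paste("plink --merge-list",merge_list_file,
                  "--make-bed --out",paste(out_path,"mega_snps_all",sep=""))
# system(merge_cmd)  # or wrap with print_sh_file/sbatch as above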
|
library(CEGO)
### Name: distanceNumericLCStr
### Title: Longest Common Substring for Numeric Vectors
### Aliases: distanceNumericLCStr
### ** Examples
# e.g., used for distance between bit strings
x <- c(0,1,0,1,0)
y <- c(1,1,0,0,1)
distanceNumericLCStr(x,y)
p <- replicate(10,sample(c(0,1),5,replace=TRUE),simplify=FALSE)
distanceMatrix(p,distanceNumericLCStr)
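# For intuition, a rough base-R dynamic-programming sketch of the
# longest-common-substring length for numeric vectors; the exact distance
# normalization CEGO applies on top of this length is assumed here, not
# taken from the package source.
lcstr_length <- function(x, y){
  best <- 0
  dp <- matrix(0, length(x) + 1, length(y) + 1)
  for(i in seq_along(x)) for(j in seq_along(y)){
    if(x[i] == y[j]){
      dp[i + 1, j + 1] <- dp[i, j] + 1
      best <- max(best, dp[i + 1, j + 1])
    }
  }
  best
}
lcstr_length(c(0,1,0,1,0), c(1,1,0,0,1))  # 2, e.g. the shared run c(0,1)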
|
/data/genthat_extracted_code/CEGO/examples/distanceNumericLCStr.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 369 |
r
|
#' @title
#' Summarize forest inventory data
#' @description
#' Get information about forest inventory plots, such as the number of individuals,
#' mean DBH, q, height, basal area, volume, etc.
#'
#' @param df A data frame.
#' @param plot Quoted name of the plot variable, used to differentiate the data's plots. If this argument is missing, the groups defined in the data frame will be used; if there are no groups in the data, the function will fail.
#' @param plot_area Quoted name of the plot area variable, or a numeric vector with the plot area value. The plot area value must be in square meters.
#' @param dbh Optional parameter. Quoted name of the diameter at breast height variable. If supplied, will be used to calculate the mean diameter per plot, quadratic diameter (q), basal area and basal area per hectare. Default \code{NA}.
#' @param th Optional parameter. Quoted name of the total height variable. If supplied, will be used to calculate the mean total height, and the dominant height variable, if the \code{dh} is \code{NA}. Default \code{NA}.
#' @param .groups Optional argument. Quoted name(s) of grouping variables that can be added to differentiate subdivisions of the data. Default: \code{NA}.
#' @param total_area Optional argument. Quoted name of the total area variable, or a numeric vector with the total area value. The total area value must be in hectares. Default: \code{NA}.
#' @param vwb Optional parameter. Quoted name of the volume with bark variable. If supplied, will be used to calculate the total vwb per plot, and vwb per hectare per plot. Default \code{NA}.
#' @param vwob Optional parameter. Quoted name of the volume without bark variable. If supplied, will be used to calculate the total vwob per plot, and vwob per hectare per plot. Default \code{NA}.
#' @param dh Optional parameter. Quoted name of the dominant height variable. If supplied, will be used to calculate the mean dominant height per plot. If not, the \code{th} variable supplied will be used to calculate the average of the top two trees of each plot, and that value is used as dh. Default: \code{NA}.
#' @param age Optional parameter. Quoted name of the age variable. If supplied, will be used to calculate the average age per plot. Default: \code{NA}.
#' @param dec_places Numeric value for the number of decimal places to be used in the output tables. Default: \code{4}.
#' @return A data frame with information per plot.
#'
#' @export
#'
#' @examples
#' library(forestmangr)
#' data("exfm21")
#' exfm21
#'
#' # Mandatory arguments. Basic information about the plot.
#' plot_summarise(exfm21, "PLOT", 810)
#'
#' # Area values can be numeric, or a variable name
#' plot_summarise(exfm21, "PLOT", "PLOT_AREA")
#'
#' # With DBH supplied, we get the mean diameter, quadratic diameter,
#' # basal area and basal area per hectare:
#' plot_summarise(exfm21, "PLOT", "PLOT_AREA", "DBH")
#'
#' # With TH supplied, we get the mean total height and dominant height
#' plot_summarise(exfm21, "PLOT", "PLOT_AREA", "DBH", "TH_EST")
#'
#' # With strata supplied, we divide the data into 2 strata
#' plot_summarise(exfm21, "PLOT", "PLOT_AREA", "DBH", "TH_EST", "STRATA")
#'
#' # The strata area can also be supplied
#' plot_summarise(exfm21, "PLOT", "PLOT_AREA", "DBH", "TH_EST", "STRATA", "STRATA_AREA")
#'
#' # With VWB supplied, we get the total vwb, and vwb per hectare
#' plot_summarise(exfm21, "PLOT", "PLOT_AREA", "DBH", "TH_EST", "STRATA", "STRATA_AREA",
#' "VWB")
#'
#' # With VWOB supplied, we get the total vwob, and vwob per hectare
#' plot_summarise(exfm21, "PLOT", "PLOT_AREA", "DBH", "TH_EST", "STRATA", "STRATA_AREA",
#' "VWB", "VWOB")
#'
#' # If the data already has a dominant height variable, it can also be supplied here
#' plot_summarise(exfm21, "PLOT", "PLOT_AREA", "DBH", "TH_EST", "STRATA", "STRATA_AREA",
#' "VWB", "VWOB", "DH")
#'
#' # With the AGE variable supplied, we get the average age of each plot
#' plot_summarise(exfm21, "PLOT", "PLOT_AREA", "DBH", "TH_EST", "STRATA", "STRATA_AREA",
#' "VWB", "VWOB", "DH", "AGE")
#'
#' @author Sollano Rabelo Braga \email{sollanorb@@gmail.com}
#'
plot_summarise <- function(df, plot, plot_area, dbh, th, .groups, total_area, vwb, vwob, dh, age, dec_places = 4) {
# ####
CSA<-Indv<-G<-NULL
# variable checks ####
# if df is not supplied, is NULL, is not a data frame, or does not have length and nrow greater than 1, stop
if( missing(df) ){
stop("df not set", call. = F)
}else if(!is.data.frame(df)){
stop("df must be a dataframe", call.=F)
}else if(length(df)<=1 | nrow(df)<=1){
stop("Length and number of rows of 'df' must be greater than 1", call.=F)
}
# if dbh is not supplied, create an empty variable; if it is not a character, not a variable name, or not of length 1, stop
if( missing(dbh) || is.null(dbh) || is.na(dbh) || dbh == "" ){
df $ dbh <- NA
dbh <- "dbh"
}else if( !is.character(dbh) ){
stop("'dbh' must be a character containing a variable name", call.=F)
}else if(length(dbh)!=1){
stop("Length of 'dbh' must be 1", call.=F)
}else if(forestmangr::check_names(df, dbh)==F){
stop(forestmangr::check_names(df, dbh, boolean=F), call.=F)
}
# if th is not supplied, or equals "", NULL or NA, create an empty variable
# if it exists but is not a character, stop
if(missing(th) || is.null(th) || is.na(th) || th == "" ){
df $ th <- NA
th <- "th"
}else if(!is.character(th)){
stop("'th' must be a character containing a variable name", call.=F)
}else if(length(th)!=1){
stop("Length of 'th' must be 1", call.=F)
}else if(forestmangr::check_names(df, th)==F){
stop(forestmangr::check_names(df, th, boolean=F), call.=F)
}
# if vwb is not supplied, or equals "", NULL or NA, create an empty variable
# if it exists but is not a character, stop
if(missing(vwb) || is.null(vwb) || is.na(vwb) || vwb == "" ){
df $ vwb <- NA
vwb <- "vwb"
}else if(!is.character(vwb)){
stop("'vwb' must be a character containing a variable name", call.=F)
}else if(length(vwb)!=1){
stop("Length of 'vwb' must be 1", call.=F)
}else if(forestmangr::check_names(df, vwb)==F){
stop(forestmangr::check_names(df, vwb, boolean=F), call.=F)
}
# if plot_area is not supplied, is neither numeric nor character, does not exist in the data frame, or is not of length 1, stop
if( missing(plot_area) || is.null(plot_area) || is.na(plot_area) || plot_area == "" ){
stop("plot_area not set", call. = F)
}else if(is.numeric(plot_area) & length(plot_area)==1){
df $ plot_area <- plot_area
plot_area <- "plot_area"
}else if(!is.character(plot_area)){
stop("'plot_area' must be a character containing a variable name or a numeric value", call.=F)
}else if(length(plot_area)!=1){
stop("Length of 'plot_area' must be 1", call.=F)
}else if(forestmangr::check_names(df, plot_area)==F){
stop(forestmangr::check_names(df, plot_area, boolean = F), call.=F)
}
# if total_area is not supplied, is neither numeric nor character, does not exist in the data frame, or is not of length 1, create an empty variable
# if it is supplied, check whether it is numeric or a variable name
if( missing(total_area) || is.null(total_area) || is.na(total_area) || total_area == "" ){
df $ total_area <- NA
total_area <- "total_area"
}else if(is.numeric(total_area) & length(total_area)==1){
df $ total_area <- total_area
total_area <- "total_area"
}else if(!is.character(total_area)){
stop("'total_area' must be a character containing a variable name or a numeric value", call.=F)
}else if(length(total_area)!=1){
stop("Length of 'total_area' must be 1", call.=F)
}else if(forestmangr::check_names(df, total_area)==F){
stop(forestmangr::check_names(df, total_area, boolean = F), call.=F)
}
# if age is not supplied, or equals "", NULL or NA, create an empty variable
# if it exists but is not a character, stop
if(missing(age) || is.null(age) || is.na(age) || age == "" ){
df $ age <- NA
age <- "age"
}else if(!is.character(age)){
stop("'age' must be a character containing a variable name", call.=F)
}else if(length(age)!=1){
stop("Length of 'age' must be 1", call.=F)
}else if(forestmangr::check_names(df, age)==F){
stop(forestmangr::check_names(df, age, boolean=F), call.=F)
}
# if vwob is not supplied, or equals "", NULL or NA, create an empty variable
# if it exists but is not a character, stop
if(missing(vwob) || is.null(vwob) || is.na(vwob) || vwob == "" ){
df $ vwob <- NA
vwob <- "vwob"
}else if(!is.character(vwob)){
stop("'vwob' must be a character containing a variable name", call.=F)
}else if(length(vwob)!=1){
stop("Length of 'vwob' must be 1", call.=F)
}else if(forestmangr::check_names(df, vwob)==F){
stop(forestmangr::check_names(df, vwob, boolean=F), call.=F)
}
# If plot is not supplied, fall back to the groups already defined on the data frame; stop if there are none
if(missing(plot) && is.null(dplyr::groups(df)) ){
stop("plot not set. plot must be set if data doesn't have any groups", call. = F)
}else if(missing(plot) && !is.null(dplyr::groups(df)) ){
plot_syms <- rlang::syms(dplyr::groups(df))
}else if(!is.character(plot)){
stop("plot must be a character", call. = F)
}else if(! length(plot)%in% 1:10){
stop("Length of 'plot' must be between 1 and 10", call.=F)
}else if(forestmangr::check_names(df,plot)==F){
# Stop if any name does not exist, and report which name is missing
stop(forestmangr::check_names(df,plot, boolean=F), call.=F)
}else{
plot_syms <- rlang::syms(plot)
}
# If .groups is not supplied, create an object that dplyr::group_by ignores, without raising an error
if(missing(.groups)||any(is.null(.groups))||any(is.na(.groups))||any(.groups==F)||any(.groups=="") ){
.groups_syms <- character()
# If .groups is supplied, check that all supplied variable names exist in the data
}else if(!is.character(.groups)){
stop(".groups must be a character", call. = F)
}else if(! length(.groups)%in% 1:10){
stop("Length of '.groups' must be between 1 and 10", call.=F)
}else if(forestmangr::check_names(df,.groups)==F){
# Stop if any name does not exist, and report which name is missing
stop(forestmangr::check_names(df,.groups, boolean=F), call.=F)
# if the groups are supplied and are names present in the data,
# convert the object into symbols, so that dplyr understands them
# and looks the variable names up inside the objects
}else{
.groups_syms <- rlang::syms(.groups)
}
# Stop if dec_places is not numeric, not of length 1, or outside the allowed range
if(!is.numeric( dec_places )){
stop( "'dec_places' must be numeric", call.=F)
}else if(length(dec_places)!=1){
stop("Length of 'dec_places' must be 1", call.=F)
}else if(! dec_places %in% seq(from=0,to=9,by=1) ){
stop("'dec_places' must be a number between 0 and 9", call.=F)
}
dbh_name <- dbh
th_name <- th
plot_area_name <- plot_area
total_area_name <- total_area
vwb_name <- vwb
vwb_ha_name <- paste(vwb,"ha",sep="_")
vwob_name <- vwob
vwob_ha_name <- paste(vwob,"ha",sep="_")
age_name <- age
dbh_sym <- rlang::sym( dbh )
th_sym <- rlang::sym( th )
vwb_sym <- rlang::sym( vwb )
plot_area_sym <- rlang::sym( plot_area )
total_area_sym <- rlang::sym( total_area )
age_sym <- rlang::sym( age )
vwob_sym <- rlang::sym( vwob )
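# (The rlang::sym()/!!/:= pattern above and below is standard tidy evaluation:
# sym() turns a user-supplied column-name string into a symbol, !! splices it
# into the dplyr verbs, and := lets the left-hand side of a summarise argument
# be a computed name.)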
# ####
if(missing(dh) || dh=="" || is.null(dh) || is.na(dh) ){ # if the dominant height is not supplied
# if a variable called "DH" already exists, delete it
if( "DH" %in% names(df) ){ df$DH <- NULL }
# estimate the dominant height
df <- forestmangr::dom_height(df = df, th = th,plot = plot, .groups = .groups, merge_data = T)
dh_name <- "DH"
dh_sym <- rlang::sym( "DH" )
# otherwise, just create the symbols
} else{
dh_name <- dh
dh_sym <- rlang::sym( dh )
}
# new name = old name
df %>%
dplyr::group_by(!!!.groups_syms, !!!plot_syms, add=T) %>%
dplyr::mutate(CSA = pi * (!!dbh_sym)^2 / 40000 ) %>%
dplyr::summarise(
!!age_name := round( mean(as.numeric( (!!age_sym) ), na.rm=T) ),
!!total_area_name := mean( !!total_area_sym, na.rm=T),
!!plot_area_name := mean( !!plot_area_sym, na.rm=T),
!!dbh_name := mean(!!dbh_sym, na.rm=T),
q = sqrt(mean(CSA, na.rm=T) * 40000 / pi),
!!th_name := mean(!!th_sym, na.rm=T),
!!dh_name := mean(!!dh_sym),
Indv = dplyr::n(),
Indvha = Indv* 10000/(!!plot_area_sym),
G = sum(CSA, na.rm=T),
G_ha = G * 10000/(!!plot_area_sym),
!!vwb_name := sum(!!vwb_sym, na.rm=T),
!!vwb_ha_name := (!!rlang::sym(vwb_name)) * 10000/ (!!plot_area_sym),
!!vwob_name := sum(!!vwob_sym, na.rm=T),
!!vwob_ha_name := (!!rlang::sym(vwob_name)) * 10000/ (!!plot_area_sym) ) %>% # summarise
dplyr::na_if(0) %>% # replace 0 with NA
rm_empty_col %>% # remove variables that were not supplied (omitted optional arguments become NA)
forestmangr::round_df(dec_places)
}
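# A quick numeric illustration (toy values, not package data) of the
# cross-sectional-area and quadratic-diameter arithmetic used above: with DBH
# in cm, CSA = pi * DBH^2 / 40000 is the area in m^2, and
# q = sqrt(mean(CSA) * 40000 / pi) recovers the quadratic mean diameter.
dbh_cm <- c(10, 12, 15)
csa_m2 <- pi * dbh_cm^2 / 40000
q_cm <- sqrt(mean(csa_m2) * 40000 / pi)  # ~12.50 cm, vs arithmetic mean 12.33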
|
/R/plot_summarise.R
|
permissive
|
Dawa406/forestmangr
|
R
| false | false | 13,255 |
r
|
# visitor_hist_adr_usd vs price
# (assumes the Expedia train set has already been read into `train`)
visitor_hist_adr_usd <- train$visitor_hist_adr_usd
price_usd <- train$price_usd
# Let's take about 15% standard deviation
booking_bool <- vector(mode = "logical", length = length(price_usd)) # price_usd is a vector, so length(), not nrow()
# assign to vectors
srch.prop_id <- c("1234-5678") # quoted: unquoted 1234-5678 would be evaluated as subtraction
booking_bool <- c(TRUE)
# construct the data frame
df = data.frame(srch.prop_id, booking_bool)
# write to CSV
write.csv(df, file = 'rrishi_submission_1.csv', row.names = FALSE) # drop row names so the CSV has only the submission columns
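# The file name promises a comparison of visitor_hist_adr_usd vs price_usd,
# but the script never performs one. A minimal sketch (the NA filtering is an
# assumption; both vectors were pulled from train above):
ok <- !is.na(visitor_hist_adr_usd) & !is.na(price_usd)
cor(visitor_hist_adr_usd[ok], price_usd[ok])
plot(visitor_hist_adr_usd[ok], price_usd[ok], pch = ".",
     xlab = "visitor_hist_adr_usd", ylab = "price_usd")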
|
/kaggle/expedia/visitor_hist_adr_usd__VS__price_usd.R
|
no_license
|
ryanrishi/csci183
|
R
| false | false | 433 |
r
|