| content | path | license_type | repo_name | language | is_vendor | is_generated | length_bytes | extension | text |
|---|---|---|---|---|---|---|---|---|---|
| large_string, lengths 0 to 6.46M | large_string, lengths 3 to 331 | large_string, 2 classes | large_string, lengths 5 to 125 | large_string, 1 class | bool, 2 classes | bool, 2 classes | int64, 4 to 6.46M | large_string, 75 classes | string, lengths 0 to 6.46M |
## MannWhitney_SplitYearSensitivity.R
# This script will test the sensitivity of the Mann-Whitney results to the year chosen for the split.
source(file.path("code", "paths+packages.R"))
## load data
gage_regions <-
readr::read_csv(file.path("results", "00_SelectGagesForAnalysis_GageRegions.csv"))
gage_sample_annual <-
readr::read_csv(file = file.path("results", "00_SelectGagesForAnalysis_GageSampleAnnual.csv")) %>%
dplyr::left_join(gage_regions, by = "gage_ID")
## variables to test?
vars_all <- c("annualnoflowdays", "zeroflowfirst", "peak2z_length")
## years to split for mann-whitney? (this will be included in the first set)
mw_yr_all <- seq(1994, by = 1, length.out = 9)
## loop through gages
sites <- unique(gage_sample_annual$gage_ID)
start_flag <- T
for (s in sites){
for (var in vars_all){
for (mw_yr_split in mw_yr_all){
# mann-whitney groups
group1 <-
gage_sample_annual %>%
subset(gage_ID == s & currentclimyear <= mw_yr_split) %>%
dplyr::pull(var)
group2 <-
gage_sample_annual %>%
subset(gage_ID == s & currentclimyear > mw_yr_split) %>%
dplyr::pull(var)
if (sum(is.finite(group1)) > 5 & sum(is.finite(group2)) > 5){
mw_test <- wilcox.test(group1, group2)
mw_p <- mw_test$p.value
mw_out <- tibble::tibble(gage_ID = s,
metric = var,
mw_yr = mw_yr_split,
mw_p = mw_p,
mw_meanGroup1 = mean(group1, na.rm = T),
mw_meanGroup2 = mean(group2, na.rm = T),
mw_medianGroup1 = median(group1, na.rm = T),
mw_medianGroup2 = median(group2, na.rm = T),
n_yrGroup1 = sum(is.finite(group1)),
n_yrGroup2 = sum(is.finite(group2)))
} else {
mw_out <- tibble::tibble(gage_ID = s,
metric = var,
mw_yr = mw_yr_split,
mw_p = NA,
mw_meanGroup1 = mean(group1, na.rm = T),
mw_meanGroup2 = mean(group2, na.rm = T),
mw_medianGroup1 = median(group1, na.rm = T),
mw_medianGroup2 = median(group2, na.rm = T),
n_yrGroup1 = sum(is.finite(group1)),
n_yrGroup2 = sum(is.finite(group2)))
}
if (start_flag){
mw_all <- mw_out
start_flag <- F
} else {
mw_all <- dplyr::bind_rows(mw_all, mw_out)
}
}
}
print(paste0(s, " complete"))
}
## plot
df_mw <-
mw_all %>%
dplyr::mutate(mw_diff_mean = mw_meanGroup2 - mw_meanGroup1,
mw_diff_median = mw_medianGroup2 - mw_medianGroup1) %>%
subset(complete.cases(.))
# make a column describing Mann-Whitney significance and direction of change
p_thres <- 0.05
df_mw$mw_sig[df_mw$mw_p > p_thres] <- "NotSig"
df_mw$mw_sig[df_mw$mw_p < p_thres &
df_mw$mw_diff_mean < 0 &
df_mw$metric %in% c("zeroflowfirst", "peak2z_length")] <- "SigDry"
df_mw$mw_sig[df_mw$mw_p < p_thres &
df_mw$mw_diff_mean < 0 &
df_mw$metric %in% c("annualnoflowdays")] <- "SigWet"
df_mw$mw_sig[df_mw$mw_p < p_thres &
df_mw$mw_diff_mean > 0 &
df_mw$metric %in% c("zeroflowfirst", "peak2z_length")] <- "SigWet"
df_mw$mw_sig[df_mw$mw_p < p_thres &
df_mw$mw_diff_mean > 0 &
df_mw$metric %in% c("annualnoflowdays")] <- "SigDry"
# histograms
p_mw_hist_afnf <-
ggplot() +
geom_histogram(data = subset(df_mw, metric == "annualnoflowdays"),
aes(x = mw_diff_mean, fill = mw_sig), binwidth = 10) +
geom_vline(xintercept = 0, color = "#ffffbf") +
facet_wrap(~mw_yr, ncol = 3) +
scale_x_continuous(name = "Change in Annual No-Flow Days, (Split+1 to 2017) - (1980 to Split) [days]") +
scale_y_continuous(name = "Number of Gages") +
scale_fill_manual(name = "Mann-Whitney Significance",
values = c("SigDry" = col.cat.red, "SigWet" = col.cat.blu, "NotSig" = col.gray),
labels = c("SigDry" = "Drier", "SigWet" = "Wetter", "NotSig" = "No Change")) +
theme(panel.border = element_blank(),
legend.position = "bottom") +
guides(fill = guide_legend(order = 1, title.position = "top", title.hjust = 0.5))
ggsave(file.path("figures_manuscript", "MannWhitney_SplitYearSensitivity-annualnoflowdays.png"),
plot = p_mw_hist_afnf, width = 190, height = 220, units = "mm")
p_mw_hist_zff <-
ggplot() +
geom_histogram(data = subset(df_mw, metric == "zeroflowfirst"),
aes(x = mw_diff_mean, fill = mw_sig), binwidth = 10) +
geom_vline(xintercept = 0, color = "#ffffbf") +
facet_wrap(~mw_yr, ncol = 3) +
scale_x_continuous(name = "Change in First No-Flow Day, (Split+1 to 2017) - (1980 to Split) [days]") +
scale_y_continuous(name = "Number of Gages") +
scale_fill_manual(name = "Mann-Whitney Significance",
values = c("SigDry" = col.cat.red, "SigWet" = col.cat.blu, "NotSig" = col.gray),
labels = c("SigDry" = "Drier", "SigWet" = "Wetter", "NotSig" = "No Change")) +
theme(panel.border = element_blank(),
legend.position = "bottom") +
guides(fill = guide_legend(order = 1, title.position = "top", title.hjust = 0.5))
ggsave(file.path("figures_manuscript", "MannWhitney_SplitYearSensitivity-zeroflowfirst.png"),
plot = p_mw_hist_zff, width = 190, height = 220, units = "mm")
p_mw_hist_p2z <-
ggplot() +
geom_histogram(data = subset(df_mw, metric == "peak2z_length"),
aes(x = mw_diff_mean, fill = mw_sig), binwidth = 10) +
geom_vline(xintercept = 0, color = "#ffffbf") +
facet_wrap(~mw_yr, ncol = 3) +
scale_x_continuous(name = "Change in Days from Peak to No-Flow, (Split+1 to 2017) - (1980 to Split) [days]") +
scale_y_continuous(name = "Number of Gages") +
scale_fill_manual(name = "Mann-Whitney Significance",
values = c("SigDry" = col.cat.red, "SigWet" = col.cat.blu, "NotSig" = col.gray),
labels = c("SigDry" = "Drier", "SigWet" = "Wetter", "NotSig" = "No Change")) +
theme(panel.border = element_blank(),
legend.position = "bottom") +
guides(fill = guide_legend(order = 1, title.position = "top", title.hjust = 0.5))
ggsave(file.path("figures_manuscript", "MannWhitney_SplitYearSensitivity-peak2z.png"),
plot = p_mw_hist_p2z, width = 190, height = 220, units = "mm")
## organize data into a table:
# Metric, Split Year, # Sig Wet, # Sig Dry, # Not Sig, Mean Change Sig Wet, Mean Change Sig Dry
mw_summary_table <-
df_mw %>%
dplyr::group_by(metric, mw_yr) %>%
dplyr::summarize(n_SigDry = sum(mw_sig == "SigDry"),
n_SigWet = sum(mw_sig == "SigWet"),
n_NotSig = sum(mw_sig == "NotSig"),
n_tested = n_SigDry + n_SigWet + n_NotSig,
prc_SigDry = n_SigDry/540,
prc_SigWet = n_SigWet/540,
prc_NotSig = n_NotSig/540) %>%
dplyr::mutate(SigDryText = paste0("n = ", n_SigDry, " (", round(prc_SigDry*100, 1), "%)"),
SigWetText = paste0("n = ", n_SigWet, " (", round(prc_SigWet*100, 1), "%)")) %>%
dplyr::select(metric, mw_yr, SigDryText, SigWetText)
readr::write_csv(mw_summary_table, file.path("figures_manuscript", "MannWhitney_SplitYearSensitivity.csv"))
|
/figures_manuscript/MannWhitney_SplitYearSensitivity.R
|
no_license
|
dry-rivers-rcn/IntermittencyTrends
|
R
| false | false | 7,667 |
r
|
\name{CPTtools-package}
\alias{CPTtools-package}
\alias{CPTtools}
\docType{package}
\title{
\packageTitle{CPTtools}
}
\description{
\packageDescription{CPTtools}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{CPTtools}
CPTtools is a collection of various bits of R code useful for processing
Bayes net output. Some were designed to work with ETS's proprietary
StatShop code, and some with RNetica. The code collected in this
package is all free from explicit dependencies on the specific Bayes net
package and will hopefully be useful with other systems as well.
The majority of the code is related to building conditional probability
tables (CPTs) for Bayesian networks. The package has two output
representations for a CPT. The first is a \code{data.frame} object
where the first several columns are factor variables corresponding to
the parent variables, and the remaining columns are numeric variables
corresponding to the states of the child variable. The rows represent
possible configurations of the parent variables. An example is shown below.
\preformatted{
S1 S2 Full Partial None
1 High High 0.81940043 0.15821522 0.02238436
2 Medium High 0.46696668 0.46696668 0.06606664
3 Low High 0.14468106 0.74930671 0.10601223
4 High Medium 0.76603829 0.14791170 0.08605000
5 Medium Medium 0.38733177 0.38733177 0.22533647
6 Low Medium 0.10879020 0.56342707 0.32778273
7 High Low 0.65574465 0.12661548 0.21763987
8 Medium Low 0.26889642 0.26889642 0.46220715
9 Low Low 0.06630741 0.34340770 0.59028489
10 High LowerYet 0.39095414 0.07548799 0.53355787
11 Medium LowerYet 0.11027649 0.11027649 0.77944702
12 Low LowerYet 0.02337270 0.12104775 0.85557955
}
The second representation is a table (\code{matrix}) with just the
numeric part. Two approaches to building these tables from parameters
are described below. The more flexible discrete partial credit model is
used for the basis of the parameterized networks in the
\code{\link[Peanut:Peanut-package]{Peanut}} package.
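The \code{\link{numericPart}} utility listed below extracts the numeric
(matrix) form from the data frame form; a minimal sketch, assuming the
frame shown above is stored in an object named \code{cpt}:
\preformatted{
cptMatrix <- numericPart(cpt)   # numeric matrix, one row per parent configuration
}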
In addition to the code for building partial credit networks, this
package contains some code for building Bayesian network structures from
(inverse) correlation matrixes, and graphical displays for Bayes net
output. The latter includes some diagnostic plots and additional
diagnostic tests.
}
\section{Discrete Partial Credit Framework}{
The original parameterization for creating conditional probability
tables based on Almond et al (2001) proved to be insufficiently
flexible. Almond (2015) describes a newer parameterization based on
three steps:
\enumerate{
\item{Translate the parent variables onto a numeric effective theta
scale (\code{\link{effectiveThetas}}).}
\item{Combine the parent effective thetas into a single effective
theta using a combination rule (\code{\link{Compensatory}},
\code{\link{OffsetConjunctive}}).}
\item{Convert the effective theta for each row of the table into
conditional probabilities using a link function
(\code{\link{gradedResponse}}, \code{\link{partialCredit}},
\code{\link{normalLink}}).}
}
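A minimal sketch of these three steps for a single parent variable (the
function names are those linked above, but the exact call pattern is an
illustrative assumption rather than a definitive recipe):
\preformatted{
theta1 <- effectiveThetas(3)                       # step 1: effective thetas
et <- Compensatory(data.frame(S1 = theta1), 1, 0)  # step 2: combine (alpha 1, beta 0)
## step 3: a link function such as partialCredit() turns et into row
## probabilities; calcDPCTable()/calcDPCFrame() wrap all three steps
## (see the examples section below).
}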
The \code{\link{partialCredit}} link function is particularly flexible
as it allows different parameterizations and different combination rules
for each state of the child variable. This functionality is best
captured by the two high level functions:
\describe{
\item{\code{\link{calcDPCTable}}}{Creates the probability table for
the discrete partial credit model given the parameters.}
\item{\code{\link{mapDPC}}}{Finds an MAP estimate for the parameters
given an observed table of counts.}
}
This parameterization serves as basis for the model used in the
\code{\link[Peanut:Peanut-package]{Peanut}} package.
}
\section{Other parametric CPT models}{
The first two steps of the discrete partial credit framework outlined
above are due to a suggestion by Lou DiBello (Almond et al, 2001).
This led to an older framework, in which the link function was hard
coded into the conditional probability table formation. The models
were called DiBello-\emph{XX}, where \emph{XX} is the name of the link
function. Almond et al. (2015) describes several additional
examples.
\describe{
\item{\code{\link{calcDDTable}}}{Calculates DiBello-Dirichlet model
probability and parameter tables.}
\item{\code{\link{calcDNTable}}}{Creates the probability table for
DiBello-Normal distribution. This is equivalent to using the
\code{\link{normalLink}} in the DPC framework. This also uses a link
scale parameter.}
\item{\code{\link{calcDSTable}}}{Creates the probability table for
DiBello-Samejima distribution. This is equivalent to using the
\code{\link{gradedResponse}} in the DPC framework.}
\item{\code{\link{calcDSllike}}}{Calculates the log-likelihood for
data from a DiBello-Samejima (Normal) distribution.}
}
Diez (1993) and Srinivas (1993) describe an older parametric framework
for Bayes nets based on the noisy-or or noisy-max function. These are
also available.
\describe{
\item{\code{\link{calcNoisyAndTable}}}{Calculate the conditional
probability table for a Noisy-And or Noisy-Min distribution.}
\item{\code{\link{calcNoisyOrTable}}}{Calculate the conditional
probability table for a Noisy-Or distribution.}
}
}
\section{Building Bayes nets from (inverse) correlation matrixes}{
Almond (2010) noted that in many cases the best information about the
relationship among variables came from a procedure that produces a
correlation matrix (e.g., a factor analysis). Applying a trick from
Whittaker (1990), connecting pairs of nodes corresponding to nonzero
entries in an inverse correlation matrix produces an undirected
graphical model. Ordering the nodes in a perfect ordering allows
the undirected model to be converted into a directed model (Bayesian
network). The conditional probability tables can then be created
through a series of regressions.
The following functions implement this protocol:
\describe{
\item{\code{\link{structMatrix}}}{Finds graphical structure from a
covariance matrix.}
\item{\code{\link{mcSearch}}}{Orders variables using Maximum
Cardinality search.}
\item{\code{\link{buildParentList}}}{Builds a list of parents of
nodes in a graph.}
\item{\code{\link{buildRegressions}}}{Creates a series of regressions
from a covariance matrix.}
\item{\code{\link{buildRegressionTables}}}{Builds conditional
probability tables from regressions.}
}
}
\section{Other model construction tools}{
These functions are a grab bag of lower level utilities useful for
building CPTs:
\describe{
\item{\code{\link{areaProbs}}}{Translates between normal and
categorical probabilities.}
\item{\code{\link{numericPart}}}{Splits a mixed data frame into a
numeric matrix and a factor part.}
\item{\code{\link{dataTable}}}{Constructs a table of counts from a
set of discrete observations.}
\item{\code{\link{eThetaFrame}}}{Constructs a data frame showing the
effective thetas for each parent combination.}
\item{\code{\link{effectiveThetas}}}{Assigns effective theta levels
for a categorical variable.}
\item{\code{\link{getTableStates}}}{Gets metadata about a
conditional probability table.}
\item{\code{\link{rescaleTable}}}{Rescales the numeric part of the
table.}
\item{\code{\link{scaleMatrix}}}{Scales a matrix to have a unit
diagonal.}
\item{\code{\link{scaleTable}}}{Scales a table according to the Sum
and Scale column.}
}
}
\section{Bayes net output displays and tests}{
Almond et al. (2009) suggest using hanging barplots for displaying
Bayes net output and give several examples. The function
\code{\link{stackedBars}} produces the simple version of this plot and
the function \code{\link{compareBars}} compares two distributions
(e.g., prior and posterior). The function
\code{\link{buildFactorTab}} is useful for building the data and the
function \code{\link{colorspread}} is useful for building color
gradients.
Madigan, Mosurski and Almond (1997) describe a graphical weight of
evidence balance sheet (see also Almond et al, 2015, Chapter 7; Almond
et al, 2013). The function \code{\link{woeHist}} calculates the weights of
evidence for a series of observations and the function
\code{\link{woeBal}} produces a graphical display.
Sinharay and Almond (2006) propose a graphical fit test for
conditional probability tables (see also, Almond et al, 2015, Chapter
10). The function \code{\link{OCP}} implements this test, and the
function \code{\link{betaci}} creates the beta credibility intervals
around which the function is built.
The key to Bayesian network models is the set of conditional
independence assumptions which underlie the model. The function
\code{\link{localDepTest}} tests these assumptions based on observed
(or imputed) data tables.
The function \code{\link{mutualInformation}} calculates the mutual
information of a two-way table, a measure of the strength of
association. This is similar to the measure used in many Bayes net
packages (e.g., \code{\link[RNetica]{MutualInfo}}).
}
\section{Data sets}{
Two data sets are provided with this package:
\describe{
\item{\code{\link{ACED}}}{Data from ACED field trial (Shute, Hansen,
and Almond, 2008). This example is based on a field trial of a
Bayesian network based Assessment for Learning system, and contains
both item-level response and high-level network summaries. A
complete description of the Bayes net can be found at
\url{http://ecd.ralmond.net/ecdwiki/ACED/ACED}.}
\item{\code{\link{MathGrades}}}{Grades on 5 mathematics tests from
Mardia, Kent and Bibby (from Whittaker, 1990).}
}
}
\section{Index}{
Complete index of all functions.
\packageIndices{CPTtools}
}
\author{
\packageAuthor{CPTtools}
Maintainer: \packageMaintainer{CPTtools}
}
\references{
Almond, R.G. (2015). An IRT-based Parameterization for Conditional
Probability Tables. Paper submitted to the 2015 Bayesian Application
Workshop at the Uncertainty in Artificial Intelligence conference.
Almond, R.G., Mislevy, R.J., Steinberg, L.S., Williamson, D.M. and
Yan, D. (2015) \emph{Bayesian Networks in Educational Assessment.}
Springer.
Almond, R. G. (2010). \sQuote{I can name that Bayesian network in two
matrixes.} \emph{International Journal of Approximate Reasoning.}
\bold{51}, 167-178.
Almond, R. G., Shute, V. J., Underwood, J. S., and Zapata-Rivera,
J.-D (2009). Bayesian Networks: A Teacher's View. \emph{International
Journal of Approximate Reasoning.} \bold{50}, 450-460.
Almond, R.G., DiBello, L., Jenkins, F., Mislevy, R.J.,
Senturk, D., Steinberg, L.S. and Yan, D. (2001) Models for Conditional
Probability Tables in Educational Assessment. \emph{Artificial
Intelligence and Statistics 2001} Jaakkola and Richardson (eds).,
Morgan Kaufmann, 137--143.
Diez, F. J. (1993) Parameter adjustment in Bayes networks. The
generalized noisy OR-gate. In Heckerman and Mamdani (eds)
\emph{Uncertainty in Artificial Intelligence 93.} Morgan Kaufmann.
99--105.
Muraki, E. (1992). A Generalized Partial Credit Model: Application
of an EM Algorithm. \emph{Applied Psychological Measurement}, \bold{16},
159-176. DOI: 10.1177/014662169201600206
Samejima, F. (1969) Estimation of latent ability using a
response pattern of graded scores. \emph{Psychometrika Monograph No.
17}, \bold{34}, (No. 4, Part 2).
Shute, V. J., Hansen, E. G., & Almond, R. G. (2008). You can't fatten
a hog by weighing it---Or can you? Evaluating an assessment for learning
system called ACED. \emph{International Journal of Artificial
Intelligence and Education}, \bold{18}(4), 289-316.
Sinharay, S. and Almond, R.G. (2006). Assessing Fit of Cognitively
Diagnostic Models: A case study. \emph{Educational and Psychological
Measurement}. \bold{67}(2), 239--257.
Srinivas, S. (1993) A generalization of the Noisy-Or model, the
generalized noisy OR-gate. In Heckerman and Mamdani (eds)
\emph{Uncertainty in Artificial Intelligence 93.} Morgan Kaufmann.
208--215.
Whittaker, J. (1990). \emph{Graphical Models in Applied Multivariate
Statistics}. Wiley.
Madigan, D., Mosurski, K. and Almond, R. (1997) Graphical explanation
in belief networks. \emph{Journal of Computational Graphics and
Statistics}, \bold{6}, 160-181.
Almond, R. G., Kim, Y. J., Shute, V. J. and Ventura, M. (2013).
Debugging the Evidence Chain. In Almond, R. G. and Mengshoel,
O. (Eds.) \emph{Proceedings of the 2013 UAI Application Workshops:
Big Data meet Complex Models and Models for Spatial, Temporal and
Network Data (UAI2013AW)}, 1-10.
\url{http://ceur-ws.org/Vol-1024/paper-01.pdf}
}
\keyword{ package }
\seealso{
\code{\link[RNetica]{RNetica}}
\code{\link[Peanut:Peanut-package]{Peanut}}
}
\examples{
## Set up variables
skill1l <- c("High","Medium","Low")
skill2l <- c("High","Medium","Low","LowerYet")
correctL <- c("Correct","Incorrect")
pcreditL <- c("Full","Partial","None")
gradeL <- c("A","B","C","D","E")
## New Discrete Partial Credit framework:
## Complex model, different rules for different levels
cptPC2 <- calcDPCFrame(list(S1=skill1l,S2=skill2l),pcreditL,
list(full=log(1),partial=log(c(S1=1,S2=.75))),
betas=list(full=c(0,999),partial=1.0),
rule=list("OffsetDisjunctive","Compensatory"))
## Graded Response using the older DiBello-Samejima framework.
cptGraded <- calcDSTable(list(S1=skill1l),gradeL, 0.0, 0.0, dinc=c(.3,.4,.3))
## Building a Bayes net from a correlation matrix.
data(MathGrades)
pl <- buildParentList(structMatrix(MathGrades$var),"Algebra")
rt <- buildRegressions(MathGrades$var,MathGrades$means,pl)
tabs <- buildRegressionTables(rt, MathGrades$pvecs, MathGrades$means,
sqrt(diag(MathGrades$var)))
## Stacked Barplots:
margins.prior <- data.frame (
Trouble=c(Novice=.19,Semester1=.24,Semester2=.28,Semester3=.20,Semester4=.09),
NDK=c(Novice=.01,Semester1=.09,Semester2=.35,Semester3=.41,Semester4=.14),
Model=c(Novice=.19,Semester1=.28,Semester2=.31,Semester3=.18,Semester4=.04)
)
margins.post <- data.frame(
Trouble=c(Novice=.03,Semester1=.15,Semester2=.39,Semester3=.32,Semester4=.11),
NDK=c(Novice=.00,Semester1=.03,Semester2=.28,Semester3=.52,Semester4=.17),
Model=c(Novice=.10,Semester1=.25,Semester2=.37,Semester3=.23,Semester4=.05))
stackedBars(margins.post,3,
main="Marginal Distributions for NetPASS skills",
sub="Baseline at 3rd Semester level.",
cex.names=.75, col=hsv(223/360,.2,0.10*(5:1)+.5))
compareBars(margins.prior,margins.post,3,c("Prior","Post"),
main="Margins before/after Medium Trouble Shooting Task",
sub="Observables: cfgCor=Medium, logCor=High, logEff=Medium",
legend.loc = "topright",
cex.names=.75, col1=hsv(h=.1,s=.2*1:5-.1,alpha=1),
col2=hsv(h=.6,s=.2*1:5-.1,alpha=1))
## Weight of evidence balance sheets
sampleSequence <- read.csv(paste(library(help="CPTtools")$path,
"testFiles","SampleStudent.csv",
sep=.Platform$file.sep),
header=TRUE,row.names=1)
woeBal(sampleSequence[,c("H","M","L")],c("H"),c("M","L"),lcex=1.25)
### Observable Characteristic Plot
pi <- c("+"=.15,"-"=.85)
nnn <- c("(0,0,0)"=20,"(0,0,1)"=10,
"(0,1,0)"=10,"(0,1,0)"=5,
"(1,0,0)"=10,"(1,0,1)"=10,
"(1,1,1)"=10,"(1,1,1)"=25)
xx1 <- c("(0,0,0)"=2,"(0,0,1)"=5,
"(0,1,0)"=1,"(0,1,1)"=3,
"(1,0,0)"=0,"(1,0,1)"=2,
"(1,1,0)"=5,"(1,1,1)"=24)
grouplabs <- c(rep("-",3),"+")
grouplabs1 <- rep(grouplabs,each=2)
OCP2 (xx1,nnn,grouplabs1,pi,c("-","+"),ylim=c(0,1), reflty=c(2,4),
setlabs=c("Low Skill3","High Skill3"),setat=-.8,
main="Data for which Skill 3 is relevant")
}
|
/man/CPTtools-package.Rd
|
permissive
|
erge324/CPTtools
|
R
| false | false | 16,265 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\name{data_path}
\alias{data_path}
\title{Represents a path to data in a datastore.}
\usage{
data_path(datastore, path_on_datastore = NULL, name = NULL)
}
\arguments{
\item{datastore}{The Datastore to reference.}
\item{path_on_datastore}{The relative path in the backing storage for the data reference.}
\item{name}{An optional name for the DataPath.}
}
\value{
The \code{DataPath} object.
}
\description{
The path represented by a DataPath object can point to a directory or a data artifact (blob, file).
}
\examples{
\dontrun{
my_data <- register_azure_blob_container_datastore(
workspace = ws,
datastore_name = blob_datastore_name,
container_name = ws_blob_datastore$container_name,
account_name = ws_blob_datastore$account_name,
account_key = ws_blob_datastore$account_key,
create_if_not_exists = TRUE)
datapath <- data_path(my_data, <path_on_my_datastore>)
dataset <- create_file_dataset_from_files(datapath)
}
}
\seealso{
\code{\link{create_file_dataset_from_files}}
\code{\link{create_tabular_dataset_from_parquet_files}}
\code{\link{create_tabular_dataset_from_delimited_files}}
\code{\link{create_tabular_dataset_from_json_lines_files}}
\code{\link{create_tabular_dataset_from_sql_query}}
}
|
/man/data_path.Rd
|
permissive
|
revodavid/azureml-sdk-for-r
|
R
| false | true | 1,314 |
rd
|
# nocov - compat-purrr (last updated: rlang 0.0.0.9007)
# This file serves as a reference for compatibility functions for
# purrr. They are not drop-in replacements but allow a similar style
# of programming. This is useful in cases where purrr is too heavy a
# package to depend on. Please find the most recent version in rlang's
# repository.
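# Illustrative usage of the helpers defined below (they are called like
# their purrr counterparts):
#   map_dbl(list(a = 1:3, b = 4:6), mean)            # named numeric: a = 2, b = 5
#   keep(list(1:3, 0, 5:7), function(x) sum(x) > 5)  # keeps elements 1 and 3
#   map2(1:3, 4:6, `+`)                              # list(5, 7, 9)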
map <- function(.x, .f, ...) {
lapply(.x, .f, ...)
}
map_mold <- function(.x, .f, .mold, ...) {
out <- vapply(.x, .f, .mold, ..., USE.NAMES = FALSE)
names(out) <- names(.x)
out
}
map_lgl <- function(.x, .f, ...) {
map_mold(.x, .f, logical(1), ...)
}
map_int <- function(.x, .f, ...) {
map_mold(.x, .f, integer(1), ...)
}
map_dbl <- function(.x, .f, ...) {
map_mold(.x, .f, double(1), ...)
}
map_chr <- function(.x, .f, ...) {
map_mold(.x, .f, character(1), ...)
}
map_cpl <- function(.x, .f, ...) {
map_mold(.x, .f, complex(1), ...)
}
pluck <- function(.x, .f) {
map(.x, `[[`, .f)
}
pluck_lgl <- function(.x, .f) {
map_lgl(.x, `[[`, .f)
}
pluck_int <- function(.x, .f) {
map_int(.x, `[[`, .f)
}
pluck_dbl <- function(.x, .f) {
map_dbl(.x, `[[`, .f)
}
pluck_chr <- function(.x, .f) {
map_chr(.x, `[[`, .f)
}
pluck_cpl <- function(.x, .f) {
map_cpl(.x, `[[`, .f)
}
map2 <- function(.x, .y, .f, ...) {
Map(.f, .x, .y, ...)
}
map2_lgl <- function(.x, .y, .f, ...) {
as.vector(map2(.x, .y, .f, ...), "logical")
}
map2_int <- function(.x, .y, .f, ...) {
as.vector(map2(.x, .y, .f, ...), "integer")
}
map2_dbl <- function(.x, .y, .f, ...) {
as.vector(map2(.x, .y, .f, ...), "double")
}
map2_chr <- function(.x, .y, .f, ...) {
as.vector(map2(.x, .y, .f, ...), "character")
}
map2_cpl <- function(.x, .y, .f, ...) {
as.vector(map2(.x, .y, .f, ...), "complex")
}
args_recycle <- function(args) {
lengths <- map_int(args, length)
n <- max(lengths)
stopifnot(all(lengths == 1L | lengths == n))
to_recycle <- lengths == 1L
args[to_recycle] <- map(args[to_recycle], function(x) rep.int(x, n))
args
}
pmap <- function(.l, .f, ...) {
args <- args_recycle(.l)
do.call("mapply", c(
FUN = list(quote(.f)),
args, MoreArgs = quote(list(...)),
SIMPLIFY = FALSE, USE.NAMES = FALSE
))
}
probe <- function(.x, .p, ...) {
if (is_logical(.p)) {
stopifnot(length(.p) == length(.x))
.p
} else {
map_lgl(.x, .p, ...)
}
}
keep <- function(.x, .f, ...) {
.x[probe(.x, .f, ...)]
}
discard <- function(.x, .p, ...) {
sel <- probe(.x, .p, ...)
.x[is.na(sel) | !sel]
}
map_if <- function(.x, .p, .f, ...) {
matches <- probe(.x, .p)
.x[matches] <- map(.x[matches], .f, ...)
.x
}
compact <- function(.x) {
Filter(length, .x)
}
transpose <- function(.l) {
inner_names <- names(.l[[1]])
if (is.null(inner_names)) {
fields <- seq_along(.l[[1]])
} else {
fields <- set_names(inner_names)
}
map(fields, function(i) {
map(.l, .subset2, i)
})
}
every <- function(.x, .p, ...) {
for (i in seq_along(.x)) {
if (!rlang::is_true(.p(.x[[i]], ...))) return(FALSE)
}
TRUE
}
some <- function(.x, .p, ...) {
for (i in seq_along(.x)) {
if (rlang::is_true(.p(.x[[i]], ...))) return(TRUE)
}
FALSE
}
negate <- function(.p) {
function(...) !.p(...)
}
reduce <- function(.x, .f, ..., .init) {
f <- function(x, y) .f(x, y, ...)
Reduce(f, .x, init = .init)
}
reduce_right <- function(.x, .f, ..., .init) {
f <- function(x, y) .f(y, x, ...)
Reduce(f, .x, init = .init, right = TRUE)
}
accumulate <- function(.x, .f, ..., .init) {
f <- function(x, y) .f(x, y, ...)
Reduce(f, .x, init = .init, accumulate = TRUE)
}
accumulate_right <- function(.x, .f, ..., .init) {
f <- function(x, y) .f(y, x, ...)
Reduce(f, .x, init = .init, right = TRUE, accumulate = TRUE)
}
# nocov end
|
/R/compat-purrr.R
|
no_license
|
krlmlr/brushthat
|
R
| false | false | 3,736 |
r
|
gammaresiduals <-
function(Y,X,model){
Y <- as.matrix(Y)
residuals <- model$residuals
variance <- model$variance
phi <- model$precision
yestimado <- model$fitted.values
# Absolute residuals
rabs <- abs(residuals)
# Pearson (standardized) residuals
rp <- residuals/sqrt(variance)
# Deviance residuals (unit deviances)
rd <- -2*(log(Y/yestimado) - (Y-yestimado)/yestimado)
# Asterisk (r*) residuals
rast <- (log(Y) + log(phi/yestimado) - digamma(phi))/sqrt(trigamma(phi))
gammaresiduals<- list()
gammaresiduals$abs <- rabs
gammaresiduals$pearson <-rp
gammaresiduals$deviance <- rd
gammaresiduals$rgamma<- rast
return(gammaresiduals)
}
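# Illustrative call (sketch): the model argument only needs the fields read
# above (residuals, variance, precision, fitted.values), as supplied by a fit
# from this package; the mock list below is a stand-in, not the package's
# actual fit structure.
# y <- rgamma(50, shape = 2, rate = 0.5)
# mock <- list(residuals = y - mean(y), variance = rep(var(y), 50),
#              precision = 2, fitted.values = rep(mean(y), 50))
# str(gammaresiduals(y, X = NULL, mock))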
|
/R/gammaresiduals.R
|
no_license
|
cran/Bayesiangammareg
|
R
| false | false | 688 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Package.R
\docType{package}
\name{PLEFinal-package}
\alias{PLEFinal}
\alias{PLEFinal-package}
\title{PLEFinal: A Package Skeleton for Comparative Effectiveness Studies}
\description{
A skeleton package, to be used as a starting point when implementing comparative effectiveness studies.
}
\keyword{internal}
|
/PLEFinal/man/SkeletonComparativeEffectStudy-package.Rd
|
permissive
|
jennifercelane/PLEMSKAI_working
|
R
| false | true | 390 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get.IDR.discrete.R
\name{get.IDR.discrete}
\alias{get.IDR.discrete}
\title{compute IDR for discrete categories}
\usage{
get.IDR.discrete(idr, cat.counts)
}
\arguments{
\item{idr}{local idr for each category.}
\item{cat.counts}{the number of observations in each category.}
}
\value{
a numerical vector of the expected irreproducible discovery rate for categories that are as irreproducible or more irreproducible than the given categories.
}
\description{
compute IDR for discrete categories
}
|
/man/get.IDR.discrete.Rd
|
no_license
|
TaoYang-dev/gIDR
|
R
| false | true | 592 |
rd
|
## Packages used
library(dplyr); library(tidyr)
## Download data
if(!file.exists("./data")){
dir.create("./data")
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile = "./data/zip.zip", method = "curl")
rm(fileUrl)
unzip("./data/zip.zip", exdir = "./data")
}
## Read data
options("stringsAsFactors" = FALSE)
features <- read.table("./data/UCI HAR Dataset/features.txt")
activity <- read.table("./data/UCI HAR Dataset/activity_labels.txt")
test.subject <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
test.activity <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
test.measures <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
train.subject <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
train.activity <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
train.measures <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
## Merge test and train data
test <- data.frame(test.subject, test.activity, test.measures)
train <- data.frame(train.subject, train.activity, train.measures)
samsung <- rbind(test, train)
## Add column names
features <- features$V2
names(samsung) <- c("subject", "activity", features)
## Translate activity to all lower-case
activity <- activity$V2
activity <- tolower(activity)
## Convert activity and subject to factors
samsung$activity <- factor(samsung$activity, labels = activity)
samsung$subject <- factor(samsung$subject, ordered = FALSE)
## Subset mean() and std() from samsung
mean <- grep("mean[^F]", names(samsung))
std <- grep("std", names(samsung))
criteria <- c(mean, std)
samsung <- samsung[, c(1:2, criteria)]
## Clear memory of unnecessary objects
rm(features, activity, test.subject, test.activity, test.measures, train.subject,
train.activity, train.measures, test, train, mean, std, criteria)
## Convert samsung to a tbl_df object
samsung <- tbl_df(samsung)
## Add "all" to end of multidirectional features
index <- grep("[^X-Z]$", names(samsung))
index <- index[-(1:2)]
for(i in index){
names(samsung)[i] <- paste(names(samsung)[i], "all", sep = "-")
}
rm(index, i)
## Gather variables and separate into feature, summary, direction, and measure
samsung <- samsung %>%
gather(demo, measure, -subject, -activity) %>%
separate(demo, c("feature", "summary", "axis"), sep = "-")
## Convert feature, summary, and direction to factors
samsung <- samsung %>%
mutate(feature = factor(feature),
summary = factor(summary, labels = c("mean", "std")),
axis = factor(tolower(axis)))
## Write samsung to file
if(!file.exists("./samsung.txt")){
write.table(samsung, "./samsung.txt", row.names = FALSE)
}
## Summarize samsung by average of each summary for each direction,
## each feature, each activity, and each subject.
summarized <- samsung %>%
group_by(subject, activity, feature, axis, summary) %>%
summarize(average = mean(measure))
## Write summarized to file
if(!file.exists("./summarized.txt")){
write.table(summarized, "./summarized.txt", row.names = FALSE)
}
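## ---- Editor's note (addition, not part of the original script) ----
## Sketch of reading the exported tidy files back in: write.table() output needs
## header = TRUE on the way back. Guarded by file.exists() so it is a no-op
## until the files above have actually been written.
if(file.exists("./summarized.txt")){
    summarized_check <- read.table("./summarized.txt", header = TRUE)
    str(summarized_check)
}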
|
/run_analysis.R
|
no_license
|
mattayes/samsung-har
|
R
| false | false | 3,177 |
r
|
library(tidyverse)
library(scales)
library(Cairo)
theme_set(theme_classic())
Ex_1 <- tribble(
~Tier, ~Number_Account, ~Percentage_Accounts, ~Revenue_M, ~Percentage_Revenue,
'A', 77, 7.08, 4.68, 25,
'A+', 19, 1.75, 3.93, 21,
'B', 338, 31.07, 5.98, 32,
'C', 425, 39.06, 2.81, 15,
'D', 24, 2.21, 0.37, 2
) %>%
mutate(
class = ifelse((Percentage_Accounts - Percentage_Revenue) < 0, 'blue', 'slategrey')
)
left_label <- Ex_1$Tier
positions_y <- Ex_1$Percentage_Accounts
positions_y[c(2,5)] <- positions_y[c(2,5)] + c(-.5,.5)
ggplot(Ex_1) +
geom_segment(aes(x=1, xend=2, y=Percentage_Accounts, yend=Percentage_Revenue, col=class),
size=.75, show.legend=F) +
geom_vline(xintercept=1, linetype="dashed", size=.1, color = 'lightslategrey') +
geom_vline(xintercept=2, linetype="dashed", size=.1, color = 'lightslategrey') +
scale_color_manual(labels = c("Up", "Down"),
values = c("slategrey"="slategrey", "blue"="blue")) + # color of lines
labs(x="", y="",
title = 'New Client tier share changes when looking at Accounts or Revenue') + # Axis labels
scale_x_continuous(limits = c(.5, 2.5), breaks = NULL) +
scale_y_continuous(
limits = c(0,(1.1*(max(Ex_1$Percentage_Accounts, Ex_1$Percentage_Revenue)))),
labels = percent_format(scale = 1)
) +
geom_text(
label=left_label, y=positions_y,
x=c(.99,1.005,.99,.99,.99), hjust=1.2, size=3
) +
geom_text(
label=left_label, y=Ex_1$Percentage_Revenue,
x=c(2.01,2.01,2.01,2.01,2.01), hjust=-.2, size=3
) +
geom_text(
label="Participation\nin Accounts", x=.68, y = 1.1*(max(Ex_1$Percentage_Accounts, Ex_1$Percentage_Revenue)),
hjust=0, size=4.3, color = 'darkslategrey') +
geom_text(
label="Participation\nin Revenue", x=2.02, y = 1.1*(max(Ex_1$Percentage_Accounts, Ex_1$Percentage_Revenue)),
hjust=0, size=4.3, color = 'darkslategrey') +
geom_text(
label = "C tier has low participation in \nrevenues despite the biggest \nshare of new accounts.",
x = 2.1, y = 30, hjust = 0, size = 3.5, color = 'slategrey'
) +
geom_text(
label = "Together A and A+ make up for \nalmost half of the revenue \ndespite low share of \naccounts.",
x = 2.1, y = 20, hjust = 0, size = 3.5, color = 'blue'
) +
theme(panel.background = element_blank(),
panel.grid = element_blank(),
axis.ticks.y = element_line(color = 'lightslategrey'),
axis.text.x = element_blank(),
axis.line.x = element_blank(),
axis.line.y = element_line(color = 'lightslategrey'),
axis.text.y = element_text(color = 'lightslategrey'),
panel.border = element_blank(),
title = element_text(colour = "darkslategrey", face = 'bold'))
path <- paste0(here::here("docs", "assets", "images"),"/", '2019_10_SWD.png')
ggsave(path, type = 'cairo', scale = 1.5)
|
/Storytelling_with_Data/2019_10_SWD_Challenge.R
|
no_license
|
jorgel-mendes/Behold-the-Vision
|
R
| false | false | 2,875 |
r
|
# scalar: an object (a variable) that stores a single value.
# vector: an object that stores several values of one type in one dimension.
# Examples of scalars
x <- 100 # x: a scalar holding one number
name <- '오쌤' # name: a scalar holding one character string
name
# In R, strings can be wrapped in single quotes ('') or double quotes ("").
# (For comparison) In SQL, only single quotes may be used for strings.
is_big <- TRUE # a scalar storing one logical value (TRUE, FALSE).
is_big <- (5 > 3)
is_big <- (3 > 5)
# Comparison operators (>, >=, <, <=, ==, !=)
is_same <- (3 == 5)
# Examples of vectors
# c(): combine
numbers <- c(1, 2, 10, 20, 50, 100)
# a vector storing 6 numeric values
numbers
stu_names <- c('Abc', '홍길동')
# a vector storing 2 character strings
stu_names
bools <- c(TRUE, TRUE, FALSE, TRUE, FALSE)
# a vector storing 5 logical values
# How to select elements of a vector - use indexes.
# 1) Select one element at a specific position (index):
numbers[1]
numbers[2]
# 2) Select several elements within an index range:
numbers[2:4] # selects elements with 2 <= index <= 4
# 3) Select elements at several specific positions (indexes):
numbers[c(1, 4, 6)]
# To store (assign) a value in a variable in R: variable <- value
# The variable is created in the Global Environment.
# To pass an argument to a function when calling it: arg = value
# function: a unit of functionality; an operation.
# argument: a value passed to a function when it is called.
# mandatory argument: a value that must be supplied when calling the function.
# optional argument: a value that may be omitted when calling the function
# because a default value is already set.
# parameter: a variable inside the function that stores the argument.
# return value: the value a function gives back after doing its work; the result of the call.
# seq(): Sequence.
# Calling a function and passing arguments without naming the parameters.
evens <- seq(2, 10, 2) # a vector of numbers from 2 to 10, increasing by 2.
# Calling a function and specifying which value goes to which parameter.
odds <- seq(from = 1, to = 10, by = 2) # a vector of numbers from 1 to 10, increasing by 2.
# If optional arguments are omitted, their default values are used.
numbers <- seq(from = 1, to = 10) # the default by = 1 is used.
numbers
numbers <- seq(to = 5) # the defaults from = 1 and by = 1 are used.
numbers
countdown <- seq(from = 10, to = 1, by = -1) # a sequence decreasing from 10 to 1 by 1.
countdown
# vector and scalar arithmetic
numbers <- c(1, 10, 100)
numbers
numbers + 1
# vector and vector arithmetic
numbers1 <- c(1, 10, 100)
numbers2 <- c(2, 4, 6)
numbers1 + numbers2
|
/r02_scalar_vector.R
|
no_license
|
seanhong7777/R
|
R
| false | false | 3,065 |
r
|
#html_session_try adds:
#1.auto retry functionality using exponential delay (2s, 4s, 8s, 16s, etc.)
#2.use tryCatch to create a robust scraper: network issues or errors will not break the script, so it is safe to run in loops
#3.keep track of unsuccessful requests (including both errors and warnings). Conditions of failed requests are saved as attributes in the function output
html_session_try <- function(url,do_try=3,...){
library(rvest)
library(httr)
dots <- c(...)
#auto retry
my_session <- NULL
tried = 0
while(is.null(my_session) && tried <= do_try) {
tried <- tried + 1
tryCatch(
{
my_session <- suppressWarnings(html_session(url,dots))
},
error=function(cond){
try_error_message<<-conditionMessage(cond)
Sys.sleep(2^tried)
}
)
}
#if the request failed (an error occurred or status_code is not 200), the function output will be NA with attributes "status_code" and "condition_message"
if(is.null(my_session)){
my_session<-structure(NA,
status_code=NA,
condition_message=try_error_message)
} else if (status_code(my_session)!=200) {
my_session<-structure(NA,
status_code=status_code(my_session),
condition_message=NA)
} else {
my_session<-structure(my_session,
status_code=status_code(my_session),
condition_message=NA)
}
return(my_session)
}
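# ---- Usage sketch (editor's addition; not part of the original file) ----
# Illustrates the behaviour promised in the header comments: a failed request
# comes back as NA carrying the "status_code"/"condition_message" attributes,
# so a loop over many URLs never aborts. The URLs below are placeholders, and
# the block is wrapped in `if (FALSE)` so sourcing this file makes no requests.
if (FALSE) {
  urls <- c("https://example.com", "https://example.com/this-page-does-not-exist")
  sessions <- lapply(urls, html_session_try, do_try = 2)
  for (s in sessions) {
    if (length(s) == 1 && is.na(s)) {
      message("request failed: status_code = ", attr(s, "status_code"),
              ", condition = ", attr(s, "condition_message"))
    }
  }
}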
|
/R Projects/function/html_session_try.R
|
no_license
|
yusuzech/web-scraping-projects
|
R
| false | false | 1,613 |
r
|
# cmd_args=commandArgs(TRUE)
#
# ngenecl <- as.numeric(cmd_args[1]) # cells per cell type
# out <- cmd_args[2]
source("/proj/milovelab/mu/SC-ASE/simulation/cluster.R")
source("/proj/milovelab/mu/SC-ASE/simulation/fusedlasso.R")
library("smurf")
library(emdbook)
library(mclust)
library(pbapply)
library(aricode)
library(pheatmap)
ngenecl<-80
n<-10
cnt<-50
ncl<-4 #4 gene cluster. large AI,NO AI, consistent AI, small AI
ngene<-ncl*ngenecl
nct<-8
x <- factor(rep(1:nct,each=n))
mu1 <- 5
nb.disp <- 1/100
ncl<-4 #4 gene cluster. large AI,NO AI, consistent AI, small AI
step1<-4 #First AI step [0-4]
ans <- pbsapply(1:10, function(i) {
set.seed(i)
# total count
cts <- matrix(rep(c(rnbinom(ngene*n/2,mu=mu1,size=1/nb.disp),
rnbinom(ngene*n/2,mu=cnt,size=1/nb.disp)),nct),ncol = nct*n)
colnames(cts)<-paste0("cell",1:(nct*n))
p.vec <- (5 + rep(c(seq(from=-step1,to=step1,length.out=nct/2),rep(0,nct/2),rep(2,nct/2),seq(from=3.5,to=4.5,length.out=nct/2)),each=2))/10
p <- rep(p.vec, each=n*nct*ngene/length(p.vec)) # true prob
nclgene<-ngene/ncl #number genes within cluster
nclcell<-nct*n*nclgene #number elements within cluster
ase.cts<-lapply(1:ncl,function(m) {
matrix(rbetabinom(nclcell, prob=p[(nclcell*m-nclcell+1):(nclcell*m)], size=cts[(m*nclgene-nclgene+1):(m*nclgene),], theta=10),ncol = nct*n)})
ase.cts<-do.call(rbind,ase.cts)
ratio<-(ase.cts)/(cts)
ratio_pseudo<-(ase.cts+1)/(cts+2) ## pseudo allelic ratio for gene clustering
level<-paste0(rep("type",nct),1:nct) # pheatmap of ratio
# anno_df <- data.frame(celltype=rep(level,each=n), row.names=colnames(ratio_pseudo))
# pheatmap(ratio_pseudo, cluster_rows = FALSE, cluster_cols = FALSE,annotation_col=anno_df,show_colnames = F,
# color = colorRampPalette(colors = c("blue","white","red"))(100))
cluster<-genecluster(ratio_pseudo,nct=nct,G=ncl) #return gene cluster
mcl<-adjustedRandIndex(cluster,rep(1:ncl,each=ngene/ncl))
# modeling
out<-list()
for (j in 1:ncl) {
# poi<-which(cluster==unique(factor(cluster))[j]) # gene position
poi<-(ngenecl*j-ngenecl+1):(ngenecl*j) # gene position
r<-as.vector(ratio[poi,])
size<-as.vector(cts[poi,])
data=data.frame(x=rep(x,each=length(poi)),ratio=r,cts=size)
f <- ratio ~ p(x, pen="gflasso", refcat="1") # formula
t <- system.time(fit<-fusedlasso(formula=f,model="binomial",data=data,ncores=1))[[3]] # saving the elapsed time
t2 <- system.time(fit2<-fusedlasso(formula=f,model="gaussian",data,ncores=1))[[3]] # saving the elapsed time
co <- coef(fit)
co <- co + c(0,rep(co[1],nct-1))
a <- adjustedRandIndex(factor(p.vec[(nct*j-nct+1):(nct*j)]), factor(co))
co2 <- coef(fit2)
co2 <- co2 + c(0,rep(co2[1],nct-1))
a2 <- adjustedRandIndex(factor(p.vec[(nct*j-nct+1):(nct*j)]), factor(co2))
t3 <- system.time(fit3<-wilcox(data,nct,method="holm"))[[3]]
a3<-adjustedRandIndex(factor(p.vec[(nct*j-nct+1):(nct*j)]), factor(fit3))
out[[j]]=c(a,a2,a3,t,t2,t3)
}
out
},cl=10)
ans <- do.call(cbind, ans)
ans2<-rbind(ans,rep(1:200,each=4))
id<-which(!is.numeric(ans2[3,]))
# a<-pbsapply(1:ncol(ans),function(i){adjustedRandIndex(ans[,i],rep(c(0,1),each=ngene/ncl))})
# save the results as a data.frame
dat2 <- data.frame(type=rep(c("bin","gau","wilcoxon"),each=ncol(ans)),
ARI_mcl=as.vector(t(ans[1,])),
ARI=as.vector(t(ans[2:4,])),
cl=rep(c("large AI gap","NAI","consisAI","small AI gap"),n=ncol(ans)/ncl*3),
time=as.vector(t(ans[5:7,])))
# write out as a table
# write.table(dat, file="/proj/milovelab/mu/SC-ASE/simulation/csv/sim2.csv", row.names=FALSE, col.names=FALSE, quote=FALSE, sep=",")
write.table(dat2, file="/proj/milovelab/mu/SC-ASE/simulation/csv/sim2_80.csv", row.names=FALSE, col.names=FALSE, quote=FALSE, sep=",")
|
/simulation/sim2.R
|
no_license
|
Wancen/SC-ASE
|
R
| false | false | 3,866 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/currentarrows.R
\name{currentarrows}
\alias{currentarrows}
\title{Plot arrows and segments showing the size and direction of currents.}
\usage{
currentarrows(
data,
maxsize = 0.5,
maxn,
col = "blue",
lwd = 2,
arrowsize = 0.2,
center = T
)
}
\arguments{
\item{data}{Data in a list with components \code{lat} and \code{lon} with
decimal degrees, and \code{current} with the current magnitude.}
\item{maxsize}{Maximum current segment size.}
\item{maxn}{Current given with \code{maxsize}, defaults to
\code{max(data$current)}.}
\item{col}{Color of current arrows and segments.}
\item{lwd}{Line width of the segments showing current.}
\item{arrowsize}{Arrow size.}
\item{center}{Whether or not to center the arrow, defaults to \code{TRUE}.}
}
\description{
Plot arrows and segments showing the size and direction of currents.
}
\note{
Needs further checking and elaboration.
}
\keyword{aplot}
|
/man/currentarrows.Rd
|
no_license
|
Hafro/geo
|
R
| false | true | 986 |
rd
|
## This function creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
        inv <- NULL
        set <- function(y) { x <<- y; inv <<- NULL }
        list(set = set, get = function() x,
             setinverse = function(i) inv <<- i, getinverse = function() inv)
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not changed), then cacheSolve retrieves the inverse from the cache.
cacheSolve <- function(x, ...) {
        inv <- x$getinverse()
        if (is.null(inv)) { inv <- solve(x$get(), ...); x$setinverse(inv) }
        inv
}
## Computing the inverse of a square matrix can be done with the solve function in R. For example, if X is a square invertible matrix, then solve(X) returns its inverse
## Just looking if possible to edit
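## ---- Usage sketch (editor's addition) ----
## Quick check of the caching behaviour; the 2x2 matrix is arbitrary.
cached <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
cacheSolve(cached)   ## first call computes the inverse and stores it
cacheSolve(cached)   ## second call returns the cached inverse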
|
/cachematrix.R
|
no_license
|
datatool/ProgrammingAssignment2
|
R
| false | false | 619 |
r
|
#!/usr/bin/Rscript
# Daily Pick
##############
#
# Standalone script intended to be run by Cron job to report daily picks.
#
delta=30
theDate=as.Date(Sys.time())
#Share Select
setwd("/home/raffles/Raffles/")
source("Quantlib.R")
setwd("./Data/")
loadLocalData()
#Loads basic libraries and sets up required environments for Quantstrat
loadLibraries<-function()
{
require(slackr)
require(quantmod)
require(quantstrat)
require(readr)
require(chron)
}
loadLibraries()
library(chron)
#Make a list
shift<-list()
#Loop through known symbols
for (symbol in ls(LoadedSymbols))
{
data<-Cl(LoadedSymbols[[symbol]])
#Only use data we have access to.
data<-data[paste("::",theDate,sep="")]
#If we have enough data
if(nrow(data)>delta)
{
#Get ROC vs delta periods ago
#val=median(ROC(data,n = delta),na.rm = TRUE)
val=median(ROC(data,n = delta),na.rm = TRUE)
}
else
{
#Else shove it to bottom of the pile
val=0
}
shift[symbol]<-val
}
#Transpose and sort
res<-(t(as.data.frame(shift)))
res<-res[order(res,decreasing = TRUE),]
picks<-gsub(names(res),pattern = "\\.",replacement = ":")
write.csv(picks,"picks.csv")
picks<-head(picks,5)
slackr_setup()
slackrBot("Making daily picks from highest median gain in previous 30 days:")
slackrBot(print(head(res)))
messageLinks=""
for(pick in picks)
{
messageLinks<-paste(messageLinks,"\nhttp://www.iii.co.uk/research/",pick,sep="")
}
messageLinks<-gsub(messageLinks,pattern = "LON",replacement = "LSE")
slackrMsg(txt=messageLinks)
#Blart Everything to Slack
for(i in 1:5)
{
jpeg("Plot.jpeg")
barChart(LoadedSymbols[[picks[i]]],name=picks[i],TA='addRSI();addVo()')
dev.off()
slackrUpload(filename = "Plot.jpeg", title = picks[i], channels = "raffles")
}
|
/DailyPick.R.save
|
no_license
|
piratesjustarr/Raffles
|
R
| false | false | 1,755 |
save
|
testlist <- list(scale = 1.17613105186789e-309, shape = -2.95612684604669e-196)
result <- do.call(bama:::rand_igamma,testlist)
str(result)
|
/bama/inst/testfiles/rand_igamma/AFL_rand_igamma/rand_igamma_valgrind_files/1615926417-test.R
|
no_license
|
akhikolla/updatedatatype-list1
|
R
| false | false | 138 |
r
|
# Code is better suited to batch processing and automation; a mouse cannot replace that.
|
/excel案例.R
|
no_license
|
liuiscoding/R_learn
|
R
| false | false | 64 |
r
|
gap.barplot<-function (y,gap,xaxlab,xtics,yaxlab,ytics,xlim=NA,ylim=NA,
xlab=NULL,ylab=NULL,horiz=FALSE,col=NULL,...) {
if (missing(y)) stop("y values required")
if(missing(xtics)) xtics <- 1:length(y)
if (missing(gap)) stop("gap must be specified")
if (is.null(ylab)) ylab <- deparse(substitute(y))
if (is.null(col)) col <- color.gradient(c(0,1),c(0,1,0),c(1,0),length(y))
else if(length(col) < length(y)) rep(col,length.out=length(y))
littleones <- which(y <= gap[1])
bigones <- which(y >= gap[2])
valid.y<-y[!is.na(y)]
if(any(valid.y > gap[1] & valid.y < gap[2]))
warning("gap includes some values of y")
gapsize <- gap[2] - gap[1]
if(missing(xaxlab)) xaxlab <- as.character(xtics)
if(is.na(xlim[1])) xlim <- range(xtics)
if(is.na(ylim[1])) ylim <- c(min(valid.y)-gapsize,max(valid.y)-gapsize)
if(ylim[1] < 0) ylim[1]<-0
if(missing(ytics)) ytics <- pretty(y)
if(any(ytics<0)) ytics<-ytics[ytics >= 0]
if(missing(yaxlab)) yaxlab <- ytics
littletics <- which(ytics < gap[1])
bigtics <- which(ytics >= gap[2])
halfwidth <- min(diff(xtics))/2
if(horiz) {
if(!is.null(xlab)) {
tmplab<-xlab
xlab<-ylab
ylab<-tmplab
}
plot(0,xlim=ylim,ylim=xlim,xlab=xlab,ylab=ylab,axes=FALSE,type="n",...)
plot.lim <- par("usr")
botgap<-ifelse(gap[1]<0,gap[1],ylim[1])
box()
axis(2,at=xtics,labels=xaxlab,...)
axis(1,at=c(ytics[littletics],ytics[bigtics]-gapsize),
labels=c(yaxlab[littletics],yaxlab[bigtics]),...)
rect(botgap,xtics[y<gap[1]] - halfwidth,y[y<gap[1]],
xtics[y<gap[1]] + halfwidth,col=col[y<gap[1]])
rect(botgap,xtics[bigones] - halfwidth,y[bigones]-gapsize,
xtics[bigones] + halfwidth,col=col[bigones])
axis.break(1,gap[1],style="gap")
}
else {
plot(0,xlim=xlim,ylim=ylim,xlab=xlab,ylab=ylab,axes=FALSE,type="n",...)
plot.lim <- par("usr")
botgap<-ylim[1]
box()
axis(1,at=xtics,labels=xaxlab,...)
axis(2,at=c(ytics[littletics],ytics[bigtics] - gapsize),
labels=c(yaxlab[littletics],yaxlab[bigtics]),...)
rect(xtics[littleones] - halfwidth,botgap,
xtics[littleones] + halfwidth,y[littleones],col=col[littleones])
rect(xtics[bigones] - halfwidth,botgap,xtics[bigones] + halfwidth,
y[bigones]-gapsize,col=col[bigones])
axis.break(2,gap[1],style="gap")
}
invisible(xtics)
}
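# ---- Usage sketch (editor's addition; not part of the plotrix source) ----
# Shows the kind of call this function is designed for: a broken y-axis so two
# widely separated groups stay readable. The data and gap values are arbitrary,
# and the helpers color.gradient()/axis.break() come from the rest of plotrix,
# so the sketch only runs when that package is available.
if (requireNamespace("plotrix", quietly = TRUE)) {
  library(plotrix)
  twogrp <- c(rnorm(10) + 4, rnorm(10) + 20)
  gap.barplot(twogrp, gap = c(8, 16), xlab = "Index",
              ytics = c(3, 6, 17, 20), ylab = "Group values")
}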
|
/primeiroProjetoR/plotrix/R/gap.barplot.R
|
no_license
|
bernardomsvieira/Rproject
|
R
| false | false | 2,321 |
r
|
library(multistate)
### Name: sm4rs
### Title: 4-State Relative Survival Semi-Markov Model with Additive Risks
### Aliases: sm4rs
### Keywords: semi-Markov relative survival
### ** Examples
# import the observed data
# (X=1 corresponds to initial state with a functioning graft, X=2 to acute rejection episode,
# X=3 to return to dialysis, X=4 to death with a functioning graft)
data(dataDIVAT)
# A subgroup analysis to reduce the time needed for this example
dataDIVAT$id<-c(1:nrow(dataDIVAT))
set.seed(2)
d4<-dataDIVAT[dataDIVAT$id %in% sample(dataDIVAT$id, 300, replace = FALSE),]
# import the expected mortality rates
data(fr.ratetable)
# 4-state parametric additive relative survival semi-Markov model including one
# explicative variable (z is the delayed graft function) on the transition from X=1 to X=2
# Note: a semi-Markovian process with sojourn times exponentially distributed
# is a time-homogeneous Markov process
# We only reduced the precision and the number of iterations to save time in this example;
# prefer the default values.
sm4rs(t1=d4$time1, t2=d4$time2, sequence=d4$trajectory, dist=c("E","E","E","E","E"),
ini.dist.12=c(8.34), ini.dist.13=c(10.44), ini.dist.14=c(10.70),
ini.dist.23=c(9.43), ini.dist.24=c(11.11),
cov.12=d4$z, init.cov.12=c(0.04), names.12=c("beta12_z"),
p.age=d4$ageR*365.24, p.sex=d4$sexR,
p.year=as.date(paste("01","01",d4$year.tx), order = "mdy"),
p.rate.table=fr.ratetable, conf.int=TRUE,
silent=FALSE, precision=0.001)
|
/data/genthat_extracted_code/multistate/examples/sm4rs.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 1,506 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sw.R
\name{T68fromT90}
\alias{T68fromT90}
\title{Convert from ITS-90 to IPTS-68 temperature}
\usage{
T68fromT90(temperature)
}
\arguments{
\item{temperature}{Vector of temperatures expressed in the ITS-90 scale.}
}
\value{
Temperature expressed in the IPTS-68 scale.
}
\description{
Today's instruments typically record in the ITS-90 scale, but some old
datasets will be in the IPTS-68 scale. \code{T90fromT68()} converts from the
IPTS-68 to the ITS-90 scale, using Saunders' (1990) formula, while
\code{T68fromT90()} does the reverse. The difference between IPTS-68 and
ITS-90 values is typically a few millidegrees (see \sQuote{Examples}), which
is seldom visible on a typical temperature profile, but may be of interest
in some precise work. Mostly for historical interest, \code{T90fromT48()}
is provided to convert from the ITS-48 system to ITS-90.
}
\examples{
library(oce)
T68 <- seq(3, 20, 1)
T90 <- T90fromT68(T68)
sqrt(mean((T68-T90)^2))
}
\references{
P. M. Saunders, 1990. The international temperature scale of
1990, ITS-90. WOCE Newsletter, volume 10, September 1990, page 10.
(\url{http://www.nodc.noaa.gov/woce/wdiu/wocedocs/newsltr/news10/contents.htm})
}
\seealso{
Other functions that calculate seawater properties: \code{\link{T90fromT48}},
\code{\link{T90fromT68}},
\code{\link{swAbsoluteSalinity}},
\code{\link{swAlphaOverBeta}}, \code{\link{swAlpha}},
\code{\link{swBeta}}, \code{\link{swCSTp}},
\code{\link{swConservativeTemperature}},
\code{\link{swDepth}}, \code{\link{swDynamicHeight}},
\code{\link{swLapseRate}}, \code{\link{swN2}},
\code{\link{swPressure}}, \code{\link{swRho}},
\code{\link{swRrho}}, \code{\link{swSCTp}},
\code{\link{swSTrho}}, \code{\link{swSigma0}},
\code{\link{swSigma1}}, \code{\link{swSigma2}},
\code{\link{swSigma3}}, \code{\link{swSigma4}},
\code{\link{swSigmaTheta}}, \code{\link{swSigmaT}},
\code{\link{swSigma}}, \code{\link{swSoundAbsorption}},
\code{\link{swSoundSpeed}}, \code{\link{swSpecificHeat}},
\code{\link{swSpice}}, \code{\link{swTFreeze}},
\code{\link{swTSrho}},
\code{\link{swThermalConductivity}},
\code{\link{swTheta}}, \code{\link{swViscosity}},
\code{\link{swZ}}
}
\author{
Dan Kelley
}
|
/pkgs/oce/man/T68fromT90.Rd
|
no_license
|
vaguiar/EDAV_Project_2017
|
R
| false | true | 2,283 |
rd
|
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../h2o-runit.R')
test.pub_697_exec_bad_key_name <- function() {
prostatePath = locate("smalldata/prostate/prostate.csv")
prostate.hex = h2o.importFile(path = prostatePath, destination_frame = "prostate.hex")
prostate.local = as.data.frame(prostate.hex)
# Are we in the right universe?
expect_equal(380, dim(prostate.local)[1])
expect_equal(9, dim(prostate.local)[2])
remote = t(prostate.hex$AGE) %*% prostate.hex$CAPSULE
expect_equal(1, dim(remote)[1])
expect_equal(1, dim(remote)[2])
expect_error(t(pub697$AGE) %*% prostate.hex$CAPSULE)
}
doTest("PUB-697 bad key should not cause crash", test.pub_697_exec_bad_key_name)
|
/h2o-r/tests/testdir_jira/runit_pub_697_exec_bad_key_name.R
|
permissive
|
tamseo/h2o-3
|
R
| false | false | 711 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ParetoShrinkage.R
\name{R2_Wherry}
\alias{R2_Wherry}
\title{R2_Wherry function}
\usage{
R2_Wherry(N, p, R2)
}
\arguments{
\item{N}{Sample size}
\item{p}{number of predictors}
\item{R2}{R-squared}
}
\value{
R2_W formula-adjusted R2 based on Wherry (1931) shrinkage formula
}
\description{
Estimate shrunken R2 based on Wherry (1931) formula
}
\examples{
# (1) Sample size
N <- 100
# (2) Number of predictors
p <- 5
# (3) R2 R-squared
R2 <- 0.30
# Estimate shrunken R2
R2_Wherry(N = N, p = p, R2 = R2)
}
|
/man/R2_Wherry.Rd
|
no_license
|
Diversity-ParetoOptimal/ParetoR
|
R
| false | true | 613 |
rd
|
###############################################################################
# #
# execute exp3_bayes_t_priors.R #
# #
###############################################################################
setwd("Documents/wiskunde/2017-2018/bachelor_project/R/handin")
setwd("/media/mynewdrive1/Documenten/Wiskunde/2017-2018/bachelor_project/R/handin")
# libraries
library("arm")
# load functions
source("helpers.R")
source("helpers_CV_bayesglm.R")
# load variables
source("variables_exp3.R")
# Choose what initial betas and sample size to use
## Betas
k = 1
initial_betas[[k]] = c(1,1,1,1,1,1)
## Sample size
l = 1
order_fit <- 2
repeat_cross_val <-1001
df_plot = TRUE; data_plot = TRUE
df_plot = FALSE; data_plot = FALSE
# Execute experiment
for (i in 1:repeat_cross_val){
cross_val_temp <- execute_cross_val_tprior(df = df, x_min = x_min, x_max = x_max, order_sample = order_p, order_fit = order_fit, initial_betas = initial_betas[[k]], sample_size = sample_size[l], orthog =FALSE, df_plot = df_plot, data_plot = data_plot, multiple_samples = FALSE)
#
df_stars[i] <- cross_val_temp[[1]]$df_star
mses[i] <- cross_val_temp[[1]]$df_star_cross_val$mse
mu_news[[i]] <- cross_val_temp[[1]]$df_star_cross_val$mu_new
print(i)
}
# calc out of back mse = test mse using mu_news
i <- 1
test_mses <- c()
for (beta in mu_news[1:1001]) {
#print(beta)
test_mses[i] <- out_of_back_mse(beta_hat = beta, beta_true = c(1,1,1), x_min = x_min, x_max = x_max, n = 100)
i <- i + 1
}
mean(test_mses)
mean(test_mses,trim=0.1)
hist(test_mses)
hist(test_mses,breaks=10000,main="",xlab="MSE",ylab="Frequentie")
hist(sort(test_mses)[1:(0.8*length(test_mses))],breaks=10,main="",xlab="MSE",ylab="Frequentie")
# Visualise experiment
hist(df_stars,main="",xlab="df",ylab="Frequentie")
plot(df_stars,mses)
plot(df_stars)
hist(mses,breaks =50,main="",xlab="MSE",ylab="Frequentie")
# numbers
mean(df_stars)
mean(mses)
mean(mses,trim=0.1)
mu_1 <- 0
mu_2 <- 0
mu_3 <- 0
mu_4 <- 0
mu_5 <- 0
mu_6 <- 0
for (j in 1:length(mu_news)){
mu_1 <- mu_1 + mu_news[[j]][1]
mu_2 <- mu_2 + mu_news[[j]][2]
mu_3 <- mu_3 + mu_news[[j]][3]
mu_4 <- mu_4 + mu_news[[j]][4]
mu_5 <- mu_5 + mu_news[[j]][5]
mu_6 <- mu_6 + mu_news[[j]][6]
}
mu_1_mean <- mu_1 / length(mu_news)
mu_2_mean <- mu_2 / length(mu_news)
mu_3_mean <- mu_3 / length(mu_news)
mu_4_mean <- mu_4 / length(mu_news)
mu_5_mean <- mu_5 / length(mu_news)
mu_6_mean <- mu_6 / length(mu_news)
mu_1_mean
mu_2_mean
mu_3_mean
mu_4_mean
mu_5_mean
mu_6_mean
mu_news_mean <- c(mu_1_mean,mu_2_mean,mu_3_mean,mu_4_mean,mu_5_mean,mu_6_mean)
# bias?!
verschil1 <- abs(c(1,1,1,1,1,1)-c(mu_1_mean,mu_2_mean,mu_3_mean,mu_4_mean,mu_5_mean,mu_6_mean))
bias_sq <- mean(verschil1)**2
bias_sq
verschil2 <- 0
var1 <- 0
for (beta in mu_news[1:1000]){
# bias
#verschil2 <- verschil2 + abs(c(beta[1,1],beta[2,1],beta[3,1]) - c(1,1,1))
verschil2 <- verschil2 + abs(c(beta[1],beta[2],beta[3]) - c(1,1,1))
#print(verschil2)
# var
#var1 <- var1 + sum((mu_news_mean - beta)**2)
}
bias_sq2 <- mean(verschil2 / 1000)**2
bias_sq2
var1 <- var1 / 1000
var1
# Var?
var1 <- mean(mean(mu_news)-mu_news)
|
/R/exp3_bayes_tpriors.R
|
no_license
|
StudentThom/handin_bachelor_project
|
R
| false | false | 3,190 |
r
|
#######################
### Meta-analysis: correlations
# by Julien P. Irmer
## Preparation
library(metafor)
## Get an overview of the data set
head(dat.molloy2014)
summary(dat.molloy2014$ri)
## Graphical illustration of the relationship between medication adherence and conscientiousness
boxplot(dat.molloy2014$ri)
## Fisher's z transformation
data_transformed <- escalc(measure="ZCOR", # z transformation
                           ri=ri, # observed correlation coefficients
                           ni=ni, # sample size per study
                           data=dat.molloy2014, # data set
                           var.names = c("z_ri", "v_ri")) # names of the variables to be created
head(data_transformed)
data_transformed_2 <- escalc(measure="ZCOR", # z transformation
                             ri=dat.molloy2014$ri, # observed correlation coefficients
                             ni=dat.molloy2014$ni, # sample size per study
                             var.names = c("z_ri", "v_ri")) # names of the variables to be created
head(data_transformed_2)
data_transformed$v_ri[1:4] # look at the first 4 entries
1/(dat.molloy2014$ni - 3)[1:4]
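# (Editor's note, added) escalc()'s "ZCOR" measure is Fisher's z = atanh(r) with
# sampling variance 1/(n - 3); a quick numerical check of the first identity:
all.equal(as.numeric(data_transformed$z_ri), atanh(dat.molloy2014$ri))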
plot(x = data_transformed$ri, y = data_transformed$z_ri,
     xlab = "r", ylab = "z",
     main = "Fisher's z-Transformation")
## Random Effects Model
REM <- rma(yi = z_ri, vi = v_ri, data=data_transformed)
summary(REM)
REM$b # pooled (mean) estimate b
REM$tau2 # tau²
predict(REM, transf=transf.ztor) # back-transformation
pred_REM <- predict(REM, transf=transf.ztor)
names(pred_REM)
pred_REM$pred # back-transformed pooled correlation coefficient
## Further moderators and psychometric meta-analyses
df <- data.frame(r = c(0.3, 0.3, 0.5, 0.4),
                 RelX = c(0.6, 0.8, 1, 1),
                 RelY = c(0.5, 0.7, 0.8, 1),
                 n = c(65, 65, 34, 46))
head(df)
df$r_correct <- df$r/sqrt(df$RelX*df$RelY) # correction for attenuation
head(df)
|
/content/post/KliPPs_MSc5a_R_Files/8_meta-analyse_korrelationen_RCode.R
|
no_license
|
martscht/projekte
|
R
| false | false | 2,090 |
r
|
#######################################################################################
#
# This file is Question5.R
# The purpose is to address the fifth question on the merged data.
# "Cut the GDP rankings into 5 separate quantile groups."
# "Make a table versus Income Group."
# "How many countries are Lower middle income but among the 38 nations with
# highest GDP?"
#
#######################################################################################
#######################################################################################
# Create a new variable for the GDP Group making sure it is the right data type
# Stick it on the end of the mergedReducedSorted data frame, optionally, check
# Populate it appropriately
#######################################################################################
GDPGroup <- numeric(nrow(mergedReducedSorted))
mergedReducedSorted <- cbind(mergedReducedSorted,GDPGroup)
if (debug == 1) {
str(mergedReducedSorted)
}
for (i in 1:nrow(mergedReducedSorted)) {
if (mergedReducedSorted$GDPRanking[i] <= nrow(mergedReducedSorted)/5) {
mergedReducedSorted$GDPGroup[i] = 1
} else if ( (mergedReducedSorted$GDPRanking[i] > nrow(mergedReducedSorted)/5) && (mergedReducedSorted$GDPRanking[i] <= 2*nrow(mergedReducedSorted)/5) ) {
mergedReducedSorted$GDPGroup[i] = 2
} else if ( (mergedReducedSorted$GDPRanking[i] > 2*nrow(mergedReducedSorted)/5) && (mergedReducedSorted$GDPRanking[i] <= 3*nrow(mergedReducedSorted)/5) ) {
mergedReducedSorted$GDPGroup[i] = 3
} else if ( (mergedReducedSorted$GDPRanking[i] > 3*nrow(mergedReducedSorted)/5) && (mergedReducedSorted$GDPRanking[i] <= 4*nrow(mergedReducedSorted)/5) ) {
mergedReducedSorted$GDPGroup[i] = 4
} else if (mergedReducedSorted$GDPRanking[i] > 4*nrow(mergedReducedSorted)/5) {
mergedReducedSorted$GDPGroup[i] = 5
}
}
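#######################################################################################
# Alternative sketch (not part of the original analysis): assuming GDPRanking holds the
# ranks 1..nrow(), the loop above is equivalent to one vectorised assignment:
# mergedReducedSorted$GDPGroup <- ceiling(mergedReducedSorted$GDPRanking /
#                                         (nrow(mergedReducedSorted) / 5))
#######################################################################################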
#######################################################################################
# Make the requested table
# Create a vector of the Lower middle income GDP rankings
# Determine (programmatically) how many Lower middle income countries are in the top 38
#######################################################################################
message ("In the table below, the GDP quantiles are rows indicated by the numbers 1 through 5 on the left. The Income Groups are listed across the top. The elements of the table indicate number of countries corresponding to the GDP quantile and Income Group." )
table(mergedReducedSorted$GDPGroup, mergedReducedSorted$IncomeGroup)
TopLMI <- mergedReducedSorted[2][mergedReducedSorted[5]== "Lower middle income"]
message ("The number of Lower middle income countries in the top 38 GDP are ", sum(TopLMI < 39) )
#######################################################################################
# The requested table shows 4 but I reported 5
# The reason is the cutoff. There were 189 countries (not 190), 189/5 = 37.8
# So, my highest group had only 37 countries. The 38th was a Lower middle income
# The table went through 37 while the question asked through 38
#######################################################################################
|
/Analysis/Question5.R
|
no_license
|
bgobran/CaseStudy1FinalVersion
|
R
| false | false | 3,259 |
r
|
#' Print DataM Object
#'
#' Modifies the "print" function to take objects of class \code{DataM} (or any of its subclasses) and print out a matrix where the first column is the dependent variable and the remaining columns are the independent variables.
#'
#' @param DataM An object of class DataM
#'
#' @author Thomas Carroll: \email{thomasscarroll89@gmail.com}
#' @rdname print
#' @export
setMethod("print",
signature(x="DataM"),
function(x, ...){
print(cbind(x@depvar, x@covariates))
}
)
getMethod("print", signature="DataM")
|
/MyPackage/R/print-mod.R
|
no_license
|
thomasscarroll89/RPackageProblemSet
|
R
| false | false | 573 |
r
|
testlist <- list(data = structure(c(6.53867576132537e+286, 6.53867576126997e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576130081e+286, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 8L)), q = 0)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result)
|
/biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610554326-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 713 |
r
|
#Read the two files
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
str(NEI)
library(ggplot2)
library(plyr)
#Retain just the Baltimore City data
NEI_Baltimore <- NEI[NEI$fips == "24510",]
#Convert type variable to a factor
NEI_Baltimore$type <- as.factor(NEI_Baltimore$type)
#Aggregate emission data by year
NEI_yearem_Baltimore <- ddply(NEI_Baltimore, .(type, year), summarize, Emissions = sum(Emissions))
NEI_yearem_Baltimore$Pollutant_type <- NEI_yearem_Baltimore$type
#Set margins
par("mar" = c(4,6,4,4))
#Create the plot
qplot(x = year, y = Emissions, data = NEI_yearem_Baltimore, group = Pollutant_type, color = Pollutant_type, geom = c("point", "line"), xlab = "Year",
ylab = "Total" ~ PM[2.5] ~"Emissions", main = "Total" ~ PM[2.5] ~"Emissions for Baltimore by Pollutant Type")
#Save the plot as a png
dev.copy(png, file = "plot3.png")
dev.off()
|
/Exploratory_Data_Analysis_Assignment2/plot3.R
|
no_license
|
sharathlives/JohnHopkins_Coursera_Exploratory_Data_Analysis
|
R
| false | false | 905 |
r
|
library(lattice)
extract_chrom <- function(t, thisdata, productmz, extraction_window=0.05)
{
this_spectrum = subset(thisdata, SEC == t)
return(sum(subset(this_spectrum, MZ > productmz-(extraction_window/2) & MZ < productmz+(extraction_window/2))$INT))
}
graphme <- function(xxp,allmx){
xxp <- xxp[length(xxp):1]
allmx <- allmx[allmx$MZ > 400,]
sum(is.element(allmx$label,xxp))
allmx <- allmx[is.element(allmx$label,xxp),]
print(dim(allmx))
allmx$MZ <- as.factor(allmx$MZ)
return(allmx)
}
irt2rt <- function(x,c=2148.68,m=33.87) {
return(m*x+c)
}
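# Quick check with the default coefficients (illustrative): irt2rt(49.4) = 33.87*49.4 + 2148.68 ≈ 3821.9 s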
plotgraph <- function(assay_irt,background,rt_extraction_window=180) {
txtfiles <- dir(pattern=glob2rx(paste("*",background,"*","._chrom.mzML.dta2d",sep="")))
rawdata <- list()
for(i in 1:length(txtfiles))
{
rawdata[[i]] <- read.csv(txtfiles[i], sep="\t")
names(rawdata[[i]])<-c("SEC","MZ","INT")
}
# use this code to extract chromatograms
# data <- list()
# for(i in 1:length(txtfiles))
# {
# df<-data.frame()
# for(j in 1:length(productmz)) {
# dfj <- data.frame("INT" = sapply( unique(rawdata[[i]]$SEC), extract_chrom, thisdata=rawdata[[i]], productmz=productmz[j]), "SEC"=unique(rawdata[[i]]$SEC))
# dfj$MZ <- rep(productmz[j],dim(dfj)[1])
# df<-rbind(df,dfj)
# }
# data[[i]] = df
# }
data<-rawdata
xx <- c("x512","x256","x128","x064","x032","x016","x008","x004","x002","x001")
length(xx)
allm <- NULL
label <- NULL
for(i in 1:10){
allm <- rbind(allm,data[[i]])
labelt <- rep(xx[i],dim(data[[i]])[1])
label <- c(label, labelt)
}
allm <- cbind(label, allm)
allm <- data.frame(as.factor(allm$label), as.numeric(allm$SEC), as.numeric(allm$MZ), as.numeric(allm$INT))
colnames(allm) <- c("label","SEC","MZ","INT")
colnames(allm)
allm$label[1:10]
xxs <- c("x512","x256","x128","x064","x032","x016","x008","x004","x002","x001")
allmx <- allm
if (background=="human") {
irt<-irt2rt(assay_irt[[1]],1687.64,33.61)
}
else if (background=="yeast") {
irt<-irt2rt(assay_irt[[1]],2105.2,34.27)
}
else if (background=="no_background") {
irt<-irt2rt(assay_irt[[1]],2150.32,35.05)
}
pdf(file=paste(names(assay_irt)[[1]],"_",background,".pdf",sep=""),width=6, height=length(xxs)*1.5)
print(xyplot(INT ~ SEC | label ,data=subset(allmx,SEC >= irt-rt_extraction_window & SEC <= irt+rt_extraction_window),type="l",xlim=c(irt-rt_extraction_window,irt+rt_extraction_window),scales=list(y=list(relation="free", cex=0.7,rot=45)),groups=MZ,layout=c(1,length(xxs)),xlab="RT [s]", ylab="INT",as.table=TRUE))
dev.off()
}
background<-list("water"="no_background","yeast"="yeast","human"="human")
assays<-list("VGDTVLYGK"=3.7,"IADIQLEGLR"=49.4,"TGGDEFDEAIIK"=40.8,"LITVEGPDGAGK"=10.9,"LVDEEGNDVTPEK"=-5.1)
assay_irt<-assays[tail(strsplit(getwd(),"/")[[1]],n=1)]
for(j in 1:length(background)) {
plotgraph(assay_irt,background[[j]])
}
|
/analysis/scripts/plotChrom.R
|
permissive
|
msproteomicstools/msproteomicstools
|
R
| false | false | 2,920 |
r
|
kurtosis <-
function(x) {
x<-na.omit(x)
n<-length(x)
suma<-sum((x-mean(x))^4)/(var(x))^2
k <- n*(n+1)*suma/((n-1)*(n-2)*(n-3)) - 3*(n-1)^2/((n-2)*(n-3))
return(k)
}
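# Minimal usage sketch (illustrative, not part of the package source): this returns the
# sample excess kurtosis, so values near 0 are expected for roughly normal data, e.g.
# kurtosis(rnorm(1000))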
|
/R/kurtosis.R
|
no_license
|
cran/agricolae
|
R
| false | false | 173 |
r
|
library(shiny)
library(gapminder)
library(dplyr)
library(plotly)
library(ggplot2)
server <- function(input, output){
rGDP <- reactive({ input$GDP })
rContinent <- reactive({ input$Continent})
output$scatterPlot <- renderPlot({
ggplot(subset(gapminder, continent == rContinent() & gdpPercap >= rGDP()),
aes(x = gdpPercap, y = lifeExp, z = pop)) + geom_point() +
geom_smooth(method=lm, color = "darkred") +
labs(x = "GDP per Capita", y = "Life Expectancy")
})
output$timePlot <- renderPlot({
dataT<- subset(gapminder,continent == rContinent() & gdpPercap >= rGDP())
ggplot(dataT, aes(x = year, y = lifeExp, color = country)) +
geom_line(lwd = 1, show.legend = TRUE) + facet_wrap(~ continent) +
scale_color_manual(values = country_colors)
})
output$boxPlot <- renderPlot({
ggplot(subset(gapminder, continent == rContinent() & gdpPercap >= rGDP()),
aes(x = country, y = lifeExp)) + geom_boxplot() + coord_flip()
})
output$table <- renderTable({
subset((gapminder), continent == rContinent() & gdpPercap >= rGDP())
})
}
|
/server.R
|
no_license
|
brianmblakely/DataProduct
|
R
| false | false | 1,149 |
r
|
#include <AudioUnit/AudioUnit.r>
#include "FullBacanoVersion.h"
// Note that resource IDs must be spaced 2 apart for the 'STR ' name and description
#define kAudioUnitResID_FullBacano 1000
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FullBacano~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#define RES_ID kAudioUnitResID_FullBacano
#define COMP_TYPE kAudioUnitType_Effect
#define COMP_SUBTYPE FullBacano_COMP_SUBTYPE
#define COMP_MANUF FullBacano_COMP_MANF
#define VERSION kFullBacanoVersion
#define NAME "Activata: FullBacano"
#define DESCRIPTION "FullBacano 1.0"
#define ENTRY_POINT "FullBacanoEntry"
#include "AUResources.r"
|
/FullBacano/FullBacano/FullBacano.r
|
no_license
|
activata/FullBacano
|
R
| false | false | 641 |
r
|
#' Model Playground (Gadget) UI Function
#'
#' @param id, character used to specify namespace, see \code{shiny::\link[shiny]{NS}}
#'
#' @importFrom shiny tagList
#'
#' @return a \code{shiny::\link[shiny]{tag}} containing UI elements
#'
#' @export
patientGraphUI <- function(id) {
ns <- shiny::NS(id)
bs4Dash::bs4Card(
title = "Gadget Playground",
elevation = 3,
width = 12,
closable = FALSE,
collapsible = FALSE,
headerBorder = FALSE,
    style = 'padding: 0px;'
)
}
#' Graph Output Server Function
#'
#' @param input Shiny inputs.
#' @param output Shiny Outputs.
#' @param session Session object.
#'
#' @return list with following components
#' \describe{
#' \item{xvar}{reactive character string indicating x variable selection}
#' \item{yvar}{reactive character string indicating y variable selection}
#' }
#'
#' @export
patientGraph <- function(input, output, session) {
ns <- session$ns
output$patient_graph <- shiny::renderUI({
# fluidRow(
# column(
# width = 6,
# style = 'padding:0px;'
# #uiOutput("graph_box")
# )
# )
    ## Add David's gadget
#tags$iframe(src="https://cardiomodel.shinyapps.io/gadget/", height=600, width=535)
#tagList(
# bs4Card(
# withSpinner(
# plotlyOutput(
# "plot_node",
# height = "500px",
# width = "100%"
# ),
# size = 2,
# type = 8,
# color = "#000000"
# )
# )
#)
})
}
|
/R/gadget.R
|
no_license
|
ddezel/CardioResp
|
R
| false | false | 1,544 |
r
|
##First all data is read and then a subset is taken.
Dataset<-read.table("household_power_consumption.txt", header = TRUE, sep=";", na.strings = "?")
Dataset<-subset(Dataset, Date=="2/2/2007"|Date=="1/2/2007")
#Extra column created pasting date and time together
Dataset$DateTime <-paste(Dataset$Date, Dataset$Time)
png("plot2.png") #Initiate plot
plot(strptime(Dataset$DateTime, "%d/%m/%Y %H:%M:%S"), Dataset$Global_active_power,
xlab="",
ylab = "Global Active Power (kilowatts)",
type = "l")
dev.off() #Close plot
|
/plot2.R
|
no_license
|
FlorienM/ExData_Plotting1
|
R
| false | false | 556 |
r
|
library(staRdom)
### Name: abs_fit_slope
### Title: Fit absorbance data to exponential curve. 'drm' is used for the
### fitting process.
### Aliases: abs_fit_slope
### ** Examples
data(abs_data)
abs_fit_slope(abs_data$wavelength,abs_data$sample1,lim=c(350,400),l_ref=350)
|
/data/genthat_extracted_code/staRdom/examples/abs_fit_slope.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 281 |
r
|
makeCacheMatrix <- function(x = matrix()) {
invrs <- NULL
setorig <- function(y) {
x <<- y
invrs <<- NULL
}
getorig <- function() x
setinversevalue <- function(inverse) invrs <<- inverse
getinversevalue <- function() invrs
list(set = setorig,
get = getorig,
setinverse = setinversevalue,
getinverse = getinversevalue)
}
cacheSolve <- function(x, ...) {
invrs <- x$getinverse()
if (!is.null(invrs)) {
message("Inverse is already caluculated before and is cached")
return(invrs)
}
data <- x$get()
invrs <- solve(data, ...)
x$setinverse(invrs)
invrs
}
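## Example usage (illustrative only):
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(m) # computes the inverse and caches it
## cacheSolve(m) # returns the cached inverse and prints the message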
|
/cachematrix.R
|
no_license
|
manjuvegesna/ProgrammingAssignment2
|
R
| false | false | 649 |
r
|
\name{Zimmerman}
\alias{Zimmerman}
\docType{data}
\title{Stand Your Ground Simpson's Paradox }
\description{
Data from 220 cases in Florida where a "Stand your ground" defense was used.
}
\format{
A data frame with 220 observations on the following 5 variables.
\describe{
\item{\code{Convicted}}{Was the defendant Convicted? (\code{No} or \code{Yes})}
\item{\code{IndWhiteVictim}}{Was the victim white? (\code{1}=yes or \code{0}=no)}
\item{\code{IndWhiteDefendant}}{Was the defendant white? (\code{1}=yes or \code{0}=no)}
\item{\code{VictimRace}}{Race of the victim (\code{Minority} or \code{White})}
\item{\code{DefendantRace}}{Race of the defendant (\code{Minority} or \code{White})}
}
}
\details{
Inspired by the Travon Martin case, combined fatal and non-fatal cases of assault in Florida for which the defendant used the Stand Your Ground law in defense. These data show Simpson's Paradox. Race of the victim is more important than race of the defendant.
}
\source{
Data from Tampa Bay Times, male plus female cases, as of 2/8/15 -- final posted data
http://www.tampabay.com/stand-your-ground-law/nonfatal-cases
http://www.tampabay.com/stand-your-ground-law/fatal-cases
}
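\examples{
# Illustrative only: assumes the Zimmerman data set ships with the package.
# Tabulating convictions by victim and defendant race shows the pattern
# described in Details.
data(Zimmerman)
table(Zimmerman$Convicted, Zimmerman$VictimRace)
table(Zimmerman$Convicted, Zimmerman$DefendantRace)
}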
\keyword{datasets}
|
/man/Zimmerman.Rd
|
permissive
|
tessington/qsci381
|
R
| false | false | 1,223 |
rd
|
#' Estimates principal component functions by computing eigenfunctions of the covariance function
#'
#' Estimates principal component functions by computing eigenfunctions of the covariance function
#'
#' @param dat functional data set that can be passed to \code{ssfcov2::estimate_cov_function()}. See documentation for details.
#' @param n.marginal.knots number of knot locations to use on the marginal domain. The number of knot locations actually used in estimation will be n^2 on the product domain.
#' @return list containing first two principal component functions
fpca_ss <- function(dat, n.marginal.knots=NULL, marginal.knots=NULL){
cov.est <- estimate_cov_function(dat, n.marginal.knots = n.marginal.knots, marginal.knots=marginal.knots)
eig.est <- estimate_eigenfunctions(cov.est)
fpc1 <- extract_pcf(nharm = 1, method = 'ss', eig.est)
fpc2 <- extract_pcf(nharm = 2, method = 'ss', eig.est)
return(list(fpc1 = fpc1, fpc2 = fpc2))
}
|
/R/fpca_ss.R
|
no_license
|
dan410/SimStudy_eigenfunction_estimation
|
R
| false | false | 955 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract.R
\name{extract_1d}
\alias{extract_1d}
\title{Extract 1d Values}
\usage{
extract_1d(core_table = NULL, input = NULL, data_location = NULL)
}
\arguments{
\item{core_table}{the core table from make_core}
\item{input}{the HIC code for the variable of interest}
\item{data_location}{the column name that stores the primary data for this
variable}
}
\value{
a tibble with HIC data for a specified variable
}
\description{
This function extracts the correct column from the CC-HIC database
depending upon what type of data is called for
}
|
/man/extract_1d.Rd
|
no_license
|
CC-HIC/inspectEHR
|
R
| false | true | 621 |
rd
|
main <- function() {
library(sqldf)
data <- read.csv.sql("household_power_consumption.txt", sql = "select * from file where Date = '1/2/2007' OR Date = '2/2/2007'", eol = "\n", header = TRUE, sep = ";")dat$DateTime <- strptime(paste(dat$Date, dat$Time), "%d/%m/%Y %H:%M")
data$DateTime <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
png(filename = "plot2.png")
  plot(data$DateTime, data$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
}
|
/plot2.R
|
no_license
|
pnwhitney/ExData_Plotting1
|
R
| false | false | 525 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot_util.R
\name{geom_txt}
\alias{geom_txt}
\title{geom_txt}
\usage{
geom_txt(..., family = theme_get()$text$family, size = 3,
colour = "#2b2b2b")
}
\arguments{
\item{...}{Passed to \code{geom_text}.}
\item{family}{Font family. Defaults to theme-defined family.}
\item{size}{Font size. Defaults to 3.}
\item{colour}{Font colour. Defaults to \code{#2b2b2b}}
}
\description{
Helper for \code{geom_text} with some defaults.
}
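\examples{
# Illustrative sketch only (assumes ggplot2 is attached and the package is loaded):
# library(ggplot2)
# ggplot(mtcars, aes(wt, mpg, label = rownames(mtcars))) + geom_txt()
}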
|
/man/geom_txt.Rd
|
no_license
|
arbelt/azwmisc
|
R
| false | true | 510 |
rd
|
library(testthat)
library(BrokenAdaptiveRidge)
test_check("BrokenAdaptiveRidge")
|
/tests/testthat.R
|
permissive
|
yuxitian/BrokenAdaptiveRidge
|
R
| false | false | 82 |
r
|
library(shiny)
CohortEffect <- function(x1,
x2,
min.meaningful.effect) {
dat <- data.frame(y=c(x1,x2),
d2=c(rep(0, length(x1)), rep(1, length(x2))))
res <- lm(y ~ d2, data=dat)
coefs <- summary(res)$coefficients
effect.mean <- coefs[2,1]
effect.sd <- coefs[2,2]
xmin <- min(c(-min.meaningful.effect, effect.mean - 3*effect.sd))
xmax <- max(c(min.meaningful.effect, effect.mean + 3*effect.sd))
ymax <- max(dnorm(0, sd=effect.sd))
prob.near.zero <- pnorm(min.meaningful.effect, mean=effect.mean, sd=effect.sd) -
pnorm(-min.meaningful.effect, mean=effect.mean, sd=effect.sd)
prob.positive <- 1 - pnorm(min.meaningful.effect, mean=effect.mean, sd=effect.sd)
prob.negative <- pnorm(-min.meaningful.effect, mean=effect.mean, sd=effect.sd)
return(list(effect.mean=effect.mean,
effect.sd=effect.sd,
min.meaningful.effect=min.meaningful.effect,
plot.xmin=xmin,
plot.xmax=xmax,
plot.ymax=ymax,
prob.near.zero=prob.near.zero,
prob.positive=prob.positive,
prob.negative=prob.negative,
n=length(x1)+length(x2)))
}
PlotCohortEffect <- function(cohort.effect,
col="RoyalBlue",
xlab="seconds") {
plot(0,0, type='n',
main=paste("Cohort Effects:", ifelse(cohort.effect$effect.mean > 0, "Comparison", "Baseline"), "Cohort Did Better"),
sub=paste("n =", cohort.effect$n, " mean =", signif(cohort.effect$effect.mean, 2)),
xlim=c(cohort.effect$plot.xmin, cohort.effect$plot.xmax),
ylim=c(0, cohort.effect$plot.ymax),
xlab=xlab,
ylab="",
yaxt='n')
d1 <- function(x) dnorm(x, mean=cohort.effect$effect.mean, sd=cohort.effect$effect.sd)
polygon(x=c(cohort.effect$plot.xmin,
seq(from=cohort.effect$plot.xmin, to=cohort.effect$plot.xmax, length.out=101),
cohort.effect$plot.xmax),
y=c(0, d1(seq(from=cohort.effect$plot.xmin, to=cohort.effect$plot.xmax, length.out=101)), 0),
col=col)
curve(d1,
from=cohort.effect$plot.xmin,
to=cohort.effect$plot.xmax,
add=TRUE,
lwd=2)
abline(v=c(-cohort.effect$min.meaningful.effect, cohort.effect$min.meaningful.effect),
lwd=2,
lty=2)
  legend(.98 * cohort.effect$plot.xmin + .02 * cohort.effect$plot.xmax,
.8*cohort.effect$plot.ymax,
c(paste("positive: ", round(100*cohort.effect$prob.positive), "%", sep=""),
paste("near zero: ", round(100*cohort.effect$prob.near.zero), "%", sep=""),
paste("negative: ", round(100*cohort.effect$prob.negative), "%", sep="")))
}
ObservationsNeeded <- function(x1, x2, min.meaningful.effect, confidence) {
stopifnot(confidence > .5)
y <- c(x1, x2)
x <- c(rep(0, length(x1)), rep(1, length(x2)))
res <- lm(y ~ x)
post.mean <- summary(res)$coefficients[2,1]
data.sd <- summary(res)$coefficients[2,2] * sqrt(length(x1) + length(x2))
#return(paste("data.sd:", data.sd))
#return(paste("post.mean:", post.mean))
#return(paste("confidence:", confidence))
if(post.mean > min.meaningful.effect) {
# case: positive effect
# return("DEBUG: positive effect")
f <- function(N) pnorm((post.mean - min.meaningful.effect) * sqrt(N) / data.sd) - confidence / 100
} else if(post.mean < -min.meaningful.effect) {
# case: negative effect
# return("DEBUG: negative effect")
f <- function(N) pnorm((-min.meaningful.effect - post.mean) * sqrt(N) / data.sd) - confidence / 100
} else {
# case: no meaningful effect
# return("DEBUG: no meaningful effect")
f <- function(N) pnorm((min.meaningful.effect - post.mean) * sqrt(N) / data.sd) -
pnorm((-min.meaningful.effect - post.mean) * sqrt(N) / data.sd) - confidence / 100
}
  root.res <- tryCatch(uniroot(f, interval=c(2, 1e10)), error=function(e) NULL)
  if(is.null(root.res)) return("Unable to estimate number of needed observations.")
if(root.res$estim.prec > 1e-3) {
warning("Unable to estimate number of needed observations")
return(Inf)
} else {
return(ceiling(root.res$root))
}
}
ObservationsNeededMessage <- function(current.obs, needed.obs, desired.certainty) {
if(current.obs >= needed.obs) {
return(paste("You (should) already have already exceeded ", desired.certainty, "% certainty. If not, you're very close.", sep=""))
} else {
return(paste("If the mean so far is correct, you will need a total of ", needed.obs,
" (", needed.obs-current.obs, " additional) observations to reach ",
desired.certainty, "% certainty.",
sep=""))
}
}
ReadX <- function(start.date, end.date, cohort) {
x <- read.table(
paste("http://172.31.2.98/shiny-data/ab-data.php?start=", start.date,
"&end=", end.date,
"&cohort=", cohort, sep=""))[,1]
x <- x[x < 60 * 60 * 24]
return(x)
}
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
output$obs.needed <- renderText({
# Read the data
x1 <- read.delim('data1.tsv')[,1]
x2 <- read.delim('data2.tsv')[,1]
needed.obs <- ObservationsNeeded(x1, x2, input$min.meaningful.effect, input$confidence)
ObservationsNeededMessage(current.obs=length(x1) + length(x2),
                              needed.obs=needed.obs,
desired.certainty=input$confidence)
})
output$distPlot <- renderPlot({
# Read the data
x1 <- read.delim('data1.tsv')[,1]
x2 <- read.delim('data2.tsv')[,1]
ce <- CohortEffect(x1, x2, input$min.meaningful.effect)
PlotCohortEffect(ce)
})
# output$densityPlot <- renderPlot({
# # Read the data
# x1 <- ReadX(input$start.date, end.date=input$end.date, cohort=input$cohort1)
# x2 <- ReadX(input$start.date, end.date=input$end.date, cohort=input$cohort2)
#
# plot(1, 1,
# type='n',
# xlim=c(min(c(x1, x2)), max(c(x1, x2))),
# ylim=c(0, max(c(density(x1)$y, density(x2)$y))),
# main="Distributions of times for the Cohorts",
# xlab="seconds",
# ylab="")
# lines(density(x1), lwd=3)
# lines(density(x2), lwd=3, lty=2)
# legend(.6 * max(c(x1, x2)),
# max(c(density(x1)$y, density(x2)$y)),
# legend=paste("cohort", c(input$cohort1, input$cohort2)),
# lwd=3, lty=1:2)
# })
})
|
/demo/ab/server.R
|
no_license
|
shaptonstahl/abtest
|
R
| false | false | 6,607 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom.mean.R
\name{geom.mean}
\alias{geom.mean}
\title{Geometric Mean}
\usage{
geom.mean(x)
}
\arguments{
\item{x}{a numeric vector for which geometric mean computations shall be performed.}
}
\description{
This function computes the geometric mean of a numeric input vector \code{x}.
}
\examples{
x <- 1:10
geom.mean(x)
}
\author{
Hajk-Georg Drost
}
|
/man/geom.mean.Rd
|
no_license
|
AcaDemIQ/myTAI
|
R
| false | true | 431 |
rd
|
# #Upload to Shiny IO
# install.packages('rsconnect')
# library(rsconnect)
#install.packages(c('shiny','DT','ggplot2','purrr','dplyr','corrplot','plotly','randomForest'))
#
# rsconnect::setAccountInfo(name='chrisedstrom', token='88D536E091400263E74E70661483E0F1', secret='OYmlIwBg/jZdy9htFnQf8Kgex0crEwWkYFQKLOf4')
# rsconnect::deployApp('C:\\Users\\Chris\\Desktop\\Shiny')
# deployApp()
#Load libraries
library(Boruta)
library(corrplot)
library(dplyr)
library(DT)
library(ggplot2)
library(plotly)
library(purrr)
library(randomForest)
library(rpart)
library(rpart.plot)
library(shiny)
library(shinydashboard)
# Load data and create variable converting it to numeric for correlation
df <-read.csv(file="C:\\Users\\cyberguerra\\Desktop\\HR_Cat.csv",
header = T,
check.names=F,
fileEncoding="UTF-8-BOM")
#df<-read.csv("./HR_Cat.csv")
#########################################################################################################
ui <- dashboardPage(
dashboardHeader(title = "Simple Descriptive Analytics"),
dashboardSidebar(
fileInput("file1", "Choose CSV File",
accept = c(
"text/csv",
"text/comma-separated-values,text/plain",
".csv")),
# Horizontal line ----
tags$hr(),
# Input: Select what to display
selectInput("dataset",
"Data:",
choices =list(df="df",uploaded_file = "inFile"),
selected=NULL)),
dashboardBody(
# Output: Tabset w/ correlation, summary, and table ----
tabsetPanel(type = "tabs",
tabPanel("Data",DT::dataTableOutput("table"),
style = "overflow-y: scroll;overflow-x: scroll;"),
tabPanel("Summary",
verbatimTextOutput("summary")),
tabPanel("Correlation",
verbatimTextOutput("selection"),
plotlyOutput("heat"),
plotlyOutput("scatterplot")),
tabPanel("Graphs",
sidebarPanel(
selectInput("plot.type",
"Plot Type:",
list(histogram = "histogram",
boxplot = "boxplot",
density = "density",
bar = "bar")),
checkboxInput("show.points",
"show points for Boxplot",
TRUE),
selectInput('xcol', 'Dependent Variable (y)', "",
selected = ""),
selectInput("ycol", 'Independent Variable (X)',
choices = NULL)),
uiOutput("graph")),
tabPanel("Random Forest",
sidebarPanel(
# Input: Select number of trees for Random Forest
sliderInput("ntree", "Number of Trees (ntree):",
min = 1,
max = 100,
value = 1),
# Input: Select number of variables to consider
sliderInput("mtry", "Number of Variables (mtry):",
min = 1,
max = 100,
value = 1,
step=1),
# Input: Select number of variables to consider
sliderInput("nodesize",
"Number of Branches (nodesize):",
min = 1,
max = 100,
value = 1,
step=1)),
mainPanel(
plotOutput("varImpPlot"),
verbatimTextOutput("rf",
placeholder = TRUE))),
tabPanel("Decision Tree",
sidebarPanel(
# Input: Select number of variables to consider
sliderInput("minsplit",
"Minimum Number of Observations (minsplit):",
min = 1,
max = 100,
value = 1,
step=1),
# Input: Select number of nodes
sliderInput("maxdepth",
"Tree Depth (maxdepth):",
min = 1,
max = 100,
value = 1,
step=1)),
mainPanel(plotOutput("decisiontree"),
verbatimTextOutput("DTsum",
placeholder = TRUE))))
)
)
##############################################################################################################
##############################################################################################################
server <- function(input, output, session) {
datasetInput <- reactive({
switch(input$dataset,
"df" = df,
"inFile" = read.csv(input$file1$datapath,fileEncoding="UTF-8-BOM"))
})
#update group and variables based on the data
observe({
if(!exists(input$dataset)) return() #make sure upload exists
var.opts<-colnames(get(input$dataset))
updateSelectInput(session,
"ycol",
choices = var.opts)
updateSelectInput(session,
"xcol",
choices = var.opts)
updateSliderInput(session, 'mtry',
max = ncol(datasetInput())-1)
updateSliderInput(session, 'maxdepth',
max = ncol(datasetInput())-2)
})
output$graph <- renderUI({
plotOutput("p")
})
#Get data object
get_data<-reactive({
if(!exists(input$dataset)) return() # if no upload
check<-function(x){is.null(x) || x==""}
if(check(input$dataset)) return()
obj<-list(data=get(input$dataset),
effect=input$ycol,
cause=input$xcol
)
#Require all to be set to proceed
if(any(sapply(obj,check))) return()
#Make sure choices had a chance to update
check<-function(obj){
!all(c(obj$effect,obj$cause) %in% colnames(obj$data))
}
if(check(obj)) return()
obj
})
#Plot function using ggplot2
  output$p <- renderPlot({
plot.obj<-get_data()
#conditions for plotting
if(is.null(plot.obj)) return()
#make sure variable and group have loaded
if(plot.obj$effect == "" | plot.obj$cause =="") return()
#plot types
plot.type<-switch(input$plot.type,
"histogram" = geom_histogram(alpha=0.5,position="identity"),#,stat = "count"),
"boxplot" = geom_boxplot(),
"density" = geom_density(alpha=.75),
"bar" = geom_bar(position="dodge")
)
if(input$plot.type=="histogram") {
p<-ggplot(plot.obj$data,
                aes_string(
y = plot.obj$effect,
x = plot.obj$cause,
fill = plot.obj$cause # let type determine plotting
)
) + plot.type
}
if(input$plot.type=="boxplot") { #control for 1D or 2D graphs
p<-ggplot(plot.obj$data,
aes_string(
y = plot.obj$effect,
x = plot.obj$cause,
fill = plot.obj$cause # let type determine plotting
)
) + plot.type
if(input$show.points==TRUE)
{
p<-p+ geom_point(aes_string(color=input$xcol),
alpha=0.5,
position = 'jitter',
show.legend = F)
}
} else {
p<-ggplot(plot.obj$data,
aes_string(
x = plot.obj$effect,
fill = plot.obj$cause,
group = plot.obj$cause
)
) + plot.type
}
    p<-p+labs(
      fill = plot.obj$cause,
      x = plot.obj$effect,
      y = ""
    )
print(p)
})
# set uploaded file
upload_data<-reactive({
inFile <- input$file1
if (is.null(inFile))
return(NULL)
#could also store in a reactiveValues
read.csv(inFile$datapath,fileEncoding="UTF-8-BOM")
})
observeEvent(input$file1,{
inFile<<-upload_data()
})
# Generate a correlation matrix of the data ----
output$heat <- renderPlotly({
dfNum <- datasetInput() %>% mutate_if(is.factor, as.numeric)
nms <- names(datasetInput())
# compute a correlation matrix
correlation <- cor(dfNum)
plot_ly(x = nms, y = nms, z = correlation,
key = correlation, type = "heatmap",
source = "heatplot",
colors = colorRamp(c("black", "gray", "yellow"))) %>%
layout(xaxis = list(title = ""),
yaxis = list(title = ""))
})
output$selection <- renderPrint({'Click on a cell in the heatmap to display a scatterplot'})
# Show scatterplot for correlation matrix selection
output$scatterplot <- renderPlotly({
dfNum <- Filter(is.numeric, datasetInput())
s <- event_data("plotly_click", source = "heatplot")
if (length(s)) {
vars <- c(s[["x"]], s[["y"]])
d <- setNames(datasetInput()[vars], c("x", "y"))
yhat <- fitted(lm(y ~ x, data = d))
plot_ly(d, x = ~x) %>%
add_markers(y = ~y) %>%
add_lines(y = ~yhat) %>%
layout(xaxis = list(title = s[["x"]]),
yaxis = list(title = s[["y"]]),
showlegend = FALSE)
} else {
plotly_empty()
}
})
# Show data in table
output$table <- DT::renderDataTable({
if (is.null(datasetInput))
return(NULL)
datasetInput()
},
filter = 'top',
rownames = FALSE)
#Get Summary
output$summary <- renderPrint({
if (is.null(datasetInput))
return(NULL)
summary(datasetInput())
})
# Reactive expression to create data frame of all input values ----
sliderValues <- reactive({
data.frame(
Name = c("ntree",
"mtry",
"nodesize",
"maxdepth"),
Value = as.character(c(input$ntree,
input$mtry,
input$nodesize,
input$maxdepth)),
stringsAsFactors = FALSE)
})
output$rf<-renderPrint({
mydf <- data.frame(datasetInput())
mydf<-rename(mydf, target = 1)
# Split into Train and Validation sets
# Training Set : Validation Set = 70 : 30 (random)
set.seed(100)
train <- sample(nrow(mydf),
0.7*nrow(mydf),
replace = FALSE)
TrainSet <- mydf[train,]
ValidSet <- mydf[-train,]
# Fine tuning parameters of Random Forest model
RF_Model <<- randomForest(target ~ ., #@change
data = TrainSet,
ntree = input$ntree,
mtry = input$mtry,
nodesize = input$nodesize, #@change
importance = TRUE)
# Predicting on train set
predTrain <- predict(RF_Model,
TrainSet,
type = "class")
# Checking classification accuracy
out_table1 <- table(predTrain, #@change
TrainSet$target)
# Predicting on Validation set
predValid <- predict(RF_Model,
ValidSet,
type = "class")
# Checking classification accuracy
out_mean2 <- mean(predValid == ValidSet$target) #@change
out_table2 <- table(predValid, #@change
ValidSet$target)
mean(predValid == ValidSet$target)
out_importance <- importance(RF_Model) #@change
#--- output --- @change
cat("\n----- Model -----\n")
cat("\nRF_Model:\n")
print(RF_Model)
cat("\n----- Checking classification accuracy -----\n")
cat("\nTrainSet:\n")
print(out_table1)
cat("\nValidSet:\n")
print(out_table2)
cat("\naccuracy:\n")
print(out_mean2)
cat("\n----- importance (RF_Model) -----\n")
print(out_importance)
})
output$varImpPlot <- renderPlot({ #@change
mydf <- data.frame(datasetInput())
mydf<-rename(mydf, target = 1)
# Split into Train and Validation sets
# Training Set : Validation Set = 70 : 30 (random)
set.seed(100)
train <- sample(nrow(mydf),
0.7*nrow(mydf),
replace = FALSE)
TrainSet <- mydf[train,]
boruta_output <- Boruta(target ~ ., data=TrainSet,
ntree = input$ntree,
mtry = input$mtry,
doTrace=0)
# names(boruta_output)
# Get significant variables including tentatives
boruta_signif <- getSelectedAttributes(boruta_output, withTentative = TRUE)
# print(boruta_signif)
# Do a tentative rough fix
roughFixMod <- TentativeRoughFix(boruta_output)
boruta_signif <- getSelectedAttributes(roughFixMod)
# print(boruta_signif)
# Variable Importance Scores
imps <- attStats(roughFixMod)
imps2 = imps[imps$decision != 'Rejected', c('meanImp', 'decision')]
head(imps2[order(-imps2$meanImp), ]) # descending sort
# Plot variable importance
plot(boruta_output, cex.axis=.7, las=2, xlab="", main="Variable Importance")
})
# Classification Tree Summary with rpart
output$DTsum <- renderPrint({
mydf <- data.frame(datasetInput())
mydf<-rename(mydf, target = 1)
set.seed(100)
fit <- rpart(target ~ .,
method="class",
data = mydf,
control=rpart.control(minsplit = input$minsplit,
maxdepth = input$maxdepth))
print(printcp(fit)) # display the results
print(plotcp(fit)) # visualize cross-validation results
print(summary(fit)) # detailed summary of splits
rpart.plot(fit, uniform=TRUE,
main="Classification Tree",
extra= 1)
})
# Classification Tree
output$decisiontree <- renderPlot({
mydf <- data.frame(datasetInput())
mydf<-rename(mydf, target = 1)
set.seed(100)
#grow tree
fit <- rpart(target ~ .,
method="class",
data = mydf,
control=rpart.control(minsplit = input$minsplit,
maxdepth = input$maxdepth))
# plot tree
rpart.plot(fit, uniform=TRUE,
main="Classification Tree",
extra= 1)
})
}
shinyApp(ui, server)
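# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original app): the Correlation tab above
# links the heatmap to the scatterplot solely through plotly's event_data(),
# keyed on source = "heatplot". A stripped-down, self-contained version of that
# pattern, using only the built-in mtcars data, would look roughly like this:
#
# library(shiny)
# library(plotly)
# ui <- fluidPage(plotlyOutput("heat"), plotlyOutput("scatter"))
# server <- function(input, output, session) {
#   output$heat <- renderPlotly({
#     m <- cor(mtcars)
#     plot_ly(x = colnames(m), y = rownames(m), z = m,
#             type = "heatmap", source = "heatplot")
#   })
#   output$scatter <- renderPlotly({
#     s <- event_data("plotly_click", source = "heatplot")
#     if (is.null(s)) return(plotly_empty())
#     d <- setNames(mtcars[, c(s[["x"]], s[["y"]])], c("x", "y"))
#     plot_ly(d, x = ~x, y = ~y) %>% add_markers()
#   })
# }
# shinyApp(ui, server)
# ---------------------------------------------------------------------------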
|
/App5.R
|
no_license
|
ChrisEdstrom/Shiny
|
R
| false | false | 16,027 |
r
|
#' Classify a review as good or bad
#'
#' @param x Text to be classified, ideally a one-sentence product review.
#' @param random_forest A model created with the randomForest package.
#' @param vectoriser A vectoriser constructed with the text2vec package.
#' @param tfidf A tfidf object constructed with the text2vec package.
#' If tfidf is NULL, then weighting will not be applied.
#'
#' @importFrom randomForest randomForest
#' @export
#'
sentiment <- function(x, random_forest, vectoriser, tfidf = NULL) {
processed <- map_to_dtm(x, vectoriser = vectoriser, tfidf = tfidf)
as.character(stats::predict(random_forest, processed))
}
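# Illustrative usage (a sketch; the model objects below are hypothetical and
# would come from whatever training pipeline produced the random forest,
# vectoriser and tfidf for this package):
# sentiment("Great blender, works exactly as advertised",
#           random_forest = review_rf,
#           vectoriser    = review_vectoriser,
#           tfidf         = review_tfidf)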
|
/R/sentiment.R
|
permissive
|
mdneuzerling/ReviewSentiment
|
R
| false | false | 641 |
r
|
#'
Predict1Word.UsingNgram__Generator__ <- function(DFMs) {
freqNgrams <- lapply(1:length(DFMs), function(N) {
freq <- colSums(DFMs[[N]]) %>%
as.data.table(keep.rownames = "Ngram") %>%
.[, `:=`(c("Ngram", paste0("Word", (N-1):0)),
strsplit(Ngram, "_") %>%
lapply(function(s) as.list(c(paste(s[1:N][-N], collapse = "_"),
s[1:N]))) %>%
rbindlist)
] %>%
setnames(".", "count")
for (n in 0:(N-1)) freq <- freq[get(paste0("Word", n)) != ""]
freq[, `:=`(paste0("Word", (0:(N-1))[-1]), NULL)]
setkey(freq, Ngram, Word0)
})
# freqNgrams.top <- lapply(freqNgrams, function(freq) {
# freq[, .SD[which.max(count)], by = Ngram]
# })
function(tkn, nextWords = NULL, return.counts = F) {
Nmax <- min(length(tkn) + 1, length(freqNgrams))
tkn <- tkn[length(tkn) - ((Nmax-2):0)]
nextWord <- character(0)
N <- Nmax
while(length(nextWord) == 0) {
Ngram_ <- paste(tkn, collapse = "_")
freq <- freqNgrams[[N]][.(Ngram_, nextWords)]
nextWord <- freq[which.max(count), Word0]
N <- N - 1
tkn <- tkn[-1]
}
if (return.counts) list(nextWord = nextWord, freq = freq) else nextWord
}
}
##
DFMs <- list(DFM_noStopwords_1grams$blogs,
DFM_noStopwords_2grams$blogs,
DFM_noStopwords_3grams$blogs,
DFM_noStopwords_4grams$blogs)
Predict1Word.UsingNgram.noStopwords <- Predict1Word.UsingNgram__Generator__(DFMs)
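# Illustrative call (added for clarity; the token vector and candidate words are
# invented): predict the most likely word to follow "happy new", optionally
# restricting the search to a candidate set via nextWords.
# Predict1Word.UsingNgram.noStopwords(c("happy", "new"), nextWords = c("year", "car"))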
#### ####
Sentences <- list(q1 = "When you breathe, I want to be the air for you. I'll be there for you, I'd live and I'd",
q2 = "Guy at my table's wife got up to go to the bathroom and I asked about dessert and he started telling me about his",
q3 = "I'd give anything to see arctic monkeys this",
q4 = "Talking to your mom has the same effect as a hug and helps reduce your",
q5 = "When you were in Holland you were like 1 inch away from me but you hadn't time to take a",
q6 = "I'd just like all of these questions answered, a presentation of evidence, and a jury to settle the",
q7 = "I can't deal with unsymetrical things. I can't even hold an uneven number of bags of groceries in each",
q8 = "Every inch of you is perfect from the bottom to the",
q9 = "I'm thankful my childhood was filled with imagination and bruises from playing",
q10 = "I like how the same people are in almost all of Adam Sandler's")
Choices <- list(q1 = c("give", "die", "eat", "sleep"),
q2 = c("marital", "financial", "horticultural", "spiritual"),
q3 = c("morning", "month", "weekend", "decade"),
q4 = c("sleepiness", "stress", "happiness", "hunger"),
q5 = c("minute", "picture", "walk", "look"),
q6 = c("case", "account", "incident", "matter"),
q7 = c("hand", "finger", "toe", "arm"),
q8 = c("side", "top", "center", "middle"),
q9 = c("inside", "daily", "weekly", "outside"),
q10 = c("novels", "movies", "stories", "pictures"))
Correct <- c(q1="die", q2="marital", q3="weekend", q4="stress",
q5="picture", q6 = "matter", q7="hand", q8="top",
q9="outside", q10="movies")
#### ####
tkns <- sapply(Sentences, function(sent) {
sent <- tokens(sent, what = "sentence")[[1]] %>%
.[length(.)] %>%
tokens(remove_twitter = T,
remove_numbers = T,
remove_punct = T,
remove_symbols = T) %>%
.[[1]]
})
tkns_noStopwords <- sapply(Sentences, function(sent) {
sent <- tokens(sent, what = "sentence")[[1]] %>%
.[length(.)] %>%
tokens(remove_twitter = T,
remove_numbers = T,
remove_punct = T,
remove_symbols = T) %>%
tokens_remove(stopwords()) %>%
.[[1]]
})
#### ####
Attempt1 <- lapply(names(tkns_noStopwords), function(q)
Predict1Word.UsingNgram.noStopwords(tkns_noStopwords[[q]], Choices[[q]], T))
# q1 q2 q3 q4 q5 q6
# "die" "financial" "morning" "stress" "picture" "case"
# q7 q8 q9 q10
# "hand" "side" "outside" "pictures"
Score1 <- c(1,0,0, 1,1,0, 1,0,1, 0)
#### ####
Attempt2 <- lapply(names(tkns), function(q)
Predict1Word.UsingNgram(tkns[[q]], Choices[[q]], T))
#### ####
smoothing <- .01
Attempt3 <- mapply(function(freq1, freq2) {
freq <- merge(freq1$freq[, .(Word0, count)][is.na(count), count := 0],
freq2$freq[, .(Word0, count)][is.na(count), count := 0],
by = "Word0"
)[, p.x := (count.x + smoothing) / sum(count.x + smoothing)][
, p.y := (count.y + smoothing) / sum(count.y + smoothing)][
, p := (p.x + p.y)/2]
print(freq)
freq[which.max(p), Word0]
},
Attempt1, Attempt2)
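# Illustrative scoring sketch (not in the original script): because the calls
# above use return.counts = TRUE, each element of Attempt1 is a list with a
# nextWord entry, so accuracy against the answer key can be computed as:
# mean(sapply(Attempt1, `[[`, "nextWord") == Correct)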
|
/Week3Tasks.R
|
no_license
|
lchiaying/Coursera-DataScienceSpecialization-Capstone
|
R
| false | false | 5,444 |
r
|
## hpc = Household Power Consumption
library(data.table)
library(dplyr)
## Removes all pre-existing variables.
rm(list = ls())
## Set working directory to preferred folder on Desktop
setwd('C:/Users/mhgandhi/Desktop/Data Science Specialization/Course 4 - Exploratory Data Analysis/Week 1/Course Project 1')
getwd()
## Reading raw .txt file into hpc variable
hpc <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, stringsAsFactors = FALSE)
head(hpc)
str(hpc)
## Changing Date and Time formats from Factors to Date and Character.
hpc$Date <- as.Date(hpc$Date,format = "%d/%m/%Y")
hpc$Time <- format(hpc$Time, format = "%H:%M:%S")
## Changing remaining columns from Character to Numeric.
hpc$Global_active_power <- as.numeric(hpc$Global_active_power)
hpc$Global_reactive_power <- as.numeric(hpc$Global_reactive_power)
hpc$Voltage <- as.numeric(hpc$Voltage)
hpc$Global_intensity <- as.numeric(hpc$Global_intensity)
hpc$Sub_metering_1 <- as.numeric(hpc$Sub_metering_1)
hpc$Sub_metering_2 <- as.numeric(hpc$Sub_metering_2)
hpc$Sub_metering_3 <- as.numeric(hpc$Sub_metering_3)
## Filter for specified dates.
hpc <- hpc[hpc$Date >= '2007-02-01' & hpc$Date <= '2007-02-02',]
head(hpc)
tail(hpc)
str(hpc)
## -------------------------- PLOT 4---------------------------------------##
## First - Create a date_time column to combine date and time columns.
## Second - Check default number of plots using par(mfrow) function.
## Third - Create file Plot4.png of 480 X 480.
## Fourth - Set number of plots to a 2 X 2 using par(mfrow) function
## Fifth - Plot using with function to avoid naming dataset repeatedly.
hpc$date_time <- as.POSIXct(paste(hpc$Date, hpc$Time), format="%Y-%m-%d %H:%M:%S")
par("mfrow")
png(filename = "Plot4.png", width = 480,height = 480)
par(mfrow = c(2,2),oma = c(0,0,2,0))
with(hpc,
{
plot(date_time,Global_active_power,type = "l",xlab = " ",ylab = "Global Active Power (kilowatts)")
plot(date_time,Voltage,type = "l",xlab = " datetime ",ylab = "Voltage")
plot(date_time,Sub_metering_1, type = "l",
xlab = " ",ylab = "Energy sub metering")
lines(date_time,Sub_metering_2, type = "l", col = "red1")
lines(date_time,Sub_metering_3, type = "l", col = "mediumblue")
legend("topright",names(hpc)[7:9], lty = 1,
col = c("black","red1","mediumblue"), bty ='o')
plot(date_time,Global_reactive_power,type = "l",xlab = " datetime ",ylab = "Global_reactive_power")
})
dev.off()
|
/Plot 4.R
|
no_license
|
montoohg/ExData_Plotting1
|
R
| false | false | 2,539 |
r
|
#!/usr/local/bin/R
ApeShape_withprint <- function(infile, inTree){
  library(phangorn)
  library(ape)
  # read inputs from the function arguments (the original re-read commandArgs()
  # inside the body and ignored the arguments it was given)
  tree <- read.tree(file = inTree)
  seq <- read.FASTA(file = infile)
  phyDat <- phyDat(seq, type = "DNA")
  treePML <- pml(tree, phyDat)
  anc <- ancestral.pml(treePML)
  baseIndex <- which.max(anc$'7')
  if (baseIndex == 1) {
    base <- "A"
  } else if (baseIndex == 2) {
    base <- "C"
  } else if (baseIndex == 3) {
    base <- "G"
  } else if (baseIndex == 4) {
    base <- "T"
  }
  return(list(c(infile, inTree), base))
}
args <- commandArgs(trailingOnly = TRUE)
ApeShape_withprint(args[1], args[2])
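# Illustrative shell invocation (file names are hypothetical):
#   Rscript ApeShape_withprint.R alignment.fasta tree.nwk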
|
/R/ApeShape_withprint.R
|
permissive
|
jbeacher6/ApeShape
|
R
| false | false | 573 |
r
|
library("stringr")
library("purrr")
|
/libraries.R
|
no_license
|
czeildi/r-dojo
|
R
| false | false | 36 |
r
|
library("stringr")
library("purrr")
|
#' Is an object an expression?
#'
#' @description
#' In rlang, an _expression_ is the return type of [parse_expr()], the
#' set of objects that can be obtained from parsing R code. Under this
#' definition expressions include numbers, strings, `NULL`, symbols,
#' and function calls. These objects can be classified as:
#'
#' * Symbolic objects, i.e. symbols and function calls (for which
#' `is_symbolic()` returns `TRUE`)
#' * Syntactic literals, i.e. scalar atomic objects and `NULL`
#' (testable with `is_syntactic_literal()`)
#'
#' `is_expression()` returns `TRUE` if the input is either a symbolic
#' object or a syntactic literal. If a call, the elements of the call
#' must all be expressions as well. Unparsable calls are not
#' considered expressions in this narrow definition.
#'
#' Note that in base R, there exists [expression()] vectors, a data
#' type similar to a list that supports special attributes created by
#' the parser called source references. This data type is not
#' supported in rlang.
#'
#' @details
#' `is_symbolic()` returns `TRUE` for symbols and calls (objects with
#' type `language`). Symbolic objects are replaced by their value
#' during evaluation. Literals are the complement of symbolic
#' objects. They are their own value and return themselves during
#' evaluation.
#'
#' `is_syntactic_literal()` is a predicate that returns `TRUE` for the
#' subset of literals that are created by R when parsing text (see
#' [parse_expr()]): numbers, strings and `NULL`. Along with symbols,
#' these literals are the terminating nodes in an AST.
#'
#' Note that in the most general sense, a literal is any R object that
#' evaluates to itself and that can be evaluated in the empty
#' environment. For instance, `quote(c(1, 2))` is not a literal, it is
#' a call. However, the result of evaluating it in [base_env()] is a
#' literal (in this case an atomic vector).
#'
#' As the data structure for function arguments, pairlists are also a
#' kind of language objects. However, since they are mostly an
#' internal data structure and can't be returned as is by the parser,
#' `is_expression()` returns `FALSE` for pairlists.
#'
#' @param x An object to test.
#' @seealso [is_call()] for a call predicate.
#' @export
#' @examples
#' q1 <- quote(1)
#' is_expression(q1)
#' is_syntactic_literal(q1)
#'
#' q2 <- quote(x)
#' is_expression(q2)
#' is_symbol(q2)
#'
#' q3 <- quote(x + 1)
#' is_expression(q3)
#' is_call(q3)
#'
#'
#' # Atomic expressions are the terminating nodes of a call tree:
#' # NULL or a scalar atomic vector:
#' is_syntactic_literal("string")
#' is_syntactic_literal(NULL)
#'
#' is_syntactic_literal(letters)
#' is_syntactic_literal(quote(call()))
#'
#' # Parsable literals have the property of being self-quoting:
#' identical("foo", quote("foo"))
#' identical(1L, quote(1L))
#' identical(NULL, quote(NULL))
#'
#' # Like any literals, they can be evaluated within the empty
#' # environment:
#' eval_bare(quote(1L), empty_env())
#'
#' # Whereas it would fail for symbolic expressions:
#' # eval_bare(quote(c(1L, 2L)), empty_env())
#'
#'
#' # Pairlists are also language objects representing argument lists.
#' # You will usually encounter them with extracted formals:
#' fmls <- formals(is_expression)
#' typeof(fmls)
#'
#' # Since they are mostly an internal data structure, is_expression()
#' # returns FALSE for pairlists, so you will have to check explicitly
#' # for them:
#' is_expression(fmls)
#' is_pairlist(fmls)
is_expression <- function(x) {
stack <- new_stack()
stack$push(x)
while (!is_exhausted(elt <- stack$pop())) {
if (is_missing(elt)) {
return(FALSE)
}
switch(
typeof(elt),
language = stack$push(!!!as.list(elt)),
if (!is_symbol(elt) && !is_syntactic_literal(elt)) {
return(FALSE)
}
)
}
TRUE
}
#' @export
#' @rdname is_expression
is_syntactic_literal <- function(x) {
switch(typeof(x),
NULL = {
TRUE
},
logical = ,
integer = ,
double = ,
character = {
length(x) == 1
},
complex = {
if (length(x) != 1) {
return(FALSE)
}
is_na(x) || Re(x) == 0
},
FALSE
)
}
#' @export
#' @rdname is_expression
is_symbolic <- function(x) {
typeof(x) %in% c("language", "symbol")
}
#' Turn an expression to a label
#'
#' @keywords internal
#' @description
#'
#' `r lifecycle::badge("questioning")`
#'
#' `expr_text()` turns the expression into a single string, which
#' might be multi-line. `expr_name()` is suitable for formatting
#' names. It works best with symbols and scalar types, but also
#' accepts calls. `expr_label()` formats the expression nicely for use
#' in messages.
#'
#' @param expr An expression to labellise.
#'
#' @examples
#' # To labellise a function argument, first capture it with
#' # substitute():
#' fn <- function(x) expr_label(substitute(x))
#' fn(x:y)
#'
#' # Strings are encoded
#' expr_label("a\nb")
#'
#' # Names and expressions are quoted with ``
#' expr_label(quote(x))
#' expr_label(quote(a + b + c))
#'
#' # Long expressions are collapsed
#' expr_label(quote(foo({
#' 1 + 2
#' print(x)
#' })))
#' @export
expr_label <- function(expr) {
if (is.character(expr)) {
encodeString(expr, quote = '"')
} else if (is.atomic(expr)) {
format(expr)
} else if (is.name(expr)) {
paste0("`", as.character(expr), "`")
} else {
chr <- deparse_one(expr)
paste0("`", chr, "`")
}
}
#' @rdname expr_label
#' @export
expr_name <- function(expr) {
if (is_null(expr)) {
return("NULL")
}
if (is_symbol(expr)) {
return(as_string(expr))
}
if (is_call(expr)) {
if (is_data_pronoun(expr)) {
name <- data_pronoun_name(expr) %||% "<unknown>"
} else {
name <- deparse_one(expr)
name <- gsub("\n.*$", "...", name)
}
return(name)
}
# So 1L is translated to "1" and not "1L"
if (is_scalar_atomic(expr)) {
return(as.character(expr))
}
if (length(expr) == 1) {
name <- expr_text(expr)
name <- gsub("\n.*$", "...", name)
return(name)
}
abort("`expr` must quote a symbol, scalar, or call")
}
#' @rdname expr_label
#' @export
#' @param width Width of each line.
#' @param nlines Maximum number of lines to extract.
expr_text <- function(expr, width = 60L, nlines = Inf) {
if (is_symbol(expr)) {
return(sym_text(expr))
}
str <- deparse(expr, width.cutoff = width, backtick = TRUE)
if (length(str) > nlines) {
str <- c(str[seq_len(nlines - 1)], "...")
}
paste0(str, collapse = "\n")
}
sym_text <- function(sym) {
# Use as_string() to translate unicode tags
text <- as_string(sym)
if (needs_backticks(text)) {
text <- sprintf("`%s`", text)
}
text
}
deparse_one <- function(expr) {
str <- deparse(expr, 60L)
if (length(str) > 1) {
if (is_call(expr, function_sym)) {
expr[[3]] <- quote(...)
str <- deparse(expr, 60L)
} else if (is_call(expr, brace_sym)) {
str <- "{ ... }"
} else if (is_call(expr)) {
str <- deparse(call2(expr[[1]], quote(...)), 60L)
}
str <- paste(str, collapse = "\n")
}
str
}
#' Set and get an expression
#'
#' @keywords internal
#' @description
#'
#' These helpers are useful to make your function work generically
#' with quosures and raw expressions. First call `get_expr()` to
#' extract an expression. Once you're done processing the expression,
#' call `set_expr()` on the original object to update the expression.
#' You can return the result of `set_expr()`, either a formula or an
#' expression depending on the input type. Note that `set_expr()` does
#' not change its input, it creates a new object.
#'
#' @param x An expression, closure, or one-sided formula. In addition,
#' `set_expr()` accept frames.
#' @param value An updated expression.
#' @param default A default expression to return when `x` is not an
#' expression wrapper. Defaults to `x` itself.
#' @return The updated original input for `set_expr()`. A raw
#' expression for `get_expr()`.
#' @seealso [quo_get_expr()] and [quo_set_expr()] for versions of
#' [get_expr()] and [set_expr()] that only work on quosures.
#' @export
#' @examples
#' f <- ~foo(bar)
#' e <- quote(foo(bar))
#' frame <- identity(identity(ctxt_frame()))
#'
#' get_expr(f)
#' get_expr(e)
#' get_expr(frame)
#'
#' set_expr(f, quote(baz))
#' set_expr(e, quote(baz))
set_expr <- function(x, value) {
if (is_quosure(x)) {
x <- quo_set_expr(x, value)
} else if (is_formula(x)) {
f_rhs(x) <- value
} else if (is_closure(x)) {
body(x) <- value
} else {
x <- value
}
x
}
#' @rdname set_expr
#' @export
get_expr <- function(x, default = x) {
.Call(ffi_get_expression, x, default)
}
expr_type_of <- function(x) {
if (missing(x)) {
return("missing")
}
type <- typeof(x)
if (type %in% c("symbol", "language", "pairlist", "NULL")) {
type
} else {
"literal"
}
}
switch_expr <- function(.x, ...) {
switch(expr_type_of(.x), ...)
}
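# Illustrative use of the helpers above (added comment): switch_expr()
# dispatches on expr_type_of(), e.g.
#   switch_expr(quote(x), symbol = "sym", language = "call")
# returns "sym" because quote(x) has type "symbol".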
#' Print an expression
#'
#' @description
#'
#' `expr_print()`, powered by `expr_deparse()`, is an alternative
#' printer for R expressions with a few improvements over the base R
#' printer.
#'
#' * It colourises [quosures][nse-defuse] according to their environment.
#' Quosures from the global environment are printed normally while
#' quosures from local environments are printed in unique colour (or
#' in italic when all colours are taken).
#'
#' * It wraps inlined objects in angular brackets. For instance, an
#' integer vector unquoted in a function call (e.g.
#' `expr(foo(!!(1:3)))`) is printed like this: `foo(<int: 1L, 2L,
#' 3L>)` while by default R prints the code to create that vector:
#' `foo(1:3)` which is ambiguous.
#'
#' * It respects the width boundary (from the global option `width`)
#' in more cases.
#'
#' @param x An object or expression to print.
#' @param width The width of the deparsed or printed expression.
#' Defaults to the global option `width`.
#' @param ... Arguments passed to `expr_deparse()`.
#'
#' @return `expr_deparse()` returns a character vector of lines.
#' `expr_print()` returns its input invisibly.
#'
#' @export
#' @examples
#' # It supports any object. Non-symbolic objects are always printed
#' # within angular brackets:
#' expr_print(1:3)
#' expr_print(function() NULL)
#'
#' # Contrast this to how the code to create these objects is printed:
#' expr_print(quote(1:3))
#' expr_print(quote(function() NULL))
#'
#' # The main cause of non-symbolic objects in expressions is
#' # quasiquotation:
#' expr_print(expr(foo(!!(1:3))))
#'
#'
#' # Quosures from the global environment are printed normally:
#' expr_print(quo(foo))
#' expr_print(quo(foo(!!quo(bar))))
#'
#' # Quosures from local environments are colourised according to
#' # their environments (if you have crayon installed):
#' local_quo <- local(quo(foo))
#' expr_print(local_quo)
#'
#' wrapper_quo <- local(quo(bar(!!local_quo, baz)))
#' expr_print(wrapper_quo)
expr_print <- function(x, ...) {
cat_line(expr_deparse(x, ...))
invisible(x)
}
#' @rdname expr_print
#' @export
expr_deparse <- function(x,
...,
width = peek_option("width")) {
check_dots_empty0(...)
deparser <- new_quo_deparser(width = width)
quo_deparse(x, deparser)
}
|
/R/expr.R
|
permissive
|
seankross/rlang
|
R
| false | false | 11,290 |
r
|
library(markovchain)
library(expm)
lambda<- 1
mu <- 2
rho<- 3
gamma<- 4
estadosCarga<-c(0,10,20,30,40,50,60,70,80,90,100)
estadosBooleano<-c(0:1)
estados=as.vector(outer(estadosBooleano, estadosCarga, paste, sep=","))
estados
matrizQ<-matrix(0,nrow=length(estados),ncol=length(estados))
colnames(matrizQ)=estados
rownames(matrizQ)=estados
for (i in estadosCarga)
{
for(j in estadosBooleano)
{
tempY = outer(j, i, paste, sep=",")
if(i>0 & j == 0)
    {# Case 1: charge drops by 10 at rate lambda
tempX =outer(0, i-10, paste, sep=",")
matrizQ[tempY,tempX]=lambda
}
if(i<100 & j == 1)
    {# Case 2: charge rises by 10 at rate mu
tempX = outer(1, i+10, paste, sep=",")
matrizQ[tempY,tempX]=mu
}
if(j == 0)
    {# Case 3: switch from state 0 to state 1 at rate rho
tempX = outer(1, i, paste, sep=",")
matrizQ[tempY,tempX]=rho
}
if(i>=70 & j == 1)
    {# Case 4: switch from state 1 to state 0 at rate gamma (charge >= 70)
tempX = outer(0, i, paste, sep=",")
matrizQ[tempY,tempX]=gamma
}
}
}
for (i in 1: length(estados)){
  # Diagonal: each row of the generator matrix must sum to zero
matrizQ[i,i]=-sum(matrizQ[i, ])
}
CMTC <- new(Class="ctmc", states = as.character(estados), byrow = TRUE, generator = matrizQ)
plot(CMTC, edge.arrow.size=0.5)
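# Illustrative check (not in the original script): expm is loaded above but not
# used; for a CTMC with generator Q, the transition-probability matrix over a
# horizon t is P(t) = expm(Q * t), and every row of P(t) should sum to 1.
# P_t <- expm(matrizQ * 0.5)
# rowSums(P_t)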
|
/Archivo R.R
|
no_license
|
jc-corrales/SMS-Sachsen
|
R
| false | false | 1,127 |
r
|
# read and clean 16S ------------------------------------------------------
# setwd("~/Desktop/R/CMAIKI_clean_and_query/bacterial_16S/")
# clean 16S pipeline outputs for R analyses
clean_16S_tables <- function(abundance_file = NULL,
taxonomy_file = NULL,
metadata_file = NULL,
description = NULL,
output_dir = "./",
id_column = NULL,
cull = list(min.num = NULL, min.abund = NULL, min.single.abund = NULL)){
# Requirements
require(dplyr)
require(tidyr)
require(data.table)
########## START
# check input files
if(!file.exists(abundance_file)){
warning("Abundance file doesn't exist")
}
if(!file.exists(taxonomy_file)){
warning("Taxonomy file doesn't exist")
}
if(!file.exists(metadata_file)){
warning("Metadata file doesn't exist")
}
## Read in tables
message("reading abundance")
abund <- fread( abundance_file, header = T, sep = "\t")
message("reading taxonomy")
tax <- fread( taxonomy_file, header = T, sep = "\t")
message("reading metadata")
fullmeta <- fread( metadata_file, header = T,
sep = "," )
## Clean tables
# Clean taxonomy table:
#Split taxonomic annotations (col = Taxonomy, generates warning because of trailing ";")
tax <- tax %>%
separate( Taxonomy,
c( "kingdom", "phylum", "class", "order","family","genus" ),
sep = ";")
# Clean abundance table: Cut down id name and delete numOtus
abund$Group <- sub( ".*(1\\d{5}).*",
"\\1",
abund$Group,
perl = T)
abund[, c("numOtus","label"):=NULL]
# Check abundance and taxonomy have the same OTUs
if( !all( colnames(abund)[-c(1,2)] %in% tax$OTU)){ # test
warning("*** WARNING Abundance and taxonomy have different OTUs") # no
}
else {
message("OKAY Abundance and taxonomy have the same OTUs") # yes
}
## Culling abundance file to manageable size
# define cull.otu function
cull.otu = function(abund.dt, min.num = 0, min.abund = 0, min.single.abund = 0, sample_column = "Group") {
# This function locates columns matching criteria and drops them from the data.table
# Creates a single vector of columns to drop and then drops them, which is instantaneous and requires no memory usage.
#Inputs: relabund.df = dataframe containing ONLY relative abundance data, no metadata or other info. Samples in rows and OTUs in columns.
#min.num = the minimum number of samples an OTU needs to be present in to not be culled.
#min.abund = the minimum relative abundance an OTU needs to have in (the min.num) samples to not be culled.
# determine which columns to keep
# TEST
# abund.dt = abund[,1:5]
# min.num = 3
# min.abund = 50000
# min.single.abund = 100000
# check mean read abundance per sample pre-culling
# pull out sample names
group_names <- abund.dt[[sample_column]]
# identify columns containing OTU abundance counts
otu_cols <- colnames(abund.dt)[colnames(abund.dt) != sample_column]
# copy abundance counts to a new data.table and convert to class numeric
abund.ra <- copy(abund.dt)
abund.ra[ , (otu_cols) := lapply(.SD, as.numeric), .SDcols = otu_cols]
# add a column with total abundance for each sample
abund.ra[ , total_abund := rowSums(abund.ra[ , ..otu_cols])]
# convert abundance to relative abundance
abund.ra[ , (otu_cols) := lapply(.SD, function(x) x / total_abund), by = sample_column, .SDcols = otu_cols]
# drop non-OTU columns (i.e. sample name and total sample abundance)
abund.ra[ , c(sample_column, "total_abund"):= NULL]
# identify OTU columns that fail to meet minimum requirements
drop_cols <- which(
sapply(abund.ra, function(otu)
length(otu[otu >= min.abund]) < min.num || length(otu[otu > min.single.abund]) == 0
)
)
# drop OTU columns that fail to meet minimum requirements from the original abundance table
abund.dt[ , (names(drop_cols)):=NULL]
}
#If cull is a list, cut down the abundance, and taxonomy files based on listed values
if(is.list(cull) & !all(lapply(cull,is.null))){
message("Culling OTUS from data")
message( paste("min number of samples for OTU to be present in = ", cull$min.num,
"min relative abundance of OTU to count as being present = ", cull$min.abund,
"minimum relative abundance in a single sample =", cull$min.single.abund,
sep = "\n"))
message(paste("Starting # OTUS =", ncol(abund)-1))
message(paste("Starting mean read count per sample =",
round( mean( rowSums( abund[ , .SD, .SDcols = !"Group"] )))))
# run cull otus function
suppressWarnings(
cull.otu(abund.dt = abund,
               min.num = cull$min.num,
min.abund = cull$min.abund,
min.single.abund = cull$min.single.abund))
# update taxonomy to match culled abundance table
tax <- tax[OTU %in% colnames(abund)]
message(paste("New # OTUS =", ncol(abund) -1))
message(paste("New mean read count per sample =",
round( mean( rowSums( abund[ , .SD, .SDcols = !"Group"] )))))
}
else{
message("Skipping cull step")
}
# update the metadata table to match culled abundance table
meta <- fullmeta[ fullmeta[[id_column]] %in% abund$Group ]
not_in_abund <- fullmeta[!(fullmeta[[id_column]] %in% abund$Group)]
if( ! all(abund$Group %in% fullmeta[[id_column]])){
warning("Some abundance samples are missing from the metadata! Dropping those samples from abundance.")
abund <-abund[Group %in% meta[[id_column]]]
}
# Make all tables into a list
result <- list( abund, tax, meta, fullmeta, not_in_abund)
names(result) <- c("abundance", "taxonomy", "metadata","full_run_metadata","failed_samples")
# Write out all tables as .csv and return a "result" list of tables
lapply(1:length(result),
function(x) fwrite(result[[x]],
file = paste(output_dir,"/",description, "_", names(result[x]),"_", "table.csv", sep = "")))
# write message
message(paste("tables written out as csv files starting with ", description, "...", sep = ""))
return(result)
########## END
}
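# Illustrative call (a sketch; the file names, id column and cull thresholds
# below are hypothetical placeholders, not values used by this project):
# run1 <- clean_16S_tables(abundance_file = "run1.shared",
#                          taxonomy_file  = "run1.cons.taxonomy",
#                          metadata_file  = "run1_metadata.csv",
#                          description    = "run1",
#                          output_dir     = "data/clean",
#                          id_column      = "sequencing_id",
#                          cull = list(min.num = 3, min.abund = 0.001,
#                                      min.single.abund = 0.01))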
# summarize 16S sequencing success --------------------------------------------------
pass_fail <- function(clean_tables_list = NULL, id_column = NULL){
# shorten variable
run <- clean_tables_list
  # pull out samples that are in the abundance table, assume these sequenced well
  good_samples <- run$full_run_metadata[ run$full_run_metadata[[id_column]] %in% run$abundance$Group ]
  # pull out samples that are not in the abundance table, assume these did not sequence well
  bad_samples <- run$full_run_metadata[ !(run$full_run_metadata[[id_column]] %in% run$abundance$Group) ]
result<-list(good_samples,bad_samples)
names(result) <- c("good_samples","bad_samples")
message(paste("for ",
deparse(substitute(clean_tables_list)), # run name, ideally
", ",
nrow(good_samples), # number of good samples
" samples sequenced well and ",
nrow(bad_samples), " did not", sep = "")) # number of bad samples
return(result)
########### END
}
|
/src/clean_and_query_16S.R
|
no_license
|
soswift/waimea_marine
|
R
| false | false | 8,059 |
r
|
# read and clean 16S ------------------------------------------------------
# setwd("~/Desktop/R/CMAIKI_clean_and_query/bacterial_16S/")
# clean 16S pipeline outputs for R analyses
clean_16S_tables <- function(abundance_file = NULL,
taxonomy_file = NULL,
metadata_file = NULL,
description = NULL,
output_dir = "./",
id_column = NULL,
cull = list(min.num = NULL, min.abund = NULL, min.single.abund = NULL)){
# Requirements
require(dplyr)
require(tidyr)
require(data.table)
########## START
# check input files
if(!file.exists(abundance_file)){
warning("Abundance file doesn't exist")
}
if(!file.exists(taxonomy_file)){
warning("Taxonomy file doesn't exist")
}
if(!file.exists(metadata_file)){
warning("Metadata file doesn't exist")
}
## Read in tables
message("reading abundance")
abund <- fread( abundance_file, header = T, sep = "\t")
message("reading taxonomy")
tax <- fread( taxonomy_file, header = T, sep = "\t")
message("reading metadata")
fullmeta <- fread( metadata_file, header = T,
sep = "," )
## Clean tables
# Clean taxonomy table:
#Split taxonomic annotations (col = Taxonomy, generates warning because of trailing ";")
tax <- tax %>%
separate( Taxonomy,
c( "kingdom", "phylum", "class", "order","family","genus" ),
sep = ";")
# Clean abundance table: Cut down id name and delete numOtus
abund$Group <- sub( ".*(1\\d{5}).*",
"\\1",
abund$Group,
perl = T)
abund[, c("numOtus","label"):=NULL]
# Check abundance and taxonomy have the same OTUs
if( !all( colnames(abund)[-c(1,2)] %in% tax$OTU)){ # test
warning("*** WARNING Abundance and taxonomy have different OTUs") # no
}
else {
message("OKAY Abundance and taxonomy have the same OTUs") # yes
}
## Culling abundance file to manageable size
# define cull.otu function
cull.otu = function(abund.dt, min.num = 0, min.abund = 0, min.single.abund = 0, sample_column = "Group") {
# This function locates columns matching criteria and drops them from the data.table
# Creates a single vector of columns to drop and then drops them, which is instantaneous and requires no memory usage.
#Inputs: relabund.df = dataframe containing ONLY relative abundance data, no metadata or other info. Samples in rows and OTUs in columns.
#min.num = the minimum number of samples an OTU needs to be present in to not be culled.
#min.abund = the minimum relative abundance an OTU needs to have in (the min.num) samples to not be culled.
# determine which columns to keep
# TEST
# abund.dt = abund[,1:5]
# min.num = 3
# min.abund = 50000
# min.single.abund = 100000
# check mean read abundance per sample pre-culling
# pull out sample names
group_names <- abund.dt[[sample_column]]
# identify columns containing OTU abundance counts
otu_cols <- colnames(abund.dt)[colnames(abund.dt) != sample_column]
# copy abundance counts to a new data.table and convert to class numeric
abund.ra <- copy(abund.dt)
abund.ra[ , (otu_cols) := lapply(.SD, as.numeric), .SDcols = otu_cols]
# add a column with total abundance for each sample
abund.ra[ , total_abund := rowSums(abund.ra[ , ..otu_cols])]
# convert abundance to relative abundance
abund.ra[ , (otu_cols) := lapply(.SD, function(x) x / total_abund), by = sample_column, .SDcols = otu_cols]
# drop non-OTU columns (i.e. sample name and total sample abundance)
abund.ra[ , c(sample_column, "total_abund"):= NULL]
# identify OTU columns that fail to meet minimum requirements
drop_cols <- which(
sapply(abund.ra, function(otu)
length(otu[otu >= min.abund]) < min.num || length(otu[otu > min.single.abund]) == 0
)
)
# drop OTU columns that fail to meet minimum requirements from the original abundance table
abund.dt[ , (names(drop_cols)):=NULL]
}
#If cull is a list, cut down the abundance, and taxonomy files based on listed values
  if(is.list(cull) && !all(sapply(cull, is.null))){
message("Culling OTUS from data")
message( paste("min number of samples for OTU to be present in = ", cull$min.num,
"min relative abundance of OTU to count as being present = ", cull$min.abund,
"minimum relative abundance in a single sample =", cull$min.single.abund,
sep = "\n"))
message(paste("Starting # OTUS =", ncol(abund)-1))
message(paste("Starting mean read count per sample =",
round( mean( rowSums( abund[ , .SD, .SDcols = !"Group"] )))))
# run cull otus function
suppressWarnings(
cull.otu(abund.dt = abund,
               min.num = cull$min.num,
min.abund = cull$min.abund,
min.single.abund = cull$min.single.abund))
# update taxonomy to match culled abundance table
tax <- tax[OTU %in% colnames(abund)]
message(paste("New # OTUS =", ncol(abund) -1))
message(paste("New mean read count per sample =",
round( mean( rowSums( abund[ , .SD, .SDcols = !"Group"] )))))
}
else{
message("Skipping cull step")
}
# update the metadata table to match culled abundance table
meta <- fullmeta[ fullmeta[[id_column]] %in% abund$Group ]
not_in_abund <- fullmeta[!(fullmeta[[id_column]] %in% abund$Group)]
if( ! all(abund$Group %in% fullmeta[[id_column]])){
warning("Some abundance samples are missing from the metadata! Dropping those samples from abundance.")
abund <-abund[Group %in% meta[[id_column]]]
}
# Make all tables into a list
result <- list( abund, tax, meta, fullmeta, not_in_abund)
names(result) <- c("abundance", "taxonomy", "metadata","full_run_metadata","failed_samples")
# Write out all tables as .csv and return a "result" list of tables
lapply(1:length(result),
function(x) fwrite(result[[x]],
file = paste(output_dir,"/",description, "_", names(result[x]),"_", "table.csv", sep = "")))
# write message
message(paste("tables written out as csv files starting with ", description, "...", sep = ""))
return(result)
########## END
}
# summarize 16S sequencing success --------------------------------------------------
pass_fail <- function(clean_tables_list = NULL, id_column = NULL){
# shorten variable
run <- clean_tables_list
  # pull out samples that are in the abundance table, assume these sequenced well
  good_samples <- run$full_run_metadata[ run$full_run_metadata[[id_column]] %in% run$abundance$Group ]
  # pull out samples that are not in the abundance table, assume these did not sequence well
  bad_samples <- run$full_run_metadata[ !(run$full_run_metadata[[id_column]] %in% run$abundance$Group) ]
result<-list(good_samples,bad_samples)
names(result) <- c("good_samples","bad_samples")
message(paste("for ",
deparse(substitute(clean_tables_list)), # run name, ideally
", ",
nrow(good_samples), # number of good samples
" samples sequenced well and ",
nrow(bad_samples), " did not", sep = "")) # number of bad samples
return(result)
########### END
}
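# A hedged usage sketch for clean_16S_tables() and pass_fail() (not part of the
# original script): the file names, id column, and cull thresholds below are
# illustrative assumptions, not project values.
run1 <- clean_16S_tables(abundance_file = "abundance.shared",
                         taxonomy_file  = "taxonomy.tax",
                         metadata_file  = "metadata.csv",
                         description    = "run1",
                         output_dir     = "./output",
                         id_column      = "sample_id",
                         cull = list(min.num = 3, min.abund = 0.001, min.single.abund = 0.01))
run1_summary <- pass_fail(clean_tables_list = run1, id_column = "sample_id")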
|
library(ensembleBMA)
### Name: ymdhTOjul
### Title: Convert to Julian dates.
### Aliases: ymdhTOjul
### Keywords: chron
### ** Examples
data(ensBMAtest)
julianVdates <- ymdhTOjul(ensBMAtest$vdate)
all.equal( julTOymdh(julianVdates), as.character(ensBMAtest$vdate))
all.equal( ymdhTOjul(ensBMAtest$idate), julianVdates-2)
|
/data/genthat_extracted_code/ensembleBMA/examples/ymdhTOjul.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 338 |
r
|
#Ex1.1, Page 4
library(lattice)
data<-c(6.1,12.6,34.7,1.6,18.8,2.2,3.0,2.2,5.6,3.8,2.2,3.1,1.3,1.1,14.1,4.0,21.0,6.1,1.3,20.4,7.5,3.9,10.1,8.1,19.5,5.2,12.0,15.8,10.4,5.2,6.4,10.8,83.1,3.6,6.2,6.3,16.3,12.7,1.3,0.8,8.8,5.1,3.7,26.3,6.0,48.0,8.2,11.7,7.2,3.9,15.3,16.6,8.8,12.0,4.7,14.7,6.4,17.0,2.5,16.2)
stem(data,scale=2)
hist(data,main="Histogram for charity fundraising percentage data",xlab="FundRaising",col="grey",xlim=c(0,100),ylim=c(0,40))
|
/Probability_And_Statistics_For_Engineering_And_The_Sciences_by_Jay_L_Devore/CH1/EX1.1/Ex1_1.R
|
permissive
|
FOSSEE/R_TBC_Uploads
|
R
| false | false | 462 |
r
|
setwd("C:\\users\\zhuangmg\\coursera\\exploratory data analysis\\project 1")
list.files()
data<-read.csv("./household_power_consumption.txt", sep=';',na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data2<-na.omit(data)
library(data.table)
fulldata<-data.table(data2)
## Subsetting data
subdata1<-fulldata[fulldata$Date=="1/2/2007"]
subdata2<-fulldata[fulldata$Date=="2/2/2007"]
subdatatotal<-rbind(subdata1,subdata2)
## Converting dates & power
datetime <- strptime(paste(subdatatotal$Date, subdatatotal$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subdatatotal$Global_active_power)
subdatatotal2<-data.table(subdatatotal,datetime,globalActivePower)
## Plot 3
with(subdatatotal2, {
    plot(Sub_metering_1~datetime, type="l",
         ylab="Energy sub metering", xlab="")
lines(Sub_metering_2~datetime,col='Red')
lines(Sub_metering_3~datetime,col='Blue')
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Saving to file
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
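## Alternative sketch (not in the original script): open the PNG device directly
## instead of dev.copy(), so the file is rendered at the requested size rather than
## copied from the screen device; the y-axis label matches the plotted series.
png(filename = "plot3.png", height = 480, width = 480)
with(subdatatotal2, {
  plot(Sub_metering_1~datetime, type="l",
       ylab="Energy sub metering", xlab="")
  lines(Sub_metering_2~datetime,col='Red')
  lines(Sub_metering_3~datetime,col='Blue')
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
       legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()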
|
/plot3.R
|
no_license
|
mandyzzz/ExData_Plotting1
|
R
| false | false | 1,165 |
r
|
setwd("C:\\users\\zhuangmg\\coursera\\exploratory data analysis\\project 1")
list.files()
data<-read.csv("./household_power_consumption.txt", sep=';',na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data2<-na.omit(data)
library(data.table)
fulldata<-data.table(data2)
## Subsetting data
subdata1<-fulldata[fulldata$Date=="1/2/2007"]
subdata2<-fulldata[fulldata$Date=="2/2/2007"]
subdatatotal<-rbind(subdata1,subdata2)
## Converting dates & power
datetime <- strptime(paste(subdatatotal$Date, subdatatotal$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subdatatotal$Global_active_power)
subdatatotal2<-data.table(subdatatotal,datetime,globalActivePower)
## Plot 3
with(subdatatotal2, {
plot(Sub_metering_1~datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
lines(Sub_metering_2~datetime,col='Red')
lines(Sub_metering_3~datetime,col='Blue')
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Saving to file
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/termsInfo.R
\name{tidy_smooth2d}
\alias{tidy_smooth2d}
\title{Extract 2d smooth objects in tidy format.}
\usage{
tidy_smooth2d(x, keep = c("x", "y", "fit", "se", "xlab", "ylab", "main"),
ci = FALSE, ...)
}
\arguments{
\item{x}{ a fitted \code{gam} object as produced by \code{gam()}.}
\item{keep}{A vector of variables to keep.}
\item{ci}{A logical value indicating whether confidence intervals should be
calculated and returned. Defaults to \code{FALSE}.}
\item{...}{Further arguments passed to \code{\link[mgcv]{plot.gam}}}
}
\description{
Extract 2d smooth objects in tidy format.
}
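% A hedged example (not from the package sources): it assumes tidy_smooth2d() is
% called on a fitted mgcv::gam object containing a 2d tensor-product smooth, as the
% arguments documented above suggest.
\examples{
\dontrun{
library(mgcv)
set.seed(1)
n <- 400
x <- runif(n); z <- runif(n)
y <- sin(2 * pi * x) * cos(2 * pi * z) + rnorm(n, sd = 0.2)
fit <- gam(y ~ te(x, z))
sm2d <- tidy_smooth2d(fit, ci = FALSE)
head(sm2d)
}
}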
|
/man/tidy_smooth2d.Rd
|
no_license
|
adibender/pam
|
R
| false | true | 668 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util-export.R
\name{writeDatabaseData}
\alias{writeDatabaseData}
\title{Write feature data frame to a database}
\usage{
writeDatabaseData(data, name = NULL, label = NULL, conn, overwrite = TRUE,
runConfig)
}
\arguments{
\item{data}{The feature data frame to be written}
\item{name}{Unused, but present for compatibility with other write* functions}
\item{label}{Unused, but present for compatibility with other write* functions}
\item{conn}{A DBI dbConnection object to the database that will host the table}
\item{overwrite}{Boolean indicator for whether the data written should overwrite any existing table or append it}
\item{runConfig}{Path to a run configuration file with names and descriptions
of the feature set and run. See
`inst/feature_set_run_conf/exampl_feature_set.conf` for an example.}
}
\description{
Write feature data frame to a database using a \code{\link[DBI]{dbWriteTable}} call
}
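% A hedged example (not from the package sources): the feature data frame and the
% SQLite file below are illustrative placeholders; only the documented arguments
% are used.
\examples{
\dontrun{
library(DBI)
library(RSQLite)
conn <- dbConnect(RSQLite::SQLite(), "features.sqlite")
featureDF <- data.frame(id = 1:3, meanConsumption = c(1.2, 3.4, 2.1))
writeDatabaseData(data = featureDF, conn = conn, overwrite = TRUE,
                  runConfig = "inst/feature_set_run_conf/exampl_feature_set.conf")
dbDisconnect(conn)
}
}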
|
/man/writeDatabaseData.Rd
|
permissive
|
ahmeduncc/visdom-1
|
R
| false | true | 991 |
rd
|
> fa.parallel(mydata,fa="pc")
Parallel analysis suggests that the number of factors = NA and the number of components = 2
> scree(mydata)
# scree plot: 2 components
> VSS(mydata,rotate="promax", fm="pc")
Very Simple Structure
Call: vss(x = x, n = n, rotate = rotate, diagonal = diagonal, fm = fm,
n.obs = n.obs, plot = plot, title = title)
VSS complexity 1 achieves a maximimum of 0.94 with 1 factors
VSS complexity 2 achieves a maximimum of 0.7 with 2 factors
The Velicer MAP achieves a minimum of 0.02 with 4 factors
# Principal component analysis with one factor
> fit <- principal(mydata, nfactors=1, rotate="none")
> fit
Principal Components Analysis
Call: principal(r = mydata, nfactors = 1, rotate = "none")
Standardized loadings (pattern matrix) based upon correlation matrix
PC1 h2 u2
gespr8.1 0.76 0.58 0.42
anspr8.2 0.75 0.56 0.44
ofohr8.3 0.66 0.44 0.56
austs8.4 0.79 0.63 0.37
infrm8.5 0.77 0.59 0.41
etwgs8.6 0.73 0.53 0.47
ptner8.7 0.73 0.53 0.47
ausef8.8 0.81 0.66 0.34
auflt8.9 0.52 0.27 0.73
mitw8.10 0.71 0.50 0.50
eamt8.11 0.55 0.30 0.70
hosp8.12 0.71 0.50 0.50
pdar8.13 0.78 0.61 0.39
eins8.14 0.82 0.68 0.32
besw8.15 0.77 0.59 0.41
ents8.16 0.75 0.57 0.43
eazf8.17 0.75 0.56 0.44
ifpd8.18 0.73 0.54 0.46
betl8.19 0.70 0.50 0.50
PC1
SS loadings 10.15
Proportion Var 0.53
Test of the hypothesis that 1 component is sufficient.
The degrees of freedom for the null model are 171 and the objective function was 13.39
The degrees of freedom for the model are 152 and the objective function was 2.57
The total number of observations was 2579 with MLE Chi Square = 6597.21 with prob < 0
Fit based upon off diagonal values = 0.98
# Rotated
> fit <- principal(mydata, nfactors=1, rotate="promax")
> fit
Principal Components Analysis
Call: principal(r = mydata, nfactors = 1, rotate = "promax")
Standardized loadings (pattern matrix) based upon correlation matrix
PC1 h2 u2
gespr8.1 0.76 0.58 0.42
anspr8.2 0.75 0.56 0.44
ofohr8.3 0.66 0.44 0.56
austs8.4 0.79 0.63 0.37
infrm8.5 0.77 0.59 0.41
etwgs8.6 0.73 0.53 0.47
ptner8.7 0.73 0.53 0.47
ausef8.8 0.81 0.66 0.34
auflt8.9 0.52 0.27 0.73
mitw8.10 0.71 0.50 0.50
eamt8.11 0.55 0.30 0.70
hosp8.12 0.71 0.50 0.50
pdar8.13 0.78 0.61 0.39
eins8.14 0.82 0.68 0.32
besw8.15 0.77 0.59 0.41
ents8.16 0.75 0.57 0.43
eazf8.17 0.75 0.56 0.44
ifpd8.18 0.73 0.54 0.46
betl8.19 0.70 0.50 0.50
PC1
SS loadings 10.15
Proportion Var 0.53
Test of the hypothesis that 1 component is sufficient.
The degrees of freedom for the null model are 171 and the objective function was 13.39
The degrees of freedom for the model are 152 and the objective function was 2.57
The total number of observations was 2579 with MLE Chi Square = 6597.21 with prob < 0
Fit based upon off diagonal values = 0.98
# Rotated and unrotated solutions give the same result with a single principal component
# Principal component analysis with 2 factors
# Not rotated
> fit2 <- principal(mydata, nfactors=2, rotate="none")
> fit2
Principal Components Analysis
Call: principal(r = mydata, nfactors = 2, rotate = "none")
Standardized loadings (pattern matrix) based upon correlation matrix
PC1 PC2 h2 u2
gespr8.1 0.76 -0.33 0.69 0.31
anspr8.2 0.75 -0.24 0.62 0.38
ofohr8.3 0.66 0.12 0.46 0.54
austs8.4 0.79 -0.28 0.71 0.29
infrm8.5 0.77 -0.35 0.71 0.29
etwgs8.6 0.73 -0.28 0.61 0.39
ptner8.7 0.73 -0.26 0.60 0.40
ausef8.8 0.81 -0.27 0.73 0.27
auflt8.9 0.52 0.05 0.27 0.73
mitw8.10 0.71 0.38 0.65 0.35
eamt8.11 0.55 0.54 0.59 0.41
hosp8.12 0.71 0.30 0.59 0.41
pdar8.13 0.78 0.04 0.61 0.39
eins8.14 0.82 -0.03 0.68 0.32
besw8.15 0.77 0.00 0.59 0.41
ents8.16 0.75 0.12 0.58 0.42
eazf8.17 0.75 0.23 0.62 0.38
ifpd8.18 0.73 0.23 0.59 0.41
betl8.19 0.70 0.32 0.60 0.40
PC1 PC2
SS loadings 10.15 1.35
Proportion Var 0.53 0.07
Cumulative Var 0.53 0.61
Proportion Explained 0.88 0.12
Cumulative Proportion 0.88 1.00
Test of the hypothesis that 2 components are sufficient.
The degrees of freedom for the null model are 171 and the objective function was 13.39
The degrees of freedom for the model are 134 and the objective function was 1.85
The total number of observations was 2579 with MLE Chi Square = 4761.43 with prob < 0
Fit based upon off diagonal values = 0.99
# Rotated
> fit2 <- principal(mydata, nfactors=2, rotate="promax")
> fit2
Principal Components Analysis
Call: principal(r = mydata, nfactors = 2, rotate = "promax")
Standardized loadings (pattern matrix) based upon correlation matrix
PC1 PC2 h2 u2
gespr8.1 0.90 -0.10 0.69 0.31
anspr8.2 0.78 0.02 0.62 0.38
ofohr8.3 0.24 0.48 0.46 0.54
austs8.4 0.85 -0.01 0.71 0.29
infrm8.5 0.93 -0.12 0.71 0.29
etwgs8.6 0.81 -0.04 0.61 0.39
ptner8.7 0.79 -0.02 0.60 0.40
ausef8.8 0.85 0.00 0.73 0.27
auflt8.9 0.25 0.31 0.27 0.73
mitw8.10 -0.08 0.86 0.65 0.35
eamt8.11 -0.39 1.00 0.59 0.41
hosp8.12 0.02 0.75 0.59 0.41
pdar8.13 0.42 0.42 0.61 0.39
eins8.14 0.54 0.34 0.68 0.32
besw8.15 0.47 0.36 0.59 0.41
ents8.16 0.29 0.52 0.58 0.42
eazf8.17 0.15 0.67 0.62 0.38
ifpd8.18 0.13 0.66 0.59 0.41
betl8.19 0.00 0.77 0.60 0.40
PC1 PC2
SS loadings 6.37 5.13
Proportion Var 0.34 0.27
Cumulative Var 0.34 0.61
Proportion Explained 0.55 0.45
Cumulative Proportion 0.55 1.00
With component correlations of
PC1 PC2
PC1 1.00 0.73
PC2 0.73 1.00
Test of the hypothesis that 2 components are sufficient.
The degrees of freedom for the null model are 171 and the objective function was 13.39
The degrees of freedom for the model are 134 and the objective function was 1.85
The total number of observations was 2579 with MLE Chi Square = 4761.43 with prob < 0
# Principal component analysis with 4 factors
# Not rotated
> fit3 <- principal(mydata, nfactors=4, rotate="none")
> fit3
Principal Components Analysis
Call: principal(r = mydata, nfactors = 4, rotate = "none")
Standardized loadings (pattern matrix) based upon correlation matrix
PC1 PC2 PC3 PC4 h2 u2
gespr8.1 0.76 -0.33 -0.03 0.23 0.74 0.26
anspr8.2 0.75 -0.24 0.01 0.41 0.79 0.21
ofohr8.3 0.66 0.12 0.03 0.45 0.66 0.34
austs8.4 0.79 -0.28 -0.02 0.26 0.78 0.22
infrm8.5 0.77 -0.35 0.01 -0.04 0.72 0.28
etwgs8.6 0.73 -0.28 -0.07 0.05 0.62 0.38
ptner8.7 0.73 -0.26 0.19 -0.33 0.74 0.26
ausef8.8 0.81 -0.27 0.17 -0.25 0.83 0.17
auflt8.9 0.52 0.05 0.33 -0.28 0.46 0.54
mitw8.10 0.71 0.38 0.18 0.07 0.68 0.32
eamt8.11 0.55 0.54 0.38 0.14 0.75 0.25
hosp8.12 0.71 0.30 0.36 0.06 0.72 0.28
pdar8.13 0.78 0.04 -0.04 -0.13 0.63 0.37
eins8.14 0.82 -0.03 0.07 -0.23 0.74 0.26
besw8.15 0.77 0.00 -0.03 0.02 0.59 0.41
ents8.16 0.75 0.12 -0.14 -0.14 0.62 0.38
eazf8.17 0.75 0.23 -0.41 -0.09 0.79 0.21
ifpd8.18 0.73 0.23 -0.40 -0.17 0.78 0.22
betl8.19 0.70 0.32 -0.40 0.00 0.75 0.25
PC1 PC2 PC3 PC4
SS loadings 10.15 1.35 1.00 0.90
Proportion Var 0.53 0.07 0.05 0.05
Cumulative Var 0.53 0.61 0.66 0.71
Proportion Explained 0.76 0.10 0.07 0.07
Cumulative Proportion 0.76 0.86 0.93 1.00
Test of the hypothesis that 4 components are sufficient.
The degrees of freedom for the null model are 171 and the objective function was 13.39
The degrees of freedom for the model are 101 and the objective function was 1.09
The total number of observations was 2579 with MLE Chi Square = 2802.73 with prob < 0
Fit based upon off diagonal values = 0.99
# Rotated
> fit3 <- principal(mydata, nfactors=4, rotate="promax")
> fit3
Principal Components Analysis
Call: principal(r = mydata, nfactors = 4, rotate = "promax")
Standardized loadings (pattern matrix) based upon correlation matrix
PC3 PC4 PC1 PC2 h2 u2
gespr8.1 -0.04 0.80 0.19 -0.10 0.74 0.26
anspr8.2 -0.11 0.95 -0.05 0.08 0.79 0.21
ofohr8.3 0.06 0.69 -0.35 0.43 0.66 0.34
austs8.4 -0.03 0.81 0.14 -0.02 0.78 0.22
infrm8.5 0.01 0.48 0.54 -0.18 0.72 0.28
etwgs8.6 0.11 0.53 0.33 -0.16 0.62 0.38
ptner8.7 -0.09 0.04 0.92 -0.05 0.74 0.26
ausef8.8 -0.08 0.17 0.86 -0.03 0.83 0.17
auflt8.9 -0.15 -0.22 0.69 0.33 0.46 0.54
mitw8.10 0.18 0.02 0.05 0.65 0.68 0.32
eamt8.11 -0.06 -0.07 -0.07 0.97 0.75 0.25
hosp8.12 -0.10 0.07 0.21 0.74 0.72 0.28
pdar8.13 0.36 0.08 0.37 0.09 0.63 0.37
eins8.14 0.21 0.03 0.61 0.10 0.74 0.26
besw8.15 0.26 0.29 0.22 0.12 0.59 0.41
ents8.16 0.54 0.00 0.25 0.07 0.62 0.38
eazf8.17 0.96 -0.01 -0.05 -0.06 0.79 0.21
ifpd8.18 0.99 -0.12 0.04 -0.09 0.78 0.22
betl8.19 0.96 0.01 -0.21 0.06 0.75 0.25
PC3 PC4 PC1 PC2
SS loadings 3.42 3.78 3.79 2.42
Proportion Var 0.18 0.20 0.20 0.13
Cumulative Var 0.18 0.38 0.58 0.71
Proportion Explained 0.26 0.28 0.28 0.18
Cumulative Proportion 0.26 0.54 0.82 1.00
With component correlations of
PC3 PC4 PC1 PC2
PC3 1.00 0.68 0.68 0.63
PC4 0.68 1.00 0.67 0.54
PC1 0.68 0.67 1.00 0.56
PC2 0.63 0.54 0.56 1.00
Test of the hypothesis that 4 components are sufficient.
The degrees of freedom for the null model are 171 and the objective function was 13.39
The degrees of freedom for the model are 101 and the objective function was 1.09
The total number of observations was 2579 with MLE Chi Square = 2802.73 with prob < 0
Fit based upon off diagonal values = 0.99
# Mean of the 'Zusammenarbeit mit Familien' (collaboration with families) scale
> mean(mydata$Zusammenarbeit.vektor, na.rm=TRUE)
[1] 3.193173
> var(mydata$Zusammenarbeit.vektor, na.rm=TRUE)
[1] 0.4935195
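# Hedged sketch (not in the transcript): one way the scale score used in the two
# calls above could have been built; the item-mean construction is an assumption.
items <- mydata[, c("gespr8.1","anspr8.2","ofohr8.3","austs8.4","infrm8.5",
                    "etwgs8.6","ptner8.7","ausef8.8","auflt8.9","mitw8.10",
                    "eamt8.11","hosp8.12","pdar8.13","eins8.14","besw8.15",
                    "ents8.16","eazf8.17","ifpd8.18","betl8.19")]
mydata$Zusammenarbeit.vektor <- rowMeans(items, na.rm = TRUE)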
|
/Masterarbeit/R-Berechnungen/Hauptkomponentenanylyse_aktuell.R
|
no_license
|
karpyuk/TeX
|
R
| false | false | 9,640 |
r
|
bayesLogNormalTest <- function(A_data,
B_data,
priors,
n_samples = 1e5) {
###
## Error Checking
###
if((
any(
A_data <= 0,
B_data <= 0
)
)) {
stop("Data input is incorrect. The support of a Log Normal Distribution is (0, Inf).")
}
if(any(is.na(suppressWarnings(as.numeric(c(A_data, B_data)))))) stop("A_data and B_data are not ALL numeric.")
###
## Sample from posterior
###
NormalResult <- bayesNormalTest(log(A_data), log(B_data), priors, n_samples)
## Means
A_mus <- NormalResult$posteriors$Mu$A_mus
B_mus <- NormalResult$posteriors$Mu$B_mus
## Sigmas
A_sig_sqs <- NormalResult$posteriors$Sig_Sq$A_sig_sqs
B_sig_sqs <- NormalResult$posteriors$Sig_Sq$B_sig_sqs
## Transform back to log normal for interpretation
A_means <- exp(A_mus + A_sig_sqs / 2)
B_means <- exp(B_mus + B_sig_sqs / 2)
A_vars <- (exp(A_sig_sqs) - 1) * exp(2 * A_mus + A_sig_sqs)
B_vars <- (exp(B_sig_sqs) - 1) * exp(2 * B_mus + B_sig_sqs)
###
## Output the result
###
result <- list(
inputs = as.list(match.call()[-1]),
posteriors = list(
Mu = list(A_mus = A_mus, B_mus = B_mus),
Sig_Sq = list(A_sig_sqs = A_sig_sqs, B_sig_sqs = B_sig_sqs),
Mean = list(A_means = A_means, B_means = B_means),
Var = list(A_vars = A_vars, B_vars = B_vars)
),
distribution = "lognormal"
)
class(result) <- c('bayesTest')
return(result)
}
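# A hedged usage sketch (not part of the package file): priors are passed through
# to bayesNormalTest() on the log scale; the named-vector format below
# (mu, lambda, alpha, beta) is an assumption about that interface.
A <- rlnorm(500, meanlog = 0.2, sdlog = 0.5)
B <- rlnorm(500, meanlog = 0.0, sdlog = 0.5)
res <- bayesLogNormalTest(A, B,
                          priors = c(mu = 0, lambda = 1, alpha = 3, beta = 1),
                          n_samples = 1e4)
str(res$posteriors$Mean)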
|
/R/dist-lognormal.R
|
no_license
|
bryant1410/bayesAB
|
R
| false | false | 1,556 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract.stringCode.R
\name{from.TreeCode}
\alias{from.TreeCode}
\title{from.TreeCode}
\usage{
from.TreeCode(x)
}
\description{
from.TreeCode
}
|
/modules/data.land/man/from.TreeCode.Rd
|
permissive
|
PecanProject/pecan
|
R
| false | true | 221 |
rd
|
load("E:\\Research\\NRIIDI revisions\\Cox\\NRIWBUsingCox400_Alternative.rda")
load("E:\\Research\\NRIIDI revisions\\Cox\\NRIWBUsingFine400_Alternative.rda")
load("E:\\Research\\NRIIDI revisions\\Cox\\NRIWBUsingCox200_Alternative.rda")
load("E:\\Research\\NRIIDI revisions\\Cox\\NRIWBUsingFine200_Alternative.rda")
load("E:\\Research\\NRIIDI revisions\\Cox\\IDIWBUsingCox400_Alternative.rda")
load("E:\\Research\\NRIIDI revisions\\Cox\\IDIWBUsingFine400_Alternative.rda")
load("E:\\Research\\NRIIDI revisions\\Cox\\IDIWBUsingCox200_Alternative.rda")
load("E:\\Research\\NRIIDI revisions\\Cox\\IDIWBUsingFine200_Alternative.rda")
load("E:\\Research\\NRIIDI revisions\\Cox\\NRIIDIWBUsingGerds200_Alternative.rda")
load("E:\\Research\\NRIIDI revisions\\Cox\\NRIIDIWBUsingGerds400_Alternative.rda")
#######################################################################################
### 30% NRI 400
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIWBUsingCox400_Alternative[[i]][1,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIWBUsingCox400_Alternative[[i]][2,])
cov_IF=rbind(cov_IF,NRIWBUsingCox400_Alternative[[i]][3,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIWBUsingCox400_Alternative[[i]][4,])
cov_bca=rbind(cov_bca,NRIWBUsingCox400_Alternative[[i]][5,])
cov_boot=rbind(cov_boot,NRIWBUsingCox400_Alternative[[i]][6,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/20
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIWBUsingFine400_Alternative[[i]][1,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIWBUsingFine400_Alternative[[i]][2,])
cov_IF=rbind(cov_IF,NRIWBUsingFine400_Alternative[[i]][3,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIWBUsingFine400_Alternative[[i]][4,])
cov_bca=rbind(cov_bca,NRIWBUsingFine400_Alternative[[i]][5,])
cov_boot=rbind(cov_boot,NRIWBUsingFine400_Alternative[[i]][6,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/20
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIIDIWBUsingGerds400_Alternative[[i]][1,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIIDIWBUsingGerds400_Alternative[[i]][2,])
cov_IF=rbind(cov_IF,NRIIDIWBUsingGerds400_Alternative[[i]][3,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIIDIWBUsingGerds400_Alternative[[i]][4,])
cov_bca=rbind(cov_bca,NRIIDIWBUsingGerds400_Alternative[[i]][5,])
cov_boot=rbind(cov_boot,NRIIDIWBUsingGerds400_Alternative[[i]][6,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/20
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
#######################################################################################
### 30% IDI 400
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,IDIWBUsingCox400_Alternative[[i]][1,])
IDI_boot_sd=rbind(IDI_boot_sd,IDIWBUsingCox400_Alternative[[i]][2,])
cov_bca=rbind(cov_bca,IDIWBUsingCox400_Alternative[[i]][3,])
cov_boot=rbind(cov_boot,IDIWBUsingCox400_Alternative[[i]][4,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,IDIWBUsingFine400_Alternative[[i]][1,])
IDI_boot_sd=rbind(IDI_boot_sd,IDIWBUsingFine400_Alternative[[i]][2,])
cov_bca=rbind(cov_bca,IDIWBUsingFine400_Alternative[[i]][3,])
cov_boot=rbind(cov_boot,IDIWBUsingFine400_Alternative[[i]][4,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,NRIIDIWBUsingGerds400_Alternative[[i]][7,])
IDI_boot_sd=rbind(IDI_boot_sd,NRIIDIWBUsingGerds400_Alternative[[i]][8,])
cov_bca=rbind(cov_bca,NRIIDIWBUsingGerds400_Alternative[[i]][9,])
cov_boot=rbind(cov_boot,NRIIDIWBUsingGerds400_Alternative[[i]][10,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
#######################################################################################
### 30% NRI 200
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIWBUsingCox200_Alternative[[i]][1,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIWBUsingCox200_Alternative[[i]][2,])
cov_IF=rbind(cov_IF,NRIWBUsingCox200_Alternative[[i]][3,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIWBUsingCox200_Alternative[[i]][4,])
cov_bca=rbind(cov_bca,NRIWBUsingCox200_Alternative[[i]][5,])
cov_boot=rbind(cov_boot,NRIWBUsingCox200_Alternative[[i]][6,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/sqrt(200)
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIWBUsingFine200_Alternative[[i]][1,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIWBUsingFine200_Alternative[[i]][2,])
cov_IF=rbind(cov_IF,NRIWBUsingFine200_Alternative[[i]][3,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIWBUsingFine200_Alternative[[i]][4,])
cov_bca=rbind(cov_bca,NRIWBUsingFine200_Alternative[[i]][5,])
cov_boot=rbind(cov_boot,NRIWBUsingFine200_Alternative[[i]][6,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/sqrt(200)
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIIDIWBUsingGerds200_Alternative[[i]][1,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIIDIWBUsingGerds200_Alternative[[i]][2,])
cov_IF=rbind(cov_IF,NRIIDIWBUsingGerds200_Alternative[[i]][3,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIIDIWBUsingGerds200_Alternative[[i]][4,])
cov_bca=rbind(cov_bca,NRIIDIWBUsingGerds200_Alternative[[i]][5,])
cov_boot=rbind(cov_boot,NRIIDIWBUsingGerds200_Alternative[[i]][6,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/sqrt(200)
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
#######################################################################################
### 30% IDI 200
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,IDIWBUsingCox200_Alternative[[i]][1,])
IDI_boot_sd=rbind(IDI_boot_sd,IDIWBUsingCox200_Alternative[[i]][2,])
cov_bca=rbind(cov_bca,IDIWBUsingCox200_Alternative[[i]][3,])
cov_boot=rbind(cov_boot,IDIWBUsingCox200_Alternative[[i]][4,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,IDIWBUsingFine200_Alternative[[i]][1,])
IDI_boot_sd=rbind(IDI_boot_sd,IDIWBUsingFine200_Alternative[[i]][2,])
cov_bca=rbind(cov_bca,IDIWBUsingFine200_Alternative[[i]][3,])
cov_boot=rbind(cov_boot,IDIWBUsingFine200_Alternative[[i]][4,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,NRIIDIWBUsingGerds200_Alternative[[i]][7,])
IDI_boot_sd=rbind(IDI_boot_sd,NRIIDIWBUsingGerds200_Alternative[[i]][8,])
cov_bca=rbind(cov_bca,NRIIDIWBUsingGerds200_Alternative[[i]][9,])
cov_boot=rbind(cov_boot,NRIIDIWBUsingGerds200_Alternative[[i]][10,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
#######################################################################################
### 50% NRI 400
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIWBUsingCox400_Alternative[[i]][7,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIWBUsingCox400_Alternative[[i]][8,])
cov_IF=rbind(cov_IF,NRIWBUsingCox400_Alternative[[i]][9,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIWBUsingCox400_Alternative[[i]][10,])
cov_bca=rbind(cov_bca,NRIWBUsingCox400_Alternative[[i]][11,])
cov_boot=rbind(cov_boot,NRIWBUsingCox400_Alternative[[i]][12,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/20
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIWBUsingFine400_Alternative[[i]][7,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIWBUsingFine400_Alternative[[i]][8,])
cov_IF=rbind(cov_IF,NRIWBUsingFine400_Alternative[[i]][9,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIWBUsingFine400_Alternative[[i]][10,])
cov_bca=rbind(cov_bca,NRIWBUsingFine400_Alternative[[i]][11,])
cov_boot=rbind(cov_boot,NRIWBUsingFine400_Alternative[[i]][12,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/20
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIIDIWBUsingGerds400_Alternative[[i]][11,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIIDIWBUsingGerds400_Alternative[[i]][12,])
cov_IF=rbind(cov_IF,NRIIDIWBUsingGerds400_Alternative[[i]][13,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIIDIWBUsingGerds400_Alternative[[i]][14,])
cov_bca=rbind(cov_bca,NRIIDIWBUsingGerds400_Alternative[[i]][15,])
cov_boot=rbind(cov_boot,NRIIDIWBUsingGerds400_Alternative[[i]][16,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/20
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
#######################################################################################
### 50% IDI 400
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,IDIWBUsingCox400_Alternative[[i]][5,])
IDI_boot_sd=rbind(IDI_boot_sd,IDIWBUsingCox400_Alternative[[i]][6,])
cov_bca=rbind(cov_bca,IDIWBUsingCox400_Alternative[[i]][7,])
cov_boot=rbind(cov_boot,IDIWBUsingCox400_Alternative[[i]][8,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,IDIWBUsingFine400_Alternative[[i]][5,])
IDI_boot_sd=rbind(IDI_boot_sd,IDIWBUsingFine400_Alternative[[i]][6,])
cov_bca=rbind(cov_bca,IDIWBUsingFine400_Alternative[[i]][7,])
cov_boot=rbind(cov_boot,IDIWBUsingFine400_Alternative[[i]][8,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,NRIIDIWBUsingGerds400_Alternative[[i]][17,])
IDI_boot_sd=rbind(IDI_boot_sd,NRIIDIWBUsingGerds400_Alternative[[i]][18,])
cov_bca=rbind(cov_bca,NRIIDIWBUsingGerds400_Alternative[[i]][19,])
cov_boot=rbind(cov_boot,NRIIDIWBUsingGerds400_Alternative[[i]][20,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
#######################################################################################
### 50% NRI 200
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIWBUsingCox200_Alternative[[i]][7,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIWBUsingCox200_Alternative[[i]][8,])
cov_IF=rbind(cov_IF,NRIWBUsingCox200_Alternative[[i]][9,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIWBUsingCox200_Alternative[[i]][10,])
cov_bca=rbind(cov_bca,NRIWBUsingCox200_Alternative[[i]][11,])
cov_boot=rbind(cov_boot,NRIWBUsingCox200_Alternative[[i]][12,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/sqrt(200)
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIWBUsingFine200_Alternative[[i]][7,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIWBUsingFine200_Alternative[[i]][8,])
cov_IF=rbind(cov_IF,NRIWBUsingFine200_Alternative[[i]][9,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIWBUsingFine200_Alternative[[i]][10,])
cov_bca=rbind(cov_bca,NRIWBUsingFine200_Alternative[[i]][11,])
cov_boot=rbind(cov_boot,NRIWBUsingFine200_Alternative[[i]][12,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/sqrt(200)
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIIDIWBUsingGerds200_Alternative[[i]][11,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIIDIWBUsingGerds200_Alternative[[i]][12,])
cov_IF=rbind(cov_IF,NRIIDIWBUsingGerds200_Alternative[[i]][13,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIIDIWBUsingGerds200_Alternative[[i]][14,])
cov_bca=rbind(cov_bca,NRIIDIWBUsingGerds200_Alternative[[i]][15,])
cov_boot=rbind(cov_boot,NRIIDIWBUsingGerds200_Alternative[[i]][16,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/sqrt(200)
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
#######################################################################################
### 50% IDI 200
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,IDIWBUsingCox200_Alternative[[i]][5,])
IDI_boot_sd=rbind(IDI_boot_sd,IDIWBUsingCox200_Alternative[[i]][6,])
cov_bca=rbind(cov_bca,IDIWBUsingCox200_Alternative[[i]][7,])
cov_boot=rbind(cov_boot,IDIWBUsingCox200_Alternative[[i]][8,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,IDIWBUsingFine200_Alternative[[i]][5,])
IDI_boot_sd=rbind(IDI_boot_sd,IDIWBUsingFine200_Alternative[[i]][6,])
cov_bca=rbind(cov_bca,IDIWBUsingFine200_Alternative[[i]][7,])
cov_boot=rbind(cov_boot,IDIWBUsingFine200_Alternative[[i]][8,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,NRIIDIWBUsingGerds200_Alternative[[i]][17,])
IDI_boot_sd=rbind(IDI_boot_sd,NRIIDIWBUsingGerds200_Alternative[[i]][18,])
cov_bca=rbind(cov_bca,NRIIDIWBUsingGerds200_Alternative[[i]][19,])
cov_boot=rbind(cov_boot,NRIIDIWBUsingGerds200_Alternative[[i]][20,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
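#######################################################################################
### Hedged refactoring sketch (not in the original script): the NRI summaries above
### repeat one pattern, so a helper like this could replace each block. The row
### indices and the divisor sqrt(n) are assumptions matching the blocks above.
summarize_nri <- function(res_list, rows, n) {
  mats <- lapply(rows, function(r)
    do.call(rbind, lapply(res_list, function(x) x[r, ])))
  names(mats) <- c("NRI", "NRI_IF_sd", "cov_IF", "NRI_boot_sd", "cov_bca", "cov_boot")
  list(mean_NRI     = colMeans(mats$NRI),
       empirical_sd = apply(mats$NRI, 2, sd),
       mean_IF_sd   = colMeans(mats$NRI_IF_sd) / sqrt(n),
       cov_IF       = colMeans(mats$cov_IF),
       mean_boot_sd = colMeans(mats$NRI_boot_sd),
       cov_bca      = colMeans(mats$cov_bca, na.rm = TRUE),
       cov_boot     = colMeans(mats$cov_boot))
}
# e.g. summarize_nri(NRIWBUsingCox400_Alternative, rows = 1:6, n = 400)
# or   summarize_nri(NRIWBUsingCox400_Alternative, rows = 7:12, n = 400)  # 50% block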
|
/Simulation/Cox/result_viewer_alternative.R
|
permissive
|
WangandYu/NRIandIDI
|
R
| false | false | 14,098 |
r
|
load("E:\\Research\\NRIIDI revisions\\Cox\\NRIWBUsingCox400_Alternative.rda")
load("E:\\Research\\NRIIDI revisions\\Cox\\NRIWBUsingFine400_Alternative.rda")
load("E:\\Research\\NRIIDI revisions\\Cox\\NRIWBUsingCox200_Alternative.rda")
load("E:\\Research\\NRIIDI revisions\\Cox\\NRIWBUsingFine200_Alternative.rda")
load("E:\\Research\\NRIIDI revisions\\Cox\\IDIWBUsingCox400_Alternative.rda")
load("E:\\Research\\NRIIDI revisions\\Cox\\IDIWBUsingFine400_Alternative.rda")
load("E:\\Research\\NRIIDI revisions\\Cox\\IDIWBUsingCox200_Alternative.rda")
load("E:\\Research\\NRIIDI revisions\\Cox\\IDIWBUsingFine200_Alternative.rda")
load("E:\\Research\\NRIIDI revisions\\Cox\\NRIIDIWBUsingGerds200_Alternative.rda")
load("E:\\Research\\NRIIDI revisions\\Cox\\NRIIDIWBUsingGerds400_Alternative.rda")
#######################################################################################
### 30% NRI 400
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIWBUsingCox400_Alternative[[i]][1,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIWBUsingCox400_Alternative[[i]][2,])
cov_IF=rbind(cov_IF,NRIWBUsingCox400_Alternative[[i]][3,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIWBUsingCox400_Alternative[[i]][4,])
cov_bca=rbind(cov_bca,NRIWBUsingCox400_Alternative[[i]][5,])
cov_boot=rbind(cov_boot,NRIWBUsingCox400_Alternative[[i]][6,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/20
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIWBUsingFine400_Alternative[[i]][1,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIWBUsingFine400_Alternative[[i]][2,])
cov_IF=rbind(cov_IF,NRIWBUsingFine400_Alternative[[i]][3,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIWBUsingFine400_Alternative[[i]][4,])
cov_bca=rbind(cov_bca,NRIWBUsingFine400_Alternative[[i]][5,])
cov_boot=rbind(cov_boot,NRIWBUsingFine400_Alternative[[i]][6,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/20
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIIDIWBUsingGerds400_Alternative[[i]][1,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIIDIWBUsingGerds400_Alternative[[i]][2,])
cov_IF=rbind(cov_IF,NRIIDIWBUsingGerds400_Alternative[[i]][3,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIIDIWBUsingGerds400_Alternative[[i]][4,])
cov_bca=rbind(cov_bca,NRIIDIWBUsingGerds400_Alternative[[i]][5,])
cov_boot=rbind(cov_boot,NRIIDIWBUsingGerds400_Alternative[[i]][6,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/20
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
#######################################################################################
### 30% IDI 400
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,IDIWBUsingCox400_Alternative[[i]][1,])
IDI_boot_sd=rbind(IDI_boot_sd,IDIWBUsingCox400_Alternative[[i]][2,])
cov_bca=rbind(cov_bca,IDIWBUsingCox400_Alternative[[i]][3,])
cov_boot=rbind(cov_boot,IDIWBUsingCox400_Alternative[[i]][4,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,IDIWBUsingFine400_Alternative[[i]][1,])
IDI_boot_sd=rbind(IDI_boot_sd,IDIWBUsingFine400_Alternative[[i]][2,])
cov_bca=rbind(cov_bca,IDIWBUsingFine400_Alternative[[i]][3,])
cov_boot=rbind(cov_boot,IDIWBUsingFine400_Alternative[[i]][4,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,NRIIDIWBUsingGerds400_Alternative[[i]][7,])
IDI_boot_sd=rbind(IDI_boot_sd,NRIIDIWBUsingGerds400_Alternative[[i]][8,])
cov_bca=rbind(cov_bca,NRIIDIWBUsingGerds400_Alternative[[i]][9,])
cov_boot=rbind(cov_boot,NRIIDIWBUsingGerds400_Alternative[[i]][10,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
#######################################################################################
### 30% NRI 200
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIWBUsingCox200_Alternative[[i]][1,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIWBUsingCox200_Alternative[[i]][2,])
cov_IF=rbind(cov_IF,NRIWBUsingCox200_Alternative[[i]][3,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIWBUsingCox200_Alternative[[i]][4,])
cov_bca=rbind(cov_bca,NRIWBUsingCox200_Alternative[[i]][5,])
cov_boot=rbind(cov_boot,NRIWBUsingCox200_Alternative[[i]][6,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/sqrt(200)
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIWBUsingFine200_Alternative[[i]][1,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIWBUsingFine200_Alternative[[i]][2,])
cov_IF=rbind(cov_IF,NRIWBUsingFine200_Alternative[[i]][3,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIWBUsingFine200_Alternative[[i]][4,])
cov_bca=rbind(cov_bca,NRIWBUsingFine200_Alternative[[i]][5,])
cov_boot=rbind(cov_boot,NRIWBUsingFine200_Alternative[[i]][6,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/sqrt(200)
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIIDIWBUsingGerds200_Alternative[[i]][1,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIIDIWBUsingGerds200_Alternative[[i]][2,])
cov_IF=rbind(cov_IF,NRIIDIWBUsingGerds200_Alternative[[i]][3,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIIDIWBUsingGerds200_Alternative[[i]][4,])
cov_bca=rbind(cov_bca,NRIIDIWBUsingGerds200_Alternative[[i]][5,])
cov_boot=rbind(cov_boot,NRIIDIWBUsingGerds200_Alternative[[i]][6,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/sqrt(200)
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
#######################################################################################
### 30% IDI 200
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,IDIWBUsingCox200_Alternative[[i]][1,])
IDI_boot_sd=rbind(IDI_boot_sd,IDIWBUsingCox200_Alternative[[i]][2,])
cov_bca=rbind(cov_bca,IDIWBUsingCox200_Alternative[[i]][3,])
cov_boot=rbind(cov_boot,IDIWBUsingCox200_Alternative[[i]][4,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,IDIWBUsingFine200_Alternative[[i]][1,])
IDI_boot_sd=rbind(IDI_boot_sd,IDIWBUsingFine200_Alternative[[i]][2,])
cov_bca=rbind(cov_bca,IDIWBUsingFine200_Alternative[[i]][3,])
cov_boot=rbind(cov_boot,IDIWBUsingFine200_Alternative[[i]][4,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,NRIIDIWBUsingGerds200_Alternative[[i]][7,])
IDI_boot_sd=rbind(IDI_boot_sd,NRIIDIWBUsingGerds200_Alternative[[i]][8,])
cov_bca=rbind(cov_bca,NRIIDIWBUsingGerds200_Alternative[[i]][9,])
cov_boot=rbind(cov_boot,NRIIDIWBUsingGerds200_Alternative[[i]][10,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
#######################################################################################
### 50% NRI 400
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIWBUsingCox400_Alternative[[i]][7,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIWBUsingCox400_Alternative[[i]][8,])
cov_IF=rbind(cov_IF,NRIWBUsingCox400_Alternative[[i]][9,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIWBUsingCox400_Alternative[[i]][10,])
cov_bca=rbind(cov_bca,NRIWBUsingCox400_Alternative[[i]][11,])
cov_boot=rbind(cov_boot,NRIWBUsingCox400_Alternative[[i]][12,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/sqrt(400)
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIWBUsingFine400_Alternative[[i]][7,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIWBUsingFine400_Alternative[[i]][8,])
cov_IF=rbind(cov_IF,NRIWBUsingFine400_Alternative[[i]][9,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIWBUsingFine400_Alternative[[i]][10,])
cov_bca=rbind(cov_bca,NRIWBUsingFine400_Alternative[[i]][11,])
cov_boot=rbind(cov_boot,NRIWBUsingFine400_Alternative[[i]][12,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/sqrt(400)
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIIDIWBUsingGerds400_Alternative[[i]][11,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIIDIWBUsingGerds400_Alternative[[i]][12,])
cov_IF=rbind(cov_IF,NRIIDIWBUsingGerds400_Alternative[[i]][13,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIIDIWBUsingGerds400_Alternative[[i]][14,])
cov_bca=rbind(cov_bca,NRIIDIWBUsingGerds400_Alternative[[i]][15,])
cov_boot=rbind(cov_boot,NRIIDIWBUsingGerds400_Alternative[[i]][16,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/sqrt(400)
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
#######################################################################################
### 50% IDI 400
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,IDIWBUsingCox400_Alternative[[i]][5,])
IDI_boot_sd=rbind(IDI_boot_sd,IDIWBUsingCox400_Alternative[[i]][6,])
cov_bca=rbind(cov_bca,IDIWBUsingCox400_Alternative[[i]][7,])
cov_boot=rbind(cov_boot,IDIWBUsingCox400_Alternative[[i]][8,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,IDIWBUsingFine400_Alternative[[i]][5,])
IDI_boot_sd=rbind(IDI_boot_sd,IDIWBUsingFine400_Alternative[[i]][6,])
cov_bca=rbind(cov_bca,IDIWBUsingFine400_Alternative[[i]][7,])
cov_boot=rbind(cov_boot,IDIWBUsingFine400_Alternative[[i]][8,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,NRIIDIWBUsingGerds400_Alternative[[i]][17,])
IDI_boot_sd=rbind(IDI_boot_sd,NRIIDIWBUsingGerds400_Alternative[[i]][18,])
cov_bca=rbind(cov_bca,NRIIDIWBUsingGerds400_Alternative[[i]][19,])
cov_boot=rbind(cov_boot,NRIIDIWBUsingGerds400_Alternative[[i]][20,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
#######################################################################################
### 50% NRI 200
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIWBUsingCox200_Alternative[[i]][7,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIWBUsingCox200_Alternative[[i]][8,])
cov_IF=rbind(cov_IF,NRIWBUsingCox200_Alternative[[i]][9,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIWBUsingCox200_Alternative[[i]][10,])
cov_bca=rbind(cov_bca,NRIWBUsingCox200_Alternative[[i]][11,])
cov_boot=rbind(cov_boot,NRIWBUsingCox200_Alternative[[i]][12,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/sqrt(200)
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIWBUsingFine200_Alternative[[i]][7,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIWBUsingFine200_Alternative[[i]][8,])
cov_IF=rbind(cov_IF,NRIWBUsingFine200_Alternative[[i]][9,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIWBUsingFine200_Alternative[[i]][10,])
cov_bca=rbind(cov_bca,NRIWBUsingFine200_Alternative[[i]][11,])
cov_boot=rbind(cov_boot,NRIWBUsingFine200_Alternative[[i]][12,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/sqrt(200)
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
NRI=NRI_IF_sd=cov_IF=NRI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
NRI=rbind(NRI,NRIIDIWBUsingGerds200_Alternative[[i]][11,])
NRI_IF_sd=rbind(NRI_IF_sd,NRIIDIWBUsingGerds200_Alternative[[i]][12,])
cov_IF=rbind(cov_IF,NRIIDIWBUsingGerds200_Alternative[[i]][13,])
NRI_boot_sd=rbind(NRI_boot_sd,NRIIDIWBUsingGerds200_Alternative[[i]][14,])
cov_bca=rbind(cov_bca,NRIIDIWBUsingGerds200_Alternative[[i]][15,])
cov_boot=rbind(cov_boot,NRIIDIWBUsingGerds200_Alternative[[i]][16,])
}
colMeans(NRI)
apply(NRI,2,sd)
colMeans(NRI_IF_sd)/sqrt(200)
colMeans(cov_IF)
colMeans(NRI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
#######################################################################################
### 50% IDI 200
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,IDIWBUsingCox200_Alternative[[i]][5,])
IDI_boot_sd=rbind(IDI_boot_sd,IDIWBUsingCox200_Alternative[[i]][6,])
cov_bca=rbind(cov_bca,IDIWBUsingCox200_Alternative[[i]][7,])
cov_boot=rbind(cov_boot,IDIWBUsingCox200_Alternative[[i]][8,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,IDIWBUsingFine200_Alternative[[i]][5,])
IDI_boot_sd=rbind(IDI_boot_sd,IDIWBUsingFine200_Alternative[[i]][6,])
cov_bca=rbind(cov_bca,IDIWBUsingFine200_Alternative[[i]][7,])
cov_boot=rbind(cov_boot,IDIWBUsingFine200_Alternative[[i]][8,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
IDI=IDI_boot_sd=cov_bca=cov_boot=NULL
for(i in 1:1000){
IDI=rbind(IDI,NRIIDIWBUsingGerds200_Alternative[[i]][17,])
IDI_boot_sd=rbind(IDI_boot_sd,NRIIDIWBUsingGerds200_Alternative[[i]][18,])
cov_bca=rbind(cov_bca,NRIIDIWBUsingGerds200_Alternative[[i]][19,])
cov_boot=rbind(cov_boot,NRIIDIWBUsingGerds200_Alternative[[i]][20,])
}
colMeans(IDI)
apply(IDI,2,sd)
colMeans(IDI_boot_sd)
colMeans(cov_bca,na.rm=TRUE)
colMeans(cov_boot)
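# The scenario blocks above repeat the same stacking/summary pattern. A small
# helper could collapse each IDI block into one call. This is only a sketch:
# it assumes each element of `sims` is a matrix laid out as in the blocks
# above, with the row indices passed in (est_row = estimates, sd_row =
# bootstrap SDs, bca_row / boot_row = coverage indicators).
summarize_idi <- function(sims, est_row, sd_row, bca_row, boot_row) {
  est  <- t(sapply(sims, function(x) x[est_row, ]))
  bsd  <- t(sapply(sims, function(x) x[sd_row, ]))
  bca  <- t(sapply(sims, function(x) x[bca_row, ]))
  boot <- t(sapply(sims, function(x) x[boot_row, ]))
  list(
    mean_est     = colMeans(est),               # average point estimate
    empirical_sd = apply(est, 2, sd),           # Monte Carlo SD of the estimates
    mean_boot_sd = colMeans(bsd),               # average bootstrap SE
    cov_bca      = colMeans(bca, na.rm = TRUE), # BCa interval coverage
    cov_boot     = colMeans(boot)               # bootstrap interval coverage
  )
}
# e.g. summarize_idi(IDIWBUsingCox200_Alternative, 5, 6, 7, 8)  # 50% IDI 200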
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{covCMB_internal2}
\alias{covCMB_internal2}
\title{covCMB_internal2}
\usage{
covCMB_internal2(cmbdf, nbin)
}
|
/man/covCMB_internal2.Rd
|
permissive
|
mingltu/rcosmo
|
R
| false | true | 214 |
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{print.cumulative_syllable_freq}
\alias{print.cumulative_syllable_freq}
\title{Prints a cumulative_syllable_freq Object}
\usage{
\method{print}{cumulative_syllable_freq}(x, ...)
}
\arguments{
\item{x}{The cumulative_syllable_freq object.}
\item{\ldots}{ignored}
}
\description{
Prints a cumulative_syllable_freq object.
}
|
/man/print.cumulative_syllable_freq.Rd
|
no_license
|
joffrevillanueva/qdap
|
R
| false | false | 380 |
rd
|
# ----------------------------------------------------
# Initial data hacking for Ch 5: Primary Election Outcomes
# This file begins: May 13, 2020
# ----------------------------------------------------
library("here")
library("magrittr")
library("tidyverse")
library("broom")
# library("tidybayes")
library("boxr"); box_auth()
library("survival") # mlogit()
library("rstan")
mc_cores <- min(5, parallel::detectCores())
options(mc.cores = mc_cores)
rstan_options(auto_write = TRUE)
library("tidybayes")
# update symlink stuff
source(here::here("code", "helpers", "call-R-helpers.R"))
# box: data/_model-output/05-voting
# estimates for voting models
box_dir_model_output <- 112969122838
# ----------------------------------------------------
# import cleaned data
# ----------------------------------------------------
# box_search("candidates-x-irt.rds")
# box_dl({id_goes_here}, here("data", "_clean"))
cands_raw <-
read_rds(here("data", "_clean", "candidates-x-irt.rds")) %>%
print()
# recoding:
# - who wins
# - how many in choice set
# - candidate extremism level w/in group choice set
# filtering:
# - drop NA on key data
# - ONLY THEN:
# - each set must be n > 1, only 1 winner
cands <- cands_raw %>%
transmute(
Name, bonica_rid, recipient_fecid, state_abb, district_num,
group, cycle, party,
choice_set_ID = str_glue("{group}-{party}-{cycle}") %>% as.character(),
g_code = as.numeric(as.factor(choice_set_ID)),
win_primary = case_when(
pwinner == "W" ~ 1,
pwinner == "L" ~ 0
),
theta_mean_rescale, recipient_cfscore_dyn
) %>%
na.omit() %>%
group_by(group, cycle) %>%
mutate(
n_group = n(),
cf_extremism_level = case_when(
n_group > 1 & party == "R" ~
rank(recipient_cfscore_dyn, na.last = "keep"),
n_group > 1 & party == "D" ~
rank(-1 * recipient_cfscore_dyn, na.last = "keep"),
n_group <= 1 ~ 0
)
) %>%
filter(sum(win_primary) == 1) %>%
ungroup() %>%
filter(n_group > 1) %>%
print()
# n and cases
cands %>%
group_by(party) %>%
print() %>%
summarize(
sets = n_distinct(choice_set_ID),
cases = n()
)
# what's the deal with half-ranks?
# We get half ranks because some candidates have the same ideal point?
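# quick check of how rank() handles ties (the default ties.method = "average"
# assigns half-ranks):
rank(c(-1.2, -0.5, -0.5, 0.3))  # 1.0 2.5 2.5 4.0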
cands %>%
filter(
cf_extremism_level %% 1 != 0
) %>%
select(group, cycle, cf_extremism_level, recipient_cfscore_dyn, Name) %>%
semi_join(x = cands, by = c("group", "cycle")) %>%
select(
Name, state_abb, district_num, party, cycle, group,
cf_extremism_level, recipient_cfscore_dyn,
) %>%
arrange(cycle, group)
# why was I ranking
# ----------------------------------------------------
# initial data inspection
# ----------------------------------------------------
# ideas: split the world into districts with a clear "moderate and extreme"
# Is this why I was ranking?
# Indicate who the "extremist" is?
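# sketch: one way to flag the most-extreme candidate in each choice set
# (illustrative only; `most_extreme` is not used below)
cands %>%
  group_by(group, cycle) %>%
  mutate(most_extreme = cf_extremism_level == max(cf_extremism_level)) %>%
  ungroup() %>%
  count(party, most_extreme)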
cands %>%
group_by(group, cycle) %>%
count() %>%
ungroup() %>%
filter(n > 1) %>%
count(cycle, n) %>%
arrange(desc(n))
cands %>%
  filter(n_group > 1) %>%
ggplot(aes(x = cf_extremism_level, y = recipient_cfscore_dyn)) +
geom_point(aes(color = party))
# ppct ~ ranked extremism
cands %>%
group_by(group, cycle) %>%
  filter(n_group %in% c(2:6)) %>%
ggplot() +
aes(x = cf_extremism_level, y = ppct) +
  facet_grid(party ~ n_group) +
geom_point() +
geom_smooth()
select(cands, ppct, pwinner)
cands %>%
group_by(cycle) %>%
count(pct = !is.na(ppct), win = !is.na(pwinner)) %>%
filter(pct == TRUE | win == TRUE)
# ----------------------------------------------------
# MLE conditional logit
# ----------------------------------------------------
rmod <- clogit(
y ~ 0
# + theta_mean_rescale*recipient_cfscore_dyn
+ recipient_cfscore_dyn
+ strata(g_code),
data = choice_data,
subset = party == "R"
) %>%
print()
dmod <- clogit(
y ~ 0
# + theta_mean_rescale*recipient_cfscore_dyn
+ recipient_cfscore_dyn
+ strata(g_code),
data = choice_data,
subset = party == "D"
) %>%
print()
# this shows us the linear interaction but I hate it!
cands %>%
filter(party == "D") %$%
crossing(
    recipient_cfscore_dyn = seq(
      min(recipient_cfscore_dyn, na.rm = TRUE),
      max(recipient_cfscore_dyn, na.rm = TRUE),
      by = .5
    ),
theta_mean_rescale = seq(
from = min(theta_mean_rescale, na.rm = TRUE),
to = max(theta_mean_rescale, na.rm = TRUE),
by = .25
),
group = 1
) %>%
prediction::prediction(model = dmod, data = .) %>%
as_tibble() %>%
ggplot() +
aes(y = plogis(fitted), x = theta_mean_rescale) +
  geom_line(aes(color = as.factor(recipient_cfscore_dyn)))
# what can we do?
# estimate within quantiles
# (some kind of CV method for CV-MSE-optimal cuts?)
# gaussian process for continuous interaction???
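# one low-tech alternative (sketch only): cut the choice sets into terciles of
# district ideology and re-fit the conditional logit within each slice.
# Assumes the same `choice_data` used for rmod/dmod above and that
# theta_mean_rescale is constant within a choice set.
set_terciles <- choice_data %>%
  group_by(g_code) %>%
  summarize(theta_set = mean(theta_mean_rescale)) %>%
  mutate(theta_tercile = dplyr::ntile(theta_set, 3))
tercile_fits_R <- lapply(1:3, function(k) {
  clogit(
    y ~ 0 + recipient_cfscore_dyn + strata(g_code),
    data = left_join(choice_data, set_terciles, by = "g_code"),
    subset = party == "R" & theta_tercile == k
  )
})
lapply(tercile_fits_R, tidy)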
# ----------------------------------------------------
# stan conditional logit
# ----------------------------------------------------
set_sizes <- choice_data %>%
group_by(party, g_code) %>%
summarize(
n = n()
) %>%
ungroup() %>%
print()
R_set_size <- set_sizes %>%
filter(party == "R") %>%
pull(n)
D_set_size <- set_sizes %>%
filter(party == "D") %>%
pull(n)
choice_data_R <- choice_data %>%
filter(party == "R") %$%
list(
n = nrow(.),
g_code = g_code,
G = n_distinct(g_code),
n_g = R_set_size,
y = y,
X = data.frame(
recipient_cfscore_dyn,
recipient_cfscore_dyn * theta_mean_rescale
)
) %>%
c(p = ncol(.$X))
choice_data_D <- choice_data %>%
filter(party == "D") %$%
list(
n = nrow(.),
g_code = g_code,
G = n_distinct(g_code),
n_g = D_set_size,
y = y,
X = data.frame(
recipient_cfscore_dyn,
recipient_cfscore_dyn * theta_mean_rescale
)
) %>%
c(p = ncol(.$X))
simple_choice_data_R <-
c(choice_data_R, prior_sd = 1)
simple_choice_data_D <-
c(choice_data_D, prior_sd = 1)
lapply(simple_choice_data_R, head)
lapply(simple_choice_data_D, head)
simple_choice <- stan_model(
file = here("code", "05-voting", "stan", "simple-choice.stan"),
verbose = TRUE
)
lkj_choice <- stan_model(
file = here("code", "05-voting", "stan", "simple-lkj-choice.stan"),
verbose = TRUE
)
beepr::beep(2)
simple_R_stan <-
sampling(
object = simple_choice,
data = simple_choice_data_R,
iter = 2000,
chains = mc_cores
# , thin = 1,
# , include = FALSE,
# pars = c()
)
simple_D_stan <-
sampling(
object = simple_choice,
data = simple_choice_data_D,
iter = 2000,
chains = mc_cores
# , thin = 1,
# , include = FALSE,
# pars = c()
)
lkj_R_stan <-
sampling(
object = lkj_choice,
data = simple_choice_data_R,
iter = 2000,
chains = mc_cores
# , thin = 1,
# , include = FALSE,
# pars = c()
)
lkj_D_stan <-
sampling(
object = lkj_choice,
data = simple_choice_data_D,
iter = 2000,
chains = mc_cores
# , thin = 1,
# , include = FALSE,
# pars = c()
)
beepr::beep(2)
bind_rows(
"R" =
bind_rows(
"survival" = tidy(rmod, conf.int = TRUE),
"simple_bayes" = tidy(simple_R_stan, conf.int = TRUE),
"simple_lkj" = tidy(lkj_R_stan, conf.int = TRUE),
.id = "model"
) %>%
mutate(party = "R"),
"D" =
bind_rows(
"survival" = tidy(dmod, conf.int = TRUE),
"simple_bayes" = tidy(simple_D_stan, conf.int = TRUE),
"simple_lkj" = tidy(lkj_D_stan, conf.int = TRUE),
.id = "model"
) %>%
mutate(party = "D")
) %>%
mutate(
term = case_when(
term %in% c("coefs[1]", "recipient_cfscore_dyn") ~ "CF",
term %in% c("coefs[2]", "theta_mean_rescale:recipient_cfscore_dyn") ~
"Interaction"
)
) %>%
filter(is.na(term) == FALSE) %>%
ggplot(aes(x = term, y = estimate, color = party, shape = model)) +
geom_pointrange(
aes(ymin = conf.low, ymax = conf.high),
position = position_dodge(width = -0.25)
) +
facet_wrap(~ party) +
coord_flip() +
geom_hline(yintercept = 0) +
NULL
tidy(rmod)
tidy(simple_R_stan)
tidy(dmod)
tidy(simple_D_stan)
beepr::beep(2)
stan_trace(simple_D_stan)
stan_trace(simple_R_stan)
stan_ac(simple_D_stan)
stan_ac(simple_R_stan)
# ----------------------------------------------------
# neural network choice model
# ----------------------------------------------------
net_choice <- stan_model(
file = here("code", "05-voting", "stan", "choice-net.stan"),
verbose = TRUE
)
net_lkj <- stan_model(
file = here("code", "05-voting", "stan", "choice-net-lkj.stan"),
verbose = TRUE
)
n_nodes <- 3
hidden_prior_scale <- 1
act_prior_scale <- 0.5
net_data_R <- c(
choice_data_R,
n_nodes = n_nodes,
hidden_prior_scale = hidden_prior_scale,
act_prior_scale = act_prior_scale
)
net_data_D <- c(
choice_data_D,
n_nodes = n_nodes,
hidden_prior_scale = hidden_prior_scale,
act_prior_scale = act_prior_scale
)
# simple neural nets: no LKJ
stan_net_R <-
sampling(
object = net_choice,
data = net_data_R,
iter = 2000,
chains = mc_cores
# , thin = 1, , include = FALSE, pars = c()
)
stan_net_D <-
sampling(
object = net_choice,
data = net_data_D,
iter = 2000,
chains = mc_cores
# , thin = 1, , include = FALSE, pars = c()
)
# LKJ neural net
lkj_net_R <-
sampling(
object = net_lkj,
data = net_data_R,
iter = 2000,
chains = mc_cores
# , thin = 1, , include = FALSE, pars = c()
)
lkj_net_D <-
sampling(
object = net_lkj,
data = net_data_D,
iter = 2000,
chains = mc_cores
# , thin = 1, , include = FALSE, pars = c()
)
shinystan::launch_shinystan(lkj_D_stan)
# compare coefs
bind_rows(
"stan_net_R" = tidy(stan_net_R, conf.int = TRUE),
"stan_net_D" = tidy(stan_net_D, conf.int = TRUE),
"lkj_net_R" = tidy(lkj_net_R, conf.int = TRUE),
"lkj_net_D" = tidy(lkj_net_D, conf.int = TRUE),
.id = "model"
) %>%
filter(str_detect(term, "wt")) %>%
ggplot() +
aes(x = term, y = estimate,
color = str_detect(model, "_D"),
shape = str_detect(model, "lkj_")
) +
geom_pointrange(
aes(ymin = conf.low, ymax = conf.high),
position = position_dodge(width = -0.25)
) +
coord_flip()
stan_utilities_R <- stan_net_R %>%
tidy_draws() %>%
gather_draws(util[case], n = 200) %>%
right_join(
choice_data %>%
filter(party == "R") %>%
mutate(case = row_number())
) %>%
print()
ggplot(stan_utilities_R) +
aes(x = theta_mean_rescale, y = recipient_cfscore_dyn, color = .value) +
geom_jitter(width = .035, height = .15, alpha = 0.2) +
scale_color_viridis_c() +
geom_hline(yintercept = 0)
# right now it looks like this is model dependent
# most of the "pow" comes at CF == 0
# ----------------------------------------------------
# neyman orthogonalization
# ----------------------------------------------------
# likelihood question
cands %>%
ggplot(aes(x = theta_mean_rescale * recipient_cfscore_dyn)) +
geom_histogram() +
facet_wrap(~ party)
neyman_net <- stan_model(
file = here("code", "05-voting", "stan", "choice-net-neyman.stan"),
verbose = TRUE
)
constrained_neyman <- stan_model(
file = here("code", "05-voting", "stan", "constrained-neyman.stan"),
verbose = TRUE
)
beepr::beep(2)
# ---- data -----------------------
# push this higher up?
# - keep only variables we want
# - drop NA
# - calculate set sizes
# - only sets with 1 winner
neyman_data <- cands %>%
transmute(
group, cycle, party,
    g_code = as.numeric(as.factor(choice_set_ID)),
    y = win_primary,
scale_theta = scale(theta_mean_rescale)[,1],
scale_cf = scale(recipient_cfscore_dyn)[,1],
scale_total_receipts = scale(log(total_receipts + 1))[,1],
scale_district_white = scale(district_white)[,1],
woman = as.numeric(cand_gender == "F"),
incumbent = as.numeric(Incum_Chall == "I")
) %>%
na.omit() %>%
arrange(g_code) %>%
group_by(g_code) %>%
mutate(n_g = n()) %>%
filter(sum(y) == 1) %>%
ungroup() %>%
filter(n_g > 1) %>%
print()
ggplot(neyman_data, aes(x = scale_total_receipts)) + geom_histogram()
set_sizes <- neyman_data %>%
group_by(party, g_code) %>%
summarize(
n = n()
) %>%
ungroup() %>%
split(.$party) %>%
lapply(pull, n) %>%
print()
nodes_select <- 3
nodes_outcome <- 3
hid_prior_select <- 1
act_prior_select <- 2
hid_prior_outcome <- 1
act_prior_outcome <- 1
neyman_data_R <- neyman_data %>%
filter(party == "R") %$%
list(
n = nrow(.),
y = y,
theta = scale_theta,
cf_score = scale_cf,
X = data.frame(
scale_total_receipts, scale_district_white, woman, incumbent
),
G = n_distinct(g_code),
n_g = set_sizes$R
) %>%
c(P = ncol(.$X),
nodes_select = nodes_select,
nodes_outcome = nodes_outcome,
hid_prior_select = hid_prior_select,
act_prior_select = act_prior_select,
hid_prior_outcome = hid_prior_outcome,
act_prior_outcome = act_prior_outcome)
neyman_data_D <- neyman_data %>%
filter(party == "D") %$%
list(
n = nrow(.),
y = y,
theta = scale_theta,
cf_score = scale_cf,
X = data.frame(
scale_total_receipts, scale_district_white, woman, incumbent
),
G = n_distinct(g_code),
n_g = set_sizes$D
) %>%
c(P = ncol(.$X),
nodes_select = nodes_select,
nodes_outcome = nodes_outcome,
hid_prior_select = hid_prior_select,
act_prior_select = act_prior_select,
hid_prior_outcome = hid_prior_outcome,
act_prior_outcome = act_prior_outcome)
lapply(neyman_data_R, head)
lapply(neyman_data_D, head)
n_iter <- 2000
stan_neyman_R <- sampling(
object = neyman_net, data = neyman_data_R,
iter = n_iter, refresh = max(n_iter / 20, 1),
chains = mc_cores
# , thin = 1, , include = FALSE, pars = c()
)
stan_neyman_D <- sampling(
object = neyman_net, data = neyman_data_D,
iter = n_iter, refresh = max(n_iter / 20, 1),
chains = mc_cores
# , thin = 1, , include = FALSE, pars = c()
)
stan_constrained_R <- sampling(
object = constrained_neyman, data = neyman_data_R,
iter = n_iter, refresh = max(n_iter / 20, 1),
chains = mc_cores
# , thin = 1, , include = FALSE, pars = c()
)
stan_constrained_D <- sampling(
object = constrained_neyman, data = neyman_data_D,
iter = n_iter, refresh = max(n_iter / 20, 1),
chains = mc_cores
# , thin = 1, , include = FALSE, pars = c()
)
tidy(stan_neyman_R, conf.int = TRUE, ess = TRUE, rhat = TRUE) %>% print(n = nrow(.))
tidy(stan_neyman_D, conf.int = TRUE, ess = TRUE, rhat = TRUE) %>% print(n = nrow(.))
tidy(stan_constrained_R, conf.int = TRUE, ess = TRUE, rhat = TRUE) %>% print(n = nrow(.))
tidy(stan_constrained_D, conf.int = TRUE, ess = TRUE, rhat = TRUE) %>% print(n = nrow(.))
beepr::beep(2)
stan_plot(stan_neyman_R, pars = c("hid_outcome", "hid_select"))
stan_plot(stan_constrained_R, pars = c("hid_outcome", "hid_select"))
stan_plot(stan_neyman_D, pars = c("hid_outcome", "hid_select"))
stan_plot(stan_constrained_D, pars = c("hid_outcome", "hid_select"))
stan_trace(stan_neyman_R, pars = "hid_select_raw")
stan_trace(stan_neyman_R, pars = "bias_max_select")
stan_trace(stan_neyman_R, pars = "bias_slice_select")
stan_trace(stan_neyman_R, pars = "hid_select")
stan_plot(stan_neyman_R, pars = c("hid_select", "hid_select_raw"))
stan_trace(stan_neyman_D, pars = "hid_outcome")
stan_trace(stan_neyman_D, pars = "hid_select")
|
/code/05-voting/51_voting-eda.R
|
no_license
|
mikedecr/dissertation
|
R
| false | false | 15,469 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/models.R
\name{shim}
\alias{shim}
\title{Fit Strong Heredity Interaction Model}
\usage{
shim(x, y, main.effect.names, interaction.names, family = c("gaussian",
"binomial", "poisson"), weights, lambda.factor = ifelse(nobs < nvars, 0.01,
1e-06), lambda.beta = NULL, lambda.gamma = NULL, nlambda.gamma = 10,
nlambda.beta = 10, nlambda = 100, threshold = 1e-04, max.iter = 100,
initialization.type = c("ridge", "univariate"), center = TRUE,
normalize = TRUE, verbose = TRUE, cores = 2)
}
\arguments{
\item{x}{Design matrix of dimension \code{n x q}, where \code{n} is the
number of subjects and q is the total number of variables; each row is an
observation vector. This must include all main effects and interactions as
well, with column names corresponding to the names of the main effects
(e.g. \code{x1, x2, E}) and their interactions (e.g. \code{x1:E, x2:E}).
All columns should be scaled to have mean 0 and variance 1; this is done
internally by the \code{\link{shim}} function.}
\item{y}{response variable. For \code{family="gaussian"} should be a 1 column
matrix or numeric vector. For \code{family="binomial"}, if the response is
a vector it can be numeric with 0 for failure and 1 for success, or a
factor with the first level representing "failure" and the second level
representing "success". Alternatively, For binomial logistic regression,
the response can be a matrix where the first column is the number of
"successes" and the second column is the number of "failures".}
\item{main.effect.names}{character vector of main effects names. MUST be
ordered in the same way as the column names of \code{x}. e.g. if the column
names of \code{x} are \code{"x1","x2"} then \code{main.effect.names =
c("x1","x2")}}
\item{interaction.names}{character vector of interaction names. MUST be
separated by a colon (e.g. x1:x2), AND MUST be ordered in the same way as
the column names of \code{x}}
\item{family}{response type. see \code{y} for details. Currently only
\code{family = "gaussian"} is implemented.}
\item{weights}{observation weights. Can be total counts if responses are
proportion matrices. Default is 1 for each observation. Currently NOT
IMPLEMENTED}
\item{lambda.factor}{The factor for getting the minimal lambda in lambda
sequence, where \code{min(lambda) = lambda.factor * max(lambda).
max(lambda)} is the smallest value of lambda for which all coefficients are
zero. The default depends on the relationship between \code{N} (the number
of rows in the matrix of predictors) and \code{p} (the number of
predictors). If \code{N > p}, the default is \code{1e-6}, close to zero. If
\code{N < p}, the default is \code{0.01}. A very small value of
lambda.factor will lead to a saturated fit.}
\item{lambda.beta}{sequence of tuning parameters for the main effects. If
\code{NULL} (default), this function will automatically calculate a
sequence using the \code{\link{shim_once}} function which will be over a
grid of tuning parameters for gamma as well. If the user specifies a
sequence then this function will not automatically perform the search over
a grid. You will need to create the grid yourself e.g. repeat the
lambda.gamma for each value of lambda.beta}
\item{lambda.gamma}{sequence of tuning parameters for the interaction
effects. Default is \code{NULL} which means this function will
automatically calculate a sequence of tuning parameters. See
\code{\link{shim_once}} for details on how this sequence is calculated.}
\item{nlambda.gamma}{number of tuning parameters for gamma. This needs to be
specified even for user defined inputs}
\item{nlambda.beta}{number of tuning parameters for beta. This needs to be
specified even for user defined inputs}
\item{nlambda}{total number of tuning parameters. If \code{lambda.beta =
NULL} and \code{lambda.gamma = NULL} then \code{nlambda} should be equal to
\code{nlambda.beta x nlambda.gamma}. This is important to specify
especially when a user defined sequence of tuning parameters is set.}
\item{threshold}{Convergence threshold for coordinate descent. Each
coordinate-descent loop continues until the change in the objective
function after all coefficient updates is less than threshold. Default
value is \code{1e-4}.}
\item{max.iter}{Maximum number of passes over the data for all tuning
parameter values; default is 100.}
\item{initialization.type}{The procedure used to estimate the regression
coefficients and used in the \code{\link{uni_fun}} function. If
\code{"univariate"} then a series of univariate regressions is performed
with the response variable \code{y}. If \code{"ridge"} then ridge
regression is performed using the \code{\link[glmnet]{cv.glmnet}} function
and the tuning parameter is chosen using 10 fold cross validation. The
default is \code{"ridge"}.}
\item{center}{Should \code{x} and \code{y} be centered. Default is
\code{TRUE}. Centering \code{y} applies to \code{family="gaussian"} only.}
\item{normalize}{Should \code{x} be scaled to have unit variance. Default is
\code{TRUE}}
\item{verbose}{Should iteration number and vector of length \code{nlambda} be
printed to console? Default is \code{TRUE}. 0 represents the algorithm has
not converged for the pair of tuning parameters lambda.beta and
lambda.gamma and 1 means it has converged}
\item{cores}{The number of cores to use for certain calculations in the
\code{\link{shim}} function, i.e. at most how many child processes will be
run simultaneously using the \code{parallel} package. Must be at least one,
and parallelization requires at least two cores. Default is 2.}
}
\value{
An object with S3 class "shim" \describe{ \item{b0}{Intercept
sequence of length \code{nlambda}} \item{beta}{A nvars x \code{nlambda}
matrix of main effects (\eqn{\beta}) coefficients, stored in sparse column
format \code{("CsparseMatrix")}} \item{alpha}{A nvars x \code{nlambda}
matrix of interaction effects (\eqn{\alpha}) coefficients, stored in sparse
column format \code{("CsparseMatrix")}} \item{gamma}{A nvars x
\code{nlambda} matrix of (\eqn{\gamma}) coefficients, stored in sparse
column format \code{("CsparseMatrix")}} \item{lambda.beta}{The sequence of
tuning parameters used for the main effects} \item{lambda.gamma}{The
sequence of tuning parameters used for the interaction effects}
\item{tuning.parameters}{2 x nlambda matrix of tuning parameters. The first
row corresponds to \code{lambda.beta} and the second row corresponds to
\code{lambda.gamma}} \item{dfbeta}{list of length \code{nlambda} where each
element gives the index of the nonzero \eqn{\beta} coefficients}
\item{dfalpha}{list of length \code{nlambda} where each element gives the
index of the nonzero \eqn{\alpha} coefficients} \item{x}{x matrix }
\item{y}{response data} \item{bx}{column means of x matrix} \item{by}{mean
of response} \item{sx}{column standard deviations of x matrix}
\item{call}{the call to the function} \item{nlambda.gamma}{nlambda.gamma}
\item{nlambda.beta}{nlambda.beta} \item{nlambda}{nlambda}
\item{interaction.names}{interaction names} \item{main.effect.names}{main
effect names} }
}
\description{
function to fit the Strong Heredity Interaction Model for a
sequence of tuning parameters. This is a penalized regression method that
ensures the interaction term is non-zero only if its corresponding
main-effects are non-zero.
}
\details{
the index of the tuning parameters is as follows. If for example
there are 10 lambda_gammas, and 20 lambda_betas, then the first
lambda_gamma gets repeated 20 times. So the first twenty entries of tuning
parameters correspond to 1 lambda_gamma and the 20 lambda_betas
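For example (illustrative sketch with hypothetical values):
\preformatted{
lambda.gamma <- c(0.1, 0.2)   # nlambda.gamma = 2
lambda.beta  <- c(1, 2, 3)    # nlambda.beta = 3
# the tuning parameter grid is ordered as
expand.grid(lambda.beta = lambda.beta, lambda.gamma = lambda.gamma)
#   lambda.beta lambda.gamma
# 1           1          0.1
# 2           2          0.1
# 3           3          0.1
# 4           1          0.2
# 5           2          0.2
# 6           3          0.2
}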
}
\note{
if the user specifies lambda.beta and lambda.gamma then this will
not take all possible combinations of lambda.beta and lambda.gamma. It will
pair the first element of each, and so on. This is done on purpose
for use with the cv.shim function which uses the same lambda sequences for
each fold.
}
\examples{
# number of observations
n <- 100
# number of predictors
p <- 5
# environment variable
e <- sample(c(0,1), n, replace = T)
# main effects
x <- cbind(matrix(rnorm(n*p), ncol = p), e)
# need to label columns
dimnames(x)[[2]] <- c("x1","x2","x3","x4","x5","e")
# design matrix without intercept (can be user defined interactions)
X <- model.matrix(~(x1+x2+x3)*e+x1*x4+x3*x5-1, data = as.data.frame(x))
# names must appear in the same order as X matrix
interaction_names <- grep(":", colnames(X), value = T)
main_effect_names <- setdiff(colnames(X), interaction_names)
# response
Y <- X \%*\% rbinom(ncol(X), 1, 0.6) + 3*rnorm(n)
# standardize data
data_std <- standardize(X,Y)
result <- shim(x = data_std$x, y = data_std$y,
main.effect.names = main_effect_names,
interaction.names = interaction_names)
}
\author{
Sahir Bhatnagar
Maintainer: Sahir Bhatnagar \email{sahir.bhatnagar@mail.mcgill.ca}
}
\seealso{
\code{\link{shim_once}}
}
|
/man/shim.Rd
|
no_license
|
friendlywlb/shim
|
R
| false | true | 9,007 |
rd
|
#' Check correct input DNA sequence
#'
#' @param secuencia character: coding dna, must be in frame
#'
#' @return throws an error if the sequence contains invalid characters, is not
#' a multiple of 3, is outside the supported length range, or contains a
#' premature stop codon
#' @export
#'
#' @examples
#' validate_sequence(test_seq)
validate_sequence <- function(secuencia) {
secuencia <- stringr::str_to_upper(secuencia)
## check the sequence is in frame ---------
  if (nchar(secuencia) %% 3 != 0) {
    err_msg <- paste0(
      "Sequence not in frame, sequence length is: ",
nchar(secuencia),
" (not a multiple of 3)"
)
stop(err_msg)
}
## check for valid characters ---------
nucs_in_seq <-
stringr::str_split(secuencia, "") %>%
unlist() %>%
unique()
invalid <- nucs_in_seq[!nucs_in_seq %in% c("A", "G", "T", "C")]
if (length(invalid) > 0) {
err_msg <- paste0(
"Invalid charcter(s) found: ",
invalid[1]
)
stop(err_msg)
}
  ## reject sequences that are too short or too long ---------
min_value <- 70
max_value <- 43524
if (nchar(secuencia) < min_value) {
stop("The sequence is too short, results might be inaccurate")
}
if (nchar(secuencia) > max_value) {
stop("The sequence is too long!")
}
## Premature stop codon maybe the sequence is not in frame ---------
stop_codons <- c("TAG", "TAA", "TGA")
codones_en_seq <- split_by_codons(secuencia) %>%
utils::head(-1) # remove the stop codon (the last codon)
stop_codons_found <- codones_en_seq[codones_en_seq %in% stop_codons]
if (length(stop_codons_found) > 0) {
err_msg <- paste0("Secuence contains a premature stop codon: ", stop_codons_found[1])
stop(err_msg)
}
}
#' Split a sequence by codons
#'
#' This is an internal function
#' @inheritParams validate_sequence
#'
#' @return character vector, each element is a codon
#' and they come in the same order
split_by_codons <- function(secuencia) {
gsub("(.{3})", "\\1 ", secuencia) %>%
stringr::str_split(" ") %>%
unlist() %>%
    # this approach inserts an extra empty element
    # at the end of the vector, which needs to be removed
.[-length(.)]
}
#' translate DNA sequence to amino acid
#'
#' @inheritParams validate_sequence
#'
#' @return string, amino acid sequence
#' @export
#'
#' @examples
#' translate("ATGTTT")
translate <- function(secuencia) {
secuencia <- stringr::str_to_upper(secuencia)
  # validate_sequence(secuencia)  # calling the validator here triggers a bug, so it is skipped
split_by_codons(secuencia) %>%
purrr::map_chr(function(x) iCodon::gc_codons_to_amino[x]) %>%
stringr::str_c(collapse = "")
}
#' Codon distance
#'
#' compute the number of codon differences between the two sequences
#'
#' @param seq_variant1 string: coding dna, must be in frame
#' @param seq_variant2 string: coding dna, must be in frame
#' @param proportion logical: if TRUE, returns the distance as a proportion
#' (1 = all codons are different, 0 = no differences)
#' @return int, number of codon differences
#' @export
#'
#' @examples
#' codon_distance("ATGCTG", "ATGCTT")
codon_distance <- function(seq_variant1, seq_variant2, proportion = FALSE) {
if (translate(seq_variant1) != translate(seq_variant2)) {
warning("sequences are not synonimous")
}
distance <- sum(split_by_codons(seq_variant1) != split_by_codons(seq_variant2))
if (proportion) {
distance <- distance / (nchar(seq_variant1) / 3)
}
distance
}
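# Illustration (added; not part of the original package code): the two
# synonymous variants below differ in one codon out of two, so the
# proportional distance is 0.5.
# codon_distance("ATGCTG", "ATGCTT", proportion = TRUE)
# #> [1] 0.5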
#' Nucleotide distance
#'
#' Compute the number of nucleotide differences between the two sequences;
#' assumes the sequences are the same length and aligned
#'
#' @param seq_variant1 string: coding dna, must be in frame
#' @param seq_variant2 string: coding dna, must be in frame
#' @param proportion logical: if TRUE, returns the distance as a proportion
#' (1 = all nucleotides are different, 0 = no differences)
#' @return int, number of nucleotide differences
#' @export
#'
#' @examples
#' nucleotide_distance("ATGCTG", "ATGCTT")
nucleotide_distance <- function(seq_variant1, seq_variant2, proportion = FALSE) {
  if (nchar(seq_variant1) != nchar(seq_variant2)) { # nchar(), not length(): the inputs are single strings
warning("sequences are not the same length")
}
nucs1 <- strsplit(seq_variant1, "") %>% unlist()
nucs2 <- strsplit(seq_variant2, "") %>% unlist()
distance <- sum(nucs1 != nucs2)
if (proportion) {
    distance <- distance / nchar(seq_variant1) # proportion of differing nucleotides, not codons
}
distance
}
#' Count codons in DNA sequence
#'
#' Counts the frequency of each triplet in the sequence; assumes
#' the sequence is in frame
#'
#' @inheritParams validate_sequence
#'
#' @return The frequency of the codons in \code{secuencia}
#' @export
#' @importFrom magrittr %>%
#' @examples
#' count_codons("ACGGGG")
count_codons <- function(secuencia) {
if (nchar(secuencia) %% 3 != 0) {
stop("sequence not a multiple of 3")
}
secuencia <- toupper(secuencia)
tibble::tibble(codon = split_by_codons(secuencia)) %>%
dplyr::count(.data$codon)
}
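# Illustration (added; not part of the original package code): count_codons()
# returns a tibble with one row per distinct codon and its count, e.g.
# count_codons("ACGGGG") gives codon = "ACG", "GGG" with n = 1, 1.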
|
/R/utilities.R
|
permissive
|
santiago1234/iCodon
|
R
| false | false | 4,863 |
r
|
\name{portfolio_getSettings}
\alias{portfolio_getSettings}
\title{Get Portfolio Settings}
\usage{portfolio_getSettings(portfolio)
}
\arguments{
\item{portfolio}{Portfolio object created using \link[=portfolio_create]{portfolio_create( )} function}
}
\value{List with portfolio settings.}
\description{Method returns active list of settings of a given portfolio.}
\author{Kostin Andrey <andrey.kostin@portfolioeffect.com>}
\examples{
\dontrun{
dateStart = "2014-11-17 09:30:00"
dateEnd = "2014-11-17 16:00:00"
portfolio=portfolio_create(dateStart,dateEnd)
positionAAPL=position_add(portfolio,'AAPL',100)
positionC=position_add(portfolio,'C',300)
positionGOOG=position_add(portfolio,'GOOG',150)
portfolio_settings(portfolio,
windowLength='600s',
resultsSamplingInterval = '10s')
settings=portfolio_getSettings(portfolio)
settings
}}
\keyword{PortfolioEffectHFT}
%\concept{high frequency, intraday analytics, market data, portfolio, portfolio management,realtime analytics, risk, risk management, toolbox tools, trading, trading strategies}
\keyword{portfolio_getSettings}
|
/man/portfolio_getSettings.Rd
|
no_license
|
IanMadlenya/PortfolioEffectHFT
|
R
| false | false | 1,113 |
rd
|
#-----------------------------------
# Tree class object
#-----------------------------------
# --- Simplifying data import ---
diretorio ="E:\\Academico\\Mestrado\\Tese\\ws\\trans\\zf2"
# inp <- file_path_sans_ext(dir(paste0(diretorio,"\\laz\\"),pattern='.laz')) # put the files in the laz folder
# Create the lax (put it in the class)
# input <- inp[6]
# inp015 = las$new(diretorio, input)
# inp015$descompact()
inp <- file_path_sans_ext(dir(paste0(diretorio,"\\las\\"),pattern='.las')) # Files in the las folder
input <- inp[1]
# --- Creating the object ---
inp015 = las$new(diretorio, input)
# --- Running methods ---
# inp015$pasta()
inp015$fusion()
# inp356$emergent() already included in the others
# inp356$ripley() already included in the others
|
/objetoTree.R
|
no_license
|
gustavohom/FusionEmergent
|
R
| false | false | 759 |
r
|
#!/usr/bin/env Rscript
suppressPackageStartupMessages(library(tidyverse))
suppressPackageStartupMessages(library(synapser))
suppressPackageStartupMessages(library(assertr))
suppressPackageStartupMessages(library(agoradataprocessing))
suppressPackageStartupMessages(library("optparse"))
option_list <- list(
make_option(c("-c", "--config"), type="character",
help="Configuration file.", dest="config",
metavar="config", default = "config-staging.json"),
make_option(c("--store"), action="store_true", default=FALSE,
dest="store", help="Store in Synapse [default: %default]")
)
opt <- parse_args(OptionParser(option_list=option_list))
synLogin()
store <- FALSE
config <- jsonlite::fromJSON(opt$config)
processed_data <- agoradataprocessing::process_data(config = config)
#########################################
# Write out all data and store in Synapse
#########################################
processed_data$teamInfo %>%
jsonlite::toJSON(pretty=2) %>%
readr::write_lines(config$teamInfoFileJSON)
processed_data$geneInfo %>%
jsonlite::toJSON(pretty=2) %>%
readr::write_lines(config$geneInfoFileJSON)
processed_data$diffExprData %>%
jsonlite::toJSON(pretty=2) %>%
readr::write_lines(config$diffExprFileJSON)
processed_data$network %>%
jsonlite::toJSON(pretty=2) %>%
readr::write_lines(config$networkOutputFileJSON)
processed_data$proteomics %>%
jsonlite::toJSON(pretty=2, na=NULL) %>%
readr::write_lines(config$proteomicsFileJSON)
processed_data$metabolomics %>%
jsonlite::toJSON(pretty=2, digits=NA) %>%
readr::write_lines(config$metabolomicsFileJSON)
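# Hedged sketch (added; not part of the original script): the six write-outs
# above share one pattern, which a small helper could express once. The
# function name and signature are illustrative assumptions, not an existing
# project function.
write_json_output <- function(dat, path, ...) {
  dat %>%
    jsonlite::toJSON(pretty = 2, ...) %>%
    readr::write_lines(path)
}
# e.g. write_json_output(processed_data$proteomics, config$proteomicsFileJSON, na = NULL)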
if (opt$store) {
teamInfoJSON <- synStore(File(config$teamInfoFileJSON,
parent=config$outputFolderId),
used=c(config$teamInfoId,
config$teamMemberInfoId),
forceVersion=FALSE)
geneInfoFinalJSON <- synStore(File(config$geneInfoFileJSON,
parent=config$outputFolderId),
used=c(config$diffExprDataId,
config$igapDataId,
config$eqtlDataId,
config$medianExprDataId,
config$brainExpressionDataId,
config$targetListOrigId,
config$druggabilityDataId),
forceVersion=FALSE)
diffExprDataJSON <- synStore(File(config$diffExprFileJSON,
parent=config$outputFolderId),
used=c(config$diffExprDataId,
config$tissuesTableId,
config$studiesTableId),
forceVersion=FALSE)
networkDataJSON <- synStore(File(config$networkOutputFileJSON,
parent=config$outputFolderId),
used=c(config$diffExprDataId,
config$networkDataId),
forceVersion=FALSE)
proteomicsDataJSON <- synStore(File(config$proteomicsFileJSON,
parent=config$outputFolderId),
used=c(config$proteomicsDataId),
forceVersion=FALSE)
metabolomicsDataJSON <- synStore(File(config$metabolomicsFileJSON,
parent=config$outputFolderId),
used=c(config$metabolomicsDataId),
forceVersion=FALSE)
dataFiles <- c(diffExprDataJSON,
geneInfoFinalJSON,
teamInfoJSON,
networkDataJSON,
proteomicsDataJSON,
metabolomicsDataJSON)
dataManifest <- purrr::map_df(.x=dataFiles,
.f=function(x) data.frame(id=x$properties$id,
version=x$properties$versionNumber))
dataManifest %>% readr::write_csv(config$manifestFileCSV)
dataManifestCsv <- synStore(File(config$manifestFileCSV,
parent=config$outputFolderId),
used=dataFiles,
forceVersion=FALSE)
}
|
/exec/process.R
|
permissive
|
mfazza/agoradataprocessing
|
R
| false | false | 4,462 |
r
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/IsBinary.R
\name{is.binary}
\alias{is.binary}
\title{Check whether a variable is binary}
\usage{
is.binary(x)
}
\arguments{
\item{x}{a numeric vector}
}
\description{
\code{is.binary} returns TRUE if a variable is binary, and FALSE otherwise
}
|
/man/is.binary.Rd
|
no_license
|
EricZhao636/Information
|
R
| false | false | 331 |
rd
|
# this process treats the values of vessels that were not submitted as zero, so the expansion is carried out by recomputing the pr_i, which are initially Inf for sent=0 & id_battello>0
# compute pr_i ####
setkey(flotta, id_strato)
flotta_temp=flotta[.( as.numeric(input_strato_imp_m()) )][,list(id_strato,lft,id_battello)]
if(nrow(flotta_temp)>0) { # input_strato_imp_m() can be NULL if the user selects nothing. This is not the case for selected_strata() in the outliers tab, where leaving all boxes empty is equivalent to selecting everything.
setkey(flotta_temp, id_strato,lft)
flotta_temp[,eti:=1:nrow(.SD), by=id_strato]
strati_censimento=flotta_temp[,list(.N,n=sum(ifelse(id_battello>0,1,0))),keyby=id_strato][N-n==0,id_strato]
if(length(strati_censimento) >0) {
pr_i_temp=flotta_temp[!id_strato %in% strati_censimento,data.table(.SD[id_battello>0,.(lft,id_battello,eti)],pr_i=diag(hv_pij(lft, n=nrow(.SD[id_battello>0]), eti=.SD[id_battello>0,eti], M=T) ) ) , keyby=id_strato]
} else {
pr_i_temp=flotta_temp[,data.table(.SD[id_battello>0,.(lft,id_battello,eti)],pr_i=diag(hv_pij(lft, n=nrow(.SD[id_battello>0]), eti=.SD[id_battello>0,eti], M=T) ) ) , keyby=id_strato]
}
pr_i_temp=pr_i_temp[,list(id_battello,pr_i)]
  # add the census strata
if(length(strati_censimento)>0) pr_i_temp=rbindlist(list(pr_i_temp,flotta_temp[id_strato %in% strati_censimento,.(id_battello, pr_i=1)]))
  # compute the correction factor
if ( exists("cy") ) {
setkey(cy,id_strato)
ric=all[var=="ricavi", list(id_battello,id_strato,value)]
setkey(ric,id_battello)
setkey(pr_i_temp,id_battello)
ric=pr_i_temp[ric][,ric_esp_nisea:=sum(value/pr_i),by=id_strato]
setkey(ric,id_strato)
ric=cy[ric]
ric[is.na(ricavi), ricavi:=ric_esp_nisea] # corr_fact will be 1 for that
ric[,corr_fact:=ricavi/ric_esp_nisea]
setkey(ric,id_battello)
# weight_with_correction = weight * corr_fact = 1/pr * corr_fact -->
# pr_with_correction= 1 / weight_with_correction = 1/(1/pr * corr_fact) = pr / corr_fact
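    # Numeric illustration (added comment, hypothetical values): with
    # pr_i = 0.25 (weight 4) and corr_fact = 2, the corrected weight is 8 and
    # the corrected inclusion probability is 0.25 / 2 = 0.125, which is what
    # pr_i * (1 / corr_fact) computes below.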
pr_i_temp[ric, pr_i:=pr_i*(1/corr_fact)]
rm(ric)
}
setkey(pr_i_temp, id_battello)
} else {
pr_i_temp=data.table(id_battello=as.integer(0), pr_i=as.numeric(0) )
setkey(pr_i_temp, id_battello)
}
|
/source/refresh_pr_i_imp_m.R
|
no_license
|
micheledemeo/datacontrol
|
R
| false | false | 2,288 |
r
|
library(PReMiuM)
library(tidyverse)
library(tictoc)  # provides tic()/toc() used for timing below
# library(parallel)
# require(doMC)
# require(foreach)
setwd("/work/04734/dhbrand/stampede2/GitHub/EnviroTyping/data/interim/G2F_Hybrid/hyb_by_month_preds/full_long")
df <- read_rds("../../hybrid_by_month_calibrated_weather.rds")
#subset <- df[sample(1:nrow(df),.1*dim(df)[1]),]
set.seed(1234)
train_index <- sample(1:nrow(df), 0.5 * nrow(df))
test_index <- setdiff(1:nrow(df), train_index)
train <- df[train_index,]
test <- df[test_index,]
# find continuous variables with variance
val <- grep("Median",names(train))
contVars <- names(which(map_dbl(train[val], var, na.rm = TRUE) != 0))
discrVars <- c("Pedi", "Month")
tic()
runInfoObj <- profRegr(covNames, outcome = 'Yield', yModel = 'Normal', xModel = "Mixed",discreteCovs = discrVars, continuousCovs = contVars, data = train, predict = test, nSweeps = 1000, nBurn = 1000, seed = 1234)
print(toc())
calcDists <- calcDissimilarityMatrix(runInfoObj)
clusObj <- calcOptimalClustering(calcDists)
riskProfObj <- calcAvgRiskAndProfile(clusObj)
predictions <- calcPredictions(riskProfObj,fullSweepPredictions=TRUE,fullSweepLogOR=TRUE)
print(rsqrd <- 1-(sum((predictions$observedY - predictions$predictedY)^2)/sum((predictions$observedY - mean(predictions$observedY))^2)))
print(predictions$rmse)
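# Hedged sketch (added; not part of the original script): the R-squared and
# RMSE computed repeatedly in this file could be wrapped in a single helper.
# The name and structure here are illustrative assumptions.
rsq_rmse <- function(obs, pred) {
  ss_res <- sum((obs - pred)^2)
  ss_tot <- sum((obs - mean(obs))^2)
  c(rsqrd = 1 - ss_res / ss_tot, rmse = sqrt(mean((obs - pred)^2)))
}
# e.g. rsq_rmse(predictions$observedY, predictions$predictedY)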
nfolds=5
set.seed(1)
foldi=sample(rep(1:nfolds,length.out=length(df$Pedi)))
table(foldi)
rSqrd=NULL
predErr=NULL
for(k in 1:nfolds){
testi=which(foldi==k)
train=df[-testi,]
test=df[testi,]
runInfoObj <- profRegr(covNames, outcome = 'Yield', yModel = 'Normal', xModel = "Mixed",discreteCovs = "Pedi", continuousCovs = numericVars, data = train, predict = test, nSweeps = 1000, nBurn = 1000, seed = 1234)
calcDists <- calcDissimilarityMatrix(runInfoObj)
clusObj <- calcOptimalClustering(calcDists)
riskProfObj <- calcAvgRiskAndProfile(clusObj)
predictions <- calcPredictions(riskProfObj,doRaoBlackwell = F, fullSweepPredictions = F,fullSweepLogOR = T)
print(rsqrd <- 1-(sum((predictions$observedY - predictions$predictedY)^2)/sum((predictions$observedY - mean(predictions$observedY))^2)))
print(predictions$rmse)
predErr=c(predErr,predictions$rmse)
rSqrd=c(rSqrd,rsqrd)
}
(avg_rSqrd <- mean(rSqrd))
(avg_predErr <- mean(predErr))
val <- grep("Min|Max",names(df))
numericVars <- names(which(map_dbl(df[val], var) != 0))
nfolds=5
set.seed(1)
foldi=sample(rep(1:nfolds,length.out=length(df$Pedi)))
table(foldi)
rSqrd=NULL
predErr=NULL
for(k in 1:nfolds){
testi=which(foldi==k)
train=df[-testi,]
test=df[testi,]
runInfoObj <- profRegr(covNames, outcome = 'Yield', yModel = 'Normal', xModel = "Mixed",discreteCovs = "Pedi", continuousCovs = numericVars, data = train, predict = test, nSweeps = 100, nBurn = 100, seed = 1234)
calcDists <- calcDissimilarityMatrix(runInfoObj)
clusObj <- calcOptimalClustering(calcDists, maxNClusters = 9)
riskProfObj <- calcAvgRiskAndProfile(clusObj)
predictions <- calcPredictions(riskProfObj,fullSweepPredictions=TRUE,fullSweepLogOR=TRUE)
print(rsqrd <- 1-(sum((predictions$observedY - predictions$predictedY)^2)/sum((predictions$observedY - mean(predictions$observedY))^2)))
print(prederr <- sqrt(1/(dim(test)[1])*sum((predictions$observedY - predictions$predictedY)^2)))
predErr=c(predErr,prederr)
rSqrd=c(rSqrd,rsqrd)
}
(avg_rSqrd <- mean(rSqrd))
(avg_predErr <- mean(predErr))
system('top -b -n 1 -u $USER', intern=TRUE)
registerDoMC(cores = 4)
nfolds=5
set.seed(1)
foldi=sample(rep(1:nfolds,length.out=length(inputs$inputData$Variable1)))
table(foldi)
rSqrd=NULL
predErr=NULL
foreach(k = 1:nfolds) %dopar% {
testi=which(foldi==k)
train=inputs$inputData[-testi,]
test=inputs$inputData[testi,]
runInfoObj <- profRegr(yModel=inputs$yModel, xModel=inputs$xModel,nSweeps=1000, nBurn=1000, data=train, output="output", covNames=inputs$covNames, predict = test, fixedEffectsNames = inputs$fixedEffectNames, seed = 1234)
dissimObj <- calcDissimilarityMatrix(runInfoObj)
clusObj <- calcOptimalClustering(dissimObj)
riskProfileObj <- calcAvgRiskAndProfile(clusObj)
predictions <- calcPredictions(riskProfileObj,fullSweepPredictions=TRUE,fullSweepLogOR=TRUE)
print(rsqrd <- 1-(sum((predictions$observedY - predictions$predictedY)^2)/sum((predictions$observedY - mean(predictions$observedY))^2)))
print(prederr <- sqrt(1/(dim(test)[1])*sum((predictions$observedY - predictions$predictedY)^2)))
#predErr=c(predErr,prederr)
#rSqrd=c(rSqrd,rsqrd)
}
(avg_rSqrd <- mean(rSqrd))
(avg_predErr <- mean(predErr))
|
/sandbox/hyb_by_month_preds/full_long.R
|
no_license
|
TACC/EnviroTyping
|
R
| false | false | 4,563 |
r
|
# -----------------------------------------
#Midwestern agriculture synthesis Shiny app
# -----------------------------------------
# Managing Soil Carbon - SNAPP Working Group
library("shiny") # for making Shiny app
library("dplyr") # for sorting and summarizing data
library("readxl") # for importing dataframe
library("ggplot2") # for plotting data
library(shinyjs)
library(gdata) #reorder legends
library(shinydashboard)
setwd(".")
#datapath <- "/Users/LWA/Desktop/github/midwesternag_synthesis/"
datapath <- "~/Box Sync/Work/Code/Midwest-Agriculture-Synthesis/www/data"
#import data -> summary files
covercrop <- read.csv(file.path(datapath, "/CC_FULL_Summary2.csv"), stringsAsFactors = FALSE)
pestmgmt <- read.csv(file.path(datapath, "/PestMgmt_FULL_Summary2.csv"), stringsAsFactors = FALSE)
summary_all <- full_join(covercrop, pestmgmt)
#change columns to factors
collist <- c("Review_id", "main_group", "group_metric", "Legend_1", "Legend_2", "Legend_3", "Group_RV", "Review")
summary_all[collist] <- lapply(summary_all[collist], factor)
levels(summary_all$Legend_1)
#reorder the data for the legend
summary_all$Legend_1 <- reorder.factor(summary_all$Legend_1, new.order = c("Monoculture", "Mixture (2 Spp.)", "Mixture (3+ Spp.)", "Soil", "Foliage","Seed", "Seed & Foliage" ))
#rearrange the data according to the new ordering defined above
summary_all <- summary_all %>% arrange(Legend_1)
#make a temporary new column that just combines the group_metric and the main group. This new column has all rows then.
#then we reorder this column, so that it will be organized by both facet(main group) and by group_metric
summary_all$group_metric_facet <- with(summary_all, paste(group_metric, main_group, sep = "_"))
summary_all$group_metric_facet <- reorder.factor(summary_all$group_metric_facet, new.order = sort(unique(summary_all$group_metric_facet), decreasing = TRUE))
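#Illustration (added comment, hypothetical values): a row with group_metric = "Yield"
#and main_group = "Crop Production" gets group_metric_facet = "Yield_Crop Production",
#e.g. paste("Yield", "Crop Production", sep = "_"), so sorting this single column
#orders rows by facet and by metric at once.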
###start of management button test (safe to delete this chunk)###
#all of our data has cover cropping in this column. I make half of the data (randomly chosen) a different entry to see if the button works.
#levels(summary_all$Review) <- c(levels(summary_all$Review), "test")
#summary_all$Review[sample(x = nrow(summary_all), size = nrow(summary_all)/2)] <- "test"
###end of test###
#user interface
ui <- fluidPage(
useShinyjs(), #this lets us use the shinyjs package. This is required just for the "click" function below, which "clicks" the update button to initialize a plot at the start
titlePanel('Synthesis of the trade-offs associated with Best Management Practices (BMPs) in the US Midwest'),
sidebarLayout(
sidebarPanel(
tabsetPanel(
tabPanel("Practice",
radioButtons(inputId = "MgmtPractice", label = "Practice",
choices = unique(summary_all$Review), #will be expanded as review dataframes are populated
selected = "Cover Crop"),
radioButtons(inputId = "RV", label = "Outcome",
choices = unique(summary_all$Group_RV) %>% sort(),
selected = "Soil"),
actionButton(inputId = "update", label = "Update")
),
tabPanel("Outcome",
radioButtons(inputId = "MgmtPractice", label = "Practice",
choices = unique(summary_all$Review), #will be expanded as review dataframes are populated
selected = "Cover Crop"),
radioButtons(inputId = "RV", label = "Outcome",
choices = unique(summary_all$Group_RV) %>% sort(),
selected = "Soil"),
actionButton(inputId = "update", label = "Update")
)
)
),
mainPanel(
tabsetPanel(
tabPanel("Data",
plotOutput(outputId = "forestplot"),
br(),
fluidRow(
box(
textOutput(outputId = "text_description"),
title = "Plot Description",
width = 10 #change width depending on how big you want the textbox to be. can go from 1-12
                     #to use other features of box (like color, collapsible, etc.) we need to change fluidPage to dashboardPage
)
)
),
tabPanel("Map"),
tabPanel("References")
)
)
)
)
####server instructions####
#build plot in server function
server <- function(input, output) {
df <- eventReactive(input$update,{ #set action button to initiate changes in the figures displayed
#filter dataset to display selected review and response variables
summary_all %>%
filter(Review == input$MgmtPractice) %>%
filter(Group_RV == input$RV)
})
#build figure based on selected data
output$forestplot <- renderPlot({
ggplot(df(), aes(group_metric_facet, mean_per_change1, #remember that group_metric_facet is the column ordered by main_group and group_metric
ymin = mean_per_change1-sem_per_change1,
ymax = mean_per_change1 +sem_per_change1)) +
scale_x_discrete("", breaks = summary_all$group_metric_facet, label = summary_all$group_metric) + #this line relabels the x from "group_metric_main_group" to just "group_metric"
geom_pointrange() +
geom_errorbar(aes(ymin = mean_per_change1-sem_per_change1,
ymax = mean_per_change1 +sem_per_change1,
width=.1)) +
geom_hline(yintercept=0, lty=2) +# add a dotted line at x=0 after flip
coord_flip() + # flip coordinates (puts labels on y axis)
labs(title =df()$Review[1], #since we are filtering summary_all to only have 1 value for review/group_rv, we can take any element as the label (they should all be the same)
subtitle = df()$Group_RV[1],
x = "",
y = "percent difference between control and treatment (%)") +
#scale_fill_discrete(breaks=c("Monoculture","Mixture (2 Spp.)","Mixture (3+ Spp.)")) +
theme_bw() +
geom_point( aes(colour = Legend_1)) + #color labeling of fine level groupings
facet_grid(main_group ~., scales = "free", space = "free") +
theme(strip.text.y = element_text(angle = 0))
})
output$text_description <- renderText({
if(df()$Review[1] == "Cover Crop"){
"this is the text we can show for Cover Crop options. We can add another if statement within this one if we want a separate description for each response. See commented code for an example"
# if(df()$Group_RV[1] == "Crop Production"){
# "This is the text we show for cover crop & crop production"
# }
# else if(df()$Group_RV[1] =="Soil"){
# "This is the text we show for cover crop and soil"
# }
# else if ... ect ect
}
else{
"this is the text we can show for Early Season Pest Management. The width of these text boxes can be adjusted in the ui, in the 'box' function"
}
})
observe({
click("update") #this chunk will click the update button at the very start, so that our app starts with a plot.
# if you want to change the default plot, then change the default "selected" values above
#invalidateLater(1000) #invalidateLater would click the update button every '1000' milliseconds
})
}
shinyApp(ui = ui, server = server)
#Share the app
#replace my computer with a web server
#Create directory with every file the app needs...datasets, images, css, helper scripts, etc.
#app.R #name of your script which ends with a call to shinyApp()
#shinyapps.io <- server maintained by RStudio to upload apps as they are developed
#server notes
#input$MgmtPractice #use as reactive value, need to integrate into output code. sets how the data are queried
#establishes summary that will be set to df and used for figure out
#data <- reactive({#write querying code here based on input$MgmtPractice selected from checkboxes
# })
#need to add a few other drop down bars to account for other selection options.!!!Creates a function!
#data <- eventReactive(input$go, {checkboxGroupInput$MgmtPractice})
#triggers code to run on the server!, allows you to precisely specify which reactive values to invalidate
#reactive toolkit <- functions
#renderPlot({}) <- build something that will be displayed
#reactive({}) <- use this to create reactive expressions, these are technically functions.
#isolate({}) <- prevent app from responding before all choices are selected. Hit go? Maybe useful as user selects which groups to look at
# actionButton(inputId = "go", label = "Selection Complete") create a button in the ui section that will download a specific file for use, circumventing the slow process of querying within the app?
#observeEvent() #Trigger code***** place code outside of server function if it needs to be run once per session (querying here)
#code inside server function is run once per connection
#(filtering to see which query file to grab goes within the reactive function (render function))
#eventReactive(input$go, {checkboxGroupInput$MgmtPractice}) #delay reaction
#
#need to set each grouping name to each new datafile
#I think you link each list option to the different data sets
#df <- eventReactive(input$update,{
# if_else(input$RV == "Crop Production",cc_yield_summary,
#if_else(input$RV == "Pest Regulation",cc_pest_summary,
#if_else(input$RV == "Soils", cc_soil_summary,
#if_else(input$RV == "Water Movement", cc_water_summary, NULL)
#)))})
#df <- eventReactive(input$update,{ #merge datasets and then filter based on RVs
# if (input$RV %in% "Crop Production") {
# dataset1 <- cc_yield_summary
# if (input$RV %in% "Pest Regulation") {
# dataset1 <- cc_pest_summary
#if (input$RV %in% "Soils") {
# dataset1 <- cc_soil_summary
#}
# return(dataset1)
#}
#}})
#ggplot(df(), aes(group_metric, mean_per_change, ymin = mean_per_change-sem_per_change, ymax = mean_per_change +sem_per_change)) +
# geom_pointrange() +
#geom_errorbar(aes(ymin = mean_per_change-sem_per_change, ymax = mean_per_change +sem_per_change, width=.2)) +
#geom_hline(yintercept=0, lty=2) +# add a dotted line at x=0 after flip
#coord_flip() + # flip coordinates (puts labels on y axis)
#theme_bw() +
#geom_point( aes(colour = Cover_crop_diversity2)) + #color labeling of fine level groupings
#facet_grid(main_group ~ .,scales = "free", space = "free") +
#theme(strip.text.y = element_text(angle = 0))
|
/www/app.R
|
no_license
|
kanedan29/Midwest-Agriculture-Synthesis
|
R
| false | false | 10,630 |
r
|
#Set Working Directory
#Windows @ UA
setwd("C:/Users/avanderlaar/Dropbox/R/Distance/")
#add these libraries
library(unmarked)
library(AICcmodavg)
#Read in the bird detection information
dists<-read.csv('Sora13.csv', header=TRUE)
#reading in the habitat information
soracov<-read.csv('Veg13_min.csv', header=TRUE )
# List of all transects from observations
tran.obs = unique(dists$Impound)
# List of transects without observations
#list = tran.all[tran.all%in%tran.obs==FALSE]
#dists$Impound<-factor(dists$Impound, levels=tran.all)
dists$Impound<-factor(dists$Impound, levels=c(1:51))
dists$Transect<-factor(dists$Transect, levels=c(1:281))
dists$Occasion<-factor(dists$Occasion, levels=c(1:25))
cutPT = seq(0,13,by=1)
#yDat = formatDistData(dists, distCol="Distance", transectNameCol="Transect", occasionCol="Occasion",
#                      dist.breaks=cutPT)
yDat = formatDistData(dists, distCol="Distance", transectNameCol="Transect", dist.breaks=cutPT)
# umf = unmarkedFrameGDS(y=yDat, yearlySiteCovs=soracov,
# numPrimary=2, survey="line", dist.breaks=seq(0, 20, by=2),
# unitsIn="m", tlength=soracov$Effort_M)
#numPrimary=25 to match the number of survey occasions defined above
#tlength = soracov$Km because that is the column in soracov that has the transect length in km
umf = unmarkedFrameGDS(y=yDat, numPrimary=25, survey="line", dist.breaks=cutPT, unitsIn="km", tlength=soracov$Km)
cutPT2=c(0:13)
umfD = unmarkedFrameDS(y=yDat, dist.breaks=cutPT2, tlength=soracov$Km, survey="line", unitsIn="km")
head(umf)
nullD = distsamp(~1~1, umfD)
null = gdistsamp(~1, ~1, ~1, umf)
str(umf)
summary(umf)
#Graphs the distribution of observations from the transect line in a histogram
hist(dists$Distance)
##################################################
#### Modeling ####################################
##################################################
#in 'distsamp' the space after the first ~ is for modeling factors
#affecting detection and the space after the second ~ is for modeling
#factors affecting density.
##################################################
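#Illustrative example (added; not one of the candidate models below): this would
#model detection as a function of Habitat and density as a function of RegionA,
#with the same hazard-rate key function used in the model set.
#distsamp(~Habitat ~ RegionA, umfD, keyfun = "hazard")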
#### Habitat Management #########################
#################################################
Cand.modG = list()
Cand.modG[[1]] = gdistsamp(~Habitat~1, umf, keyfun="hazard")
Cand.modG[[2]] = gdistsamp(~Habitat~RegionA + Habitat + Year, umf, keyfun="hazard")
Cand.modG[[3]] = gdistsamp(~Habitat~Habitat + Year, umf, keyfun="hazard")
Cand.modG[[4]] = gdistsamp(~Habitat~RegionA, umf, keyfun="hazard")
Cand.modG[[5]] = gdistsamp(~Habitat~AreaA + RegionA, umf, keyfun="hazard")
Cand.modG[[6]] = gdistsamp(~Habitat~AreaA, umf, keyfun="hazard")
Modnames = c("null", "Region+Hab+Dist", "Hab+Dist", "Region", "Area+Region", "Area")
detect.table = aictab(cand.set=Cand.modG, modnames=Modnames)
detect.table
###########################
###########PLOTS###########Region
##########################
#Round
# DataFrame <- data.frame(RoundA=c("A", "B", "C"))
# model <- distsamp(~1~RoundA, umf, keyfun="hazard")
# Elambda <- predict(model, type="state", newdata=DataFrame, appendData=TRUE)
RoundNames=data.frame(RoundNames=c("Round1", "Round2", "Round3"))
Elambda
par(mfrow=c(1,5))
with(Elambda, {
x <- barplot(Predicted, names=RoundA, xlab="Round", col="#FFC000", ylab="Sora/ha",
ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
#Region
DataFrame <- data.frame(RegionA=c("NW", "NC", "NE", "SE"))
model <- distsamp(~1~RegionA, umf, keyfun="hazard")
Elambda <- predict(model, type="state", newdata=DataFrame, appendData=TRUE)
jpeg('Region.jpg')
par(mfrow=c(1,1))
with(Elambda, {
x <- barplot(Predicted, names=RegionA, xlab="Region", col="#FFC000", ylab="Sora/ha",
ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
dev.off()
#Disturbance
DataFrame <- data.frame(Disturbance_Red=c("Mowed", "Disced", "None"))
model <- distsamp(~1~Disturbance_Red, umf, keyfun="hazard")
Elambda <- predict(model, type="state", newdata=DataFrame, appendData=TRUE)
jpeg('DisturbanceByRegion')
par(mfrow=c(1,4))
with(Elambda, {
x <- barplot(Predicted, names=Disturbance_Red, col="#FFC000", xlab="Disturbance",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
#Habitat
DataFrame <- data.frame(Habitat=c("PE", "MS", "UP"))
model <- distsamp(~1~Habitat, umf, keyfun="hazard")
Elambda <- predict(model, type="state", newdata=DataFrame, appendData=TRUE)
Elambda
ElambdaNC
ElambdaNE
ElambdaSE
par(mfrow=c(2,2))
with(Elambda, {
x <- barplot(Predicted, names=Habitat_Red, col="#FFC000", xlab="Habitat Type",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(ElambdaNW, {
x <- barplot(Predicted, names=Habitat_Red, col="#FFC000", xlab="Habitat Type",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(ElambdaNC, {
x <- barplot(Predicted, names=Habitat_Red, col="#FFC000", xlab="Habitat Type",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(ElambdaNE, {
x <- barplot(Predicted, names=Habitat_Red, col="#FFC000", xlab="Habitat Type",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(ElambdaSE, {
x <- barplot(Predicted, names=Habitat_Red, col="#FFC000", xlab="Habitat Type",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
#Area
DataFrame <- data.frame(AreaA=c("SC", "NV", "SL", "BK", "CC", "FG", "GP", "TS", "MN", "OS", "DC"))
model <- distsamp(~1~AreaA, umf, keyfun="hazard")
Elambda <- predict(model, type="state", newdata=DataFrame, appendData=TRUE)
par(mfrow=c(2,2))
with(Elambda, {
x <- barplot(Predicted, names=AreaA, col="#FFC000", xlab="Management Area",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(ElambdaNW, {
x <- barplot(Predicted, names=AreaA, col="#FFC000", xlab="Management Area",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(ElambdaNC, {
x <- barplot(Predicted, names=AreaA, col="#FFC000", xlab="Management Area",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(ElambdaNE, {
x <- barplot(Predicted, names=AreaA, col="#FFC000", xlab="Management Area",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(ElambdaSE, {
x <- barplot(Predicted, names=AreaA, col="#FFC000", xlab="Management Area",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
###########################
###########PLOTS###########ROUND
##########################
#Round
DataFrame <- data.frame(RoundA=c("A", "B", "C"))
model <- distsamp(~1~RoundA, umf, keyfun="hazard")
Elambda <- predict(model, type="state", newdata=DataFrame, appendData=TRUE)
par(mfrow=c(1,1))
with(Elambda, {
x <- barplot(Predicted, names=RoundA, col="#FFC000", xlab="Round", ylab="Sora/ha",
ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
#Region
DataFrame <- data.frame(RegionA=c("NW", "NC", "NE", "SE"))
model <- distsamp(~1~RegionA, umf, keyfun="hazard")
Elambda <- predict(model, type="state", newdata=DataFrame, appendData=TRUE)
par(mfrow=c(2,2))
with(Elambda, {
x <- barplot(Predicted, names=RegionA, col="#FFC000", xlab="Region", ylab="Sora/ha",
ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda1, {
x <- barplot(Predicted, names=RegionA, col="#FFC000", xlab="Region", ylab="Sora/ha",
ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda2, {
x <- barplot(Predicted, names=RegionA, col="#FFC000", xlab="Region", ylab="Sora/ha",
ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda3, {
x <- barplot(Predicted, names=RegionA, col="#FFC000", xlab="Region", ylab="Sora/ha",
ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
#Disturbance
DataFrame <- data.frame(Disturbance_Red=c("Mowed", "Disced", "None"))
model <- distsamp(~1~Disturbance_Red, umf, keyfun="hazard")
Elambda <- predict(model, type="state", newdata=DataFrame, appendData=TRUE)
par(mfrow=c(2,2))
with(Elambda, {
x <- barplot(Predicted, names=Disturbance_Red, col="#FFC000", xlab="Disturbance",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda1, {
x <- barplot(Predicted, names=Disturbance_Red, col="#FFC000", xlab="Disturbance",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda2, {
x <- barplot(Predicted, names=Disturbance_Red, col="#FFC000", xlab="Disturbance",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda3, {
  x <- barplot(Predicted, names=Disturbance_Red, col="#FFC000", xlab="Disturbance",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
#Habitat
DataFrame <- data.frame(Habitat_Red=c("PE", "MS", "UP"))
model <- distsamp(~1~Habitat_Red, umf, keyfun="hazard")
Elambda <- predict(model, type="state", newdata=DataFrame, appendData=TRUE)
par(mfrow=c(2,2))
with(Elambda, {
x <- barplot(Predicted, names=Habitat_Red, col="#FFC000", xlab="Habitat Type",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda1, {
x <- barplot(Predicted, names=Habitat_Red, col="#FFC000", xlab="Habitat Type",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda2, {
x <- barplot(Predicted, names=Habitat_Red, col="#FFC000", xlab="Habitat Type",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda3, {
x <- barplot(Predicted, names=Habitat_Red, col="#FFC000", xlab="Habitat Type",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
#Area
DataFrame <- data.frame(AreaA=c("SC", "NV", "SL", "BK", "CC", "FG", "GP", "TS", "MN", "OS", "DC"))
model <- distsamp(~1~AreaA, umf, keyfun="hazard")
Elambda <- predict(model, type="state", newdata=DataFrame, appendData=TRUE)
par(mfrow=c(2,2))
with(Elambda, {
x <- barplot(Predicted, names=AreaA, col="#FFC000", xlab="Management Area",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda1, {
x <- barplot(Predicted, names=AreaA, col="#FFC000", xlab="Management Area",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda2, {
x <- barplot(Predicted, names=AreaA, col="#FFC000", xlab="Management Area",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda3, {
x <- barplot(Predicted, names=AreaA, col="#FFC000", xlab="Management Area",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
Elambda
Elambda1
Elambda2
Elambda3
|
/old_awful_code/Distance.R
|
no_license
|
aurielfournier/my_unmarked_code
|
R
| false | false | 15,582 |
r
|
#Set Working Directory
#Windows @ UA
setwd("C:/Users/avanderlaar/Dropbox/R/Distance/")
#add these libraries
library(unmarked)
library(AICcmodavg)
#Read in the bird detection information
dists<-read.csv('Sora13.csv', header=TRUE)
#reading in the habitat information
soracov<-read.csv('Veg13_min.csv', header=TRUE )
# List of all transects from observations
tran.obs = unique(dists$Impound)
# List of transects without observations
#list = tran.all[tran.all%in%tran.obs==FALSE]
#dists$Impound<-factor(dists$Impound, levels=tran.all)
dists$Impound<-factor(dists$Impound, levels=c(1:51))
dists$Transect<-factor(dists$Transect, levels=c(1:281))
dists$Occasion<-factor(dists$Occasion, levels=c(1:25))
cutPT = seq(0,13,by=1)
#yDat = formatDistData(dists, distCol="Distance", transectNameCol="Transect", occasionCol="Occasion",
#                      dist.breaks=cutPT)
yDat = formatDistData(dists, distCol="Distance", transectNameCol="Transect", dist.breaks=cutPT)
# umf = unmarkedFrameGDS(y=yDat, yearlySiteCovs=soracov,
# numPrimary=2, survey="line", dist.breaks=seq(0, 20, by=2),
# unitsIn="m", tlength=soracov$Effort_M)
#numPrimary = number of primary sampling periods; the call below uses 25, matching the 25 Occasion levels
#tlength = soracov$Km because that is the column in soracov that has the length in km
umf = unmarkedFrameGDS(y=yDat, numPrimary=25, survey="line", dist.breaks=cutPT, unitsIn="km", tlength=soracov$Km)
cutPT2=c(0:13)
umfD = unmarkedFrameDS(y=yDat, dist.breaks=cutPT2, tlength=soracov$Km, survey="line", unitsIn="km")
head(umf)
nullD = distsamp(~1~1, umfD)
null = gdistsamp(~1, ~1, ~1, umf)
str(umf)
summary(umf)
#Graphs the distribution of observations from the transect line in a histogram
hist(dists$Distance)
##################################################
#### Modeling ####################################
##################################################
#in 'distsamp' the space after the first ~ is for modeling factors
#affecting detection and the space after the second ~ is for modeling
#factors affecting density.
##################################################
#### Habitat Management #########################
#################################################
Cand.modG = list()
Cand.modG[[1]] = gdistsamp(~Habitat~1, umf, keyfun="hazard")
Cand.modG[[2]] = gdistsamp(~Habitat~RegionA + Habitat + Year, umf, keyfun="hazard")
Cand.modG[[3]] = gdistsamp(~Habitat~Habitat + Year, umf, keyfun="hazard")
Cand.modG[[4]] = gdistsamp(~Habitat~RegionA, umf, keyfun="hazard")
Cand.modG[[5]] = gdistsamp(~Habitat~AreaA + RegionA, umf, keyfun="hazard")
Cand.modG[[6]] = gdistsamp(~Habitat~AreaA, umf, keyfun="hazard")
Modnames = c("null", "Region+Hab+Dist", "Hab+Dist", "Region", "Area+Region", "Area")
detect.table = aictab(cand.set=Cand.modG, modnames=Modnames)
detect.table
###########################
###########PLOTS###########Region
##########################
#Round
# DataFrame <- data.frame(RoundA=c("A", "B", "C"))
# model <- distsamp(~1~RoundA, umf, keyfun="hazard")
# Elambda <- predict(model, type="state", newdata=DataFrame, appendData=TRUE)
RoundNames=data.frame(RoundNames=c("Round1", "Round2", "Round3"))
Elambda
par(mfrow=c(1,5))
with(Elambda, {
x <- barplot(Predicted, names=RoundA, xlab="Round", col="#FFC000", ylab="Sora/ha",
ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
#Region
DataFrame <- data.frame(RegionA=c("NW", "NC", "NE", "SE"))
model <- distsamp(~1~RegionA, umf, keyfun="hazard")
Elambda <- predict(model, type="state", newdata=DataFrame, appendData=TRUE)
jpeg('Region.jpg')
par(mfrow=c(1,1))
with(Elambda, {
x <- barplot(Predicted, names=RegionA, xlab="Region", col="#FFC000", ylab="Sora/ha",
ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
dev.off()
#Disturbance
DataFrame <- data.frame(Disturbance_Red=c("Mowed", "Disced", "None"))
model <- distsamp(~1~Disturbance_Red, umf, keyfun="hazard")
Elambda <- predict(model, type="state", newdata=DataFrame, appendData=TRUE)
jpeg('DisturbanceByRegion.jpg')
par(mfrow=c(1,4))
with(Elambda, {
x <- barplot(Predicted, names=Disturbance_Red, col="#FFC000", xlab="Disturbance",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
#Habitat
DataFrame <- data.frame(Habitat=c("PE", "MS", "UP"))
model <- distsamp(~1~Habitat, umf, keyfun="hazard")
Elambda <- predict(model, type="state", newdata=DataFrame, appendData=TRUE)
Elambda
ElambdaNC
ElambdaNE
ElambdaSE
par(mfrow=c(2,2))
with(Elambda, {
  x <- barplot(Predicted, names=Habitat, col="#FFC000", xlab="Habitat Type",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(ElambdaNW, {
x <- barplot(Predicted, names=Habitat_Red, col="#FFC000", xlab="Habitat Type",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(ElambdaNC, {
x <- barplot(Predicted, names=Habitat_Red, col="#FFC000", xlab="Habitat Type",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(ElambdaNE, {
x <- barplot(Predicted, names=Habitat_Red, col="#FFC000", xlab="Habitat Type",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(ElambdaSE, {
x <- barplot(Predicted, names=Habitat_Red, col="#FFC000", xlab="Habitat Type",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
#Area
DataFrame <- data.frame(AreaA=c("SC", "NV", "SL", "BK", "CC", "FG", "GP", "TS", "MN", "OS", "DC"))
model <- distsamp(~1~AreaA, umf, keyfun="hazard")
Elambda <- predict(model, type="state", newdata=DataFrame, appendData=TRUE)
par(mfrow=c(2,2))
with(Elambda, {
x <- barplot(Predicted, names=AreaA, col="#FFC000", xlab="Management Area",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(ElambdaNW, {
x <- barplot(Predicted, names=AreaA, col="#FFC000", xlab="Management Area",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(ElambdaNC, {
x <- barplot(Predicted, names=AreaA, col="#FFC000", xlab="Management Area",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(ElambdaNE, {
x <- barplot(Predicted, names=AreaA, col="#FFC000", xlab="Management Area",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(ElambdaSE, {
x <- barplot(Predicted, names=AreaA, col="#FFC000", xlab="Management Area",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
###########################
###########PLOTS###########ROUND
##########################
#Round
DataFrame <- data.frame(RoundA=c("A", "B", "C"))
model <- distsamp(~1~RoundA, umf, keyfun="hazard")
Elambda <- predict(model, type="state", newdata=DataFrame, appendData=TRUE)
par(mfrow=c(1,1))
with(Elambda, {
x <- barplot(Predicted, names=RoundA, col="#FFC000", xlab="Round", ylab="Sora/ha",
ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
#Region
DataFrame <- data.frame(RegionA=c("NW", "NC", "NE", "SE"))
model <- distsamp(~1~RegionA, umf, keyfun="hazard")
Elambda <- predict(model, type="state", newdata=DataFrame, appendData=TRUE)
par(mfrow=c(2,2))
with(Elambda, {
x <- barplot(Predicted, names=RegionA, col="#FFC000", xlab="Region", ylab="Sora/ha",
ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda1, {
x <- barplot(Predicted, names=RegionA, col="#FFC000", xlab="Region", ylab="Sora/ha",
ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda2, {
x <- barplot(Predicted, names=RegionA, col="#FFC000", xlab="Region", ylab="Sora/ha",
ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda3, {
x <- barplot(Predicted, names=RegionA, col="#FFC000", xlab="Region", ylab="Sora/ha",
ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
#Disturbance
DataFrame <- data.frame(Disturbance_Red=c("Mowed", "Disced", "None"))
model <- distsamp(~1~Disturbance_Red, umf, keyfun="hazard")
Elambda <- predict(model, type="state", newdata=DataFrame, appendData=TRUE)
par(mfrow=c(2,2))
with(Elambda, {
x <- barplot(Predicted, names=Disturbance_Red, col="#FFC000", xlab="Disturbance",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda1, {
x <- barplot(Predicted, names=Disturbance_Red, col="#FFC000", xlab="Disturbance",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda2, {
x <- barplot(Predicted, names=Disturbance_Red, col="#FFC000", xlab="Disturbance",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda3, {
  x <- barplot(Predicted, names=Disturbance_Red, col="#FFC000", xlab="Disturbance",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
#Habitat
DataFrame <- data.frame(Habitat_Red=c("PE", "MS", "UP"))
model <- distsamp(~1~Habitat_Red, umf, keyfun="hazard")
Elambda <- predict(model, type="state", newdata=DataFrame, appendData=TRUE)
par(mfrow=c(2,2))
with(Elambda, {
x <- barplot(Predicted, names=Habitat_Red, col="#FFC000", xlab="Habitat Type",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda1, {
x <- barplot(Predicted, names=Habitat_Red, col="#FFC000", xlab="Habitat Type",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda2, {
x <- barplot(Predicted, names=Habitat_Red, col="#FFC000", xlab="Habitat Type",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda3, {
x <- barplot(Predicted, names=Habitat_Red, col="#FFC000", xlab="Habitat Type",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
#Area
DataFrame <- data.frame(AreaA=c("SC", "NV", "SL", "BK", "CC", "FG", "GP", "TS", "MN", "OS", "DC"))
model <- distsamp(~1~AreaA, umf, keyfun="hazard")
Elambda <- predict(model, type="state", newdata=DataFrame, appendData=TRUE)
par(mfrow=c(2,2))
with(Elambda, {
x <- barplot(Predicted, names=AreaA, col="#FFC000", xlab="Management Area",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda1, {
x <- barplot(Predicted, names=AreaA, col="#FFC000", xlab="Management Area",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda2, {
x <- barplot(Predicted, names=AreaA, col="#FFC000", xlab="Management Area",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
with(Elambda3, {
x <- barplot(Predicted, names=AreaA, col="#FFC000", xlab="Management Area",
ylab="Sora/ha", ylim=c(0,25), cex.names=0.7, cex.lab=0.7, cex.axis=0.7)
arrows(x, Predicted, x, Predicted+SE, code=3, angle=90, length=0.05)
arrows(x, Predicted, x, Predicted-SE, code=3, angle=90, length=0.05)
box()
})
Elambda
Elambda1
Elambda2
Elambda3
|
library(readxl)
schema<-read_xlsx(file.choose()) #
data1<-read_xlsx(file.choose()) #
#data2<-read.csv(file.choose(),header = T,stringsAsFactors = F) #
col.name<-schema[schema$TableName=='Name',"ColumnName"] # extract the column names of the schema for a particular table
num.col.name<-nrow(col.name) # number of columns in that table's schema
col.data1<-colnames(data1) # 1st data set's column names
num.data1<-ncol(data1) # number of columns in the 1st data set
#col.data2<-colnames(data2) # 2nd data set's column names
#num.data2<-ncol(data2) # number of column in 2nd data
temp.result<-NULL
temp.result <- data.frame(
  SchemaColumn=character(),
  DataColumn=character(),
  DistanceScore=numeric(),
  stringsAsFactors=FALSE)
result <- data.frame(
  SchemaColumn=character(),
  DataColumn=character(),
  DistanceScore=numeric(),
  stringsAsFactors=FALSE)
for(i in 1:num.data1){
  for (j in 1:num.col.name){
    a<-col.data1[i] # was col.data2, which is commented out above; the loop runs over data1's columns
    b<-as.character(col.name[j,])
    dist.score<-adist(a,b,ignore.case = T)[1]
    temp.result[j,c(1,2,3)] <- c(b,a,dist.score)
  }
  temp<-temp.result[temp.result$DistanceScore==min(as.numeric(temp.result$DistanceScore)),]
  result<-rbind(result,temp)
}
write.csv(result,file = './compared_column.csv' )
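#Added note (hedged): adist() above is the generalized Levenshtein edit distance, so the
#closest data column wins; e.g. a single inserted character gives a distance of 1.
adist("CustomerName", "Customer_Name", ignore.case = TRUE)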
|
/BagOfWords/field-mapping_word_mapping.R
|
no_license
|
SrijaGupta/CapstoneProject-IDS507
|
R
| false | false | 1,215 |
r
|
library(readxl)
schema<-read_xlsx(file.choose()) #
data1<-read_xlsx(file.choose()) #
#data2<-read.csv(file.choose(),header = T,stringsAsFactors = F) #
col.name<-schema[schema$TableName=='Name',"ColumnName"] # extract the column names of the schema for a particular table
num.col.name<-nrow(col.name) # number of columns in that table's schema
col.data1<-colnames(data1) # 1st data set's column names
num.data1<-ncol(data1) # number of columns in the 1st data set
#col.data2<-colnames(data2) # 2nd data set's column names
#num.data2<-ncol(data2) # number of column in 2nd data
temp.result<-NULL
temp.result <- data.frame(
  SchemaColumn=character(),
  DataColumn=character(),
  DistanceScore=numeric(),
  stringsAsFactors=FALSE)
result <- data.frame(
  SchemaColumn=character(),
  DataColumn=character(),
  DistanceScore=numeric(),
  stringsAsFactors=FALSE)
for(i in 1:num.data1){
  for (j in 1:num.col.name){
    a<-col.data1[i] # was col.data2, which is commented out above; the loop runs over data1's columns
    b<-as.character(col.name[j,])
    dist.score<-adist(a,b,ignore.case = T)[1]
    temp.result[j,c(1,2,3)] <- c(b,a,dist.score)
  }
  temp<-temp.result[temp.result$DistanceScore==min(as.numeric(temp.result$DistanceScore)),]
  result<-rbind(result,temp)
}
write.csv(result,file = './compared_column.csv' )
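#Added note (hedged): adist() above is the generalized Levenshtein edit distance, so the
#closest data column wins; e.g. a single inserted character gives a distance of 1.
adist("CustomerName", "Customer_Name", ignore.case = TRUE)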
|
#' Generate a mixed design from existing data
#'
#' \code{sim_mixed_df()} produces a data table with the same distributions of
#' by-subject and by-item random intercepts as an existing data table.
#'
#' @param data the existing tbl
#' @param sub_n the number of subjects to simulate (if NULL, returns data for the same subjects)
#' @param item_n the number of items to simulate (if NULL, returns data for the same items)
#' @param dv the column name or index containing the DV
#' @param sub_id the column name or index for the subject IDs
#' @param item_id the column name or index for the item IDs
#'
#' @return a tbl
#' @examples
#' \donttest{sim_mixed_df(faceratings, 10, 10, "rating", "rater_id", "face_id")}
#' @export
sim_mixed_df <- function(data, sub_n = NULL, item_n = NULL,
dv = "y", sub_id = "sub_id", item_id = "item_id") {
params <- check_mixed_design(data, dv, sub_id, item_id)
# get exact intercepts if sub_n or item_n is NULL
if (is.null(item_n)) {
if (is.numeric(item_id)) item_id <- names(data)[item_id]
params$item_sd <- params$random_effects[[item_id]][1] %>% as.matrix()
item_n <- length(params$item_sd)
}
if (is.null(sub_n)) {
if (is.numeric(sub_id)) sub_id <- names(data)[sub_id]
params$sub_sd <- params$random_effects[[sub_id]][1] %>% as.matrix()
sub_n <- length(params$sub_sd)
}
new_obs <- sim_mixed_cc(sub_n, item_n, params$grand_i,
params$sub_sd, params$item_sd, params$error_sd)
new_obs
}
|
/R/sim_mixed_df.R
|
permissive
|
debruine/faux
|
R
| false | false | 1,525 |
r
|
#' Generate a mixed design from existing data
#'
#' \code{sim_mixed_df()} produces a data table with the same distributions of
#' by-subject and by-item random intercepts as an existing data table.
#'
#' @param data the existing tbl
#' @param sub_n the number of subjects to simulate (if NULL, returns data for the same subjects)
#' @param item_n the number of items to simulate (if NULL, returns data for the same items)
#' @param dv the column name or index containing the DV
#' @param sub_id the column name or index for the subject IDs
#' @param item_id the column name or index for the item IDs
#'
#' @return a tbl
#' @examples
#' \donttest{sim_mixed_df(faceratings, 10, 10, "rating", "rater_id", "face_id")}
#' @export
sim_mixed_df <- function(data, sub_n = NULL, item_n = NULL,
dv = "y", sub_id = "sub_id", item_id = "item_id") {
params <- check_mixed_design(data, dv, sub_id, item_id)
# get exact intercepts if sub_n or item_n is NULL
if (is.null(item_n)) {
if (is.numeric(item_id)) item_id <- names(data)[item_id]
params$item_sd <- params$random_effects[[item_id]][1] %>% as.matrix()
item_n <- length(params$item_sd)
}
if (is.null(sub_n)) {
if (is.numeric(sub_id)) sub_id <- names(data)[sub_id]
params$sub_sd <- params$random_effects[[sub_id]][1] %>% as.matrix()
sub_n <- length(params$sub_sd)
}
new_obs <- sim_mixed_cc(sub_n, item_n, params$grand_i,
params$sub_sd, params$item_sd, params$error_sd)
new_obs
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phaseImpute.R
\name{prePhasingByShapeit}
\alias{prePhasingByShapeit}
\title{Prephasing genotypes using SHAPEIT}
\usage{
prePhasingByShapeit(shapeit, chrs, dataDIR, prefix4plinkEachChr, impRefDIR,
phaseDIR, nThread, effectiveSize, nCore)
}
\arguments{
\item{shapeit}{an executable SHAPEIT program in either the current working directory or somewhere in the command path.}
\item{chrs}{specify the chromosomes for phasing.}
\item{dataDIR}{the directory where genotype PLINK files are located.}
\item{prefix4plinkEachChr}{the prefix of PLINK files for each chromosome.}
\item{impRefDIR}{the directory where the imputation reference files are located.}
\item{phaseDIR}{the directory where resulting pre-phased files will be located.}
\item{nThread}{the number of threads used for computation.}
\item{effectiveSize}{this parameter controls the effective population size.}
\item{nCore}{the number of cores used for computation. This can be tuned along with nThread.}
}
\value{
The pre-phased haplotypes for given chromosomes.
}
\description{
Perform prephasing for study genotypes by SHAPEIT for the autosomal and sex chromosome haplotypes using a reference panel (pre-set).
If ChrX is available then it is done differently by passing the flag --chrX to SHAPEIT.
}
\author{
Junfang Chen <junfang.chen3@gmail.com>
}
|
/man/prePhasingByShapeit.Rd
|
no_license
|
Junfang/Gimpute
|
R
| false | true | 1,397 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phaseImpute.R
\name{prePhasingByShapeit}
\alias{prePhasingByShapeit}
\title{Prephasing genotypes using SHAPEIT}
\usage{
prePhasingByShapeit(shapeit, chrs, dataDIR, prefix4plinkEachChr, impRefDIR,
phaseDIR, nThread, effectiveSize, nCore)
}
\arguments{
\item{shapeit}{an executable SHAPEIT program in either the current working directory or somewhere in the command path.}
\item{chrs}{specify the chromosomes for phasing.}
\item{dataDIR}{the directory where genotype PLINK files are located.}
\item{prefix4plinkEachChr}{the prefix of PLINK files for each chromosome.}
\item{impRefDIR}{the directory where the imputation reference files are located.}
\item{phaseDIR}{the directory where resulting pre-phased files will be located.}
\item{nThread}{the number of threads used for computation.}
\item{effectiveSize}{this parameter controls the effective population size.}
\item{nCore}{the number of cores used for computation. This can be tuned along with nThread.}
}
\value{
The pre-phased haplotypes for given chromosomes.
}
\description{
Perform prephasing for study genotypes by SHAPEIT for the autosomal and sex chromosome haplotypes using a reference panel (pre-set).
If ChrX is available then it is done differently by passing the flag --chrX to SHAPEIT.
}
\author{
Junfang Chen <junfang.chen3@gmail.com>
}
|
#[export]
kurt.test2 <- function(x, y) {
n1 <- length(x)
n2 <- length(y)
  vars1 <- 24 * n1 * (n1 - 1)^2 / ( (n1 - 3) * (n1 - 2) * (n1 + 3) * (n1 + 5) )
vars2 <- 24 * n2 * (n2 - 1)^2 / ( (n2 - 3) * (n2 - 2) * (n2 + 3) * (n2 + 5) )
stat <- ( Rfast::kurt(x) - Rfast::kurt(y) ) / sqrt( vars1 + vars2 )
pval <- 2 * pnorm(abs(stat), lower.tail = FALSE)
res <- c(stat, pval)
names(res) <- c("stat", "p-value")
res
}
|
/fuzzedpackages/Rfast/R/kurt.test2.R
|
no_license
|
akhikolla/testpackages
|
R
| false | false | 442 |
r
|
#[export]
kurt.test2 <- function(x, y) {
n1 <- length(x)
n2 <- length(y)
  vars1 <- 24 * n1 * (n1 - 1)^2 / ( (n1 - 3) * (n1 - 2) * (n1 + 3) * (n1 + 5) )
vars2 <- 24 * n2 * (n2 - 1)^2 / ( (n2 - 3) * (n2 - 2) * (n2 + 3) * (n2 + 5) )
stat <- ( Rfast::kurt(x) - Rfast::kurt(y) ) / sqrt( vars1 + vars2 )
pval <- 2 * pnorm(abs(stat), lower.tail = FALSE)
res <- c(stat, pval)
names(res) <- c("stat", "p-value")
res
}
|
#' VS-Lite model of tree ring width growth.
#'
#' \code{VSLite} simulates tree ring width growth.
#'
#' R port of VS-Lite Model of Tree Ring Width by Suz Tolwinski-Ward, 2015. For more references,
#' see xxxxyyyyyzzzz.
#'
#' @param syear Start year of simulation.
#' @param eyear End year of simulation.
#' @param phi Latitude of site (in degrees N).
#' @param T (12 x Nyrs) Matrix of ordered mean monthly temperatures (in degrees C).
#' @param P (12 x Nyrs) Matrix of ordered accumulated monthly precipitation (in mm).
#' @param T1 Lower temperature threshold for growth to begin (scalar, deg. C).
#' @param T2 Upper temperature threshold for growth sensitivity to temp (scalar, deg. C).
#' @param M1 Lower moisture threshold for growth to begin (scalar, v/v).
#' @param M2 Upper moisture threshold for growth sensitivity to moisture (scalar, v/v).
#' @param Mmax Scalar maximum soil moisture held by the soil (in v/v).
#' @param Mmin Scalar minimum soil moisture (for error-catching) (in v/v).
#' @param alph Scalar runoff parameter 1 (in inverse months).
#' @param m.th Scalar runoff parameter 3 (unitless).
#' @param mu.th Scalar runoff parameter 2 (unitless).
#' @param rootd Scalar root/"bucket" depth (in mm).
#' @param M0 Initial value for previous month's soil moisture at t = 1 (in v/v).
#' @param substep Use leaky bucket code with sub-monthly time-stepping? (TRUE/FALSE)
#' @param I_0 lower bound of integration window (months before January in NH)
#' @param I_f upper bound of integration window (months after January in NH)
#' @param hydroclim Switch; value is either "P" (default) or "M" depending on whether the
#' second input climate variable is precipitation, in which case soil moisture is estimated
#' using the Leaky Bucket model of the CPC, or soil moisture, in which case the inputs are
#' used directly to compute the growth response.
#'
#' @return trw
#' @return gT
#' @return gM
#' @return gE
#' @return M
#' @return potEv
#' @return sample.mean.width
#' @return sample.std.width
#'
#' @seealso \code{\link{compute.gE}},\code{\link{std.ramp}},\code{\link{leakybucket.monthly}},\code{\link{leakybucket.submonthly}}
#'
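#' @examples
#' # Hedged, illustrative call (added): synthetic 12 x 10 monthly climate for a site at
#' # 45 deg N. The values are arbitrary and only meant to show the expected input shapes.
#' \dontrun{
#' nyr <- 10
#' Tsim <- matrix(rep(c(-5, -3, 2, 8, 14, 19, 22, 21, 16, 10, 3, -2), nyr), nrow = 12)
#' Psim <- matrix(rep(c(50, 45, 60, 70, 80, 75, 65, 60, 55, 65, 60, 55), nyr), nrow = 12)
#' out <- VSLite(syear = 2001, eyear = 2010, phi = 45, T = Tsim, P = Psim)
#' out$trw
#' }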
#' @export
####################################################################################################
VSLite <- function(syear,eyear,phi,T,P,
T1 = 8, T2 = 23, M1 = .01, M2 = .05,
Mmax = 0.76,Mmin = 0.01,alph = 0.093,
m.th = 4.886,mu.th = 5.8,rootd = 1000,M0 = .2,
substep = 0,I_0 = 1,I_f = 12,hydroclim = "P"){
#############################################################################
nyrs <- length(syear:eyear)
Gr <- gT <- gM <- M <- potEv <- matrix(NA,12,nyrs);
#############################################################################
## Load in soil moisture, or estimate it with the Leaky Bucket model:
if(hydroclim == "M"){
## Read in soil moisture:
M = P;
}else{# Compute soil moisture:
if(substep == 1){
M <- leakybucket.submonthly(syear,eyear,phi,T,P,
Mmax,Mmin,alph,m.th,mu.th,rootd,M0);
}else{
M <- leakybucket.monthly(syear,eyear,phi,T,P,
Mmax,Mmin,alph,m.th,mu.th,rootd,M0);
}
if(substep !=1 && substep != 0){
cat("'substep' param must either be set to 1 or 0.");
      return(NULL)
}
}
# Compute gE, the scaled monthly proxy for insolation:
gE <- compute.gE(phi);
#############################################################################
### Calculate Growth Response functions gT and gM
# Temperature growth response:
gT <- std.ramp(T,T1,T2)
# Soil moisture growth response:
gM <- std.ramp(M,M1,M2)
# Compute overall growth rate:
Gr <- kronecker(matrix(1,1,nyrs),gE)*pmin(gT,gM)
############## Compute proxy quantity from growth responses #################
width <- matrix(NA,nyrs,1);
if (phi>0){ # Site in Northern Hemisphere:
if (I_0<0){ # if we include part of the previous year in each year's modeled growth:
startmo <- 13+I_0;
endmo <- I_f;
# use average of growth data across modeled years to estimate first year's growth due
# to previous year:
width[1] <- sum(Gr[1:endmo,1]) + sum(rowMeans(Gr[startmo:12,]));
for(cyear in 2:nyrs){
width[cyear] <- colSums(Gr[startmo:12,cyear-1]) + colSums(Gr[1:endmo,cyear]);
}
}else{ # no inclusion of last year's growth conditions in estimates of this year's growth:
startmo <- I_0+1;
endmo <- I_f;
width <- colSums(Gr[startmo:endmo,])
}
}
if(phi<0){ # if site is in the Southern Hemisphere:
# (Note: in the Southern Hemisphere, ring widths are dated to the year in which growth began!)
startmo <- 7+I_0; # (eg. I_0 = -4 in SH corresponds to starting integration in March of cyear)
    endmo <- I_f-6; # (eg. I_f = 12 in SH corresponds to ending integration in June of next year)
for (cyear in 1:(nyrs-1)){
width[cyear] <- sum(Gr[startmo:12,cyear]) + sum(Gr[1:endmo,cyear+1]);
}
# use average of growth data across modeled years to estimate last year's growth due
# to the next year:
width[nyrs] <- sum(Gr[startmo:12,nyrs])+sum(rowMeans(Gr[1:endmo,]));
}
# Simulated proxy series standardized width:
trw <- t((width-mean(width))/sd(width));
trw_org <- width;
#############################################################################
# Return output:
out <- list(trw = trw, gT = gT, gM = gM, gE = gE, M = M, potEv = potEv,
sample.mean.width = mean(width), sample.std.width = sd(width), trw_org=trw_org)
return(out)
}
|
/R/VSLite.R
|
no_license
|
fzhu2e/VSLiteR
|
R
| false | false | 5,671 |
r
|
#' VS-Lite model of tree ring width growth.
#'
#' \code{VSLite} simulates tree ring width growth.
#'
#' R port of VS-Lite Model of Tree Ring Width by Suz Tolwinski-Ward, 2015. For more references,
#' see xxxxyyyyyzzzz.
#'
#' @param syear Start year of simulation.
#' @param eyear End year of simulation.
#' @param phi Latitude of site (in degrees N).
#' @param T (12 x Nyrs) Matrix of ordered mean monthly temperatures (in degrees C).
#' @param P (12 x Nyrs) Matrix of ordered accumulated monthly precipitation (in mm).
#' @param T1 Lower temperature threshold for growth to begin (scalar, deg. C).
#' @param T2 Upper temperature threshold for growth sensitivity to temp (scalar, deg. C).
#' @param M1 Lower moisture threshold for growth to begin (scalar, v/v).
#' @param M2 Upper moisture threshold for growth sensitivity to moisture (scalar, v/v).
#' @param Mmax Scalar maximum soil moisture held by the soil (in v/v).
#' @param Mmin Scalar minimum soil moisture (for error-catching) (in v/v).
#' @param alph Scalar runoff parameter 1 (in inverse months).
#' @param m.th Scalar runoff parameter 3 (unitless).
#' @param mu.th Scalar runoff parameter 2 (unitless).
#' @param rootd Scalar root/"bucket" depth (in mm).
#' @param M0 Initial value for previous month's soil moisture at t = 1 (in v/v).
#' @param substep Use leaky bucket code with sub-monthly time-stepping? (TRUE/FALSE)
#' @param I_0 lower bound of integration window (months before January in NH)
#' @param I_f upper bound of integration window (months after January in NH)
#' @param hydroclim Switch; value is either "P" (default) or "M" depending on whether the
#' second input climate variable is precipitation, in which case soil moisture is estimated
#' using the Leaky Bucket model of the CPC, or soil moisture, in which case the inputs are
#' used directly to compute the growth response.
#'
#' @return trw
#' @return gT
#' @return gM
#' @return gE
#' @return M
#' @return potEv
#' @return sample.mean.width
#' @return sample.std.width
#'
#' @seealso \code{\link{compute.gE}},\code{\link{std.ramp}},\code{\link{leakybucket.monthly}},\code{\link{leakybucket.submonthly}}
#'
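#' @examples
#' # Hedged, illustrative call (added): synthetic 12 x 10 monthly climate for a site at
#' # 45 deg N. The values are arbitrary and only meant to show the expected input shapes.
#' \dontrun{
#' nyr <- 10
#' Tsim <- matrix(rep(c(-5, -3, 2, 8, 14, 19, 22, 21, 16, 10, 3, -2), nyr), nrow = 12)
#' Psim <- matrix(rep(c(50, 45, 60, 70, 80, 75, 65, 60, 55, 65, 60, 55), nyr), nrow = 12)
#' out <- VSLite(syear = 2001, eyear = 2010, phi = 45, T = Tsim, P = Psim)
#' out$trw
#' }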
#' @export
####################################################################################################
VSLite <- function(syear,eyear,phi,T,P,
T1 = 8, T2 = 23, M1 = .01, M2 = .05,
Mmax = 0.76,Mmin = 0.01,alph = 0.093,
m.th = 4.886,mu.th = 5.8,rootd = 1000,M0 = .2,
substep = 0,I_0 = 1,I_f = 12,hydroclim = "P"){
#############################################################################
nyrs <- length(syear:eyear)
Gr <- gT <- gM <- M <- potEv <- matrix(NA,12,nyrs);
#############################################################################
## Load in soil moisture, or estimate it with the Leaky Bucket model:
if(hydroclim == "M"){
## Read in soil moisture:
M = P;
}else{# Compute soil moisture:
if(substep == 1){
M <- leakybucket.submonthly(syear,eyear,phi,T,P,
Mmax,Mmin,alph,m.th,mu.th,rootd,M0);
}else{
M <- leakybucket.monthly(syear,eyear,phi,T,P,
Mmax,Mmin,alph,m.th,mu.th,rootd,M0);
}
if(substep !=1 && substep != 0){
cat("'substep' param must either be set to 1 or 0.");
      return(NULL)
}
}
# Compute gE, the scaled monthly proxy for insolation:
gE <- compute.gE(phi);
#############################################################################
### Calculate Growth Response functions gT and gM
# Temperature growth response:
gT <- std.ramp(T,T1,T2)
# Soil moisture growth response:
gM <- std.ramp(M,M1,M2)
# Compute overall growth rate:
Gr <- kronecker(matrix(1,1,nyrs),gE)*pmin(gT,gM)
############## Compute proxy quantity from growth responses #################
width <- matrix(NA,nyrs,1);
if (phi>0){ # Site in Northern Hemisphere:
if (I_0<0){ # if we include part of the previous year in each year's modeled growth:
startmo <- 13+I_0;
endmo <- I_f;
# use average of growth data across modeled years to estimate first year's growth due
# to previous year:
width[1] <- sum(Gr[1:endmo,1]) + sum(rowMeans(Gr[startmo:12,]));
for(cyear in 2:nyrs){
width[cyear] <- colSums(Gr[startmo:12,cyear-1]) + colSums(Gr[1:endmo,cyear]);
}
}else{ # no inclusion of last year's growth conditions in estimates of this year's growth:
startmo <- I_0+1;
endmo <- I_f;
width <- colSums(Gr[startmo:endmo,])
}
}
if(phi<0){ # if site is in the Southern Hemisphere:
# (Note: in the Southern Hemisphere, ring widths are dated to the year in which growth began!)
startmo <- 7+I_0; # (eg. I_0 = -4 in SH corresponds to starting integration in March of cyear)
    endmo <- I_f-6; # (eg. I_f = 12 in SH corresponds to ending integration in June of next year)
for (cyear in 1:(nyrs-1)){
width[cyear] <- sum(Gr[startmo:12,cyear]) + sum(Gr[1:endmo,cyear+1]);
}
# use average of growth data across modeled years to estimate last year's growth due
# to the next year:
width[nyrs] <- sum(Gr[startmo:12,nyrs])+sum(rowMeans(Gr[1:endmo,]));
}
# Simulated proxy series standardized width:
trw <- t((width-mean(width))/sd(width));
trw_org <- width;
#############################################################################
# Return output:
out <- list(trw = trw, gT = gT, gM = gM, gE = gE, M = M, potEv = potEv,
sample.mean.width = mean(width), sample.std.width = sd(width), trw_org=trw_org)
return(out)
}
|
library(tidyverse)
library(dplyr)
library(ggplot2)
library(grid)
library(cowplot)
library(readxl)
Bentho <- read_xlsx(here::here("Analysis", "Data", "2018 Tiles Allison's Computer.xlsx"),
sheet = "2017 and 2018")
Snails <- read_xlsx(here::here("Analysis", "Data", "2018 Tiles Allison's Computer.xlsx"),
sheet = "Snails", range = "Q1:T21")
all.snails <- read_xlsx(here::here("Analysis", "Data", "2018 Tiles Allison's Computer.xlsx"),
sheet = "Snails", range = "A1:F81")
reach.snail <- read_xlsx(here::here("Analysis", "Data", "2018 Tiles Allison's Computer.xlsx"),
sheet = "Snails", range = "W1:Z41")
temp_light <- read.csv(here::here("Analysis", "Data", "canopy_light.csv"))
temp_light <- dplyr::rename(temp_light, Year = Year.2, Treatment = Reach.2)
temp_light$Year <- temp_light$Year %>% recode(Post = 2018, Pre = 2017)
temp_light$Treatment <- temp_light$Treatment %>% recode(Reference = "N", Treatment = "Y")
temp_light <- temp_light %>% select(c("Stream", "Treatment", "Year", "PAR", "Max7MovingAMaxT"))
Mean_Chla <- Bentho %>% group_by(Stream, Treatment, Year, Meter) %>%
summarise_at(vars(BenthoTotal), mean) %>% filter(Stream == "W-100" | Stream == "W-113")
Mean_Chla <- merge(Mean_Chla, all.snails, by = c("Stream", "Treatment", "Year", "Meter"))
Mean_Chla <- merge(Mean_Chla, temp_light, by = c("Stream", "Treatment", "Year")) # temp_light has no Meter column
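# Added note: BACI step 1 - difference each meter between years (2018 minus 2017) within each reach.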
pre <- Mean_Chla %>% filter(Year == 2017)
post <- Mean_Chla %>% filter(Year == 2018)
year.wide <- merge(pre, post, by = c("Stream", "Meter", "Treatment"))
year.wide$d_Chla <- (year.wide$BenthoTotal.y - year.wide$BenthoTotal.x)
year.wide$d_Snail <- (year.wide$Snail.y - year.wide$Snail.x)
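# Added note: BACI step 2 - difference those yearly changes between the treatment ("Y") and reference ("N") reaches.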
diffs <- year.wide
ref <- diffs %>% filter(Treatment == "N")
trt <- diffs %>% filter(Treatment == "Y")
treat.wide <- merge(ref, trt, by = c("Stream", "Meter"))
treat.wide$D_Chla <- (treat.wide$d_Chla.y - treat.wide$d_Chla.x)
treat.wide$D_Snail <- (treat.wide$d_Snail.y - treat.wide$d_Snail.x)
Dubs <- treat.wide %>% select(Stream, Meter, D_Chla, D_Snail)
snail_vars <- Dubs
snail_vars$Gap <- c('Before Gap', "Before Gap", "Gap", "Gap", "Gap", "Gap", "After Gap", "After Gap", "After Gap", "After Gap")
ggplot(data = snail_vars, aes(x = D_Chla, y = D_Snail)) +
labs(title = "Snails by Chla",
x = "Chla",
y = "Snails",
caption = "BACI Response of Snails Compared to BACI Response of Chla") +
theme_bw(base_size = 14) +
geom_smooth(method = 'lm', size = .25, se = T) +
geom_point(aes(shape = Stream, color = Gap), size = 2)
ggplot(data = snail_vars, aes(x = Meter, y = D_Snail)) +
labs(title = "Snails by Meter",
x = "Meter",
y = "Snails",
caption = "BACI of Snails at Each Meter") +
theme_bw(base_size = 14) +
geom_smooth(method = 'loess', size = .25, se = T) +
geom_point(aes(shape = Stream), size = 2)
ggplot(data = snail_vars, aes(x = Meter, y = D_Chla)) +
labs(title = "Chla by Meter",
x = "Meter",
y = "Chla",
caption = "BACI of Chla at Each Meter") +
theme_bw(base_size = 14) +
geom_smooth(method = 'lm', size = .25, se = T) +
geom_point(aes(shape = Stream), size = 2)
mod_snail <- lm(D_Snail ~ D_Chla, data = snail_vars)
summary(mod_snail)
t.test(Snail ~ Treatment, data = reach.snail, paired = T)
pre.reach <- Mean_Chla %>% filter(Year == 2017)
post.reach <- Mean_Chla %>% filter(Year == 2018)
year.wide <- merge(pre.reach, post.reach, by = c("Stream", 'Treatment', "Meter"))
year.wide$d_Chla <- (year.wide$BenthoTotal.y - year.wide$BenthoTotal.x)
reach.diffs <- year.wide %>% select(Stream, Treatment, Meter, d_Chla) %>% filter(Stream == "W-100" | Stream == "W-113")
reach.snails <- merge(reach.snail, reach.diffs, by = c("Stream", "Treatment", "Meter"))
reach.snails$Gap <- c(rep("Ref", 10), 'Before Gap', "Before Gap", "Gap", "Gap", "Gap", "Gap", "Gap", "After Gap", "After Gap", "After Gap")
ggplot(data = reach.snails, aes(x = d_Chla, y = Snail)) +
labs(title = "Snails by Chla",
x = "Chla",
y = "Snails",
caption = "Snail Difference between years Compared to Chla difference between years") +
theme_bw(base_size = 14) +
geom_smooth(method = 'lm', size = .25, se = T) +
geom_point(aes(color = Gap), size = 2) +
facet_wrap("Stream")
|
/Analysis/R/snail BACI.R
|
no_license
|
Cedar-Mac/Thesis
|
R
| false | false | 4,338 |
r
|
library(tidyverse)
library(dplyr)
library(ggplot2)
library(grid)
library(cowplot)
library(readxl)
Bentho <- read_xlsx(here::here("Analysis", "Data", "2018 Tiles Allison's Computer.xlsx"),
sheet = "2017 and 2018")
Snails <- read_xlsx(here::here("Analysis", "Data", "2018 Tiles Allison's Computer.xlsx"),
sheet = "Snails", range = "Q1:T21")
all.snails <- read_xlsx(here::here("Analysis", "Data", "2018 Tiles Allison's Computer.xlsx"),
sheet = "Snails", range = "A1:F81")
reach.snail <- read_xlsx(here::here("Analysis", "Data", "2018 Tiles Allison's Computer.xlsx"),
sheet = "Snails", range = "W1:Z41")
temp_light <- read.csv(here::here("Analysis", "Data", "canopy_light.csv"))
temp_light <- dplyr::rename(temp_light, Year = Year.2, Treatment = Reach.2)
temp_light$Year <- temp_light$Year %>% recode(Post = 2018, Pre = 2017)
temp_light$Treatment <- temp_light$Treatment %>% recode(Reference = "N", Treatment = "Y")
temp_light <- temp_light %>% select(c("Stream", "Treatment", "Year", "PAR", "Max7MovingAMaxT"))
Mean_Chla <- Bentho %>% group_by(Stream, Treatment, Year, Meter) %>%
summarise_at(vars(BenthoTotal), mean) %>% filter(Stream == "W-100" | Stream == "W-113")
Mean_Chla <- merge(Mean_Chla, all.snails, by = c("Stream", "Treatment", "Year", "Meter"))
Mean_Chla <- merge(Mean_Chla, temp_light, by = c("Stream", "Treatment", "Year")) # temp_light has no Meter column
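# Added note: BACI step 1 - difference each meter between years (2018 minus 2017) within each reach.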
pre <- Mean_Chla %>% filter(Year == 2017)
post <- Mean_Chla %>% filter(Year == 2018)
year.wide <- merge(pre, post, by = c("Stream", "Meter", "Treatment"))
year.wide$d_Chla <- (year.wide$BenthoTotal.y - year.wide$BenthoTotal.x)
year.wide$d_Snail <- (year.wide$Snail.y - year.wide$Snail.x)
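# Added note: BACI step 2 - difference those yearly changes between the treatment ("Y") and reference ("N") reaches.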
diffs <- year.wide
ref <- diffs %>% filter(Treatment == "N")
trt <- diffs %>% filter(Treatment == "Y")
treat.wide <- merge(ref, trt, by = c("Stream", "Meter"))
treat.wide$D_Chla <- (treat.wide$d_Chla.y - treat.wide$d_Chla.x)
treat.wide$D_Snail <- (treat.wide$d_Snail.y - treat.wide$d_Snail.x)
Dubs <- treat.wide %>% select(Stream, Meter, D_Chla, D_Snail)
snail_vars <- Dubs
snail_vars$Gap <- c('Before Gap', "Before Gap", "Gap", "Gap", "Gap", "Gap", "After Gap", "After Gap", "After Gap", "After Gap")
ggplot(data = snail_vars, aes(x = D_Chla, y = D_Snail)) +
labs(title = "Snails by Chla",
x = "Chla",
y = "Snails",
caption = "BACI Response of Snails Compared to BACI Response of Chla") +
theme_bw(base_size = 14) +
geom_smooth(method = 'lm', size = .25, se = T) +
geom_point(aes(shape = Stream, color = Gap), size = 2)
ggplot(data = snail_vars, aes(x = Meter, y = D_Snail)) +
labs(title = "Snails by Meter",
x = "Meter",
y = "Snails",
caption = "BACI of Snails at Each Meter") +
theme_bw(base_size = 14) +
geom_smooth(method = 'loess', size = .25, se = T) +
geom_point(aes(shape = Stream), size = 2)
ggplot(data = snail_vars, aes(x = Meter, y = D_Chla)) +
labs(title = "Chla by Meter",
x = "Meter",
y = "Chla",
caption = "BACI of Chla at Each Meter") +
theme_bw(base_size = 14) +
geom_smooth(method = 'lm', size = .25, se = T) +
geom_point(aes(shape = Stream), size = 2)
mod_snail <- lm(D_Snail ~ D_Chla, data = snail_vars)
summary(mod_snail)
t.test(Snail ~ Treatment, data = reach.snail, paired = T)
pre.reach <- Mean_Chla %>% filter(Year == 2017)
post.reach <- Mean_Chla %>% filter(Year == 2018)
year.wide <- merge(pre.reach, post.reach, by = c("Stream", 'Treatment', "Meter"))
year.wide$d_Chla <- (year.wide$BenthoTotal.y - year.wide$BenthoTotal.x)
reach.diffs <- year.wide %>% select(Stream, Treatment, Meter, d_Chla) %>% filter(Stream == "W-100" | Stream == "W-113")
reach.snails <- merge(reach.snail, reach.diffs, by = c("Stream", "Treatment", "Meter"))
reach.snails$Gap <- c(rep("Ref", 10), 'Before Gap', "Before Gap", "Gap", "Gap", "Gap", "Gap", "Gap", "After Gap", "After Gap", "After Gap")
ggplot(data = reach.snails, aes(x = d_Chla, y = Snail)) +
labs(title = "Snails by Chla",
x = "Chla",
y = "Snails",
caption = "Snail Difference between years Compared to Chla difference between years") +
theme_bw(base_size = 14) +
geom_smooth(method = 'lm', size = .25, se = T) +
geom_point(aes(color = Gap), size = 2) +
facet_wrap("Stream")
|
# Building a Prod-Ready, Robust Shiny Application.
#
# Each step is optional.
#
# 1 - On init
#
## 1.1 - Fill the description & set options
##
## Add information about the package that will contain your app
golem::fill_desc(
pkg_name = "OB1.metadata", # The Name of the package containing the App
pkg_title = "Ocean hackathon défi B1", # The Title of the package containing the App
pkg_description = "Initiatives for data description within 48h.", # The Description of the package containing the App
author_first_name = "Elie, Yvan", # Your First Name
author_last_name = "Arnaud, LeBras", # Your Last Name
author_email = "elie.arnaud@mnhn.fr, yvan.le-bras@mnhn.fr", # Your Email
repo_url = NULL # The (optional) URL of the GitHub Repo
)
## Use this desc to set {golem} options
golem::set_golem_options()
## 1.2 - Set common Files
##
## If you want to use the MIT licence, README, code of conduct, lifecycle badge, and news
usethis::use_mit_license( name = "Golem User" ) # You can set another licence here
usethis::use_readme_rmd( open = FALSE )
usethis::use_code_of_conduct()
usethis::use_lifecycle_badge( "Experimental" )
usethis::use_news_md( open = FALSE )
usethis::use_git()
## 1.3 - Add a data-raw folder
##
## If you have data in your package
usethis::use_data_raw( name = "my_dataset", open = FALSE ) # Change "my_dataset"
## 1.4 - Init Tests
##
## Create a template for tests
golem::use_recommended_tests()
## 1.5 : Use Recommended Package
golem::use_recommended_deps()
## 1.6 Add various tools
# If you want to change the favicon (default is golem's one)
golem::remove_favicon()
golem::use_favicon() # path = "path/to/ico". Can be an online file.
# Add helper functions
golem::use_utils_ui()
golem::use_utils_server()
# You're now set!
# go to dev/02_dev.R
rstudioapi::navigateToFile( "dev/02_dev.R" )
|
/dev/01_start.R
|
permissive
|
pole-national-donnees-biodiversite/OB1.metadata
|
R
| false | false | 1,869 |
r
|
# Building a Prod-Ready, Robust Shiny Application.
#
# Each step is optional.
#
# 1 - On init
#
## 1.1 - Fill the description & set options
##
## Add information about the package that will contain your app
golem::fill_desc(
pkg_name = "OB1.metadata", # The Name of the package containing the App
pkg_title = "Ocean hackathon défi B1", # The Title of the package containing the App
pkg_description = "Initiatives for data description within 48h.", # The Description of the package containing the App
author_first_name = "Elie, Yvan", # Your First Name
author_last_name = "Arnaud, LeBras", # Your Last Name
author_email = "elie.arnaud@mnhn.fr, yvan.le-bras@mnhn.fr", # Your Email
repo_url = NULL # The (optional) URL of the GitHub Repo
)
## Use this desc to set {golem} options
golem::set_golem_options()
## 1.2 - Set common Files
##
## If you want to use the MIT licence, README, code of conduct, lifecycle badge, and news
usethis::use_mit_license( name = "Golem User" ) # You can set another licence here
usethis::use_readme_rmd( open = FALSE )
usethis::use_code_of_conduct()
usethis::use_lifecycle_badge( "Experimental" )
usethis::use_news_md( open = FALSE )
usethis::use_git()
## 1.3 - Add a data-raw folder
##
## If you have data in your package
usethis::use_data_raw( name = "my_dataset", open = FALSE ) # Change "my_dataset"
## 1.4 - Init Tests
##
## Create a template for tests
golem::use_recommended_tests()
## 1.5 : Use Recommended Package
golem::use_recommended_deps()
## 1.6 Add various tools
# If you want to change the favicon (default is golem's one)
golem::remove_favicon()
golem::use_favicon() # path = "path/to/ico". Can be an online file.
# Add helper functions
golem::use_utils_ui()
golem::use_utils_server()
# You're now set!
# go to dev/02_dev.R
rstudioapi::navigateToFile( "dev/02_dev.R" )
|
load('Rdata/lmer-perROI-out-invage.Rdata')
ageeff <- read.csv('txt/ageeffAgeXphys-invage.csv')
#best.lm <- roirois.lm[order(ageeff$ageXphysio.tval)[1:300]]
#best.lm.order <- order(ageeff$ageXphysio.tval)[1:300]
# only use significant ROIs
ageeff.sigidx <- which(abs(ageeff$ageXphysio.tval)>2.58)
# grab the t-values at the significant indices and order them from greatest to least
best.lm.order <- ageeff.sigidx[ rev(order(ageeff$ageXphysio.tval[ageeff.sigidx])) ]
best.lm <- roirois.lm[best.lm.order]
save(list=c('best.lm','best.lm.order'),file="Rdata/ageinv-signficantInteraction.Rdata")
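# Added toy check (hedged): the indexing idiom above keeps |t| > 2.58 and orders the kept
# indices from largest to smallest t-value, e.g.
tv_demo <- c(1.2, 3.4, -2.9, 0.5, 2.7)
i_demo <- which(abs(tv_demo) > 2.58)
i_demo[rev(order(tv_demo[i_demo]))] # 2 5 3 (the indices of 3.4, 2.7, -2.9)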
develrois<-c(78,100,174,190,213,215,230,232,241) # sort -n develRois.txt|cut -f1 -d' ' |tr '\n' ','
develidx <- which( (ageeff$ROI1 %in% develrois ) & ( ageeff$ROI2 %in% develrois) )
devel.lm <- roirois.lm[develidx]
save(list=c('devel.lm','develidx','develrois'),file="Rdata/devel-invage.Rdata")
|
/truncateLM.R
|
no_license
|
WillForan/physioCompare
|
R
| false | false | 862 |
r
|
load('Rdata/lmer-perROI-out-invage.Rdata')
ageeff <- read.csv('txt/ageeffAgeXphys-invage.csv')
#best.lm <- roirois.lm[order(ageeff$ageXphysio.tval)[1:300]]
#best.lm.order <- order(ageeff$ageXphysio.tval)[1:300]
# only use significant ROIs
ageeff.sigidx <- which(abs(ageeff$ageXphysio.tval)>2.58)
# grab the t-values at the significant indices and order them from greatest to least
best.lm.order <- ageeff.sigidx[ rev(order(ageeff$ageXphysio.tval[ageeff.sigidx])) ]
best.lm <- roirois.lm[best.lm.order]
save(list=c('best.lm','best.lm.order'),file="Rdata/ageinv-signficantInteraction.Rdata")
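# Added toy check (hedged): the indexing idiom above keeps |t| > 2.58 and orders the kept
# indices from largest to smallest t-value, e.g.
tv_demo <- c(1.2, 3.4, -2.9, 0.5, 2.7)
i_demo <- which(abs(tv_demo) > 2.58)
i_demo[rev(order(tv_demo[i_demo]))] # 2 5 3 (the indices of 3.4, 2.7, -2.9)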
develrois<-c(78,100,174,190,213,215,230,232,241) # sort -n develRois.txt|cut -f1 -d' ' |tr '\n' ','
develidx <- which( (ageeff$ROI1 %in% develrois ) & ( ageeff$ROI2 %in% develrois) )
devel.lm <- roirois.lm[develidx]
save(list=c('devel.lm','develidx','develrois'),file="Rdata/devel-invage.Rdata")
|
#QCA Plots
#btw, small modifications of this could replace the current scripts for returning the final data set
#need to change the configuration.table file though (and the regression analysis)
source("sim.ltQCA.R")
library(QCA)
library(foreach)
#create a data set with varying distributions, and varying number of variables
dvdists<-seq(.1,.9, by=.2)
dists<-seq(.5,.9, by=.1)
num.vars<-2:7
var.names<-c("AA","BB","CC","DD","EE","FF","GG","HH","II","KK")
sam.sizes<-seq(10,100, by=20)
counter<-0
data.list<-vector(mode="list", length=length(num.vars)*length(sam.sizes)*length(dists))
results <- foreach(dist = dists) %dopar% {
for (dvdist in dvdists) {
for (num.var in num.vars){
for (sam.size in sam.sizes){
counter<-counter+1
qca.data<-as.data.frame(matrix(nrow=sam.size,ncol=num.var + 1))
for (col in 1:ncol(qca.data)){qca.data[,col]<-sample(c(0,1), sam.size, replace=T,prob=c(1-dist,dist))} #simulate data set
names(qca.data)<-c(var.names[1:num.var],"OUT")
qca.data$OUT<-sample(c(0,1), sam.size, replace=T,prob=c(1-dvdist,dvdist))
data.list[[counter]]<-sim.ltQCA(qca.data, outcome="OUT", sim=10, ncut=1:6)
data.list[[counter]]$nvar<-num.var
data.list[[counter]]$sam.size<-sam.size
data.list[[counter]]$dist<-dist
data.list[[counter]]$dvdist<-dvdist
save(data.list,file=paste("data.list_",dist,".Rdata",sep=""))
}}}}
|
/qcaeval.old/final_data_set (Ben Gibson's conflicted copy 2015-05-05).R
|
no_license
|
cbengibson/QCArevision2
|
R
| false | false | 1,332 |
r
|
#This script integrates the graphs for plot 2 and plot 3, along with the plots
#for Voltage and Global_reactive_power, in a single image.
#Data preparation
#Read the header
firstLine <- read.table("./household_power_consumption.txt", header = TRUE,
sep = ";", na.strings = "?", nrows = 1)
#Read the data
consumption <- read.table("./household_power_consumption.txt", header = FALSE,
sep = ";", na.strings = "?", skip = 66637, nrows = 2880)
#Assign the correct header to the data
names(consumption) <- names(firstLine)
#Convert the first two columns into a single column containing both date and
#time in POSIXlt format
dateTime <- with(consumption, paste(Date, Time))
dateTime <- strptime(dateTime, format = "%d/%m/%Y %H:%M:%S")
consumption <- consumption[, 2:9]
names(consumption)[1] <- "Date_time"
consumption$Date_time <- dateTime
#Create an 2x2 multi-plot graph, filling in collumn-wise with the appropriate
#plots. Draw this graph in file "plot4.png" using the PNG file device.
png("./plot4.png")
par(mfcol = c(2,2), bg = "transparent")
#Plot of Global_active_power
plot(consumption$Date_time, consumption$Global_active_power, type = "l",
xlab = "", ylab = "Global Active Power (kilowatts)")
#Plot of the three sub-metering variables. Note the legend box is suppressed
#(bty = "n") to match the fourth figure of the assignment.
plot(consumption$Date_time, consumption$Sub_metering_1, type = "l",
xlab = "", ylab = "Energy sub metering", col = "black")
lines(consumption$Date_time, consumption$Sub_metering_2, type = "l", col = "red")
lines(consumption$Date_time, consumption$Sub_metering_3, type = "l", col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n")
#Plot of the Voltage.
plot(consumption$Date_time, consumption$Voltage, type = "l", xlab = "datetime",
ylab = "Voltage")
#Plot of the Global_reactive_power.
plot(consumption$Date_time, consumption$Global_reactive_power, type = "l",
xlab = "datetime", ylab = "Global_reactive_power")
dev.off()
|
/plot4.R
|
no_license
|
sotmihos/ExData_Plotting1
|
R
| false | false | 2,136 |
r
|
## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix is a function which stores the inverse of a matrix in a cache.
## When we call the cacheSolve function with a matrix passed as an argument, it checks the cache to see whether the inverse already exists.
## If it does, it retrieves the inverse matrix from the cache itself. If it does not exist, the inverse is calculated and stored in the cache.
## Write a short comment describing this function
## This function is responsible for creating the special "vector", which is actually a list. The function is responsible for storing the inverse
## of a matrix to the cache. The set of functions provides the mechanism to store the inverse in the cache. Functions like get() and set() are used
## to get and set the values of the vector. The getinverse() and setinverse() functions are used to retrieve the inverse of the matrix from the
## cache and set the inverse to the cache respectively.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set = set,
get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Write a short comment describing this function
## This function basically takes the matrix as the parameter and tries to calculate the inverse of it. It checks whether the inverse exists in the cache.
## If the inverse exists in the cache, it retrieves the value from there and returns it. If it is not in the cache, then it calculates the inverse
## and stores it in the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinverse()
if(!is.null(inv)) {
return(inv)
}
matrics <- x$get()
inv <- solve(matrics, ...)
x$setinverse(inv)
inv
}
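## Example usage (illustrative sketch; the 2x2 matrix below is arbitrary):
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(m)   # computes the inverse via solve() and caches it
## cacheSolve(m)   # a second call returns the cached inverse without recomputing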
|
/cachematrix.R
|
no_license
|
Debjit-Chatterjee/ProgrammingAssignment2
|
R
| false | false | 1,943 |
r
|
set_new_model("nearest_neighbor")
set_model_mode("nearest_neighbor", "classification")
set_model_mode("nearest_neighbor", "regression")
# ------------------------------------------------------------------------------
set_model_engine("nearest_neighbor", "classification", "kknn")
set_model_engine("nearest_neighbor", "regression", "kknn")
set_dependency("nearest_neighbor", "kknn", "kknn")
set_model_arg(
model = "nearest_neighbor",
eng = "kknn",
parsnip = "neighbors",
original = "ks",
func = list(pkg = "dials", fun = "neighbors"),
has_submodel = TRUE
)
set_model_arg(
model = "nearest_neighbor",
eng = "kknn",
parsnip = "weight_func",
original = "kernel",
func = list(pkg = "dials", fun = "weight_func"),
has_submodel = FALSE
)
set_model_arg(
model = "nearest_neighbor",
eng = "kknn",
parsnip = "dist_power",
original = "distance",
func = list(pkg = "dials", fun = "distance"),
has_submodel = FALSE
)
set_fit(
model = "nearest_neighbor",
eng = "kknn",
mode = "regression",
value = list(
interface = "formula",
protect = c("formula", "data"),
func = c(pkg = "kknn", fun = "train.kknn"),
defaults = list()
)
)
set_fit(
model = "nearest_neighbor",
eng = "kknn",
mode = "classification",
value = list(
interface = "formula",
protect = c("formula", "data"),
func = c(pkg = "kknn", fun = "train.kknn"),
defaults = list()
)
)
set_pred(
model = "nearest_neighbor",
eng = "kknn",
mode = "regression",
type = "numeric",
value = list(
# seems unnecessary here as the predict_numeric catches it based on the
# model mode
pre = function(x, object) {
if (object$fit$response != "continuous") {
stop("`kknn` model does not appear to use numeric predictions. Was ",
"the model fit with a continuous response variable?",
call. = FALSE)
}
x
},
post = NULL,
func = c(fun = "predict"),
args =
list(
object = quote(object$fit),
newdata = quote(new_data),
type = "raw"
)
)
)
set_pred(
model = "nearest_neighbor",
eng = "kknn",
mode = "regression",
type = "raw",
value = list(
pre = NULL,
post = NULL,
func = c(fun = "predict"),
args =
list(
object = quote(object$fit),
newdata = quote(new_data)
)
)
)
set_pred(
model = "nearest_neighbor",
eng = "kknn",
mode = "classification",
type = "class",
value = list(
pre = function(x, object) {
if (!(object$fit$response %in% c("ordinal", "nominal"))) {
stop("`kknn` model does not appear to use class predictions. Was ",
"the model fit with a factor response variable?",
call. = FALSE)
}
x
},
post = NULL,
func = c(fun = "predict"),
args =
list(
object = quote(object$fit),
newdata = quote(new_data),
type = "raw"
)
)
)
set_pred(
model = "nearest_neighbor",
eng = "kknn",
mode = "classification",
type = "prob",
value = list(
pre = function(x, object) {
if (!(object$fit$response %in% c("ordinal", "nominal"))) {
stop("`kknn` model does not appear to use class predictions. Was ",
"the model fit with a factor response variable?",
call. = FALSE)
}
x
},
post = function(result, object) as_tibble(result),
func = c(fun = "predict"),
args =
list(
object = quote(object$fit),
newdata = quote(new_data),
type = "prob"
)
)
)
set_pred(
model = "nearest_neighbor",
eng = "kknn",
mode = "classification",
type = "raw",
value = list(
pre = NULL,
post = NULL,
func = c(fun = "predict"),
args =
list(
object = quote(object$fit),
newdata = quote(new_data)
)
)
)
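# Illustrative usage sketch (assumes parsnip and kknn are installed): with these
# registrations in place, the engine is driven through the usual parsnip interface, e.g.
#   knn_spec <- nearest_neighbor(mode = "regression", neighbors = 5) %>% set_engine("kknn")
#   knn_fit  <- fit(knn_spec, mpg ~ ., data = mtcars)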
|
/R/nearest_neighbor_data.R
|
no_license
|
conradbm/parsnip
|
R
| false | false | 3,853 |
r
|
# plot4.R
library(ggplot2)
# Across the United States,
# how have emissions from coal combustion-related sources changed from 1999–2008?
#
# plot4
# argument: [path] - data set path (ex: /work/Exploratory-Data-Analysis/data)
# return: - aggregate data
plot4 <- function(path) {
# backup and replace working dir
backup_wd <- getwd()
setwd(path)
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# merge and retrieve emissions from coal
NEISCC <- merge(NEI, SCC, by="SCC")
retrivecoal <- grepl("coal", NEISCC$Short.Name, ignore.case=TRUE)
NEISCC <- NEISCC[retrivecoal, ]
ret <- aggregate(Emissions ~ year, NEISCC, sum)
png("plot4.png", width=640, height=480)
g <- ggplot(ret, aes(factor(year), Emissions))
g <- g +
geom_bar(stat="identity") +
xlab("year") +
ylab("Total PM2.5 Emissions")
print(g)
dev.off()
# restore working dir
setwd(backup_wd)
return(ret)
}
|
/plot4.R
|
no_license
|
daxanya1/Exploratory-Data-Analysis
|
R
| false | false | 1,079 |
r
|
cualquiera <- rbinom(n = 1000000, size = 5, prob = 0.3)
# cualquiera
es_igual_a_dos <- cualquiera == 0
mean(es_igual_a_dos)
dbinom(x = 0, size = 5, prob = 0.3)
es_igual_a_dos <- cualquiera == 1
mean(es_igual_a_dos)
dbinom(x = 1, size = 5, prob = 0.3)
es_igual_a_dos <- cualquiera == 2
mean(es_igual_a_dos)
dbinom(x = 2, size = 5, prob = 0.3)
es_igual_a_dos <- cualquiera == 3
mean(es_igual_a_dos)
es_igual_a_dos <- cualquiera == 4
mean(es_igual_a_dos)
es_igual_a_dos <- cualquiera == 5
mean(es_igual_a_dos)
dbinom(x = 2, size = 5, prob = 0.3)
purrr::map_dbl(c(10, 100, 1000, 10000), ~ mean(rbinom(., 1, 0.2)))
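# The map_dbl() call above draws Bernoulli(p = 0.2) samples of sizes 10, 100, 1000 and 10000
# and returns each sample mean; the means approach 0.2 as the sample size grows.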
|
/demobinomial.R
|
no_license
|
ricardomayerb/ico8306
|
R
| false | false | 638 |
r
|
## Read data file into R.
library(lubridate)
all_data <- read.table("./exdata-data-household_power_consumption/household_power_consumption.txt", header=TRUE,
sep=";", na.strings = "?")
## Subset out only data from 2007-02-01 through 2007-02-02
all_data$Date_Time <- strptime(paste(all_data$Date, all_data$Time, sep=" "), format="%d/%m/%Y %H:%M:%S")
daily_data <- all_data[all_data$Date_Time >= "2007-02-01" & all_data$Date_Time <"2007-02-03", ]
##Create plot matching plot 2.
plot(daily_data$Date_Time, daily_data$Global_active_power,
type= "l",
main = " ",
ylab="Global Active Power (kilowatts)",
xlab=" ",
col="black")
##Copies plot to PNG file.
dev.copy(png, file="plot2.png", width=480, height=480)
##Closes graphics device.
dev.off()
|
/plot2.R
|
no_license
|
MelanieMaggard/ExData_Plotting1
|
R
| false | false | 794 |
r
|
# Calculate the coordinates from a design matrix D
# coords is an m-by-2 matrix; each row corresponds to one measurement point
sampling_locs <- function(D, locs_index){
m = nrow(D)
if(length(D)>0){
coords = matrix(NA, nrow = m, ncol = 2)
for(i in 1:m){
coords[i,] = unlist(locs_index[which(D[i,] == 1)])
}
return(coords)
}else{
return(c())
}
}
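# Example (illustrative; assumes locs_index is a list of (x, y) coordinate pairs,
# one per candidate location, and each row of D selects exactly one of them):
# locs_index <- list(c(0, 0), c(1, 0), c(0, 1), c(1, 1))
# D <- rbind(c(1, 0, 0, 0),
#            c(0, 0, 1, 0))
# sampling_locs(D, locs_index)   # 2 x 2 matrix with rows (0, 0) and (0, 1)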
|
/functions/sampling_locs.R
|
no_license
|
yang221/DynamicSampling
|
R
| false | false | 387 |
r
|
library(tidyverse)
library(ICD10gm)
# Less detailed version, but is useful in case detailed version doesn't match
labs <- icd_meta_codes %>%
as_tibble() %>%
filter(year == 2018) %>% # Has different versions categorized per year, arbitrarily chose 2018
mutate(code = str_sub(icd_sub, 1, 3)) %>%
select(icd_block_first, code, label_icd3, icd_normcode) %>%
distinct()
# Detailed version
labs_det <- icd_meta_codes %>%
as_tibble() %>%
filter(year == 2018) %>% # Has different versions categorized per year, arbitrarily chose 2018
select(icd_block_first, icd_normcode, label) %>%
distinct()
# Blocks of ICD
blocks <- icd_meta_blocks %>%
as_tibble() %>%
filter(year == 2018)
# Chapters of ICD
chapter <- icd_meta_chapters %>%
as_tibble() %>%
filter(year == 2018) %>%
select(chapter, chapter_label)
icd_codes <- labs_det %>%
left_join(labs, by = c("icd_block_first", "icd_normcode")) %>%
left_join(blocks, by = "icd_block_first") %>%
left_join(chapter, by = "chapter") %>%
select(icd_normcode, label, icd_code_short = code, label_short = label_icd3,
icd_block_first, block_label, chapter_label)
write_csv(icd_codes, "data-raw/icd_codes.csv")
usethis::use_data(icd_codes, overwrite = TRUE)
|
/data-raw/icd_codes.R
|
no_license
|
bsurial/bernr
|
R
| false | false | 1,233 |
r
|
transposer <- function(pitch, numshift) {
if (pitch == "XX") {
return(pitch)
}
PITCH_CLASS <- c("C", "d", "D", "e", "E", "F", "g", "G", "a", "A", "b", "B")
PITCH <- substr(pitch, 1, 1)
OCTAVE_MAP <- rep(as.numeric(substr(pitch, 2, 2)), length((PITCH_CLASS)))
if (numshift > 0) {
TRANSPOSED <- c(PITCH_CLASS[(1 + numshift):12], PITCH_CLASS[1:(1 + numshift - 1)])
OCTAVE_MAP[(12- numshift + 1):12] <- OCTAVE_MAP[(12- numshift + 1):12] + 1
} else if (numshift == 0) {
TRANSPOSED <- PITCH_CLASS
} else {
numshift <- abs(numshift)
TRANSPOSED <- c(PITCH_CLASS[(12 - numshift + 1):12], PITCH_CLASS[1:(12 - numshift)])
OCTAVE_MAP[1:(1 + numshift - 1)] <- OCTAVE_MAP[1:(1 + numshift - 1)] - 1
}
idx <- PITCH_CLASS == PITCH
pitch_new <- paste(TRANSPOSED[idx], as.character(OCTAVE_MAP[idx]), sep = "")
return(pitch_new)
}
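# Example (illustrative; pitches are encoded as <pitch class><octave>, where the
# lowercase letters in PITCH_CLASS are the chromatic steps between the uppercase ones):
# transposer("C4", 2)    # "D4"  -- up two semitones
# transposer("B4", 2)    # "d5"  -- wraps into the next octave
# transposer("XX", 1)    # "XX"  -- placeholder is returned unchanged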
|
/Scripts/R/lib/func_transposer.R
|
permissive
|
comp-music-lab/agreement-human-automated
|
R
| false | false | 924 |
r
|
library(shiny)
source("calculations.R")
my_server <- function(input, output) {
output$initial_margin <- renderText({
c(initialMarginCalculator(input$entry_price, input$asset_quantity, input$leverage),
"USDT")
})
output$profit_without_fees <- renderText({
c(profitCalculator(input$entry_price, input$exit_price, input$asset_quantity),
"USDT")
})
output$return_percentage <- renderText({
c(profitCalculator(input$entry_price, input$exit_price, input$asset_quantity) /
initialMarginCalculator(input$entry_price, input$asset_quantity, input$leverage) * 100,
"%")
})
output$net_return_with_fees <- renderText({
c((profitCalculator(input$entry_price, input$exit_price, input$asset_quantity) -
(individualFeeCalculator(input$trading_level,
input$entry_maker_or_taker,
input$entry_price,
input$asset_quantity,
input$bnb_fees) +
individualFeeCalculator(input$trading_level,
input$exit_maker_or_taker,
input$exit_price,
input$asset_quantity,
input$bnb_fees))) /
initialMarginCalculator(input$entry_price, input$asset_quantity, input$leverage) *
100,
"%")
})
output$entry_fees <- renderText({
c(individualFeeCalculator(input$trading_level,
input$entry_maker_or_taker,
input$entry_price,
input$asset_quantity,
input$bnb_fees),
"USDT")
})
output$exit_fees <- renderText({
c(individualFeeCalculator(input$trading_level,
input$exit_maker_or_taker,
input$exit_price,
input$asset_quantity,
input$bnb_fees),
"USDT")
})
output$total_fees <- renderText({
c(individualFeeCalculator(input$trading_level,
input$entry_maker_or_taker,
input$entry_price,
input$asset_quantity,
input$bnb_fees) +
individualFeeCalculator(input$trading_level,
input$exit_maker_or_taker,
input$exit_price,
input$asset_quantity,
input$bnb_fees),
"USDT")
})
output$net_profit_with_fees <- renderText({
c(profitCalculator(input$entry_price, input$exit_price, input$asset_quantity) -
(individualFeeCalculator(input$trading_level,
input$entry_maker_or_taker,
input$entry_price,
input$asset_quantity,
input$bnb_fees) +
individualFeeCalculator(input$trading_level,
input$exit_maker_or_taker,
input$exit_price,
input$asset_quantity,
input$bnb_fees)),
"USDT")
})
output$return_exit_price <- renderTable({
temp_matrix <- matrix(c(input$expected_return,returnExitPriceCalculator(input$expected_return, input$entry_price, input$leverage),
10,returnExitPriceCalculator(10, input$entry_price, input$leverage),
25,returnExitPriceCalculator(25, input$entry_price, input$leverage),
50,returnExitPriceCalculator(50, input$entry_price, input$leverage),
75,returnExitPriceCalculator(75, input$entry_price, input$leverage),
100,returnExitPriceCalculator(100, input$entry_price, input$leverage),
125,returnExitPriceCalculator(125, input$entry_price, input$leverage),
150,returnExitPriceCalculator(150, input$entry_price, input$leverage),
175,returnExitPriceCalculator(175, input$entry_price, input$leverage),
200,returnExitPriceCalculator(200, input$entry_price, input$leverage)
),ncol=2,byrow=TRUE)
colnames(temp_matrix) <- c("Return Percentage (in %)",
"Exit Price Required (in USDT)")
return(temp_matrix)
})
output$return_exit_price_with_fees <- renderTable({
temp_matrix <- matrix(c(input$expected_return, returnExitPriceCalculatorWithFees(input$expected_return,
input$entry_price,
input$asset_quantity,
input$leverage,
input$trading_level,
input$entry_maker_or_taker,
input$exit_maker_or_taker,
input$bnb_fees),
10, returnExitPriceCalculatorWithFees(10,
input$entry_price,
input$asset_quantity,
input$leverage,
input$trading_level,
input$entry_maker_or_taker,
input$exit_maker_or_taker,
input$bnb_fees),
25, returnExitPriceCalculatorWithFees(25,
input$entry_price,
input$asset_quantity,
input$leverage,
input$trading_level,
input$entry_maker_or_taker,
input$exit_maker_or_taker,
input$bnb_fees),
50, returnExitPriceCalculatorWithFees(50,
input$entry_price,
input$asset_quantity,
input$leverage,
input$trading_level,
input$entry_maker_or_taker,
input$exit_maker_or_taker,
input$bnb_fees),
75, returnExitPriceCalculatorWithFees(75,
input$entry_price,
input$asset_quantity,
input$leverage,
input$trading_level,
input$entry_maker_or_taker,
input$exit_maker_or_taker,
input$bnb_fees),
100, returnExitPriceCalculatorWithFees(100,
input$entry_price,
input$asset_quantity,
input$leverage,
input$trading_level,
input$entry_maker_or_taker,
input$exit_maker_or_taker,
input$bnb_fees),
125, returnExitPriceCalculatorWithFees(125,
input$entry_price,
input$asset_quantity,
input$leverage,
input$trading_level,
input$entry_maker_or_taker,
input$exit_maker_or_taker,
input$bnb_fees),
150, returnExitPriceCalculatorWithFees(150,
input$entry_price,
input$asset_quantity,
input$leverage,
input$trading_level,
input$entry_maker_or_taker,
input$exit_maker_or_taker,
input$bnb_fees),
175, returnExitPriceCalculatorWithFees(175,
input$entry_price,
input$asset_quantity,
input$leverage,
input$trading_level,
input$entry_maker_or_taker,
input$exit_maker_or_taker,
input$bnb_fees),
200, returnExitPriceCalculatorWithFees(200,
input$entry_price,
input$asset_quantity,
input$leverage,
input$trading_level,
input$entry_maker_or_taker,
input$exit_maker_or_taker,
input$bnb_fees)),
ncol = 2, byrow = TRUE)
colnames(temp_matrix) <- c("Return Percentage (in %)",
"Exit Price Required (in USDT)")
return(temp_matrix)
})
}
|
/my_server.R
|
permissive
|
jsamyak/CryptoFuturesCalculator
|
R
| false | false | 11,730 |
r
|
# Load Daily Toronto Temperature Data (1990-2017)
# Initialize Session ####
cat("\014")
rm(list=ls())
cat("\014")
Sys.Date()
sessionInfo()
list.of.packages <- c("readxl","readr","ggplot2","plyr","tidyr","dplyr","magrittr","viridis","lubridate","grid","gridExtra")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
lapply(list.of.packages, require, character.only = TRUE)
dir.path <- getwd()
setwd(dir.path)
weather.files <- list.files("data/weather", pattern = "\\.csv$", full.names = T)
# NO MEAN TEMPERATURE FOR >2003 years, download more files from
# http://climate.weather.gc.ca/climate_data/daily_data_e.html?timeframe=2&hlyRange=2002-06-04%7C2017-10-24&dlyRange=2002-06-04%7C2017-10-24&mlyRange=2003-07-01%7C2006-12-01&StationID=31688&Prov=ON&urlExtension=_e.html&searchType=stnProx&optLimit=yearRange&StartYear=2004&EndYear=2017&selRowPerPage=25&Line=0&txtRadius=25&optProxType=custom&selCity=&selPark=&Day=24&Year=2004&Month=12#
df <- weather.files %>%
lapply(read_csv,skip = 25,
col_types = cols(`Date/Time` = col_character(),
`Year` = col_integer(),
`Month` = col_integer(),
`Day` = col_integer(),
`Data Quality` = col_character(),
`Max Temp (°C)` = col_number(),
`Max Temp Flag` = col_character(),
`Min Temp (°C)` = col_number(),
`Min Temp Flag` = col_character(),
`Mean Temp (°C)` = col_number(),
`Mean Temp Flag` = col_character(),
`Heat Deg Days (°C)` = col_number(),
`Heat Deg Days Flag` = col_character(),
`Cool Deg Days (°C)` = col_number(),
`Cool Deg Days Flag` = col_character(),
`Total Rain (mm)` = col_number(),
`Total Rain Flag` = col_character(),
`Total Snow (cm)` = col_number(),
`Total Snow Flag` = col_character(),
`Total Precip (mm)` = col_number(),
`Total Precip Flag` = col_character(),
`Snow on Grnd (cm)` = col_number(),
`Snow on Grnd Flag` = col_character(),
`Dir of Max Gust (10s deg)` = col_number(),
`Dir of Max Gust Flag` = col_character(),
`Spd of Max Gust (km/h)` = col_number(),
`Spd of Max Gust Flag` = col_character())) %>%
bind_rows()
# Rename Column Names
names(df)
df <- df %>%
rename(Date = `Date/Time`, Data_Quality = `Data Quality`,
Max_TC = `Max Temp (°C)`, Min_TC = `Min Temp (°C)`,
Mean_TC = `Mean Temp (°C)`, Heat_C = `Heat Deg Days (°C)`,
Cool_C = `Cool Deg Days (°C)`, Tot_Rain_mm = `Total Rain (mm)`,
Tot_Snow_cm = `Total Snow (cm)`, Tot_Precip_mm = `Total Precip (mm)`,
Snow_G_cm = `Snow on Grnd (cm)`, D_Max_Gust_deg = `Dir of Max Gust (10s deg)`,
Max_Gust_kmh = `Spd of Max Gust (km/h)`) %>%
rename(Max_T_Flag = `Max Temp Flag`,
Min_T_Flag = `Min Temp Flag`,
Mean_T_Flag = `Mean Temp Flag`,
Heat_Flag = `Heat Deg Days Flag`,
Cool_Flag = `Cool Deg Days Flag`,
Tot_Rain_Flag = `Total Rain Flag`,
Tot_Snow_Flag = `Total Snow Flag`,
Tot_Precip_Flag = `Total Precip Flag`,
Snow_G_Flag = `Snow on Grnd Flag`,
D_Max_Gust_Flag = `Dir of Max Gust Flag`,
Max_Gust_Flag = `Spd of Max Gust Flag`)
# Change to Appropriate Column Types ####
str(df)
remover <- function(x) {gsub("\\s*", "F", x)}
# newflags <- df %>% select(ends_with("Flag")) %>%
# mutate_each(funs(remover)) %>% #replaces all blanks with F
# mutate_each(funs(gsub("^FTF","T",.))) %>% #replaces all "FTF" with T
# mutate_each(funs(as.logical)) #convert to logical
df <- df %>% select(-ends_with("Flag")) %>%
bind_cols(df %>% select(ends_with("Flag")) %>%
mutate_each(funs(remover)) %>% #replaces all blanks with F
mutate_each(funs(gsub("^FTF","T",.))) %>% #replaces all "FTF" with T
mutate_each(funs(as.logical)) #convert to logical
)
# Convert to Date-time
df$Date <- ymd(df$Date) # convert 'Date' to date object
df$Year <- as.Date(paste(df$Year, 1, 1, sep = "-")) #convert 'Year' to date object
year(df$Year) #convert back to numeric value
df$Month <- month(df$Month, label = T) #
df$Day #day of month
df %>% filter(Year > "2004-01-01") %>% select(starts_with("Mean"))
# watermain data
as.POSIXct(df$Date, tz ="UTC")
str(df$Date)
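# NOTE: wm.df (watermain break records with a Date column) is not created in this script;
# it is assumed to have been loaded in an earlier step.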
wm.df$Date <- as.Date(wm.df$Date)
wm.df$wmbreak <- rep(38,length(wm.df$Date))
merge.df <- left_join(df,wm.df, by="Date")
getSeason <- function(DATES) {
WS <- as.Date("2012-12-15", format = "%Y-%m-%d") # Winter Solstice
SE <- as.Date("2012-3-15", format = "%Y-%m-%d") # Spring Equinox
SS <- as.Date("2012-6-15", format = "%Y-%m-%d") # Summer Solstice
FE <- as.Date("2012-9-15", format = "%Y-%m-%d") # Fall Equinox
# Convert dates from any year to 2012 dates
d <- as.Date(strftime(DATES, format="2012-%m-%d"))
ifelse (d >= WS | d < SE, "Winter",
ifelse (d >= SE & d < SS, "Spring",
ifelse (d >= SS & d < FE, "Summer", "Fall")))
}
merge.df$Season <- getSeason(merge.df$Date)
merge.df <- merge.df %>% mutate(B = gl(4,37470/4,labels = c("Fall","Summer","Spring","Winter")))
length(merge.df$Date)
ggplot(data = merge.df, aes(x = Date)) +
geom_line(aes(y = Mean_TC), alpha = 0.5, size = 0.75) +
geom_jitter(aes(y = wmbreak), colour = "#298A08", height = 20, alpha = 0.1, size = 2) +
scale_x_date(limits = c(as.Date("1990-01-01"),as.Date("2004-01-01"))) +
guides(colour = guide_legend(override.aes = list(alpha=1)))
|
/TO_weather.R
|
no_license
|
eugejoh/TO_Watermain
|
R
| false | false | 6,148 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots_biotmle.R
\name{volcano_biotmle}
\alias{volcano_biotmle}
\title{Volcano plot for class biotmle}
\usage{
volcano_biotmle(biotmle)
}
\arguments{
\item{biotmle}{object of class \code{biotmle} as produced by an appropriate
call to \code{biomarkertmle}}
}
\value{
object of class \code{ggplot} containing a standard volcano plot of
the log-fold change in the causal target parameter against the raw
log p-value computed from the moderated t-test in \code{limmatmle}.
}
\description{
Volcano plot of the log-changes in the target causal parameter against the
log raw p-values from the moderated t-test.
}
\examples{
library(dplyr)
data(illuminaData)
data(biomarkertmleOut)
"\%ni\%" = Negate("\%in\%")
W <- illuminaData \%>\%
dplyr::select(which(colnames(.) \%in\% c("age", "sex", "smoking"))) \%>\%
dplyr::mutate(
age = as.numeric((age > quantile(age, 0.25))),
sex = I(sex),
smoking = I(smoking)
)
A <- illuminaData \%>\%
dplyr::select(which(colnames(.) \%in\% c("benzene")))
A <- A[, 1]
Y <- illuminaData \%>\%
dplyr::select(which(colnames(.) \%ni\% c("age", "sex", "smoking", "benzene",
"id")))
geneIDs <- colnames(Y)
design <- as.data.frame(cbind(rep(1, nrow(Y)),
as.numeric(A == max(unique(A)))))
colnames(design) <- c("intercept", "Tx")
limmaTMLEout <- limmatmle(biotmle = biomarkerTMLEout, IDs = NULL,
designMat = design)
volcano_biotmle(biotmle = limmaTMLEout)
}
|
/man/volcano_biotmle.Rd
|
permissive
|
guhjy/biotmle
|
R
| false | true | 1,577 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_enrichment.R
\name{.filterSeqs}
\alias{.filterSeqs}
\title{Filter Sequences}
\usage{
.filterSeqs(
seqs,
maxFracN = 0.7,
minLength = 5L,
maxLength = 100000L,
verbose = FALSE
)
}
\arguments{
\item{seqs}{a \code{DNAStringSet} object.}
\item{maxFracN}{A numeric scalar with the maximal fraction of N bases allowed
in a sequence (defaults to 0.7).}
\item{minLength}{The minimum sequence length (default from Homer).
Sequences shorter than this will be filtered out.}
\item{maxLength}{The maximum sequence length (default from Homer).
Sequences longer than this will be filtered out.}
\item{verbose}{A logical scalar. If \code{TRUE}, report on filtering.}
}
\value{
a logical vector of the same length as \code{seqs} with \code{TRUE}
indicated to keep the sequence and \code{FALSE} to filter it out.
}
\description{
Filter sequences that are unlikely to be useful for motif
enrichment analysis. The current defaults are based on HOMER (version 4.11).
}
\details{
The filtering logic is based on \code{removePoorSeq.pl} from Homer.
}
\keyword{internal}
|
/man/dot-filterSeqs.Rd
|
no_license
|
shaoyoucheng/monaLisa
|
R
| false | true | 1,148 |
rd
|
setwd ("//.../Coursera/Exploratory Data Analysis/Week 4")
getwd()
install.packages("downloader")
library("downloader")
dataset_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download(dataset_url, dest = "data.zip", mode = "wb")
unzip("data.zip", exdir = "//.../Coursera/Exploratory Data Analysis/Week 4")
Emissions <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
str(Emissions)
dim(Emissions)
# [1] 6497651 6
names(Emissions)
# [1] "fips" "SCC" "Pollutant" "Emissions" "type" "year"
years <- unique(Emissions$year)
# Question 2 #
# Have total emissions from PM2.5 decreased in the Baltimore City, Maryland (fips == "24510") from 1999 to 2008?
# Use the base plotting system to make a plot answering this question.
#Calculate total emissions of Baltimore city by year
Balt_emissions <- Emissions[Emissions$fips=="24510",]
total_Balt <- tapply(Balt_emissions$Emissions,Balt_emissions$year,sum)
total_Balt
# 1999 2002 2005 2008
# 3274.180 2453.916 3091.354 1862.282
png(filename='plot_2_Baltimore.png', width=480, height=480, units='px')
plot(names(total_Balt ), total_Balt, type = "l",
xlab = "Year", ylab = expression("PM2.5 Emissions"),
main = expression("Total Emissions Baltimore City"), pch = 19, col = "green", lwd = 6)
dev.off()
|
/plot_2_Baltimore.R
|
no_license
|
xetaro/Exploratory-Data-Analysis-Course-Project-2
|
R
| false | false | 1,358 |
r
|
############################################################################################
## package 'secrlinear'
## getLineID.R
## 2022-11-12 separate file
############################################################################################
getLineID <- function (mask, laboffset= rep(spacing(mask)*3,2), ...) {
if (is.null(covariates(mask)$LineID))
stop("LineID not found in covariates(mask)")
plot(mask, ...)
cat ("click on line \n")
output <- data.frame(Point=numeric(0), LineID=character(0))
repeat {
xy1 <- as.data.frame(locator(1))
if (nrow(xy1) < 1)
break
else {
matched <- nearesttrap(xy1, mask)
lineID <- as.character(covariates(mask)$LineID[matched])
cat("Point ", matched, " is on line ", lineID, "\n")
points(mask$x[matched], mask$y[matched], pch=1)
text (mask$x[matched] + laboffset[1], mask$y[matched] + laboffset[2],
lineID, cex=0.7, col = 'red')
output<- rbind(output, data.frame(Point=matched, LineID=lineID, stringsAsFactors=FALSE))
}
}
invisible (output)
}
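## Example (interactive, illustrative; 'lmask' stands in for a linear mask whose
## covariates include a LineID column):
## picked <- getLineID(lmask, laboffset = c(50, 50))
## picked$LineID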
|
/R/getLineID.R
|
no_license
|
cran/secrlinear
|
R
| false | false | 1,187 |
r
|
library(xts)
file="H:\\Trading\\EATesting\\MedAvgCross\\EURJPY_1H_13.csv";
df = read.csv(file, header = TRUE , as.is = TRUE )
df$date <- as.POSIXct(df$time, format = "%d-%m-%Y %H:%M")
IndiRawXTS <- xts(x = df, order.by = df$date)
mean(df$cross)
sd(df$cross)
|
/MovingMedianCross/Research.R
|
no_license
|
phanigenin/Trading-Research
|
R
| false | false | 265 |
r
|