Dataset Viewer

| column | type | lengths / values |
|---|---|---|
| content | large_string | lengths 0–6.46M |
| path | large_string | lengths 3–331 |
| license_type | large_string | 2 classes |
| repo_name | large_string | lengths 5–125 |
| language | large_string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4–6.46M |
| extension | large_string | 75 classes |
| text | string | lengths 0–6.46M |
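Each row below is one source file together with its metadata; the text column mirrors the content column. As a rough sketch (assuming the dataset has been exported locally as Parquet files under data/; that path and the example filter are illustrative, not taken from this page), the dump can be browsed from R with the arrow and dplyr packages:

library(arrow)
library(dplyr)

# Lazily open the (assumed) local Parquet export, then keep only hand-written,
# non-vendored R files and the columns needed for a quick look.
r_files <- open_dataset("data/") %>%
  filter(language == "R", !is_vendor, !is_generated) %>%
  select(repo_name, path, extension, length_bytes, content) %>%
  collect()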
## MannWhitney_SplitYearSensitivity.R
# This script will test the sensitivity of the Mann-Whitney results to the year chosen for the split.
source(file.path("code", "paths+packages.R"))
## load data
gage_regions <-
readr::read_csv(file.path("results", "00_SelectGagesForAnalysis_GageRegions.csv"))
gage_sample_annual <-
readr::read_csv(file = file.path("results", "00_SelectGagesForAnalysis_GageSampleAnnual.csv")) %>%
dplyr::left_join(gage_regions, by = "gage_ID")
## variables to test?
vars_all <- c("annualnoflowdays", "zeroflowfirst", "peak2z_length")
## years to split for mann-whitney? (this will be included in the first set)
mw_yr_all <- seq(1994, by = 1, length.out = 9)
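# (candidate split years: 1994 through 2002)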
## loop through gages
sites <- unique(gage_sample_annual$gage_ID)
start_flag <- T
for (s in sites){
for (var in vars_all){
for (mw_yr_split in mw_yr_all){
# mann-whitney groups
group1 <-
gage_sample_annual %>%
subset(gage_ID == s & currentclimyear <= mw_yr_split) %>%
dplyr::pull(var)
group2 <-
gage_sample_annual %>%
subset(gage_ID == s & currentclimyear > mw_yr_split) %>%
dplyr::pull(var)
if (sum(is.finite(group1)) > 5 & sum(is.finite(group2)) > 5){
mw_test <- wilcox.test(group1, group2)
mw_p <- mw_test$p.value
mw_out <- tibble::tibble(gage_ID = s,
metric = var,
mw_yr = mw_yr_split,
mw_p = mw_p,
mw_meanGroup1 = mean(group1, na.rm = T),
mw_meanGroup2 = mean(group2, na.rm = T),
mw_medianGroup1 = median(group1, na.rm = T),
mw_medianGroup2 = median(group2, na.rm = T),
n_yrGroup1 = sum(is.finite(group1)),
n_yrGroup2 = sum(is.finite(group2)))
} else {
mw_out <- tibble::tibble(gage_ID = s,
metric = var,
mw_yr = mw_yr_split,
mw_p = NA,
mw_meanGroup1 = mean(group1, na.rm = T),
mw_meanGroup2 = mean(group2, na.rm = T),
mw_medianGroup1 = median(group1, na.rm = T),
mw_medianGroup2 = median(group2, na.rm = T),
n_yrGroup1 = sum(is.finite(group1)),
n_yrGroup2 = sum(is.finite(group2)))
}
if (start_flag){
mw_all <- mw_out
start_flag <- F
} else {
mw_all <- dplyr::bind_rows(mw_all, mw_out)
}
}
}
print(paste0(s, " complete"))
}
## plot
df_mw <-
mw_all %>%
dplyr::mutate(mw_diff_mean = mw_meanGroup2 - mw_meanGroup1,
mw_diff_median = mw_medianGroup2 - mw_medianGroup1) %>%
subset(complete.cases(.))
# make a column classifying Mann-Whitney significance and direction of change
p_thres <- 0.05
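# classification logic: for annualnoflowdays, an increase in the group-2 mean means
# more no-flow days (drier); for zeroflowfirst and peak2z_length, a decrease means
# no-flow begins earlier in the year / sooner after the peak (drier)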
df_mw$mw_sig <- NA_character_
df_mw$mw_sig[df_mw$mw_p > p_thres] <- "NotSig"
df_mw$mw_sig[df_mw$mw_p < p_thres &
df_mw$mw_diff_mean < 0 &
df_mw$metric %in% c("zeroflowfirst", "peak2z_length")] <- "SigDry"
df_mw$mw_sig[df_mw$mw_p < p_thres &
df_mw$mw_diff_mean < 0 &
df_mw$metric %in% c("annualnoflowdays")] <- "SigWet"
df_mw$mw_sig[df_mw$mw_p < p_thres &
df_mw$mw_diff_mean > 0 &
df_mw$metric %in% c("zeroflowfirst", "peak2z_length")] <- "SigWet"
df_mw$mw_sig[df_mw$mw_p < p_thres &
df_mw$mw_diff_mean > 0 &
df_mw$metric %in% c("annualnoflowdays")] <- "SigDry"
# histograms
p_mw_hist_afnf <-
ggplot() +
geom_histogram(data = subset(df_mw, metric == "annualnoflowdays"),
aes(x = mw_diff_mean, fill = mw_sig), binwidth = 10) +
geom_vline(xintercept = 0, color = "#ffffbf") +
facet_wrap(~mw_yr, ncol = 3) +
scale_x_continuous(name = "Change in Annual No-Flow Days, (Split+1 to 2017) - (1980 to Split) [days]") +
scale_y_continuous(name = "Number of Gages") +
scale_fill_manual(name = "Mann-Whitney Significance",
values = c("SigDry" = col.cat.red, "SigWet" = col.cat.blu, "NotSig" = col.gray),
labels = c("SigDry" = "Drier", "SigWet" = "Wetter", "NotSig" = "No Change")) +
theme(panel.border = element_blank(),
legend.position = "bottom") +
guides(fill = guide_legend(order = 1, title.position = "top", title.hjust = 0.5))
ggsave(file.path("figures_manuscript", "MannWhitney_SplitYearSensitivity-annualnoflowdays.png"),
plot = p_mw_hist_afnf, width = 190, height = 220, units = "mm")
p_mw_hist_zff <-
ggplot() +
geom_histogram(data = subset(df_mw, metric == "zeroflowfirst"),
aes(x = mw_diff_mean, fill = mw_sig), binwidth = 10) +
geom_vline(xintercept = 0, color = "#ffffbf") +
facet_wrap(~mw_yr, ncol = 3) +
scale_x_continuous(name = "Change in First No-Flow Day, (Split+1 to 2017) - (1980 to Split) [days]") +
scale_y_continuous(name = "Number of Gages") +
scale_fill_manual(name = "Mann-Whitney Significance",
values = c("SigDry" = col.cat.red, "SigWet" = col.cat.blu, "NotSig" = col.gray),
labels = c("SigDry" = "Drier", "SigWet" = "Wetter", "NotSig" = "No Change")) +
theme(panel.border = element_blank(),
legend.position = "bottom") +
guides(fill = guide_legend(order = 1, title.position = "top", title.hjust = 0.5))
ggsave(file.path("figures_manuscript", "MannWhitney_SplitYearSensitivity-zeroflowfirst.png"),
plot = p_mw_hist_zff, width = 190, height = 220, units = "mm")
p_mw_hist_p2z <-
ggplot() +
geom_histogram(data = subset(df_mw, metric == "peak2z_length"),
aes(x = mw_diff_mean, fill = mw_sig), binwidth = 10) +
geom_vline(xintercept = 0, color = "#ffffbf") +
facet_wrap(~mw_yr, ncol = 3) +
scale_x_continuous(name = "Change in Days from Peak to No-Flow, (Split+1 to 2017) - (1980 to Split) [days]") +
scale_y_continuous(name = "Number of Gages") +
scale_fill_manual(name = "Mann-Whitney Significance",
values = c("SigDry" = col.cat.red, "SigWet" = col.cat.blu, "NotSig" = col.gray),
labels = c("SigDry" = "Drier", "SigWet" = "Wetter", "NotSig" = "No Change")) +
theme(panel.border = element_blank(),
legend.position = "bottom") +
guides(fill = guide_legend(order = 1, title.position = "top", title.hjust = 0.5))
ggsave(file.path("figures_manuscript", "MannWhitney_SplitYearSensitivity-peak2z.png"),
plot = p_mw_hist_p2z, width = 190, height = 220, units = "mm")
## organize data into a table:
# Metric, Split Year, # Sig Wet, # Sig Dry, # Not Sig, Mean Change Sig Wet, Mean Change Sig Dry
mw_summary_table <-
df_mw %>%
dplyr::group_by(metric, mw_yr) %>%
dplyr::summarize(n_SigDry = sum(mw_sig == "SigDry"),
n_SigWet = sum(mw_sig == "SigWet"),
n_NotSig = sum(mw_sig == "NotSig"),
n_tested = n_SigDry + n_SigWet + n_NotSig,
prc_SigDry = n_SigDry/540,
prc_SigWet = n_SigWet/540,
prc_NotSig = n_NotSig/540) %>%
dplyr::mutate(SigDryText = paste0("n = ", n_SigDry, " (", round(prc_SigDry*100, 1), "%)"),
SigWetText = paste0("n = ", n_SigWet, " (", round(prc_SigWet*100, 1), "%)")) %>%
dplyr::select(metric, mw_yr, SigDryText, SigWetText)
readr::write_csv(mw_summary_table, file.path("figures_manuscript", "MannWhitney_SplitYearSensitivity.csv"))
| path: /figures_manuscript/MannWhitney_SplitYearSensitivity.R | license_type: no_license | repo_name: dry-rivers-rcn/IntermittencyTrends | language: R | is_vendor: false | is_generated: false | length_bytes: 7,667 | extension: r |
\name{CPTtools-package}
\alias{CPTtools-package}
\alias{CPTtools}
\docType{package}
\title{
\packageTitle{CPTtools}
}
\description{
\packageDescription{CPTtools}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{CPTtools}
CPTtools is a collection of various bits of R code useful for processing
Bayes net output. Some were designed to work with ETS's proprietary
StatShop code, and some with RNetica. The code collected in this
package is all free from explicit dependencies on the specific Bayes net
package and will hopefully be useful with other systems as well.
The majority of the code is related to building conditional probability
tables (CPTs) for Bayesian networks. The package has two output
representations for a CPT. The first is a \code{data.frame} object
where the first several columns are factor variables corresponding to
the parent variables, and the remaining columns are numeric variables
corresponding to the states of the child variable. The rows represent
possible configurations of the parent variables. An example is shown below.
\preformatted{
S1 S2 Full Partial None
1 High High 0.81940043 0.15821522 0.02238436
2 Medium High 0.46696668 0.46696668 0.06606664
3 Low High 0.14468106 0.74930671 0.10601223
4 High Medium 0.76603829 0.14791170 0.08605000
5 Medium Medium 0.38733177 0.38733177 0.22533647
6 Low Medium 0.10879020 0.56342707 0.32778273
7 High Low 0.65574465 0.12661548 0.21763987
8 Medium Low 0.26889642 0.26889642 0.46220715
9 Low Low 0.06630741 0.34340770 0.59028489
10 High LowerYet 0.39095414 0.07548799 0.53355787
11 Medium LowerYet 0.11027649 0.11027649 0.77944702
12 Low LowerYet 0.02337270 0.12104775 0.85557955
}
The second representation is a table (\code{matrix}) with just the
numeric part. Two approaches to building these tables from parameters
are described below. The more flexible discrete partial credit model is
used as the basis of the parameterized networks in the
\code{\link[Peanut:Peanut-package]{Peanut}} package.
In addition to the code for building partial credit networks, this
package contains some code for building Bayesian network structures from
(inverse) correlation matrixes, and graphical displays for Bayes net
output. The latter includes some diagnostic plots and additional
diagnostic tests.
}
\section{Discrete Partial Credit Framework}{
The original parameterization for creating conditional probability
tables based on Almond et al (2001) proved to be insufficiently
flexible. Almond (2015) describes a newer parameterization based on
three steps:
\enumerate{
\item{Translate the parent variables onto a numeric effective theta
scale (\code{\link{effectiveThetas}}).}
\item{Combine the parent effective thetas into a single effective
theta using a combination rule (\code{\link{Compensatory}},
\code{\link{OffsetConjunctive}}).}
\item{Convert the effective theta for each row of the table into
conditional probabilities using a link function
(\code{\link{gradedResponse}}, \code{\link{partialCredit}},
\code{\link{normalLink}}).}
}
The \code{\link{partialCredit}} link function is particularly flexible
as it allows different parameterizations and different combination rules
for each state of the child variable. This functionality is best
captured by the two high level functions:
\describe{
\item{\code{\link{calcDPCTable}}}{Creates the probability table for
the discrete partial credit model given the parameters.}
\item{\code{\link{mapDPC}}}{Finds an MAP estimate for the parameters
given an observed table of counts.}
}
This parameterization serves as the basis for the model used in the
\code{\link[Peanut:Peanut-package]{Peanut}} package.
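For orientation, the call below (copied from the \code{\link{calcDPCFrame}}
example in the examples section of this page, where \code{skill1l},
\code{skill2l} and \code{pcreditL} are defined) exercises all three steps for
a two-parent child variable; \code{\link{calcDPCTable}} builds the
corresponding probability table from the same parameters.
\preformatted{
cptPC2 <- calcDPCFrame(list(S1=skill1l, S2=skill2l), pcreditL,
                       list(full=log(1), partial=log(c(S1=1, S2=.75))),
                       betas=list(full=c(0, 999), partial=1.0),
                       rule=list("OffsetDisjunctive", "Compensatory"))
}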
}
\section{Other parametric CPT models}{
The first two steps of the discrete partial credit framework outlined
above are due to a suggestion by Lou DiBello (Almond et al, 2001).
This led to an older framework, in which the link function was hard
coded into the conditional probability table formation. The models
were called DiBello-\emph{XX}, where \emph{XX} is the name of the link
function. Almond et al. (2015) describes several additional
examples.
\describe{
\item{\code{\link{calcDDTable}}}{Calculates DiBello-Dirichlet model
probability and parameter tables.}
\item{\code{\link{calcDNTable}}}{Creates the probability table for
DiBello-Normal distribution. This is equivalent to using the
\code{\link{normalLink}} in the DPC framework. This also uses a link
scale parameter.}
\item{\code{\link{calcDSTable}}}{Creates the probability table for
DiBello-Samejima distribution. This is equivalent to using the
\code{\link{gradedResponse}} in the DPC framework.}
\item{\code{\link{calcDSllike}}}{Calculates the log-likelihood for
data from a DiBello-Samejima (Normal) distribution.}
}
Diez (1993) and Srinivas (1993) describe an older parametric framework
for Bayes nets based on the noisy-or or noisy-max function. These are
also available.
\describe{
\item{\code{\link{calcNoisyAndTable}}}{Calculate the conditional
probability table for a Noisy-And or Noisy-Min distribution.}
\item{\code{\link{calcNoisyOrTable}}}{Calculate the conditional
probability table for a Noisy-Or distribution.}
}
}
\section{Building Bayes nets from (inverse) correlation matrixes}{
Almond (2010) noted that in many cases the best information about the
relationship among variables came from a procedure that produces a
correlation matrix (e.g., a factor analysis). Applying a trick from
Whittaker (1990), connecting pairs of nodes corresponding to nonzero
entries in an inverse correlation matrix produces an undirected
graphical model. Ordering the nodes in a perfect ordering allows
the undirected model to be converted into a directed model (Bayesian
network). The conditional probability tables can then be created
through a series of regressions.
The following functions implement this protocol:
\describe{
\item{\code{\link{structMatrix}}}{Finds graphical structure from a
covariance matrix.}
\item{\code{\link{mcSearch}}}{Orders variables using Maximum
Cardinality search.}
\item{\code{\link{buildParentList}}}{Builds a list of parents of
nodes in a graph.}
\item{\code{\link{buildRegressions}}}{Creates a series of regressions
from a covariance matrix.}
\item{\code{\link{buildRegressionTables}}}{Builds conditional
probability tables from regressions.}
}
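As a sketch, the \code{MathGrades} example from the examples section of this
page chains these steps together:
\preformatted{
data(MathGrades)
pl <- buildParentList(structMatrix(MathGrades$var), "Algebra")
rt <- buildRegressions(MathGrades$var, MathGrades$means, pl)
tabs <- buildRegressionTables(rt, MathGrades$pvecs, MathGrades$means,
                              sqrt(diag(MathGrades$var)))
}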
}
\section{Other model construction tools}{
These functions are a grab bag of lower level utilities useful for
building CPTs:
\describe{
\item{\code{\link{areaProbs}}}{Translates between normal and
categorical probabilities.}
\item{\code{\link{numericPart}}}{Splits a mixed data frame into a
numeric matrix and a factor part.}
\item{\code{\link{dataTable}}}{Constructs a table of counts from a
set of discrete observations.}
\item{\code{\link{eThetaFrame}}}{Constructs a data frame showing the
effective thetas for each parent combination.}
\item{\code{\link{effectiveThetas}}}{Assigns effective theta levels
for a categorical variable.}
\item{\code{\link{getTableStates}}}{Gets metadata about a
conditional probability table.}
\item{\code{\link{rescaleTable}}}{Rescales the numeric part of the
table.}
\item{\code{\link{scaleMatrix}}}{Scales a matrix to have a unit
diagonal.}
\item{\code{\link{scaleTable}}}{Scales a table according to the Sum
and Scale column.}
}
}
\section{Bayes net output displays and tests}{
Almond et al. (2009) suggested using hanging barplots for displaying
Bayes net output and gave several examples. The function
\code{\link{stackedBars}} produces the simple version of this plot and
the function \code{\link{compareBars}} compares two distributions
(e.g., prior and posterior). The function
\code{\link{buildFactorTab}} is useful for building the data and the
function \code{\link{colorspread}} is useful for building color
gradients.
Madigan, Mosurski and Almond (1997) describe a graphical weight of
evidence balance sheet (see also Almond et al, 2015, Chapter 7; Almond
et al, 2013). The function \code{\link{woeHist}} calculates the weights of
evidence for a series of observations and the function
\code{\link{woeBal}} produces a graphical display.
Sinharay and Almond (2006) propose a graphical fit test for
conditional probability tables (see also, Almond et al, 2015, Chapter
10). The function \code{\link{OCP}} implements this test, and the
function \code{\link{betaci}} creates the beta credibility intervals
around which the function is built.
Key to Bayesian network models are the assumptions of conditional
independence which underlie the model. The function
\code{\link{localDepTest}} tests these assumptions based on observed
(or imputed) data tables.
The function \code{\link{mutualInformation}} calculates the mutual
information of a two-way table, a measure of the strength of
association. This is similar to the measure used in many Bayes net
packages (e.g., \code{\link[RNetica]{MutualInfo}}).
}
\section{Data sets}{
Two data sets are provided with this package:
\describe{
\item{\code{\link{ACED}}}{Data from ACED field trial (Shute, Hansen,
and Almond, 2008). This example is based on a field trial of a
Bayesian network based Assessment for Learning system, and contains
both item-level response and high-level network summaries. A
complete description of the Bayes net can be found at
\url{http://ecd.ralmond.net/ecdwiki/ACED/ACED}.}
\item{\code{\link{MathGrades}}}{Grades on 5 mathematics tests from
Mardia, Kent and Bibby (from Whittaker, 1990).}
}
}
\section{Index}{
Complete index of all functions.
\packageIndices{CPTtools}
}
\author{
\packageAuthor{CPTtools}
Maintainer: \packageMaintainer{CPTtools}
}
\references{
Almond, R.G. (2015). An IRT-based Parameterization for Conditional
Probability Tables. Paper submitted to the 2015 Bayesian Application
Workshop at the Uncertainty in Artificial Intelligence conference.
Almond, R.G., Mislevy, R.J., Steinberg, L.S., Williamson, D.M. and
Yan, D. (2015) \emph{Bayesian Networks in Educational Assessment.}
Springer.
Almond, R. G. (2010). \sQuote{I can name that Bayesian network in two
matrixes.} \emph{International Journal of Approximate Reasoning.}
\bold{51}, 167-178.
Almond, R. G., Shute, V. J., Underwood, J. S., and Zapata-Rivera,
J.-D (2009). Bayesian Networks: A Teacher's View. \emph{International
Journal of Approximate Reasoning.} \bold{50}, 450-460.
Almond, R.G., DiBello, L., Jenkins, F., Mislevy, R.J.,
Senturk, D., Steinberg, L.S. and Yan, D. (2001) Models for Conditional
Probability Tables in Educational Assessment. \emph{Artificial
Intelligence and Statistics 2001} Jaakkola and Richardson (eds).,
Morgan Kaufmann, 137--143.
Diez, F. J. (1993) Parameter adjustment in Bayes networks. The
generalized noisy OR-gate. In Heckerman and Mamdani (eds)
\emph{Uncertainty in Artificial Intelligence 93.} Morgan Kaufmann.
99--105.
Muraki, E. (1992). A Generalized Partial Credit Model: Application
of an EM Algorithm. \emph{Applied Psychological Measurement}, \bold{16},
159-176. DOI: 10.1177/014662169201600206
Samejima, F. (1969) Estimation of latent ability using a
response pattern of graded scores. \emph{Psychometrika Monograph No.
17}, \bold{34}, (No. 4, Part 2).
Shute, V. J., Hansen, E. G., & Almond, R. G. (2008). You can't fatten
a hog by weighing it---Or can you? Evaluating an assessment for learning
system called ACED. \emph{International Journal of Artificial
Intelligence and Education}, \bold{18}(4), 289-316.
Sinharay, S. and Almond, R.G. (2006). Assessing Fit of Cognitively
Diagnostic Models: A case study. \emph{Educational and Psychological
Measurement}. \bold{67}(2), 239--257.
Srinivas, S. (1993) A generalization of the Noisy-Or model, the
generalized noisy OR-gate. In Heckerman and Mamdani (eds)
\emph{Uncertainty in Artificial Intelligence 93.} Morgan Kaufmann.
208--215.
Whittaker, J. (1990). \emph{Graphical Models in Applied Multivariate
Statistics}. Wiley.
Madigan, D., Mosurski, K. and Almond, R. (1997) Graphical explanation
in belief networks. \emph{Journal of Computational Graphics and
Statistics}, \bold{6}, 160-181.
Almond, R. G., Kim, Y. J., Shute, V. J. and Ventura, M. (2013).
Debugging the Evidence Chain. In Almond, R. G. and Mengshoel,
O. (Eds.) \emph{Proceedings of the 2013 UAI Application Workshops:
Big Data meet Complex Models and Models for Spatial, Temporal and
Network Data (UAI2013AW)}, 1-10.
\url{http://ceur-ws.org/Vol-1024/paper-01.pdf}
}
\keyword{ package }
\seealso{
\code{\link[RNetica]{RNetica}},
\code{\link[Peanut:Peanut-package]{Peanut}}
}
\examples{
## Set up variables
skill1l <- c("High","Medium","Low")
skill2l <- c("High","Medium","Low","LowerYet")
correctL <- c("Correct","Incorrect")
pcreditL <- c("Full","Partial","None")
gradeL <- c("A","B","C","D","E")
## New Discrete Partial Credit framework:
## Complex model, different rules for different levels
cptPC2 <- calcDPCFrame(list(S1=skill1l,S2=skill2l),pcreditL,
list(full=log(1),partial=log(c(S1=1,S2=.75))),
betas=list(full=c(0,999),partial=1.0),
rule=list("OffsetDisjunctive","Compensatory"))
## Graded Response using the older DiBello-Samejima framework.
cptGraded <- calcDSTable(list(S1=skill1l),gradeL, 0.0, 0.0, dinc=c(.3,.4,.3))
## Building a Bayes net from a correlation matrix.
data(MathGrades)
pl <- buildParentList(structMatrix(MathGrades$var),"Algebra")
rt <- buildRegressions(MathGrades$var,MathGrades$means,pl)
tabs <- buildRegressionTables(rt, MathGrades$pvecs, MathGrades$means,
sqrt(diag(MathGrades$var)))
## Stacked Barplots:
margins.prior <- data.frame (
Trouble=c(Novice=.19,Semester1=.24,Semester2=.28,Semester3=.20,Semester4=.09),
NDK=c(Novice=.01,Semester1=.09,Semester2=.35,Semester3=.41,Semester4=.14),
Model=c(Novice=.19,Semester1=.28,Semester2=.31,Semester3=.18,Semester4=.04)
)
margins.post <- data.frame(
Trouble=c(Novice=.03,Semester1=.15,Semester2=.39,Semester3=.32,Semester4=.11),
NDK=c(Novice=.00,Semester1=.03,Semester2=.28,Semester3=.52,Semester4=.17),
Model=c(Novice=.10,Semester1=.25,Semester2=.37,Semester3=.23,Semester4=.05))
stackedBars(margins.post,3,
main="Marginal Distributions for NetPASS skills",
sub="Baseline at 3rd Semester level.",
cex.names=.75, col=hsv(223/360,.2,0.10*(5:1)+.5))
compareBars(margins.prior,margins.post,3,c("Prior","Post"),
main="Margins before/after Medium Trouble Shooting Task",
sub="Observables: cfgCor=Medium, logCor=High, logEff=Medium",
legend.loc = "topright",
cex.names=.75, col1=hsv(h=.1,s=.2*1:5-.1,alpha=1),
col2=hsv(h=.6,s=.2*1:5-.1,alpha=1))
## Weight of evidence balance sheets
sampleSequence <- read.csv(paste(library(help="CPTtools")$path,
"testFiles","SampleStudent.csv",
sep=.Platform$file.sep),
header=TRUE,row.names=1)
woeBal(sampleSequence[,c("H","M","L")],c("H"),c("M","L"),lcex=1.25)
### Observable Characteristic Plot
pi <- c("+"=.15,"-"=.85)
nnn <- c("(0,0,0)"=20,"(0,0,1)"=10,
"(0,1,0)"=10,"(0,1,1)"=5,
"(1,0,0)"=10,"(1,0,1)"=10,
"(1,1,0)"=10,"(1,1,1)"=25)
xx1 <- c("(0,0,0)"=2,"(0,0,1)"=5,
"(0,1,0)"=1,"(0,1,1)"=3,
"(1,0,0)"=0,"(1,0,1)"=2,
"(1,1,0)"=5,"(1,1,1)"=24)
grouplabs <- c(rep("-",3),"+")
grouplabs1 <- rep(grouplabs,each=2)
OCP2 (xx1,nnn,grouplabs1,pi,c("-","+"),ylim=c(0,1), reflty=c(2,4),
setlabs=c("Low Skill3","High Skill3"),setat=-.8,
main="Data for which Skill 3 is relevant")
}
| path: /man/CPTtools-package.Rd | license_type: permissive | repo_name: erge324/CPTtools | language: R | is_vendor: false | is_generated: false | length_bytes: 16,265 | extension: rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\name{data_path}
\alias{data_path}
\title{Represents a path to data in a datastore.}
\usage{
data_path(datastore, path_on_datastore = NULL, name = NULL)
}
\arguments{
\item{datastore}{The Datastore to reference.}
\item{path_on_datastore}{The relative path in the backing storage for the data reference.}
\item{name}{An optional name for the DataPath.}
}
\value{
The \code{DataPath} object.
}
\description{
The path represented by a DataPath object can point to a directory or a data artifact (blob, file).
}
\examples{
\dontrun{
my_data <- register_azure_blob_container_datastore(
workspace = ws,
datastore_name = blob_datastore_name,
container_name = ws_blob_datastore$container_name,
account_name = ws_blob_datastore$account_name,
account_key = ws_blob_datastore$account_key,
create_if_not_exists = TRUE)
datapath <- data_path(my_data, <path_on_my_datastore>)
dataset <- create_file_dataset_from_files(datapath)
}
}
\seealso{
\code{\link{create_file_dataset_from_files}}
\code{\link{create_tabular_dataset_from_parquet_files}}
\code{\link{create_tabular_dataset_from_delimited_files}}
\code{\link{create_tabular_dataset_from_json_lines_files}}
\code{\link{create_tabular_dataset_from_sql_query}}
}
| path: /man/data_path.Rd | license_type: permissive | repo_name: revodavid/azureml-sdk-for-r | language: R | is_vendor: false | is_generated: true | length_bytes: 1,314 | extension: rd |
# nocov - compat-purrr (last updated: rlang 0.0.0.9007)
# This file serves as a reference for compatibility functions for
# purrr. They are not drop-in replacements but allow a similar style
# of programming. This is useful in cases where purrr is too heavy a
# package to depend on. Please find the most recent version in rlang's
# repository.
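## Quick usage sketch (not part of the original file; the calls mirror purrr's API):
##   map_dbl(1:3, function(x) x^2)        # 1 4 9
##   map2_chr(letters[1:3], 1:3, paste0)  # "a1" "b2" "c3"
##   compact(list(1, NULL, 3))            # drops zero-length elements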
map <- function(.x, .f, ...) {
lapply(.x, .f, ...)
}
map_mold <- function(.x, .f, .mold, ...) {
out <- vapply(.x, .f, .mold, ..., USE.NAMES = FALSE)
names(out) <- names(.x)
out
}
map_lgl <- function(.x, .f, ...) {
map_mold(.x, .f, logical(1), ...)
}
map_int <- function(.x, .f, ...) {
map_mold(.x, .f, integer(1), ...)
}
map_dbl <- function(.x, .f, ...) {
map_mold(.x, .f, double(1), ...)
}
map_chr <- function(.x, .f, ...) {
map_mold(.x, .f, character(1), ...)
}
map_cpl <- function(.x, .f, ...) {
map_mold(.x, .f, complex(1), ...)
}
pluck <- function(.x, .f) {
map(.x, `[[`, .f)
}
pluck_lgl <- function(.x, .f) {
map_lgl(.x, `[[`, .f)
}
pluck_int <- function(.x, .f) {
map_int(.x, `[[`, .f)
}
pluck_dbl <- function(.x, .f) {
map_dbl(.x, `[[`, .f)
}
pluck_chr <- function(.x, .f) {
map_chr(.x, `[[`, .f)
}
pluck_cpl <- function(.x, .f) {
map_cpl(.x, `[[`, .f)
}
map2 <- function(.x, .y, .f, ...) {
Map(.f, .x, .y, ...)
}
map2_lgl <- function(.x, .y, .f, ...) {
as.vector(map2(.x, .y, .f, ...), "logical")
}
map2_int <- function(.x, .y, .f, ...) {
as.vector(map2(.x, .y, .f, ...), "integer")
}
map2_dbl <- function(.x, .y, .f, ...) {
as.vector(map2(.x, .y, .f, ...), "double")
}
map2_chr <- function(.x, .y, .f, ...) {
as.vector(map2(.x, .y, .f, ...), "character")
}
map2_cpl <- function(.x, .y, .f, ...) {
as.vector(map2(.x, .y, .f, ...), "complex")
}
args_recycle <- function(args) {
lengths <- map_int(args, length)
n <- max(lengths)
stopifnot(all(lengths == 1L | lengths == n))
to_recycle <- lengths == 1L
args[to_recycle] <- map(args[to_recycle], function(x) rep.int(x, n))
args
}
pmap <- function(.l, .f, ...) {
args <- args_recycle(.l)
do.call("mapply", c(
FUN = list(quote(.f)),
args, MoreArgs = quote(list(...)),
SIMPLIFY = FALSE, USE.NAMES = FALSE
))
}
probe <- function(.x, .p, ...) {
if (is_logical(.p)) {
stopifnot(length(.p) == length(.x))
.p
} else {
map_lgl(.x, .p, ...)
}
}
keep <- function(.x, .f, ...) {
.x[probe(.x, .f, ...)]
}
discard <- function(.x, .p, ...) {
sel <- probe(.x, .p, ...)
.x[is.na(sel) | !sel]
}
map_if <- function(.x, .p, .f, ...) {
matches <- probe(.x, .p)
.x[matches] <- map(.x[matches], .f, ...)
.x
}
compact <- function(.x) {
Filter(length, .x)
}
transpose <- function(.l) {
inner_names <- names(.l[[1]])
if (is.null(inner_names)) {
fields <- seq_along(.l[[1]])
} else {
fields <- set_names(inner_names)
}
map(fields, function(i) {
map(.l, .subset2, i)
})
}
every <- function(.x, .p, ...) {
for (i in seq_along(.x)) {
if (!rlang::is_true(.p(.x[[i]], ...))) return(FALSE)
}
TRUE
}
some <- function(.x, .p, ...) {
for (i in seq_along(.x)) {
if (rlang::is_true(.p(.x[[i]], ...))) return(TRUE)
}
FALSE
}
negate <- function(.p) {
function(...) !.p(...)
}
reduce <- function(.x, .f, ..., .init) {
f <- function(x, y) .f(x, y, ...)
Reduce(f, .x, init = .init)
}
reduce_right <- function(.x, .f, ..., .init) {
f <- function(x, y) .f(y, x, ...)
Reduce(f, .x, init = .init, right = TRUE)
}
accumulate <- function(.x, .f, ..., .init) {
f <- function(x, y) .f(x, y, ...)
Reduce(f, .x, init = .init, accumulate = TRUE)
}
accumulate_right <- function(.x, .f, ..., .init) {
f <- function(x, y) .f(y, x, ...)
Reduce(f, .x, init = .init, right = TRUE, accumulate = TRUE)
}
# nocov end
| path: /R/compat-purrr.R | license_type: no_license | repo_name: krlmlr/brushthat | language: R | is_vendor: false | is_generated: false | length_bytes: 3,736 | extension: r |
gammaresiduals <-
function(Y,X,model){
Y <- as.matrix(Y)
residuals <- model$residuals
variance <- model$variance
phi <- model$precision
yestimado <- model$fitted.values
# Absolute residuals
rabs <- abs(residuals)
# Pearson (standardized) residuals
rp <- residuals/sqrt(variance)
# Deviance residuals
rd <- -2*(log(Y/yestimado) - (Y-yestimado)/yestimado)
# Asterisk (r*) residuals
rast <- (log(Y) + log(phi/yestimado) - digamma(phi))/sqrt(trigamma(phi))
gammaresiduals<- list()
gammaresiduals$abs <- rabs
gammaresiduals$pearson <-rp
gammaresiduals$deviance <- rd
gammaresiduals$rgamma<- rast
return(gammaresiduals)
}
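## Usage sketch (assumed inputs, not from the package): gammaresiduals() only reads
## model$residuals, model$variance, model$precision and model$fitted.values, and X is
## unused in the body, so a minimal mock model object is enough to exercise it.
# set.seed(1)
# y <- rgamma(50, shape = 2, rate = 0.5)
# toy_model <- list(fitted.values = rep(mean(y), 50),
#                   residuals = y - mean(y),
#                   variance = rep(var(y), 50),
#                   precision = 2)
# res <- gammaresiduals(y, X = NULL, toy_model)
# str(res)  # list with components abs, pearson, deviance and rgamma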
| path: /R/gammaresiduals.R | license_type: no_license | repo_name: cran/Bayesiangammareg | language: R | is_vendor: false | is_generated: false | length_bytes: 688 | extension: r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Package.R
\docType{package}
\name{PLEFinal-package}
\alias{PLEFinal}
\alias{PLEFinal-package}
\title{PLEFinal: A Package Skeleton for Comparative Effectiveness Studies}
\description{
A skeleton package, to be used as a starting point when implementing comparative effectiveness studies.
}
\keyword{internal}
| path: /PLEFinal/man/SkeletonComparativeEffectStudy-package.Rd | license_type: permissive | repo_name: jennifercelane/PLEMSKAI_working | language: R | is_vendor: false | is_generated: true | length_bytes: 390 | extension: rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get.IDR.discrete.R
\name{get.IDR.discrete}
\alias{get.IDR.discrete}
\title{compute IDR for discrete categories}
\usage{
get.IDR.discrete(idr, cat.counts)
}
\arguments{
\item{idr}{local idr for each category.}
\item{cat.counts}{the number of observations in each category.}
}
\value{
a numerical vector of the expected irreproducible discovery rate for categories that are as irreproducible as, or more irreproducible than, the given categories.
}
\description{
compute IDR for discrete categories
}
| path: /man/get.IDR.discrete.Rd | license_type: no_license | repo_name: TaoYang-dev/gIDR | language: R | is_vendor: false | is_generated: true | length_bytes: 592 | extension: rd |
## Packages used
library(dplyr); library(tidyr)
## Download data
if(!file.exists("./data")){
dir.create("./data")
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile = "./data/zip.zip", method = "curl")
rm(fileUrl)
unzip("./data/zip.zip", exdir = "./data")
}
## Read data
options("stringsAsFactors" = FALSE)
features <- read.table("./data/UCI Har Dataset/features.txt")
activity <- read.table("./data/UCI Har Dataset/activity_labels.txt")
test.subject <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
test.activity <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
test.measures <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
train.subject <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
train.activity <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
train.measures <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
## Merge test and train data
test <- data.frame(test.subject, test.activity, test.measures)
train <- data.frame(train.subject, train.activity, train.measures)
samsung <- rbind(test, train)
## Add column names
features <- features$V2
names(samsung) <- c("subject", "activity", features)
## Translate activity to all lower-case
activity <- activity$V2
activity <- tolower(activity)
## Convert activity and subject to factors
samsung$activity <- factor(samsung$activity, labels = activity)
samsung$subject <- factor(samsung$subject, ordered = FALSE)
## Subset mean() and std() from samsung
mean <- grep("mean[^F]", names(samsung))
std <- grep("std", names(samsung))
criteria <- c(mean, std)
samsung <- samsung[, c(1:2, criteria)]
## Clear memory of unnecessary objects
rm(features, activity, test.subject, test.activity, test.measures, train.subject,
train.activity, train.measures, test, train, mean, std, criteria)
## Convert samsung to a tbl_df object
samsung <- tbl_df(samsung)
## Add "all" to end of multidirectional features
index <- grep("[^X-Z]$", names(samsung))
index <- index[-(1:2)]
for(i in index){
names(samsung)[i] <- paste(names(samsung)[i], "all", sep = "-")
}
rm(index, i)
## Gather variables and separate into feature, summary, direction, and measure
samsung <- samsung %>%
gather(demo, measure, -subject, -activity) %>%
separate(demo, c("feature", "summary", "axis"), sep = "-")
## Convert feature, summary, and direction to factors
samsung <- samsung %>%
mutate(feature = factor(feature),
summary = factor(summary, labels = c("mean", "std")),
axis = factor(tolower(axis)))
## Write samsung to file
if(!file.exists("./samsung.txt")){
write.table(samsung, "./samsung.txt", row.name = FALSE)
}
## Summarize samsung by average of each summary for each direction,
## each feature, each activity, and each subject.
summarized <- samsung %>%
group_by(subject, activity, feature, axis, summary) %>%
summarize(average = mean(measure))
## Write summarized to file
if(!file.exists("./summarized.txt")){
write.table(summarized, "./summarized.txt", row.name = FALSE)
}
|
/run_analysis.R
|
no_license
|
mattayes/samsung-har
|
R
| false | false | 3,177 |
r
|
## Packages used
library(dplyr); library(tidyr)
## Download data
if(!file.exists("./data")){
dir.create("./data")
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile = "./data/zip.zip", method = "curl")
rm(fileUrl)
unzip("./data/zip.zip", exdir = "./data")
}
## Read data
options("stringsAsFactors" = FALSE)
features <- read.table("./data/UCI Har Dataset/features.txt")
activity <- read.table("./data/UCI Har Dataset/activity_labels.txt")
test.subject <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
test.activity <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
test.measures <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
train.subject <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
train.activity <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
train.measures <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
## Merge test and train data
test <- data.frame(test.subject, test.activity, test.measures)
train <- data.frame(train.subject, train.activity, train.measures)
samsung <- rbind(test, train)
## Add column names
features <- features$V2
names(samsung) <- c("subject", "activity", features)
## Translate activity to all lower-case
activity <- activity$V2
activity <- tolower(activity)
## Convert activity and subject to factors
samsung$activity <- factor(samsung$activity, labels = activity)
samsung$subject <- factor(samsung$subject, ordered = FALSE)
## Subset mean() and std() from samsung
mean <- grep("mean[^F]", names(samsung))
std <- grep("std", names(samsung))
criteria <- c(mean, std)
samsung <- samsung[, c(1:2, criteria)]
## Clear memory of unnecessary objects
rm(features, activity, test.subject, test.activity, test.measures, train.subject,
train.activity, train.measures, test, train, mean, std, criteria)
## Convert samsung to a tbl_df object
samsung <- tbl_df(samsung)
## Add "all" to end of multidirectional features
index <- grep("[^X-Z]$", names(samsung))
index <- index[-(1:2)]
for(i in index){
names(samsung)[i] <- paste(names(samsung)[i], "all", sep = "-")
}
rm(index, i)
## Gather variables and separate into feature, summary, direction, and measure
samsung <- samsung %>%
gather(demo, measure, -subject, -activity) %>%
separate(demo, c("feature", "summary", "axis"), sep = "-")
## Convert feature, summary, and direction to factors
samsung <- samsung %>%
mutate(feature = factor(feature),
summary = factor(summary, labels = c("mean", "std")),
axis = factor(tolower(axis)))
## Write samsung to file
if(!file.exists("./samsung.txt")){
write.table(samsung, "./samsung.txt", row.name = FALSE)
}
## Summarize samsung by average of each summary for each direction,
## each feature, each activity, and each subject.
summarized <- samsung %>%
group_by(subject, activity, feature, axis, summary) %>%
summarize(average = mean(measure))
## Write summarized to file
if(!file.exists("./summarized.txt")){
write.table(summarized, "./summarized.txt", row.name = FALSE)
}
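## Editor's note (not part of the original script): a quick round-trip check of
## the files written above; assumes the script has just been run in this
## working directory so both text files exist.
tidy_check <- read.table("./samsung.txt", header = TRUE)
summarized_check <- read.table("./summarized.txt", header = TRUE)
str(tidy_check)
str(summarized_check)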
|
library(tidyverse)
library(scales)
library(Cairo)
theme_set(theme_classic())
Ex_1 <- tribble(
~Tier, ~Number_Account, ~Percentage_Accounts, ~Revenue_M, ~Percentage_Revenue,
'A', 77, 7.08, 4.68, 25,
'A+', 19, 1.75, 3.93, 21,
'B', 338, 31.07, 5.98, 32,
'C', 425, 39.06, 2.81, 15,
'D', 24, 2.21, 0.37, 2
) %>%
mutate(
class = ifelse((Percentage_Accounts - Percentage_Revenue) < 0, 'blue', 'slategrey')
)
left_label <- Ex_1$Tier
positions_y <- Ex_1$Percentage_Accounts
positions_y[c(2,5)] <- positions_y[c(2,5)] + c(-.5,.5)
ggplot(Ex_1) +
geom_segment(aes(x=1, xend=2, y=Percentage_Accounts, yend=Percentage_Revenue, col=class),
size=.75, show.legend=F) +
geom_vline(xintercept=1, linetype="dashed", size=.1, color = 'lightslategrey') +
geom_vline(xintercept=2, linetype="dashed", size=.1, color = 'lightslategrey') +
scale_color_manual(labels = c("Up", "Down"),
values = c("slategrey"="slategrey", "blue"="blue")) + # color of lines
labs(x="", y="",
title = 'New Client tier share changes when looking at Accounts or Revenue') + # Axis labels
scale_x_continuous(limits = c(.5, 2.5), breaks = NULL) +
scale_y_continuous(
limits = c(0,(1.1*(max(Ex_1$Percentage_Accounts, Ex_1$Percentage_Revenue)))),
labels = percent_format(scale = 1)
) +
geom_text(
label=left_label, y=positions_y,
x=c(.99,1.005,.99,.99,.99), hjust=1.2, size=3
) +
geom_text(
label=left_label, y=Ex_1$Percentage_Revenue,
x=c(2.01,2.01,2.01,2.01,2.01), hjust=-.2, size=3
) +
geom_text(
label="Participation\nin Accounts", x=.68, y = 1.1*(max(Ex_1$Percentage_Accounts, Ex_1$Percentage_Revenue)),
hjust=0, size=4.3, color = 'darkslategrey') +
geom_text(
label="Participation\nin Revenue", x=2.02, y = 1.1*(max(Ex_1$Percentage_Accounts, Ex_1$Percentage_Revenue)),
hjust=0, size=4.3, color = 'darkslategrey') +
geom_text(
label = "C tier has low participation in \nrevenues despite the biggest \nshare of new accounts.",
x = 2.1, y = 30, hjust = 0, size = 3.5, color = 'slategrey'
) +
geom_text(
label = "Together A and A+ make up for \nalmost half of the revenue \ndespite low share of \naccounts.",
x = 2.1, y = 20, hjust = 0, size = 3.5, color = 'blue'
) +
theme(panel.background = element_blank(),
panel.grid = element_blank(),
axis.ticks.y = element_line(color = 'lightslategrey'),
axis.text.x = element_blank(),
axis.line.x = element_blank(),
axis.line.y = element_line(color = 'lightslategrey'),
axis.text.y = element_text(color = 'lightslategrey'),
panel.border = element_blank(),
title = element_text(colour = "darkslategrey", face = 'bold'))
path <- paste0(here::here("docs", "assets", "images"),"/", '2019_10_SWD.png')
ggsave(path, type = 'cairo', scale = 1.5)
|
/Storytelling_with_Data/2019_10_SWD_Challenge.R
|
no_license
|
jorgel-mendes/Behold-the-Vision
|
R
| false | false | 2,875 |
r
|
library(tidyverse)
library(scales)
library(Cairo)
theme_set(theme_classic())
Ex_1 <- tribble(
~Tier, ~Number_Account, ~Percentage_Accounts, ~Revenue_M, ~Percentage_Revenue,
'A', 77, 7.08, 4.68, 25,
'A+', 19, 1.75, 3.93, 21,
'B', 338, 31.07, 5.98, 32,
'C', 425, 39.06, 2.81, 15,
'D', 24, 2.21, 0.37, 2
) %>%
mutate(
class = ifelse((Percentage_Accounts - Percentage_Revenue) < 0, 'blue', 'slategrey')
)
left_label <- Ex_1$Tier
positions_y <- Ex_1$Percentage_Accounts
positions_y[c(2,5)] <- positions_y[c(2,5)] + c(-.5,.5)
ggplot(Ex_1) +
geom_segment(aes(x=1, xend=2, y=Percentage_Accounts, yend=Percentage_Revenue, col=class),
size=.75, show.legend=F) +
geom_vline(xintercept=1, linetype="dashed", size=.1, color = 'lightslategrey') +
geom_vline(xintercept=2, linetype="dashed", size=.1, color = 'lightslategrey') +
scale_color_manual(labels = c("Up", "Down"),
values = c("slategrey"="slategrey", "blue"="blue")) + # color of lines
labs(x="", y="",
title = 'New Client tier share changes when looking at Accounts or Revenue') + # Axis labels
scale_x_continuous(limits = c(.5, 2.5), breaks = NULL) +
scale_y_continuous(
limits = c(0,(1.1*(max(Ex_1$Percentage_Accounts, Ex_1$Percentage_Revenue)))),
labels = percent_format(scale = 1)
) +
geom_text(
label=left_label, y=positions_y,
x=c(.99,1.005,.99,.99,.99), hjust=1.2, size=3
) +
geom_text(
label=left_label, y=Ex_1$Percentage_Revenue,
x=c(2.01,2.01,2.01,2.01,2.01), hjust=-.2, size=3
) +
geom_text(
label="Participation\nin Accounts", x=.68, y = 1.1*(max(Ex_1$Percentage_Accounts, Ex_1$Percentage_Revenue)),
hjust=0, size=4.3, color = 'darkslategrey') +
geom_text(
label="Participation\nin Revenue", x=2.02, y = 1.1*(max(Ex_1$Percentage_Accounts, Ex_1$Percentage_Revenue)),
hjust=0, size=4.3, color = 'darkslategrey') +
geom_text(
label = "C tier has low participation in \nrevenues despite the biggest \nshare of new accounts.",
x = 2.1, y = 30, hjust = 0, size = 3.5, color = 'slategrey'
) +
geom_text(
label = "Together A and A+ make up for \nalmost half of the revenue \ndespite low share of \naccounts.",
x = 2.1, y = 20, hjust = 0, size = 3.5, color = 'blue'
) +
theme(panel.background = element_blank(),
panel.grid = element_blank(),
axis.ticks.y = element_line(color = 'lightslategrey'),
axis.text.x = element_blank(),
axis.line.x = element_blank(),
axis.line.y = element_line(color = 'lightslategrey'),
axis.text.y = element_text(color = 'lightslategrey'),
panel.border = element_blank(),
title = element_text(colour = "darkslategrey", face = 'bold'))
path <- paste0(here::here("docs", "assets", "images"),"/", '2019_10_SWD.png')
ggsave(path, type = 'cairo', scale = 1.5)
|
# scalar: an object (variable) that stores a single value.
# vector: an object that stores multiple values of one type in one dimension.
# Examples of scalars
x <- 100 # x: a scalar holding one number
name <- '오쌤' # name: a scalar holding one character string
name
# In R, strings can be wrapped in single quotes ('') or double quotes ("").
# (Compare) In SQL, only single quotes may be used for strings.
is_big <- TRUE # a scalar holding one logical value (logical: TRUE, FALSE).
is_big <- (5 > 3)
is_big <- (3 > 5)
# Comparison operators (>, >=, <, <=, ==, !=)
is_same <- (3 == 5)
# Examples of vectors
# c(): combine
numbers <- c(1, 2, 10, 20, 50, 100)
# a vector holding 6 numeric values
numbers
stu_names <- c('Abc', '홍길동')
# a vector holding 2 character strings
stu_names
bools <- c(TRUE, TRUE, FALSE, TRUE, FALSE)
# a vector holding 5 logical values
# How to select elements of a vector - use indexes.
# 1) Select one element at a specific position (index):
numbers[1]
numbers[2]
# 2) Select several elements within an (index) range:
numbers[2:4] # select elements with 2 <= index <= 4
# 3) Select elements at several specific positions (indexes):
numbers[c(1, 4, 6)]
# In R, to store (assign) a value in a variable: variable <- value
# The variable is created in the Global Environment.
# When calling a function and passing it an argument: arg = value
# function: a piece of functionality; an operation.
# argument: a value passed to a function when it is called.
# mandatory argument: a value that must be passed when calling the function.
# optional argument: has a default value set, so
# it can be omitted when calling the function.
# parameter: a variable inside the function that stores an argument.
# return value: the value a function returns after doing its work; the result of the function call.
# seq(): Sequence.
# Calling a function and passing arguments without naming the parameters.
evens <- seq(2, 10, 2) # a vector of numbers from 2 to 10, increasing by 2.
# Calling a function and specifying which value goes to which parameter.
odds <- seq(from = 1, to = 10, by = 2) # a vector of numbers from 1 to 10, increasing by 2.
# If optional arguments are not passed (omitted), their default values are used.
numbers <- seq(from = 1, to = 10) # the default by = 1 is used.
numbers
numbers <- seq(to = 5) # the defaults from = 1 and by = 1 are used.
numbers
countdown <- seq(from = 10, to = 1, by = -1) # a sequence decreasing by 1 from 10 to 1.
countdown
# vector and scalar operations
numbers <- c(1, 10, 100)
numbers
numbers + 1
# vector and vector operations
numbers1 <- c(1, 10, 100)
numbers2 <- c(2, 4, 6)
numbers1 + numbers2
|
/r02_scalar_vector.R
|
no_license
|
seanhong7777/R
|
R
| false | false | 3,065 |
r
|
# scalar: an object (variable) that stores a single value.
# vector: an object that stores multiple values of one type in one dimension.
# Examples of scalars
x <- 100 # x: a scalar holding one number
name <- '오쌤' # name: a scalar holding one character string
name
# In R, strings can be wrapped in single quotes ('') or double quotes ("").
# (Compare) In SQL, only single quotes may be used for strings.
is_big <- TRUE # a scalar holding one logical value (logical: TRUE, FALSE).
is_big <- (5 > 3)
is_big <- (3 > 5)
# Comparison operators (>, >=, <, <=, ==, !=)
is_same <- (3 == 5)
# Examples of vectors
# c(): combine
numbers <- c(1, 2, 10, 20, 50, 100)
# a vector holding 6 numeric values
numbers
stu_names <- c('Abc', '홍길동')
# a vector holding 2 character strings
stu_names
bools <- c(TRUE, TRUE, FALSE, TRUE, FALSE)
# a vector holding 5 logical values
# How to select elements of a vector - use indexes.
# 1) Select one element at a specific position (index):
numbers[1]
numbers[2]
# 2) Select several elements within an (index) range:
numbers[2:4] # select elements with 2 <= index <= 4
# 3) Select elements at several specific positions (indexes):
numbers[c(1, 4, 6)]
# In R, to store (assign) a value in a variable: variable <- value
# The variable is created in the Global Environment.
# When calling a function and passing it an argument: arg = value
# function: a piece of functionality; an operation.
# argument: a value passed to a function when it is called.
# mandatory argument: a value that must be passed when calling the function.
# optional argument: has a default value set, so
# it can be omitted when calling the function.
# parameter: a variable inside the function that stores an argument.
# return value: the value a function returns after doing its work; the result of the function call.
# seq(): Sequence.
# Calling a function and passing arguments without naming the parameters.
evens <- seq(2, 10, 2) # a vector of numbers from 2 to 10, increasing by 2.
# Calling a function and specifying which value goes to which parameter.
odds <- seq(from = 1, to = 10, by = 2) # a vector of numbers from 1 to 10, increasing by 2.
# If optional arguments are not passed (omitted), their default values are used.
numbers <- seq(from = 1, to = 10) # the default by = 1 is used.
numbers
numbers <- seq(to = 5) # the defaults from = 1 and by = 1 are used.
numbers
countdown <- seq(from = 10, to = 1, by = -1) # a sequence decreasing by 1 from 10 to 1.
countdown
# vector and scalar operations
numbers <- c(1, 10, 100)
numbers
numbers + 1
# vector and vector operations
numbers1 <- c(1, 10, 100)
numbers2 <- c(2, 4, 6)
numbers1 + numbers2
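# Editor's addition (illustrative sketch, not in the original lesson): when two
# vectors have different lengths, R recycles the shorter one.
long_vec <- c(10, 20, 30, 40)
short_vec <- c(1, 2)
long_vec + short_vec   # 11 22 31 42 -- short_vec is repeated to match long_vec
long_vec + c(1, 2, 3)  # lengths 4 and 3: still recycled, but with a warning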
|
#html_session_try adds:
#1. auto retry functionality using exponential delay (2s, 4s, 8s, 16s, etc.)
#2. use tryCatch to create a robust scraper; network issues or errors will not break the script. It's safe to run it in loops
#3. keep track of unsuccessful requests (including both errors and warnings). Conditions of failed requests are saved as attributes of the function output
html_session_try <- function(url,do_try=3,...){
library(rvest)
library(httr)
dots <- c(...)
#auto retry
my_session <- NULL
tried = 0
while(is.null(my_session) && tried <= do_try) {
tried <- tried + 1
tryCatch(
{
my_session <- suppressWarnings(html_session(url,dots))
},
error=function(cond){
try_error_message<<-conditionMessage(cond)
Sys.sleep(2^tried)
}
)
}
#if the request failed (an error occurred or status_code is not 200), the function output will be NA with attributes "status_code" and "condition_message"
if(is.null(my_session)){
my_session<-structure(NA,
status_code=NA,
condition_message=try_error_message)
} else if (status_code(my_session)!=200) {
my_session<-structure(NA,
status_code=status_code(my_session),
condition_message=NA)
} else {
my_session<-structure(my_session,
status_code=status_code(my_session),
condition_message=NA)
}
return(my_session)
}
|
/R Projects/function/html_session_try.R
|
no_license
|
yusuzech/web-scraping-projects
|
R
| false | false | 1,613 |
r
|
#html_session_try adds:
#1. auto retry functionality using exponential delay (2s, 4s, 8s, 16s, etc.)
#2. use tryCatch to create a robust scraper; network issues or errors will not break the script. It's safe to run it in loops
#3. keep track of unsuccessful requests (including both errors and warnings). Conditions of failed requests are saved as attributes of the function output
html_session_try <- function(url,do_try=3,...){
library(rvest)
library(httr)
dots <- c(...)
#auto retry
my_session <- NULL
tried = 0
while(is.null(my_session) && tried <= do_try) {
tried <- tried + 1
tryCatch(
{
my_session <- suppressWarnings(html_session(url,dots))
},
error=function(cond){
try_error_message<<-conditionMessage(cond)
Sys.sleep(2^tried)
}
)
}
#if the request failed (an error occurred or status_code is not 200), the function output will be NA with attributes "status_code" and "condition_message"
if(is.null(my_session)){
my_session<-structure(NA,
status_code=NA,
condition_message=try_error_message)
} else if (status_code(my_session)!=200) {
my_session<-structure(NA,
status_code=status_code(my_session),
condition_message=NA)
} else {
my_session<-structure(my_session,
status_code=status_code(my_session),
condition_message=NA)
}
return(my_session)
}
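# Editor's usage sketch (not part of the original file); the URL is arbitrary.
# On failure the return value is NA carrying diagnostic attributes; on success
# it is a normal rvest session.
s <- html_session_try("https://httpbin.org/status/404", do_try = 1)
attr(s, "status_code")        # HTTP status of the last attempt (NA if it errored)
attr(s, "condition_message")  # captured error message, if any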
|
# cmd_args=commandArgs(TRUE)
#
# ngenecl <- as.numeric(cmd_args[1]) # cells per cell type
# out <- cmd_args[2]
source("/proj/milovelab/mu/SC-ASE/simulation/cluster.R")
source("/proj/milovelab/mu/SC-ASE/simulation/fusedlasso.R")
library("smurf")
library(emdbook)
library(mclust)
library(pbapply)
library(aricode)
library(pheatmap)
ngenecl<-80
n<-10
cnt<-50
ncl<-4 #4 gene cluster. large AI,NO AI, consistent AI, small AI
ngene<-ncl*ngenecl
nct<-8
x <- factor(rep(1:nct,each=n))
mu1 <- 5
nb.disp <- 1/100
ncl<-4 #4 gene cluster. large AI,NO AI, consistent AI, small AI
step1<-4 #First AI step [0-4]
ans <- pbsapply(1:10, function(i) {
set.seed(i)
# total count
cts <- matrix(rep(c(rnbinom(ngene*n/2,mu=mu1,size=1/nb.disp),
rnbinom(ngene*n/2,mu=cnt,size=1/nb.disp)),nct),ncol = nct*n)
colnames(cts)<-paste0("cell",1:(nct*n))
p.vec <- (5 + rep(c(seq(from=-step1,to=step1,length.out=nct/2),rep(0,nct/2),rep(2,nct/2),seq(from=3.5,to=4.5,length.out=nct/2)),each=2))/10
p <- rep(p.vec, each=n*nct*ngene/length(p.vec)) # true prob
nclgene<-ngene/ncl #number genes within cluster
nclcell<-nct*n*nclgene #number elements within cluster
ase.cts<-lapply(1:ncl,function(m) {
matrix(rbetabinom(nclcell, prob=p[(nclcell*m-nclcell+1):(nclcell*m)], size=cts[(m*nclgene-nclgene+1):(m*nclgene),], theta=10),ncol = nct*n)})
ase.cts<-do.call(rbind,ase.cts)
ratio<-(ase.cts)/(cts)
ratio_pseudo<-(ase.cts+1)/(cts+2) ## pseudo allelic ratio for gene clustering
level<-paste0(rep("type",nct),1:nct) # pheatmap of ratio
# anno_df <- data.frame(celltype=rep(level,each=n), row.names=colnames(ratio_pseudo))
# pheatmap(ratio_pseudo, cluster_rows = FALSE, cluster_cols = FALSE,annotation_col=anno_df,show_colnames = F,
# color = colorRampPalette(colors = c("blue","white","red"))(100))
cluster<-genecluster(ratio_pseudo,nct=nct,G=ncl) #return gene cluster
mcl<-adjustedRandIndex(cluster,rep(1:ncl,each=ngene/ncl))
# modeling
out<-list()
for (j in 1:ncl) {
# poi<-which(cluster==unique(factor(cluster))[j]) # gene position
poi<-(ngenecl*j-ngenecl+1):(ngenecl*j) # gene position
r<-as.vector(ratio[poi,])
size<-as.vector(cts[poi,])
data=data.frame(x=rep(x,each=length(poi)),ratio=r,cts=size)
f <- ratio ~ p(x, pen="gflasso", refcat="1") # formula
t <- system.time(fit<-fusedlasso(formula=f,model="binomial",data=data,ncores=1))[[3]] # saving the elapsed time
t2 <- system.time(fit2<-fusedlasso(formula=f,model="gaussian",data,ncores=1))[[3]] # saving the elapsed time
co <- coef(fit)
co <- co + c(0,rep(co[1],nct-1))
a <- adjustedRandIndex(factor(p.vec[(nct*j-nct+1):(nct*j)]), factor(co))
co2 <- coef(fit2)
co2 <- co2 + c(0,rep(co2[1],nct-1))
a2 <- adjustedRandIndex(factor(p.vec[(nct*j-nct+1):(nct*j)]), factor(co2))
t3 <- system.time(fit3<-wilcox(data,nct,method="holm"))[[3]]
a3<-adjustedRandIndex(factor(p.vec[(nct*j-nct+1):(nct*j)]), factor(fit3))
out[[j]]=c(a,a2,a3,t,t2,t3)
}
out
},cl=10)
ans <- do.call(cbind, ans)
ans2<-rbind(ans,rep(1:200,each=4))
id<-which(!is.numeric(ans2[3,]))
# a<-pbsapply(1:ncol(ans),function(i){adjustedRandIndex(ans[,i],rep(c(0,1),each=ngene/ncl))})
# save the results as a data.frame
dat2 <- data.frame(type=rep(c("bin","gau","wilcoxon"),each=ncol(ans)),
ARI_mcl=as.vector(t(ans[1,])),
ARI=as.vector(t(ans[2:4,])),
cl=rep(c("large AI gap","NAI","consisAI","small AI gap"),n=ncol(ans)/ncl*3),
time=as.vector(t(ans[5:7,])))
# write out as a table
# write.table(dat, file="/proj/milovelab/mu/SC-ASE/simulation/csv/sim2.csv", row.names=FALSE, col.names=FALSE, quote=FALSE, sep=",")
write.table(dat2, file="/proj/milovelab/mu/SC-ASE/simulation/csv/sim2_80.csv", row.names=FALSE, col.names=FALSE, quote=FALSE, sep=",")
|
/simulation/sim2.R
|
no_license
|
Wancen/SC-ASE
|
R
| false | false | 3,866 |
r
|
# cmd_args=commandArgs(TRUE)
#
# ngenecl <- as.numeric(cmd_args[1]) # cells per cell type
# out <- cmd_args[2]
source("/proj/milovelab/mu/SC-ASE/simulation/cluster.R")
source("/proj/milovelab/mu/SC-ASE/simulation/fusedlasso.R")
library("smurf")
library(emdbook)
library(mclust)
library(pbapply)
library(aricode)
library(pheatmap)
ngenecl<-80
n<-10
cnt<-50
ncl<-4 #4 gene cluster. large AI,NO AI, consistent AI, small AI
ngene<-ncl*ngenecl
nct<-8
x <- factor(rep(1:nct,each=n))
mu1 <- 5
nb.disp <- 1/100
ncl<-4 #4 gene cluster. large AI,NO AI, consistent AI, small AI
step1<-4 #First AI step [0-4]
ans <- pbsapply(1:10, function(i) {
set.seed(i)
# total count
cts <- matrix(rep(c(rnbinom(ngene*n/2,mu=mu1,size=1/nb.disp),
rnbinom(ngene*n/2,mu=cnt,size=1/nb.disp)),nct),ncol = nct*n)
colnames(cts)<-paste0("cell",1:(nct*n))
p.vec <- (5 + rep(c(seq(from=-step1,to=step1,length.out=nct/2),rep(0,nct/2),rep(2,nct/2),seq(from=3.5,to=4.5,length.out=nct/2)),each=2))/10
p <- rep(p.vec, each=n*nct*ngene/length(p.vec)) # true prob
nclgene<-ngene/ncl #number genes within cluster
nclcell<-nct*n*nclgene #number elements within cluster
ase.cts<-lapply(1:ncl,function(m) {
matrix(rbetabinom(nclcell, prob=p[(nclcell*m-nclcell+1):(nclcell*m)], size=cts[(m*nclgene-nclgene+1):(m*nclgene),], theta=10),ncol = nct*n)})
ase.cts<-do.call(rbind,ase.cts)
ratio<-(ase.cts)/(cts)
ratio_pseudo<-(ase.cts+1)/(cts+2) ## pseudo allelic ratio for gene clustering
level<-paste0(rep("type",nct),1:nct) # pheatmap of ratio
# anno_df <- data.frame(celltype=rep(level,each=n), row.names=colnames(ratio_pseudo))
# pheatmap(ratio_pseudo, cluster_rows = FALSE, cluster_cols = FALSE,annotation_col=anno_df,show_colnames = F,
# color = colorRampPalette(colors = c("blue","white","red"))(100))
cluster<-genecluster(ratio_pseudo,nct=nct,G=ncl) #return gene cluster
mcl<-adjustedRandIndex(cluster,rep(1:ncl,each=ngene/ncl))
# modeling
out<-list()
for (j in 1:ncl) {
# poi<-which(cluster==unique(factor(cluster))[j]) # gene position
poi<-(ngenecl*j-ngenecl+1):(ngenecl*j) # gene position
r<-as.vector(ratio[poi,])
size<-as.vector(cts[poi,])
data=data.frame(x=rep(x,each=length(poi)),ratio=r,cts=size)
f <- ratio ~ p(x, pen="gflasso", refcat="1") # formula
t <- system.time(fit<-fusedlasso(formula=f,model="binomial",data=data,ncores=1))[[3]] # saving the elapsed time
t2 <- system.time(fit2<-fusedlasso(formula=f,model="gaussian",data,ncores=1))[[3]] # saving the elapsed time
co <- coef(fit)
co <- co + c(0,rep(co[1],nct-1))
a <- adjustedRandIndex(factor(p.vec[(nct*j-nct+1):(nct*j)]), factor(co))
co2 <- coef(fit2)
co2 <- co2 + c(0,rep(co2[1],nct-1))
a2 <- adjustedRandIndex(factor(p.vec[(nct*j-nct+1):(nct*j)]), factor(co2))
t3 <- system.time(fit3<-wilcox(data,nct,method="holm"))[[3]]
a3<-adjustedRandIndex(factor(p.vec[(nct*j-nct+1):(nct*j)]), factor(fit3))
out[[j]]=c(a,a2,a3,t,t2,t3)
}
out
},cl=10)
ans <- do.call(cbind, ans)
ans2<-rbind(ans,rep(1:200,each=4))
id<-which(!is.numeric(ans2[3,]))
# a<-pbsapply(1:ncol(ans),function(i){adjustedRandIndex(ans[,i],rep(c(0,1),each=ngene/ncl))})
# save the results as a data.frame
dat2 <- data.frame(type=rep(c("bin","gau","wilcoxon"),each=ncol(ans)),
ARI_mcl=as.vector(t(ans[1,])),
ARI=as.vector(t(ans[2:4,])),
cl=rep(c("large AI gap","NAI","consisAI","small AI gap"),n=ncol(ans)/ncl*3),
time=as.vector(t(ans[5:7,])))
# write out as a table
# write.table(dat, file="/proj/milovelab/mu/SC-ASE/simulation/csv/sim2.csv", row.names=FALSE, col.names=FALSE, quote=FALSE, sep=",")
write.table(dat2, file="/proj/milovelab/mu/SC-ASE/simulation/csv/sim2_80.csv", row.names=FALSE, col.names=FALSE, quote=FALSE, sep=",")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/currentarrows.R
\name{currentarrows}
\alias{currentarrows}
\title{Plot arrows and segments showing the size and direction of currents.}
\usage{
currentarrows(
data,
maxsize = 0.5,
maxn,
col = "blue",
lwd = 2,
arrowsize = 0.2,
center = T
)
}
\arguments{
\item{data}{Data in a list with components \code{lat} and \code{lon} with
decimal degrees, and \code{current} with the current magnitude.}
\item{maxsize}{Maximum current segment size.}
\item{maxn}{Current given with \code{maxsize}, defaults to
\code{max(data$current)}.}
\item{col}{Color of current arrows and segments.}
\item{lwd}{Line width of the segments showing current.}
\item{arrowsize}{Arrow size.}
\item{center}{Whether or not to center the arrow, defaults to \code{TRUE}.}
}
\description{
Plot arrows and segments showing the size and direction of currents.
}
\note{
Needs further checking and elaboration.
}
\keyword{aplot}
|
/man/currentarrows.Rd
|
no_license
|
Hafro/geo
|
R
| false | true | 986 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/currentarrows.R
\name{currentarrows}
\alias{currentarrows}
\title{Plot arrows and segments showing the size and direction of currents.}
\usage{
currentarrows(
data,
maxsize = 0.5,
maxn,
col = "blue",
lwd = 2,
arrowsize = 0.2,
center = T
)
}
\arguments{
\item{data}{Data in a list with components \code{lat} and \code{lon} with
decimal degrees, and \code{current} with the current magnitude.}
\item{maxsize}{Maximum current segment size.}
\item{maxn}{Current given with \code{maxsize}, defaults to
\code{max(data$current)}.}
\item{col}{Color of current arrows and segments.}
\item{lwd}{Line width of the segments showing current.}
\item{arrowsize}{Arrow size.}
\item{center}{Whether or not to center the arrow, defaults to \code{TRUE}.}
}
\description{
Plot arrows and segments showing the size and direction of currents.
}
\note{
Needs further checking and elaboration.
}
\keyword{aplot}
|
## This function creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not changed), then cacheSolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
}
## Computing the inverse of a square matrix can be done with the solve function in R. For example, if X is a square invertible matrix, then solve(X) returns its inverse
## Just looking if possible to edit
|
/cachematrix.R
|
no_license
|
datatool/ProgrammingAssignment2
|
R
| false | false | 619 |
r
|
## This function creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not changed), then cacheSolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
}
## Computing the inverse of a square matrix can be done with the solve function in R. For example, if X is a square invertible matrix, then solve(X) returns its inverse
## Just looking if possible to edit
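## Editor's sketch of one possible completion (the original stubs above are left
## untouched); this is the standard closure-based caching pattern, not necessarily
## the author's intended solution.
makeCacheMatrix_sketch <- function(x = matrix()) {
  inv <- NULL
  list(set = function(y) { x <<- y; inv <<- NULL },  # replace matrix, drop cache
       get = function() x,
       setinverse = function(i) inv <<- i,
       getinverse = function() inv)
}
cacheSolve_sketch <- function(x, ...) {
  inv <- x$getinverse()
  if (!is.null(inv)) return(inv)   # reuse cached inverse if available
  inv <- solve(x$get(), ...)       # otherwise compute, cache, and return it
  x$setinverse(inv)
  inv
}
## Example: m <- makeCacheMatrix_sketch(matrix(c(2, 0, 0, 2), 2)); cacheSolve_sketch(m)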
|
#!/usr/bin/Rscript
# Daily Pick
##############
#
# Standalone script intended to be run by Cron job to report daily picks.
#
delta=30
theDate=as.Date(Sys.time())
#Share Select
setwd("/home/raffles/Raffles/")
source("Quantlib.R")
setwd("./Data/")
loadLocalData()
#Loads basic libraries and sets up required environments for Quantstrat
loadLibraries<-function()
{
require(slackr)
require(quantmod)
require(quantstrat)
require(readr)
require(chron)
}
loadLibraries()
library(chron)
#Make a list
shift<-list()
#Loop through known symbols
for (symbol in ls(LoadedSymbols))
{
data<-Cl(LoadedSymbols[[symbol]])
#Only use data we have access to.
data<-data[paste("::",theDate,sep="")]
#If we have enough data
if(nrow(data)>delta)
{
#Get ROC vs delta periods ago
#val=median(ROC(data,n = delta),na.rm = TRUE)
val=median(ROC(data,n = delta),na.rm = TRUE)
}
else
{
#Else shove it to bottom of the pile
val=0
}
shift[symbol]<-val
}
#Transpose and sort
res<-(t(as.data.frame(shift)))
res<-res[order(res,decreasing = TRUE),]
picks<-gsub(names(res),pattern = "\\.",replacement = ":")
write.csv(picks,"picks.csv")
picks<-head(picks,5)
slackr_setup()
slackrBot("Making daily picks from highest median gain in previous 30 days:")
slackrBot(print(head(res)))
messageLinks=""
for(pick in picks)
{
messageLinks<-paste(messageLinks,"\nhttp://www.iii.co.uk/research/",pick,sep="")
}
messageLinks<-gsub(messageLinks,pattern = "LON",replacement = "LSE")
slackrMsg(txt=messageLinks)
#Blart Everything to Slack
for(i in 1:5)
{
jpeg("Plot.jpeg")
barChart(LoadedSymbols[[picks[i]]],name=picks[i],TA='addRSI();addVo()')
dev.off()
slackrUpload(filename = "Plot.jpeg", title = picks[i], channels = "raffles")
}
|
/DailyPick.R.save
|
no_license
|
piratesjustarr/Raffles
|
R
| false | false | 1,755 |
save
|
#!/usr/bin/Rscript
# Daily Pick
##############
#
# Standalone script intended to be run by Cron job to report daily picks.
#
delta=30
theDate=as.Date(Sys.time())
#Share Select
setwd("/home/raffles/Raffles/")
source("Quantlib.R")
setwd("./Data/")
loadLocalData()
#Loads basic libraries and sets up required environments for Quantstrat
loadLibraries<-function()
{
require(slackr)
require(quantmod)
require(quantstrat)
require(readr)
require(chron)
}
loadLibraries()
library(chron)
#Make a list
shift<-list()
#Loop through known symbols
for (symbol in ls(LoadedSymbols))
{
data<-Cl(LoadedSymbols[[symbol]])
#Only use data we have access to.
data<-data[paste("::",theDate,sep="")]
#If we have enough data
if(nrow(data)>delta)
{
#Get ROC vs delta periods ago
#val=median(ROC(data,n = delta),na.rm = TRUE)
val=median(ROC(data,n = delta),na.rm = TRUE)
}
else
{
#Else shove it to bottom of the pile
val=0
}
shift[symbol]<-val
}
#Transpose and sort
res<-(t(as.data.frame(shift)))
res<-res[order(res,decreasing = TRUE),]
picks<-gsub(names(res),pattern = "\\.",replacement = ":")
write.csv(picks,"picks.csv")
picks<-head(picks,5)
slackr_setup()
slackrBot("Making daily picks from highest median gain in previous 30 days:")
slackrBot(print(head(res)))
messageLinks=""
for(pick in picks)
{
messageLinks<-paste(messageLinks,"\nhttp://www.iii.co.uk/research/",pick,sep="")
}
messageLinks<-gsub(messageLinks,pattern = "LON",replacement = "LSE")
slackrMsg(txt=messageLinks)
#Blart Everything to Slack
for(i in 1:5)
{
jpeg("Plot.jpeg")
barChart(LoadedSymbols[[picks[i]]],name=picks[i],TA='addRSI();addVo()')
dev.off()
slackrUpload(filename = "Plot.jpeg", title = picks[i], channels = "raffles")
}
|
testlist <- list(scale = 1.17613105186789e-309, shape = -2.95612684604669e-196)
result <- do.call(bama:::rand_igamma,testlist)
str(result)
|
/bama/inst/testfiles/rand_igamma/AFL_rand_igamma/rand_igamma_valgrind_files/1615926417-test.R
|
no_license
|
akhikolla/updatedatatype-list1
|
R
| false | false | 138 |
r
|
testlist <- list(scale = 1.17613105186789e-309, shape = -2.95612684604669e-196)
result <- do.call(bama:::rand_igamma,testlist)
str(result)
|
# Code is better suited to batch processing and automation; a mouse can't replace that
|
/excel案例.R
|
no_license
|
liuiscoding/R_learn
|
R
| false | false | 64 |
r
|
# Code is better suited to batch processing and automation; a mouse can't replace that
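# Editor's illustrative sketch of the point above (batch work that a manual,
# mouse-driven workflow cannot match); folder name and file pattern are hypothetical.
files <- list.files("data", pattern = "\\.csv$", full.names = TRUE)
tables <- lapply(files, read.csv)             # read every file in one pass
combined <- do.call(rbind, tables)            # stack them (assumes identical columns)
write.csv(combined, "combined.csv", row.names = FALSE)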
|
gap.barplot<-function (y,gap,xaxlab,xtics,yaxlab,ytics,xlim=NA,ylim=NA,
xlab=NULL,ylab=NULL,horiz=FALSE,col=NULL,...) {
if (missing(y)) stop("y values required")
if(missing(xtics)) xtics <- 1:length(y)
if (missing(gap)) stop("gap must be specified")
if (is.null(ylab)) ylab <- deparse(substitute(y))
if (is.null(col)) col <- color.gradient(c(0,1),c(0,1,0),c(1,0),length(y))
  else if(length(col) < length(y)) col <- rep(col,length.out=length(y))
littleones <- which(y <= gap[1])
bigones <- which(y >= gap[2])
valid.y<-y[!is.na(y)]
if(any(valid.y > gap[1] & valid.y < gap[2]))
warning("gap includes some values of y")
gapsize <- gap[2] - gap[1]
if(missing(xaxlab)) xaxlab <- as.character(xtics)
if(is.na(xlim[1])) xlim <- range(xtics)
if(is.na(ylim[1])) ylim <- c(min(valid.y)-gapsize,max(valid.y)-gapsize)
if(ylim[1] < 0) ylim[1]<-0
if(missing(ytics)) ytics <- pretty(y)
if(any(ytics<0)) ytics<-ytics[ytics >= 0]
if(missing(yaxlab)) yaxlab <- ytics
littletics <- which(ytics < gap[1])
bigtics <- which(ytics >= gap[2])
halfwidth <- min(diff(xtics))/2
if(horiz) {
if(!is.null(xlab)) {
tmplab<-xlab
xlab<-ylab
ylab<-tmplab
}
plot(0,xlim=ylim,ylim=xlim,xlab=xlab,ylab=ylab,axes=FALSE,type="n",...)
plot.lim <- par("usr")
botgap<-ifelse(gap[1]<0,gap[1],ylim[1])
box()
axis(2,at=xtics,labels=xaxlab,...)
axis(1,at=c(ytics[littletics],ytics[bigtics]-gapsize),
labels=c(yaxlab[littletics],yaxlab[bigtics]),...)
rect(botgap,xtics[y<gap[1]] - halfwidth,y[y<gap[1]],
xtics[y<gap[1]] + halfwidth,col=col[y<gap[1]])
rect(botgap,xtics[bigones] - halfwidth,y[bigones]-gapsize,
xtics[bigones] + halfwidth,col=col[bigones])
axis.break(1,gap[1],style="gap")
}
else {
plot(0,xlim=xlim,ylim=ylim,xlab=xlab,ylab=ylab,axes=FALSE,type="n",...)
plot.lim <- par("usr")
botgap<-ylim[1]
box()
axis(1,at=xtics,labels=xaxlab,...)
axis(2,at=c(ytics[littletics],ytics[bigtics] - gapsize),
labels=c(yaxlab[littletics],yaxlab[bigtics]),...)
rect(xtics[littleones] - halfwidth,botgap,
xtics[littleones] + halfwidth,y[littleones],col=col[littleones])
rect(xtics[bigones] - halfwidth,botgap,xtics[bigones] + halfwidth,
y[bigones]-gapsize,col=col[bigones])
axis.break(2,gap[1],style="gap")
}
invisible(xtics)
}
|
/primeiroProjetoR/plotrix/R/gap.barplot.R
|
no_license
|
bernardomsvieira/Rproject
|
R
| false | false | 2,321 |
r
|
gap.barplot<-function (y,gap,xaxlab,xtics,yaxlab,ytics,xlim=NA,ylim=NA,
xlab=NULL,ylab=NULL,horiz=FALSE,col=NULL,...) {
if (missing(y)) stop("y values required")
if(missing(xtics)) xtics <- 1:length(y)
if (missing(gap)) stop("gap must be specified")
if (is.null(ylab)) ylab <- deparse(substitute(y))
if (is.null(col)) col <- color.gradient(c(0,1),c(0,1,0),c(1,0),length(y))
  else if(length(col) < length(y)) col <- rep(col,length.out=length(y))
littleones <- which(y <= gap[1])
bigones <- which(y >= gap[2])
valid.y<-y[!is.na(y)]
if(any(valid.y > gap[1] & valid.y < gap[2]))
warning("gap includes some values of y")
gapsize <- gap[2] - gap[1]
if(missing(xaxlab)) xaxlab <- as.character(xtics)
if(is.na(xlim[1])) xlim <- range(xtics)
if(is.na(ylim[1])) ylim <- c(min(valid.y)-gapsize,max(valid.y)-gapsize)
if(ylim[1] < 0) ylim[1]<-0
if(missing(ytics)) ytics <- pretty(y)
if(any(ytics<0)) ytics<-ytics[ytics >= 0]
if(missing(yaxlab)) yaxlab <- ytics
littletics <- which(ytics < gap[1])
bigtics <- which(ytics >= gap[2])
halfwidth <- min(diff(xtics))/2
if(horiz) {
if(!is.null(xlab)) {
tmplab<-xlab
xlab<-ylab
ylab<-tmplab
}
plot(0,xlim=ylim,ylim=xlim,xlab=xlab,ylab=ylab,axes=FALSE,type="n",...)
plot.lim <- par("usr")
botgap<-ifelse(gap[1]<0,gap[1],ylim[1])
box()
axis(2,at=xtics,labels=xaxlab,...)
axis(1,at=c(ytics[littletics],ytics[bigtics]-gapsize),
labels=c(yaxlab[littletics],yaxlab[bigtics]),...)
rect(botgap,xtics[y<gap[1]] - halfwidth,y[y<gap[1]],
xtics[y<gap[1]] + halfwidth,col=col[y<gap[1]])
rect(botgap,xtics[bigones] - halfwidth,y[bigones]-gapsize,
xtics[bigones] + halfwidth,col=col[bigones])
axis.break(1,gap[1],style="gap")
}
else {
plot(0,xlim=xlim,ylim=ylim,xlab=xlab,ylab=ylab,axes=FALSE,type="n",...)
plot.lim <- par("usr")
botgap<-ylim[1]
box()
axis(1,at=xtics,labels=xaxlab,...)
axis(2,at=c(ytics[littletics],ytics[bigtics] - gapsize),
labels=c(yaxlab[littletics],yaxlab[bigtics]),...)
rect(xtics[littleones] - halfwidth,botgap,
xtics[littleones] + halfwidth,y[littleones],col=col[littleones])
rect(xtics[bigones] - halfwidth,botgap,xtics[bigones] + halfwidth,
y[bigones]-gapsize,col=col[bigones])
axis.break(2,gap[1],style="gap")
}
invisible(xtics)
}
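# Editor's usage sketch, along the lines of the plotrix documentation example
# (hypothetical data); color.gradient() and axis.break() used above also come
# from plotrix.
library(plotrix)
twogrp <- c(rnorm(10) + 4, rnorm(10) + 20)
gap.barplot(twogrp, gap = c(8, 16), xlab = "Index", ytics = c(3, 6, 17, 20),
            ylab = "Group values", main = "Barplot with gap")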
|
library(multistate)
### Name: sm4rs
### Title: 4-State Relative Survival Semi-Markov Model with Additive Risks
### Aliases: sm4rs
### Keywords: semi-Markov relative survival
### ** Examples
# import the observed data
# (X=1 corresponds to initial state with a functioning graft, X=2 to acute rejection episode,
# X=3 to return to dialysis, X=4 to death with a functioning graft)
data(dataDIVAT)
# A subgroup analysis to reduce the time needed for this example
dataDIVAT$id<-c(1:nrow(dataDIVAT))
set.seed(2)
d4<-dataDIVAT[dataDIVAT$id %in% sample(dataDIVAT$id, 300, replace = FALSE),]
# import the expected mortality rates
data(fr.ratetable)
# 4-state parametric additive relative survival semi-Markov model including one
# explicative variable (z is the delayed graft function) on the transition from X=1 to X=2
# Note: a semi-Markovian process with sojourn times exponentially distributed
# is a time-homogeneous Markov process
# We only reduced the precision and the number of iterations to save time in this example,
# prefer the default values.
sm4rs(t1=d4$time1, t2=d4$time2, sequence=d4$trajectory, dist=c("E","E","E","E","E"),
ini.dist.12=c(8.34), ini.dist.13=c(10.44), ini.dist.14=c(10.70),
ini.dist.23=c(9.43), ini.dist.24=c(11.11),
cov.12=d4$z, init.cov.12=c(0.04), names.12=c("beta12_z"),
p.age=d4$ageR*365.24, p.sex=d4$sexR,
p.year=as.date(paste("01","01",d4$year.tx), order = "mdy"),
p.rate.table=fr.ratetable, conf.int=TRUE,
silent=FALSE, precision=0.001)
|
/data/genthat_extracted_code/multistate/examples/sm4rs.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 1,506 |
r
|
library(multistate)
### Name: sm4rs
### Title: 4-State Relative Survival Semi-Markov Model with Additive Risks
### Aliases: sm4rs
### Keywords: semi-Markov relative survival
### ** Examples
# import the observed data
# (X=1 corresponds to initial state with a functioning graft, X=2 to acute rejection episode,
# X=3 to return to dialysis, X=4 to death with a functioning graft)
data(dataDIVAT)
# A subgroup analysis to reduce the time needed for this example
dataDIVAT$id<-c(1:nrow(dataDIVAT))
set.seed(2)
d4<-dataDIVAT[dataDIVAT$id %in% sample(dataDIVAT$id, 300, replace = FALSE),]
# import the expected mortality rates
data(fr.ratetable)
# 4-state parametric additive relative survival semi-Markov model including one
# explicative variable (z is the delayed graft function) on the transition from X=1 to X=2
# Note: a semi-Markovian process with sojourn times exponentially distributed
# is a time-homogeneous Markov process
# We only reduced the precision and the number of iterations to save time in this example,
# prefer the default values.
sm4rs(t1=d4$time1, t2=d4$time2, sequence=d4$trajectory, dist=c("E","E","E","E","E"),
ini.dist.12=c(8.34), ini.dist.13=c(10.44), ini.dist.14=c(10.70),
ini.dist.23=c(9.43), ini.dist.24=c(11.11),
cov.12=d4$z, init.cov.12=c(0.04), names.12=c("beta12_z"),
p.age=d4$ageR*365.24, p.sex=d4$sexR,
p.year=as.date(paste("01","01",d4$year.tx), order = "mdy"),
p.rate.table=fr.ratetable, conf.int=TRUE,
silent=FALSE, precision=0.001)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sw.R
\name{T68fromT90}
\alias{T68fromT90}
\title{Convert from ITS-90 to IPTS-68 temperature}
\usage{
T68fromT90(temperature)
}
\arguments{
\item{temperature}{Vector of temperatures expressed in the ITS-90 scale.}
}
\value{
Temperature expressed in the IPTS-68 scale.
}
\description{
Today's instruments typically record in the ITS-90 scale, but some old
datasets will be in the IPTS-68 scale. \code{T90fromT68()} converts from the
IPTS-68 to the ITS-90 scale, using Saunders' (1990) formula, while
\code{T68fromT90()} does the reverse. The difference between IPTS-68 and
ITS-90 values is typically a few millidegrees (see \sQuote{Examples}), which
is seldom visible on a typical temperature profile, but may be of interest
in some precise work. Mostly for historical interest, \code{T90fromT48()}
is provided to convert from the ITS-48 system to ITS-90.
}
\examples{
library(oce)
T68 <- seq(3, 20, 1)
T90 <- T90fromT68(T68)
sqrt(mean((T68-T90)^2))
}
\references{
P. M. Saunders, 1990. The international temperature scale of
1990, ITS-90. WOCE Newsletter, volume 10, September 1990, page 10.
(\url{http://www.nodc.noaa.gov/woce/wdiu/wocedocs/newsltr/news10/contents.htm})
}
\seealso{
Other functions that calculate seawater properties: \code{\link{T90fromT48}},
\code{\link{T90fromT68}},
\code{\link{swAbsoluteSalinity}},
\code{\link{swAlphaOverBeta}}, \code{\link{swAlpha}},
\code{\link{swBeta}}, \code{\link{swCSTp}},
\code{\link{swConservativeTemperature}},
\code{\link{swDepth}}, \code{\link{swDynamicHeight}},
\code{\link{swLapseRate}}, \code{\link{swN2}},
\code{\link{swPressure}}, \code{\link{swRho}},
\code{\link{swRrho}}, \code{\link{swSCTp}},
\code{\link{swSTrho}}, \code{\link{swSigma0}},
\code{\link{swSigma1}}, \code{\link{swSigma2}},
\code{\link{swSigma3}}, \code{\link{swSigma4}},
\code{\link{swSigmaTheta}}, \code{\link{swSigmaT}},
\code{\link{swSigma}}, \code{\link{swSoundAbsorption}},
\code{\link{swSoundSpeed}}, \code{\link{swSpecificHeat}},
\code{\link{swSpice}}, \code{\link{swTFreeze}},
\code{\link{swTSrho}},
\code{\link{swThermalConductivity}},
\code{\link{swTheta}}, \code{\link{swViscosity}},
\code{\link{swZ}}
}
\author{
Dan Kelley
}
|
/pkgs/oce/man/T68fromT90.Rd
|
no_license
|
vaguiar/EDAV_Project_2017
|
R
| false | true | 2,283 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sw.R
\name{T68fromT90}
\alias{T68fromT90}
\title{Convert from ITS-90 to IPTS-68 temperature}
\usage{
T68fromT90(temperature)
}
\arguments{
\item{temperature}{Vector of temperatures expressed in the ITS-90 scale.}
}
\value{
Temperature expressed in the IPTS-68 scale.
}
\description{
Today's instruments typically record in the ITS-90 scale, but some old
datasets will be in the IPTS-68 scale. \code{T90fromT68()} converts from the
IPTS-68 to the ITS-90 scale, using Saunders' (1990) formula, while
\code{T68fromT90()} does the reverse. The difference between IPTS-68 and
ITS-90 values is typically a few millidegrees (see \sQuote{Examples}), which
is seldom visible on a typical temperature profile, but may be of interest
in some precise work. Mostly for historical interest, \code{T90fromT48()}
is provided to convert from the ITS-48 system to ITS-90.
}
\examples{
library(oce)
T68 <- seq(3, 20, 1)
T90 <- T90fromT68(T68)
sqrt(mean((T68-T90)^2))
}
\references{
P. M. Saunders, 1990. The international temperature scale of
1990, ITS-90. WOCE Newsletter, volume 10, September 1990, page 10.
(\url{http://www.nodc.noaa.gov/woce/wdiu/wocedocs/newsltr/news10/contents.htm})
}
\seealso{
Other functions that calculate seawater properties: \code{\link{T90fromT48}},
\code{\link{T90fromT68}},
\code{\link{swAbsoluteSalinity}},
\code{\link{swAlphaOverBeta}}, \code{\link{swAlpha}},
\code{\link{swBeta}}, \code{\link{swCSTp}},
\code{\link{swConservativeTemperature}},
\code{\link{swDepth}}, \code{\link{swDynamicHeight}},
\code{\link{swLapseRate}}, \code{\link{swN2}},
\code{\link{swPressure}}, \code{\link{swRho}},
\code{\link{swRrho}}, \code{\link{swSCTp}},
\code{\link{swSTrho}}, \code{\link{swSigma0}},
\code{\link{swSigma1}}, \code{\link{swSigma2}},
\code{\link{swSigma3}}, \code{\link{swSigma4}},
\code{\link{swSigmaTheta}}, \code{\link{swSigmaT}},
\code{\link{swSigma}}, \code{\link{swSoundAbsorption}},
\code{\link{swSoundSpeed}}, \code{\link{swSpecificHeat}},
\code{\link{swSpice}}, \code{\link{swTFreeze}},
\code{\link{swTSrho}},
\code{\link{swThermalConductivity}},
\code{\link{swTheta}}, \code{\link{swViscosity}},
\code{\link{swZ}}
}
\author{
Dan Kelley
}
|
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../h2o-runit.R')
test.pub_697_exec_bad_key_name <- function() {
prostatePath = locate("smalldata/prostate/prostate.csv")
prostate.hex = h2o.importFile(path = prostatePath, destination_frame = "prostate.hex")
prostate.local = as.data.frame(prostate.hex)
# Are we in the right universe?
expect_equal(380, dim(prostate.local)[1])
expect_equal(9, dim(prostate.local)[2])
remote = t(prostate.hex$AGE) %*% prostate.hex$CAPSULE
expect_equal(1, dim(remote)[1])
expect_equal(1, dim(remote)[2])
expect_error(t(pub697$AGE) %*% prostate.hex$CAPSULE)
}
doTest("PUB-697 bad key should not cause crash", test.pub_697_exec_bad_key_name)
|
/h2o-r/tests/testdir_jira/runit_pub_697_exec_bad_key_name.R
|
permissive
|
tamseo/h2o-3
|
R
| false | false | 711 |
r
|
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../h2o-runit.R')
test.pub_697_exec_bad_key_name <- function() {
prostatePath = locate("smalldata/prostate/prostate.csv")
prostate.hex = h2o.importFile(path = prostatePath, destination_frame = "prostate.hex")
prostate.local = as.data.frame(prostate.hex)
# Are we in the right universe?
expect_equal(380, dim(prostate.local)[1])
expect_equal(9, dim(prostate.local)[2])
remote = t(prostate.hex$AGE) %*% prostate.hex$CAPSULE
expect_equal(1, dim(remote)[1])
expect_equal(1, dim(remote)[2])
expect_error(t(pub697$AGE) %*% prostate.hex$CAPSULE)
}
doTest("PUB-697 bad key should not cause crash", test.pub_697_exec_bad_key_name)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ParetoShrinkage.R
\name{R2_Wherry}
\alias{R2_Wherry}
\title{R2_Wherry function}
\usage{
R2_Wherry(N, p, R2)
}
\arguments{
\item{N}{Sample size}
\item{p}{number of predictors}
\item{R2}{R-squared}
}
\value{
R2_W formula-adjusted R2 based on Wherry (1931) shrinkage formula
}
\description{
Estimate shrunken R2 based on Wherry (1931) formula
}
\examples{
# (1) Sample size
N <- 100
# (2) Number of predictors
p <- 5
# (3) R2 R-squared
R2 <- 0.30
# Estimate shrunken R2
R2_Wherry(N = N, p = p, R2 = R2)
}
|
/man/R2_Wherry.Rd
|
no_license
|
Diversity-ParetoOptimal/ParetoR
|
R
| false | true | 613 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ParetoShrinkage.R
\name{R2_Wherry}
\alias{R2_Wherry}
\title{R2_Wherry function}
\usage{
R2_Wherry(N, p, R2)
}
\arguments{
\item{N}{Sample size}
\item{p}{number of predictors}
\item{R2}{R-squared}
}
\value{
R2_W formula-adjusted R2 based on Wherry (1931) shrinkage formula
}
\description{
Estimate shrunken R2 based on Wherry (1931) formula
}
\examples{
# (1) Sample size
N <- 100
# (2) Number of predictors
p <- 5
# (3) R2 R-squared
R2 <- 0.30
# Estimate shrunken R2
R2_Wherry(N = N, p = p, R2 = R2)
}
|
###############################################################################
# #
# execute exp3_bayes_t_priors.R #
# #
###############################################################################
setwd("Documents/wiskunde/2017-2018/bachelor_project/R/handin")
setwd("/media/mynewdrive1/Documenten/Wiskunde/2017-2018/bachelor_project/R/handin")
# libraries
library("arm")
# load functions
source("helpers.R")
source("helpers_CV_bayesglm.R")
# load variables
source("variables_exp3.R")
# Choose what initial betas and sample size to use
## Betas
k = 1
initial_betas[[k]] = c(1,1,1,1,1,1)
## Sample size
l = 1
order_fit <- 2
repeat_cross_val <-1001
df_plot = TRUE; data_plot = TRUE
df_plot = FALSE; data_plot = FALSE
# Execute experiment
for (i in 1:repeat_cross_val){
cross_val_temp <- execute_cross_val_tprior(df = df, x_min = x_min, x_max = x_max, order_sample = order_p, order_fit = order_fit, initial_betas = initial_betas[[k]], sample_size = sample_size[l], orthog =FALSE, df_plot = df_plot, data_plot = data_plot, multiple_samples = FALSE)
#
df_stars[i] <- cross_val_temp[[1]]$df_star
mses[i] <- cross_val_temp[[1]]$df_star_cross_val$mse
mu_news[[i]] <- cross_val_temp[[1]]$df_star_cross_val$mu_new
print(i)
}
# calc out of back mse = test mse using mu_news
i <- 1
test_mses <- c()
for (beta in mu_news[1:1001]) {
#print(beta)
test_mses[i] <- out_of_back_mse(beta_hat = beta, beta_true = c(1,1,1), x_min = x_min, x_max = x_max, n = 100)
i <- i + 1
}
mean(test_mses)
mean(test_mses,trim=0.1)
hist(test_mses)
hist(test_mses,breaks=10000,main="",xlab="MSE",ylab="Frequentie")
hist(sort(test_mses)[1:(0.8*length(test_mses))],breaks=10,main="",xlab="MSE",ylab="Frequentie")
# Visualise experiment
hist(df_stars,main="",xlab="df",ylab="Frequentie")
plot(df_stars,mses)
plot(df_stars)
hist(mses,breaks =50,main="",xlab="MSE",ylab="Frequentie")
# numbers
mean(df_stars)
mean(mses)
mean(mses,trim=0.1)
mu_1 <- 0
mu_2 <- 0
mu_3 <- 0
mu_4 <- 0
mu_5 <- 0
mu_6 <- 0
for (j in 1:length(mu_news)){
mu_1 <- mu_1 + mu_news[[j]][1]
mu_2 <- mu_2 + mu_news[[j]][2]
mu_3 <- mu_3 + mu_news[[j]][3]
mu_4 <- mu_4 + mu_news[[j]][4]
mu_5 <- mu_5 + mu_news[[j]][5]
mu_6 <- mu_6 + mu_news[[j]][6]
}
mu_1_mean <- mu_1 / length(mu_news)
mu_2_mean <- mu_2 / length(mu_news)
mu_3_mean <- mu_3 / length(mu_news)
mu_4_mean <- mu_4 / length(mu_news)
mu_5_mean <- mu_5 / length(mu_news)
mu_6_mean <- mu_6 / length(mu_news)
mu_1_mean
mu_2_mean
mu_3_mean
mu_4_mean
mu_5_mean
mu_6_mean
mu_news_mean <- c(mu_1_mean,mu_2_mean,mu_3_mean,mu_4_mean,mu_5_mean,mu_6_mean)
# bias?!
verschil1 <- abs(c(1,1,1,1,1,1)-c(mu_1_mean,mu_2_mean,mu_3_mean,mu_4_mean,mu_5_mean,mu_6_mean))
bias_sq <- mean(verschil1)**2
bias_sq
verschil2 <- 0
var1 <- 0
for (beta in mu_news[1:1000]){
# bias
#verschil2 <- verschil2 + abs(c(beta[1,1],beta[2,1],beta[3,1]) - c(1,1,1))
verschil2 <- verschil2 + abs(c(beta[1],beta[2],beta[3]) - c(1,1,1))
#print(verschil2)
# var
#var1 <- var1 + sum((mu_news_mean - beta)**2)
}
bias_sq2 <- mean(verschil2 / 1000)**2
bias_sq2
var1 <- var1 / 1000
var1
# Var?
var1 <- mean(mean(mu_news)-mu_news)
|
/R/exp3_bayes_tpriors.R
|
no_license
|
StudentThom/handin_bachelor_project
|
R
| false | false | 3,190 |
r
|
###############################################################################
# #
# execute exp3_bayes_t_priors.R #
# #
###############################################################################
setwd("Documents/wiskunde/2017-2018/bachelor_project/R/handin")
setwd("/media/mynewdrive1/Documenten/Wiskunde/2017-2018/bachelor_project/R/handin")
# libraries
library("arm")
# load functions
source("helpers.R")
source("helpers_CV_bayesglm.R")
# load variables
source("variables_exp3.R")
# Choose what initial betas and sample size to use
## Betas
k = 1
initial_betas[[k]] = c(1,1,1,1,1,1)
## Sample size
l = 1
order_fit <- 2
repeat_cross_val <-1001
df_plot = TRUE; data_plot = TRUE
df_plot = FALSE; data_plot = FALSE
# Execute experiment
for (i in 1:repeat_cross_val){
cross_val_temp <- execute_cross_val_tprior(df = df, x_min = x_min, x_max = x_max, order_sample = order_p, order_fit = order_fit, initial_betas = initial_betas[[k]], sample_size = sample_size[l], orthog =FALSE, df_plot = df_plot, data_plot = data_plot, multiple_samples = FALSE)
#
df_stars[i] <- cross_val_temp[[1]]$df_star
mses[i] <- cross_val_temp[[1]]$df_star_cross_val$mse
mu_news[[i]] <- cross_val_temp[[1]]$df_star_cross_val$mu_new
print(i)
}
# calc out of back mse = test mse using mu_news
i <- 1
test_mses <- c()
for (beta in mu_news[1:1001]) {
#print(beta)
test_mses[i] <- out_of_back_mse(beta_hat = beta, beta_true = c(1,1,1), x_min = x_min, x_max = x_max, n = 100)
i <- i + 1
}
mean(test_mses)
mean(test_mses,trim=0.1)
hist(test_mses)
hist(test_mses,breaks=10000,main="",xlab="MSE",ylab="Frequentie")
hist(sort(test_mses)[1:(0.8*length(test_mses))],breaks=10,main="",xlab="MSE",ylab="Frequentie")
# Visualise experiment
hist(df_stars,main="",xlab="df",ylab="Frequentie")
plot(df_stars,mses)
plot(df_stars)
hist(mses,breaks =50,main="",xlab="MSE",ylab="Frequentie")
# numbers
mean(df_stars)
mean(mses)
mean(mses,trim=0.1)
mu_1 <- 0
mu_2 <- 0
mu_3 <- 0
mu_4 <- 0
mu_5 <- 0
mu_6 <- 0
for (j in 1:length(mu_news)){
mu_1 <- mu_1 + mu_news[[j]][1]
mu_2 <- mu_2 + mu_news[[j]][2]
mu_3 <- mu_3 + mu_news[[j]][3]
mu_4 <- mu_4 + mu_news[[j]][4]
mu_5 <- mu_5 + mu_news[[j]][5]
mu_6 <- mu_6 + mu_news[[j]][6]
}
mu_1_mean <- mu_1 / length(mu_news)
mu_2_mean <- mu_2 / length(mu_news)
mu_3_mean <- mu_3 / length(mu_news)
mu_4_mean <- mu_4 / length(mu_news)
mu_5_mean <- mu_5 / length(mu_news)
mu_6_mean <- mu_6 / length(mu_news)
mu_1_mean
mu_2_mean
mu_3_mean
mu_4_mean
mu_5_mean
mu_6_mean
mu_news_mean <- c(mu_1_mean,mu_2_mean,mu_3_mean,mu_4_mean,mu_5_mean,mu_6_mean)
# bias?!
verschil1 <- abs(c(1,1,1,1,1,1)-c(mu_1_mean,mu_2_mean,mu_3_mean,mu_4_mean,mu_5_mean,mu_6_mean))
bias_sq <- mean(verschil1)**2
bias_sq
verschil2 <- 0
var1 <- 0
for (beta in mu_news[1:1000]){
# bias
#verschil2 <- verschil2 + abs(c(beta[1,1],beta[2,1],beta[3,1]) - c(1,1,1))
verschil2 <- verschil2 + abs(c(beta[1],beta[2],beta[3]) - c(1,1,1))
#print(verschil2)
# var
#var1 <- var1 + sum((mu_news_mean - beta)**2)
}
bias_sq2 <- mean(verschil2 / 1000)**2
bias_sq2
var1 <- var1 / 1000
var1
# Var?
var1 <- mean(mean(mu_news)-mu_news)
|
#######################
### Meta-analysis: correlations
# by Julien P. Irmer
## Preparation
library(metafor)
## Get an overview of the dataset
head(dat.molloy2014)
summary(dat.molloy2014$ri)
## Graphical illustration of the relationship between medication adherence and conscientiousness
boxplot(dat.molloy2014$ri)
## Fisher's z-Transformation
data_transformed <- escalc(measure="ZCOR", # z-Transformation
                           ri=ri, # observed correlation coefficients
                           ni=ni, # sample size per study
                           data=dat.molloy2014, # dataset
                           var.names = c("z_ri", "v_ri")) # names of the new variables to create
head(data_transformed)
data_transformed_2 <- escalc(measure="ZCOR", # z-Transformation
                             ri=dat.molloy2014$ri, # observed correlation coefficients
                             ni=dat.molloy2014$ni, # sample size per study
                             var.names = c("z_ri", "v_ri")) # names of the new variables to create
head(data_transformed_2)
data_transformed$v_ri[1:4] # look at the first 4 entries
1/(dat.molloy2014$ni - 3)[1:4]
plot(x = data_transformed$ri, y = data_transformed$z_ri,
xlab = "r", ylab = "z",
main = "Fisher's z-Transformation")
## Random Effects Model
REM <- rma(yi = z_ri, vi = v_ri, data=data_transformed)
summary(REM)
REM$b # mean estimate b
REM$tau2 # tau²
predict(REM, transf=transf.ztor) # back-transformation
pred_REM <- predict(REM, transf=transf.ztor)
names(pred_REM)
pred_REM$pred # back-transformed pooled correlation coefficient
## Further moderators and psychometric meta-analyses
df <- data.frame(r = c(0.3, 0.3, 0.5, 0.4),
RelX = c(0.6, 0.8, 1, 1),
RelY = c(0.5, 0.7, 0.8, 1),
n = c(65, 65, 34, 46))
head(df)
df$r_correct <- df$r/sqrt(df$RelX*df$RelY) # correction for attenuation
head(df)
|
/content/post/KliPPs_MSc5a_R_Files/8_meta-analyse_korrelationen_RCode.R
|
no_license
|
martscht/projekte
|
R
| false | false | 2,090 |
r
|
#######################
### Meta-analysis: correlations
# by Julien P. Irmer
## Preparation
library(metafor)
## Get an overview of the dataset
head(dat.molloy2014)
summary(dat.molloy2014$ri)
## Graphical illustration of the relationship between medication adherence and conscientiousness
boxplot(dat.molloy2014$ri)
## Fisher's z-Transformation
data_transformed <- escalc(measure="ZCOR", # z-Transformation
                           ri=ri, # observed correlation coefficients
                           ni=ni, # sample size per study
                           data=dat.molloy2014, # dataset
                           var.names = c("z_ri", "v_ri")) # names of the new variables to create
head(data_transformed)
data_transformed_2 <- escalc(measure="ZCOR", # z-Transformation
                             ri=dat.molloy2014$ri, # observed correlation coefficients
                             ni=dat.molloy2014$ni, # sample size per study
                             var.names = c("z_ri", "v_ri")) # names of the new variables to create
head(data_transformed_2)
data_transformed$v_ri[1:4] # look at the first 4 entries
1/(dat.molloy2014$ni - 3)[1:4]
plot(x = data_transformed$ri, y = data_transformed$z_ri,
xlab = "r", ylab = "z",
main = "Fisher's z-Transformation")
## Random Effects Model
REM <- rma(yi = z_ri, vi = v_ri, data=data_transformed)
summary(REM)
REM$b # mean estimate b
REM$tau2 # tau²
predict(REM, transf=transf.ztor) # back-transformation
pred_REM <- predict(REM, transf=transf.ztor)
names(pred_REM)
pred_REM$pred # back-transformed pooled correlation coefficient
## Further moderators and psychometric meta-analyses
df <- data.frame(r = c(0.3, 0.3, 0.5, 0.4),
RelX = c(0.6, 0.8, 1, 1),
RelY = c(0.5, 0.7, 0.8, 1),
n = c(65, 65, 34, 46))
head(df)
df$r_correct <- df$r/sqrt(df$RelX*df$RelY) # Minderungskorrektur
head(df)
|
#######################################################################################
#
# This file is Question5.R
# The purpose is to address the fifth question on the merged data.
# "Cut the GDP rankings into 5 separate quantile groups."
# "Make a table versus Income Group."
# "How many countries are Lower middle income but among the 38 nations with
# highest GDP?"
#
#######################################################################################
#######################################################################################
# Create a new variable for the GDP Group making sure it is the right data type
# Stick it on the end of the mergedReducedSorted data frame, optionally, check
# Populate it appropriately
#######################################################################################
GDPGroup <- numeric(nrow(mergedReducedSorted))
mergedReducedSorted <- cbind(mergedReducedSorted,GDPGroup)
if (debug == 1) {
str(mergedReducedSorted)
}
for (i in 1:nrow(mergedReducedSorted)) {
if (mergedReducedSorted$GDPRanking[i] <= nrow(mergedReducedSorted)/5) {
mergedReducedSorted$GDPGroup[i] = 1
} else if ( (mergedReducedSorted$GDPRanking[i] > nrow(mergedReducedSorted)/5) && (mergedReducedSorted$GDPRanking[i] <= 2*nrow(mergedReducedSorted)/5) ) {
mergedReducedSorted$GDPGroup[i] = 2
} else if ( (mergedReducedSorted$GDPRanking[i] > 2*nrow(mergedReducedSorted)/5) && (mergedReducedSorted$GDPRanking[i] <= 3*nrow(mergedReducedSorted)/5) ) {
mergedReducedSorted$GDPGroup[i] = 3
} else if ( (mergedReducedSorted$GDPRanking[i] > 3*nrow(mergedReducedSorted)/5) && (mergedReducedSorted$GDPRanking[i] <= 4*nrow(mergedReducedSorted)/5) ) {
mergedReducedSorted$GDPGroup[i] = 4
} else if (mergedReducedSorted$GDPRanking[i] > 4*nrow(mergedReducedSorted)/5) {
mergedReducedSorted$GDPGroup[i] = 5
}
}
#######################################################################################
# Make the requested table
# Create a vector of the Lower middle income GDP rankings
# Determine (programmatically) how many Lower middle income are in the top 38
#######################################################################################
message ("In the table below, the GDP quantiles are rows indicated by the numbers 1 through 5 on the left. The Income Groups are listed across the top. The elements of the table indicate number of countries corresponding to the GDP quantile and Income Group." )
table(mergedReducedSorted$GDPGroup, mergedReducedSorted$IncomeGroup)
TopLMI <- mergedReducedSorted[2][mergedReducedSorted[5]== "Lower middle income"]
message ("The number of Lower middle income countries in the top 38 GDP are ", sum(TopLMI < 39) )
#######################################################################################
# The requested table shows 4, but I reported 5
# The reason is the cutoff: there were 189 countries (not 190), and 189/5 = 37.8
# So my highest group had only 37 countries; the 38th was a Lower middle income country
# The table went through rank 37, while the question asked about the top 38
#######################################################################################
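#######################################################################################
# A minimal alternative sketch of the same quantile-grouping step using cut() with
# quantile() breaks instead of the explicit loop. The data below are synthetic
# stand-ins ("ranks" for GDPRanking, "income" for IncomeGroup); the real analysis
# uses the mergedReducedSorted columns of those names.
#######################################################################################
set.seed(1)
ranks <- sample(1:189) # 189 ranked countries, as in the note above (rank 1 = highest GDP)
income <- sample(c("High income", "Upper middle income", "Lower middle income", "Low income"),
                 size = 189, replace = TRUE)
gdp_group <- cut(ranks,
                 breaks = quantile(ranks, probs = seq(0, 1, 0.2)),
                 include.lowest = TRUE, labels = 1:5)
table(gdp_group, income)
sum(ranks <= 38 & income == "Lower middle income") # count among the 38 highest-GDP nations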
|
/Analysis/Question5.R
|
no_license
|
bgobran/CaseStudy1FinalVersion
|
R
| false | false | 3,259 |
r
|
#' Print DataM Object
#'
#' Modifies the "print" function to take objects of class \code{DataM} (or any of its subclasses) and print out a matrix where the first column is the dependent variable and the remaining columns are the independent variables.
#'
#' @param x An object of class DataM (or one of its subclasses)
#' @param ... additional arguments (currently unused)
#'
#' @author Thomas Carroll: \email{thomasscarroll89@gmail.com}
#' @rdname print
#' @export
setMethod("print",
signature(x="DataM"),
function(x, ...){
print(cbind(x@depvar, x@covariates))
}
)
getMethod("print", signature="DataM")
|
/MyPackage/R/print-mod.R
|
no_license
|
thomasscarroll89/RPackageProblemSet
|
R
| false | false | 573 |
r
|
testlist <- list(data = structure(c(6.53867576132537e+286, 6.53867576126997e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576132537e+286, 6.53867576130081e+286, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 8L)), q = 0)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result)
|
/biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610554326-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 713 |
r
|
#Read the two files
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
str(NEI)
library(ggplot2)
library(plyr)
#Retain just the Baltimore City data
NEI_Baltimore <- NEI[NEI$fips == "24510",]
#Convert type variable to a factor
NEI_Baltimore$type <- as.factor(NEI_Baltimore$type)
#Aggregate emission data by year
NEI_yearem_Baltimore <- ddply(NEI_Baltimore, .(type, year), summarize, Emissions = sum(Emissions))
NEI_yearem_Baltimore$Pollutant_type <- NEI_yearem_Baltimore$type
#Set margins
par("mar" = c(4,6,4,4))
#Create the plot
qplot(x = year, y = Emissions, data = NEI_yearem_Baltimore, group = Pollutant_type, color = Pollutant_type, geom = c("point", "line"), xlab = "Year",
ylab = "Total" ~ PM[2.5] ~"Emissions", main = "Total" ~ PM[2.5] ~"Emissions for Baltimore by Pollutant Type")
#Save the plot as a png
dev.copy(png, file = "plot3.png")
dev.off()
|
/Exploratory_Data_Analysis_Assignment2/plot3.R
|
no_license
|
sharathlives/JohnHopkins_Coursera_Exploratory_Data_Analysis
|
R
| false | false | 905 |
r
|
library(lattice)
extract_chrom <- function(t, thisdata, productmz, extraction_window=0.05)
{
this_spectrum = subset(thisdata, SEC == t)
return(sum(subset(this_spectrum, MZ > productmz-(extraction_window/2) & MZ < productmz+(extraction_window/2))$INT))
}
graphme <- function(xxp,allmx){
xxp <- xxp[length(xxp):1]
allmx <- allmx[allmx$MZ > 400,]
sum(is.element(allmx$label,xxp))
allmx <- allmx[is.element(allmx$label,xxp),]
print(dim(allmx))
allmx$MZ <- as.factor(allmx$MZ)
return(allmx)
}
irt2rt <- function(x,c=2148.68,m=33.87) {
return(m*x+c)
}
plotgraph <- function(assay_irt,background,rt_extraction_window=180) {
txtfiles <- dir(pattern=glob2rx(paste("*",background,"*","._chrom.mzML.dta2d",sep="")))
rawdata <- list()
for(i in 1:length(txtfiles))
{
rawdata[[i]] <- read.csv(txtfiles[i], sep="\t")
names(rawdata[[i]])<-c("SEC","MZ","INT")
}
# use this code to extract chromatograms
# data <- list()
# for(i in 1:length(txtfiles))
# {
# df<-data.frame()
# for(j in 1:length(productmz)) {
# dfj <- data.frame("INT" = sapply( unique(rawdata[[i]]$SEC), extract_chrom, thisdata=rawdata[[i]], productmz=productmz[j]), "SEC"=unique(rawdata[[i]]$SEC))
# dfj$MZ <- rep(productmz[j],dim(dfj)[1])
# df<-rbind(df,dfj)
# }
# data[[i]] = df
# }
data<-rawdata
xx <- c("x512","x256","x128","x064","x032","x016","x008","x004","x002","x001")
length(xx)
allm <- NULL
label <- NULL
for(i in 1:10){
allm <- rbind(allm,data[[i]])
labelt <- rep(xx[i],dim(data[[i]])[1])
label <- c(label, labelt)
}
allm <- cbind(label, allm)
allm <- data.frame(as.factor(allm$label), as.numeric(allm$SEC), as.numeric(allm$MZ), as.numeric(allm$INT))
colnames(allm) <- c("label","SEC","MZ","INT")
colnames(allm)
allm$label[1:10]
xxs <- c("x512","x256","x128","x064","x032","x016","x008","x004","x002","x001")
allmx <- allm
if (background=="human") {
irt<-irt2rt(assay_irt[[1]],1687.64,33.61)
}
else if (background=="yeast") {
irt<-irt2rt(assay_irt[[1]],2105.2,34.27)
}
else if (background=="no_background") {
irt<-irt2rt(assay_irt[[1]],2150.32,35.05)
}
pdf(file=paste(names(assay_irt)[[1]],"_",background,".pdf",sep=""),width=6, height=length(xxs)*1.5)
print(xyplot(INT ~ SEC | label ,data=subset(allmx,SEC >= irt-rt_extraction_window & SEC <= irt+rt_extraction_window),type="l",xlim=c(irt-rt_extraction_window,irt+rt_extraction_window),scales=list(y=list(relation="free", cex=0.7,rot=45)),groups=MZ,layout=c(1,length(xxs)),xlab="RT [s]", ylab="INT",as.table=TRUE))
dev.off()
}
background<-list("water"="no_background","yeast"="yeast","human"="human")
assays<-list("VGDTVLYGK"=3.7,"IADIQLEGLR"=49.4,"TGGDEFDEAIIK"=40.8,"LITVEGPDGAGK"=10.9,"LVDEEGNDVTPEK"=-5.1)
assay_irt<-assays[tail(strsplit(getwd(),"/")[[1]],n=1)]
for(j in 1:length(background)) {
plotgraph(assay_irt,background[[j]])
}
|
/analysis/scripts/plotChrom.R
|
permissive
|
msproteomicstools/msproteomicstools
|
R
| false | false | 2,920 |
r
|
kurtosis <-
function(x) {
x<-na.omit(x)
n<-length(x)
suma<-sum((x-mean(x))^4)/(var(x))^2
k <- n*(n+1)*suma/((n-1)*(n-2)*(n-3)) - 3*(n-1)^2/((n-2)*(n-3))
return(k)
}
|
/R/kurtosis.R
|
no_license
|
cran/agricolae
|
R
| false | false | 173 |
r
|
library(shiny)
library(gapminder)
library(dplyr)
library(plotly)
library(ggplot2)
server <- function(input, output){
rGDP <- reactive({ input$GDP })
rContinent <- reactive({ input$Continent})
output$scatterPlot <- renderPlot({
ggplot(subset(gapminder, continent == rContinent() & gdpPercap >= rGDP()),
aes(x = gdpPercap, y = lifeExp, z = pop)) + geom_point() +
geom_smooth(method=lm, color = "darkred") +
labs(x = "GDP per Capita", y = "Life Expectancy")
})
output$timePlot <- renderPlot({
dataT<- subset(gapminder,continent == rContinent() & gdpPercap >= rGDP())
ggplot(dataT, aes(x = year, y = lifeExp, color = country)) +
geom_line(lwd = 1, show.legend = TRUE) + facet_wrap(~ continent) +
scale_color_manual(values = country_colors)
})
output$boxPlot <- renderPlot({
ggplot(subset(gapminder, continent == rContinent() & gdpPercap >= rGDP()),
aes(x = country, y = lifeExp)) + geom_boxplot() + coord_flip()
})
output$table <- renderTable({
subset((gapminder), continent == rContinent() & gdpPercap >= rGDP())
})
}
|
/server.R
|
no_license
|
brianmblakely/DataProduct
|
R
| false | false | 1,149 |
r
|
#include <AudioUnit/AudioUnit.r>
#include "FullBacanoVersion.h"
// Note that resource IDs must be spaced 2 apart for the 'STR ' name and description
#define kAudioUnitResID_FullBacano 1000
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FullBacano~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#define RES_ID kAudioUnitResID_FullBacano
#define COMP_TYPE kAudioUnitType_Effect
#define COMP_SUBTYPE FullBacano_COMP_SUBTYPE
#define COMP_MANUF FullBacano_COMP_MANF
#define VERSION kFullBacanoVersion
#define NAME "Activata: FullBacano"
#define DESCRIPTION "FullBacano 1.0"
#define ENTRY_POINT "FullBacanoEntry"
#include "AUResources.r"
|
/FullBacano/FullBacano/FullBacano.r
|
no_license
|
activata/FullBacano
|
R
| false | false | 641 |
r
|
#' Model Playground (Gadget) UI Function
#'
#' @param id, character used to specify namespace, see \code{shiny::\link[shiny]{NS}}
#'
#' @importFrom shiny tagList
#'
#' @return a \code{shiny::\link[shiny]{tag}} containing UI elements
#'
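#' @examples
#' # Hypothetical wiring sketch for the module pair (UI + server); the page
#' # layout and module id below are illustrative and not taken from CardioResp.
#' \dontrun{
#' ui <- shiny::fluidPage(patientGraphUI("patient"))
#' server <- function(input, output, session) {
#'   shiny::callModule(patientGraph, "patient")
#' }
#' shiny::shinyApp(ui, server)
#' }
#'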
#' @export
patientGraphUI <- function(id) {
ns <- shiny::NS(id)
bs4Dash::bs4Card(
title = "Gadget Playground",
elevation = 3,
width = 12,
closable = FALSE,
collapsible = FALSE,
headerBorder = FALSE,
style = 'padding_0px'
)
}
#' Graph Output Server Function
#'
#' @param input Shiny inputs.
#' @param output Shiny Outputs.
#' @param session Session object.
#'
#' @return No return value; called for its side effect of registering the
#'   \code{patient_graph} UI output via \code{shiny::renderUI}.
#'
#' @export
patientGraph <- function(input, output, session) {
ns <- session$ns
output$patient_graph <- shiny::renderUI({
# fluidRow(
# column(
# width = 6,
# style = 'padding:0px;'
# #uiOutput("graph_box")
# )
# )
## Add Davids gadget
#tags$iframe(src="https://cardiomodel.shinyapps.io/gadget/", height=600, width=535)
#tagList(
# bs4Card(
# withSpinner(
# plotlyOutput(
# "plot_node",
# height = "500px",
# width = "100%"
# ),
# size = 2,
# type = 8,
# color = "#000000"
# )
# )
#)
})
}
|
/R/gadget.R
|
no_license
|
ddezel/CardioResp
|
R
| false | false | 1,544 |
r
|
##First all data is read and then a subset is taken.
Dataset<-read.table("household_power_consumption.txt", header = TRUE, sep=";", na.strings = "?")
Dataset<-subset(Dataset, Date=="2/2/2007"|Date=="1/2/2007")
#Extra column created pasting date and time together
Dataset$DateTime <-paste(Dataset$Date, Dataset$Time)
png("plot2.png") #Initiate plot
plot(strptime(Dataset$DateTime, "%d/%m/%Y %H:%M:%S"), Dataset$Global_active_power,
xlab="",
ylab = "Global Active Power (kilowatts)",
type = "l")
dev.off() #Close plot
|
/plot2.R
|
no_license
|
FlorienM/ExData_Plotting1
|
R
| false | false | 556 |
r
|
library(staRdom)
### Name: abs_fit_slope
### Title: Fit absorbance data to exponential curve. 'drm' is used for the
### fitting process.
### Aliases: abs_fit_slope
### ** Examples
data(abs_data)
abs_fit_slope(abs_data$wavelength,abs_data$sample1,lim=c(350,400),l_ref=350)
|
/data/genthat_extracted_code/staRdom/examples/abs_fit_slope.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 281 |
r
|
# makeCacheMatrix: creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
  invrs <- NULL
  setorig <- function(y) {
    # replace the stored matrix and invalidate the cached inverse
    x <<- y
    invrs <<- NULL
  }
  getorig <- function() x
  setinversevalue <- function(inverse) invrs <<- inverse
  getinversevalue <- function() invrs
  list(set = setorig,
       get = getorig,
       setinverse = setinversevalue,
       getinverse = getinversevalue)
}
# cacheSolve: returns the inverse of the special "matrix" created by
# makeCacheMatrix, computing it only if it has not been cached yet
cacheSolve <- function(x, ...) {
  invrs <- x$getinverse()
  if (!is.null(invrs)) {
    message("Inverse was already calculated and is cached")
    return(invrs)
  }
  data <- x$get()
  invrs <- solve(data, ...)
  x$setinverse(invrs)
  invrs
}
|
/cachematrix.R
|
no_license
|
manjuvegesna/ProgrammingAssignment2
|
R
| false | false | 649 |
r
|
\name{Zimmerman}
\alias{Zimmerman}
\docType{data}
\title{Stand Your Ground Simpson's Paradox }
\description{
Data from 220 cases in Florida where a "Stand your ground" defense was used.
}
\format{
A data frame with 220 observations on the following 5 variables.
\describe{
\item{\code{Convicted}}{Was the defendant Convicted? (\code{No} or \code{Yes})}
\item{\code{IndWhiteVictim}}{Was the victim white? (\code{1}=yes or \code{0}=no)}
\item{\code{IndWhiteDefendant}}{Was the defendant white? (\code{1}=yes or \code{0}=no)}
\item{\code{VictimRace}}{Race of the victim (\code{Minority} or \code{White})}
\item{\code{DefendantRace}}{Race of the defendant (\code{Minority} or \code{White})}
}
}
\details{
Inspired by the Trayvon Martin case, these data combine fatal and non-fatal cases of assault in Florida in which the defendant used the Stand Your Ground law as a defense. These data show Simpson's Paradox: the race of the victim matters more for conviction than the race of the defendant (see the example below).
}
\source{
Data from Tampa Bay Times, male plus female cases, as of 2/8/15 -- final posted data
http://www.tampabay.com/stand-your-ground-law/nonfatal-cases
http://www.tampabay.com/stand-your-ground-law/fatal-cases
}
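\examples{
# Illustrative sketch only, assuming the data set is available as 'Zimmerman'
# with the columns documented above: compare conviction rates by victim race
# and by defendant race.
\dontrun{
data(Zimmerman)
prop.table(table(Zimmerman$VictimRace, Zimmerman$Convicted), margin = 1)
prop.table(table(Zimmerman$DefendantRace, Zimmerman$Convicted), margin = 1)
}
}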
\keyword{datasets}
|
/man/Zimmerman.Rd
|
permissive
|
tessington/qsci381
|
R
| false | false | 1,223 |
rd
|
#' Estimates principal component functions by computing eigenfunctions of the covariance function
#'
#' Estimates principal component functions by computing eigenfunctions of the covariance function
#'
#' @param dat functional data set that can be passed to \code{ssfcov2::estimate_cov_function()}. See documentation for details.
#' @param n.marginal.knots number of knot locations to use on the marginal domain. The number of knot locations actually used in estimation will be n^2 on the product domain.
#' @return list containing first two principal component functions
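#' @examples
#' # Hypothetical usage sketch: 'dat' and the knot count are placeholders; 'dat'
#' # must be a functional data set accepted by ssfcov2::estimate_cov_function().
#' \dontrun{
#' pcs <- fpca_ss(dat, n.marginal.knots = 10)
#' str(pcs$fpc1)
#' }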
fpca_ss <- function(dat, n.marginal.knots=NULL, marginal.knots=NULL){
cov.est <- estimate_cov_function(dat, n.marginal.knots = n.marginal.knots, marginal.knots=marginal.knots)
eig.est <- estimate_eigenfunctions(cov.est)
fpc1 <- extract_pcf(nharm = 1, method = 'ss', eig.est)
fpc2 <- extract_pcf(nharm = 2, method = 'ss', eig.est)
return(list(fpc1 = fpc1, fpc2 = fpc2))
}
|
/R/fpca_ss.R
|
no_license
|
dan410/SimStudy_eigenfunction_estimation
|
R
| false | false | 955 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract.R
\name{extract_1d}
\alias{extract_1d}
\title{Extract 1d Values}
\usage{
extract_1d(core_table = NULL, input = NULL, data_location = NULL)
}
\arguments{
\item{core_table}{the core table from make_core}
\item{input}{the HIC code for the variable of interest}
\item{data_location}{the column name that stores the primary data for this
variable}
}
\value{
a tibble with HIC data for a specified variable
}
\description{
This function extracts the correct column from the CC-HIC database
depending upon what type of data is called for
}
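\examples{
# Hypothetical usage sketch: the connection object, HIC code and column name
# below are placeholders and are not taken from the package documentation.
\dontrun{
core <- make_core(connection)
hr <- extract_1d(core, input = "NIHR_HIC_ICU_0108", data_location = "integer")
}
}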
|
/man/extract_1d.Rd
|
no_license
|
CC-HIC/inspectEHR
|
R
| false | true | 621 |
rd
|
main <- function() {
library(sqldf)
data <- read.csv.sql("household_power_consumption.txt", sql = "select * from file where Date = '1/2/2007' OR Date = '2/2/2007'", eol = "\n", header = TRUE, sep = ";")
data$DateTime <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
png(filename = "plot2.png")
plot(data$DateTime, data$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
}
|
/plot2.R
|
no_license
|
pnwhitney/ExData_Plotting1
|
R
| false | false | 525 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot_util.R
\name{geom_txt}
\alias{geom_txt}
\title{geom_txt}
\usage{
geom_txt(..., family = theme_get()$text$family, size = 3,
colour = "#2b2b2b")
}
\arguments{
\item{...}{Passed to \code{geom_text}.}
\item{family}{Font family. Defaults to theme-defined family.}
\item{size}{Font size. Defaults to 3.}
\item{colour}{Font colour. Defaults to \code{#2b2b2b}}
}
\description{
Helper for \code{geom_text} with some defaults.
}
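\examples{
# Illustrative usage sketch with the built-in mtcars data; assumes ggplot2 is
# attached and that the defaults documented above are acceptable.
\dontrun{
library(ggplot2)
ggplot(mtcars, aes(wt, mpg, label = rownames(mtcars))) +
  geom_point() +
  geom_txt()
}
}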
|
/man/geom_txt.Rd
|
no_license
|
arbelt/azwmisc
|
R
| false | true | 510 |
rd
|
library(testthat)
library(BrokenAdaptiveRidge)
test_check("BrokenAdaptiveRidge")
|
/tests/testthat.R
|
permissive
|
yuxitian/BrokenAdaptiveRidge
|
R
| false | false | 82 |
r
|
library(shiny)
CohortEffect <- function(x1,
x2,
min.meaningful.effect) {
dat <- data.frame(y=c(x1,x2),
d2=c(rep(0, length(x1)), rep(1, length(x2))))
res <- lm(y ~ d2, data=dat)
coefs <- summary(res)$coefficients
effect.mean <- coefs[2,1]
effect.sd <- coefs[2,2]
xmin <- min(c(-min.meaningful.effect, effect.mean - 3*effect.sd))
xmax <- max(c(min.meaningful.effect, effect.mean + 3*effect.sd))
ymax <- max(dnorm(0, sd=effect.sd))
prob.near.zero <- pnorm(min.meaningful.effect, mean=effect.mean, sd=effect.sd) -
pnorm(-min.meaningful.effect, mean=effect.mean, sd=effect.sd)
prob.positive <- 1 - pnorm(min.meaningful.effect, mean=effect.mean, sd=effect.sd)
prob.negative <- pnorm(-min.meaningful.effect, mean=effect.mean, sd=effect.sd)
return(list(effect.mean=effect.mean,
effect.sd=effect.sd,
min.meaningful.effect=min.meaningful.effect,
plot.xmin=xmin,
plot.xmax=xmax,
plot.ymax=ymax,
prob.near.zero=prob.near.zero,
prob.positive=prob.positive,
prob.negative=prob.negative,
n=length(x1)+length(x2)))
}
PlotCohortEffect <- function(cohort.effect,
col="RoyalBlue",
xlab="seconds") {
plot(0,0, type='n',
main=paste("Cohort Effects:", ifelse(cohort.effect$effect.mean > 0, "Comparison", "Baseline"), "Cohort Did Better"),
sub=paste("n =", cohort.effect$n, " mean =", signif(cohort.effect$effect.mean, 2)),
xlim=c(cohort.effect$plot.xmin, cohort.effect$plot.xmax),
ylim=c(0, cohort.effect$plot.ymax),
xlab=xlab,
ylab="",
yaxt='n')
d1 <- function(x) dnorm(x, mean=cohort.effect$effect.mean, sd=cohort.effect$effect.sd)
polygon(x=c(cohort.effect$plot.xmin,
seq(from=cohort.effect$plot.xmin, to=cohort.effect$plot.xmax, length.out=101),
cohort.effect$plot.xmax),
y=c(0, d1(seq(from=cohort.effect$plot.xmin, to=cohort.effect$plot.xmax, length.out=101)), 0),
col=col)
curve(d1,
from=cohort.effect$plot.xmin,
to=cohort.effect$plot.xmax,
add=TRUE,
lwd=2)
abline(v=c(-cohort.effect$min.meaningful.effect, cohort.effect$min.meaningful.effect),
lwd=2,
lty=2)
legend(.98 * cohort.effect$plot.xmin + .02 * cohort.effect$plot.ymax,
.8*cohort.effect$plot.ymax,
c(paste("positive: ", round(100*cohort.effect$prob.positive), "%", sep=""),
paste("near zero: ", round(100*cohort.effect$prob.near.zero), "%", sep=""),
paste("negative: ", round(100*cohort.effect$prob.negative), "%", sep="")))
}
ObservationsNeeded <- function(x1, x2, min.meaningful.effect, confidence) {
stopifnot(confidence > .5)
y <- c(x1, x2)
x <- c(rep(0, length(x1)), rep(1, length(x2)))
res <- lm(y ~ x)
post.mean <- summary(res)$coefficients[2,1]
data.sd <- summary(res)$coefficients[2,2] * sqrt(length(x1) + length(x2))
#return(paste("data.sd:", data.sd))
#return(paste("post.mean:", post.mean))
#return(paste("confidence:", confidence))
if(post.mean > min.meaningful.effect) {
# case: positive effect
# return("DEBUG: positive effect")
f <- function(N) pnorm((post.mean - min.meaningful.effect) * sqrt(N) / data.sd) - confidence / 100
} else if(post.mean < -min.meaningful.effect) {
# case: negative effect
# return("DEBUG: negative effect")
f <- function(N) pnorm((-min.meaningful.effect - post.mean) * sqrt(N) / data.sd) - confidence / 100
} else {
# case: no meaningful effect
# return("DEBUG: no meaningful effect")
f <- function(N) pnorm((min.meaningful.effect - post.mean) * sqrt(N) / data.sd) -
pnorm((-min.meaningful.effect - post.mean) * sqrt(N) / data.sd) - confidence / 100
}
  root.res <- tryCatch(uniroot(f, interval=c(2, 1e10)), error=function(e) NULL)
  if(is.null(root.res)) return("Unable to estimate number of needed observations.")
if(root.res$estim.prec > 1e-3) {
warning("Unable to estimate number of needed observations")
return(Inf)
} else {
return(ceiling(root.res$root))
}
}
ObservationsNeededMessage <- function(current.obs, needed.obs, desired.certainty) {
if(current.obs >= needed.obs) {
return(paste("You (should) already have already exceeded ", desired.certainty, "% certainty. If not, you're very close.", sep=""))
} else {
return(paste("If the mean so far is correct, you will need a total of ", needed.obs,
" (", needed.obs-current.obs, " additional) observations to reach ",
desired.certainty, "% certainty.",
sep=""))
}
}
ReadX <- function(start.date, end.date, cohort) {
x <- read.table(
paste("http://172.31.2.98/shiny-data/ab-data.php?start=", start.date,
"&end=", end.date,
"&cohort=", cohort, sep=""))[,1]
x <- x[x < 60 * 60 * 24]
return(x)
}
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
output$obs.needed <- renderText({
# Read the data
x1 <- read.delim('data1.tsv')[,1]
x2 <- read.delim('data2.tsv')[,1]
needed.obs <- ObservationsNeeded(x1, x2, input$min.meaningful.effect, input$confidence)
    ObservationsNeededMessage(current.obs=length(x1) + length(x2),
                              needed.obs=needed.obs,
                              desired.certainty=input$confidence)
})
output$distPlot <- renderPlot({
# Read the data
x1 <- read.delim('data1.tsv')[,1]
x2 <- read.delim('data2.tsv')[,1]
ce <- CohortEffect(x1, x2, input$min.meaningful.effect)
PlotCohortEffect(ce)
})
# output$densityPlot <- renderPlot({
# # Read the data
# x1 <- ReadX(input$start.date, end.date=input$end.date, cohort=input$cohort1)
# x2 <- ReadX(input$start.date, end.date=input$end.date, cohort=input$cohort2)
#
# plot(1, 1,
# type='n',
# xlim=c(min(c(x1, x2)), max(c(x1, x2))),
# ylim=c(0, max(c(density(x1)$y, density(x2)$y))),
# main="Distributions of times for the Cohorts",
# xlab="seconds",
# ylab="")
# lines(density(x1), lwd=3)
# lines(density(x2), lwd=3, lty=2)
# legend(.6 * max(c(x1, x2)),
# max(c(density(x1)$y, density(x2)$y)),
# legend=paste("cohort", c(input$cohort1, input$cohort2)),
# lwd=3, lty=1:2)
# })
})
|
/demo/ab/server.R
|
no_license
|
shaptonstahl/abtest
|
R
| false | false | 6,607 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom.mean.R
\name{geom.mean}
\alias{geom.mean}
\title{Geometric Mean}
\usage{
geom.mean(x)
}
\arguments{
\item{x}{a numeric vector for which geometric mean computations shall be performed.}
}
\description{
This function computes the geometric mean of a numeric input vector \code{x}.
}
\examples{
x <- 1:10
geom.mean(x)
}
\author{
Hajk-Georg Drost
}
|
/man/geom.mean.Rd
|
no_license
|
AcaDemIQ/myTAI
|
R
| false | true | 431 |
rd
|
This dataset contains only the R code from https://huggingface.co/datasets/bigcode/the-stack-v2, with the content already downloaded and ready to use.
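For example, a downloaded shard can be read directly from R (a minimal sketch: the file name below is a placeholder, and the Parquet layout is assumed to expose the columns shown above, such as content and path):
library(arrow)
shard <- read_parquet("r-code-00000-of-00001.parquet") # placeholder file name
cat(shard$content[[1]]) # print the first stored R source file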