| column | type | range / classes |
|---|---|---|
| content | large_string | lengths 0 to 6.46M |
| path | large_string | lengths 3 to 331 |
| license_type | large_string | 2 classes |
| repo_name | large_string | lengths 5 to 125 |
| language | large_string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4 to 6.46M |
| extension | large_string | 75 classes |
| text | string | lengths 0 to 6.46M |
\name{check.validity.NNP}
\alias{check.validity.NNP}
\title{Whether a given matrix is a concurrence matrix or not}
\description{This function checks whether a v by v matrix is a concurrence matrix. Specifically, it
checks whether, for each row of the given matrix, the sum of the off-diagonal elements is (k-1) times the diagonal element. Applicable to proper binary incomplete block designs only. If the condition is satisfied, it returns a value of 1; otherwise it returns 0.}
\usage{check.validity.NNP(NNP,k)}
\arguments{
\item{NNP}{a v by v matrix}
\item{k}{block size}
}
\value{
A value of 1 for valid or 0 for not valid.
}
\author{B N Mandal <mandal.stat@gmail.com>}
\keyword{internal}
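For illustration, a minimal R sketch of the validity check described above; the function name and the example matrix are hypothetical, and the actual implementation lives in the cran/ibd sources:

```r
# Hypothetical helper mirroring check.validity.NNP (not the cran/ibd source):
# for a proper binary design with block size k, each row of the concurrence
# matrix NNP should have an off-diagonal sum equal to (k - 1) times its diagonal.
check_concurrence_sketch <- function(NNP, k) {
  off_diag_sums <- rowSums(NNP) - diag(NNP)  # sum of off-diagonal elements per row
  if (all(off_diag_sums == (k - 1) * diag(NNP))) 1 else 0
}

# Concurrence matrix of the BIBD with v = 3, b = 3, k = 2
# (blocks {1,2}, {1,3}, {2,3}): r = 2 on the diagonal, lambda = 1 elsewhere.
NNP <- matrix(c(2, 1, 1,
                1, 2, 1,
                1, 1, 2), nrow = 3, byrow = TRUE)
check_concurrence_sketch(NNP, k = 2)  # returns 1 (valid)
```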
path: /man/check.validity.NNP.Rd | license_type: no_license | repo_name: cran/ibd | language: R | is_vendor: false | is_generated: false | length_bytes: 716 | extension: rd
# Looks up the pre-computed ranking data frame that matches `rankingFunc`,
# loads it from disk and appends a `rank` column numbering the rows in their
# stored order. Requires dplyr for mutate(); each .RData file provides the
# data frame referenced in its branch.
getRankTable <- function(rankingFunc = "Batting average ranking"){
  if (rankingFunc == "Batting average ranking"){
    load("./projectDataFrames/ranking/Batsman-NormalRank.RData")
    a <- mutate(batsman_normal_rank, rank = 1:nrow(batsman_normal_rank))
  }
  else if (rankingFunc == "Batting MVPI ranking"){
    load("./projectDataFrames/ranking/Batsman-MVPI.RData")
    a <- mutate(batsman_MVPI, rank = 1:nrow(batsman_MVPI))
  }
  else if (rankingFunc == "Batting DPI ranking"){
    load("./projectDataFrames/ranking/Batsman-DPIRank.RData")
    a <- mutate(dpiRanks, rank = 1:nrow(dpiRanks))
  }
  else if (rankingFunc == "Bowling average ranking"){
    load("./projectDataFrames/ranking/Bowler-NormalRank.RData")
    a <- mutate(bowler_normal_rank, rank = 1:nrow(bowler_normal_rank))
  }
  else if (rankingFunc == "Bowling MVPI ranking"){
    load("./projectDataFrames/ranking/Bowler-MVPI.RData")
    a <- mutate(bowler_MVPI, rank = 1:nrow(bowler_MVPI))
  }
  else if (rankingFunc == "Bowling DPI ranking"){
    load("./projectDataFrames/ranking/Bowler-DPIRank.RData")
    a <- mutate(dpiRanks, rank = 1:nrow(dpiRanks))
  }
  # The value of the matched branch (the ranked data frame `a`) is returned.
}
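A brief usage sketch, assuming dplyr is attached and the .RData files exist under ./projectDataFrames/ranking/ (the variable names below are illustrative):

```r
library(dplyr)

# Pick one of the supported ranking names; the matching table is returned
# with a `rank` column added.
bowler_mvpi_table <- getRankTable("Bowling MVPI ranking")
head(bowler_mvpi_table)
```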
path: /ranking/getRankTable.R | license_type: no_license | repo_name: amolmishra23/IPL_CDA | language: R | is_vendor: false | is_generated: false | length_bytes: 1,180 | extension: r
library(cvms)
context("validate()")
# NOTICE:
# Numbers tested are the results I got and not "what should be"
# This will allow me to see if something changes, but it shouldn't give false confidence.
test_that("binomial model work with validate()", {
# skip_test_if_old_R_version()
# Load data and partition it
xpectr::set_test_seed(2)
dat <- groupdata2::partition(
participant.scores,
p = 0.8,
cat_col = "diagnosis",
id_col = "participant",
list_out = FALSE
)
Vbinom <- validate(
train_data = dat,
formulas = "diagnosis~score",
test_data = NULL,
partitions_col = ".partitions",
family = "binomial",
REML = FALSE,
verbose = FALSE,
positive = 1
)
expect_equal(Vbinom$AUC, 0.833, tolerance = 1e-3)
expect_equal(Vbinom$`Lower CI`, 0.475, tolerance = 1e-3)
expect_equal(Vbinom$`Upper CI`, 1, tolerance = 1e-3)
expect_equal(Vbinom$Kappa, 0.7272727, tolerance = 1e-3)
expect_equal(Vbinom$Sensitivity, 0.6666667, tolerance = 1e-3)
expect_equal(Vbinom$Specificity, 1, tolerance = 1e-3)
expect_equal(Vbinom$`Pos Pred Value`, 1, tolerance = 1e-3)
expect_equal(Vbinom$`Neg Pred Value`, 0.8571429, tolerance = 1e-3)
expect_equal(Vbinom$F1, 0.8, tolerance = 1e-3)
expect_equal(Vbinom$Prevalence, 0.3333333, tolerance = 1e-3)
expect_equal(Vbinom$`Detection Rate`, 0.2222222, tolerance = 1e-3)
expect_equal(Vbinom$`Detection Prevalence`,
0.2222222,
tolerance = 1e-3
)
expect_equal(Vbinom$`Balanced Accuracy`, 0.8333333,
tolerance =
1e-3
)
expect_equal(Vbinom$`Convergence Warnings`, 0)
expect_equal(Vbinom$Dependent, "diagnosis")
expect_equal(Vbinom$Fixed, "score")
# Enter sub tibbles
expect_is(Vbinom$Predictions[[1]], "tbl_df")
expect_is(Vbinom$ROC[[1]], "roc")
expect_equal(
colnames(Vbinom$Predictions[[1]]),
c("Observation", "Target", "Prediction", "Predicted Class")
)
expect_equal(nrow(Vbinom$Predictions[[1]]), 9)
expect_equal(
names(Vbinom$ROC[[1]]),
c(
"percent", "sensitivities", "specificities", "thresholds",
"direction", "cases", "controls", "fun.sesp", "auc", "call",
"original.predictor", "original.response", "predictor", "response",
"levels"
)
)
expect_equal(
Vbinom$ROC[[1]]$direction,
">"
)
expect_equal(
Vbinom$ROC[[1]]$thresholds,
c(Inf, 0.882622758109746, 0.827264825824089, 0.75965587124329,
0.725216199854617, 0.648987905756078, 0.540457154631025, 0.426633976157444,
0.224265219974917, -Inf),
tolerance = 1e-5
)
expect_equal(
Vbinom$ROC[[1]]$sensitivities,
c(1, 1, 1, 1, 0.666666666666667, 0.666666666666667, 0.666666666666667,
0.666666666666667, 0.333333333333333, 0),
tolerance = 1e-5
)
expect_equal(
Vbinom$ROC[[1]]$specificities,
c(0, 0.166666666666667, 0.333333333333333, 0.5, 0.5, 0.666666666666667,
0.833333333333333, 1, 1, 1),
tolerance = 1e-5
)
expect_equal(as.numeric(Vbinom$ROC[[1]]$auc),
0.833333333333333,
tolerance = 1e-5
)
# Test Process
expect_true(
as.character(Vbinom$Process[[1]]) %in%
paste0("---\nProcess Information\n---\nTarget column: target\nPredi",
"ction column: prediction\nFamily / type: Binomial\nClasses: ",
"0, 1\nPositive class: 0\nCutoff: 0.5\nProbabilities are of c",
"lass: 1\nProbabilities < 0.5 are considered: 0\nProbabilitie",
"s >= 0.5 are considered: 1\nLocale used when sorting class l",
"evels (LC_ALL): \n ",
c("en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8",
"C/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8",
Sys.getlocale()),
"\nTarget counts: total=9, 0=3, 1=6\nPro",
"bability summary: mean: 0.615, median: 0.719, range: [0.097,",
" 0.899], SD: 0.262, IQR: 0.286\n---"))
})
test_that("binomial model with metrics list work with validate()", {
testthat::skip_on_cran()
# Load data and partition it
xpectr::set_test_seed(2)
dat <- groupdata2::partition(
participant.scores,
p = 0.8,
cat_col = "diagnosis",
id_col = "participant",
list_out = FALSE
)
Vbinom <- validate(
train_data = dat,
formulas = "diagnosis~score",
test_data = NULL,
partitions_col = ".partitions",
family = "binomial",
REML = FALSE,
metrics = list(
"Accuracy" = TRUE,
"Lower CI" = FALSE
),
verbose = FALSE,
positive = 1
)
expect_equal(Vbinom$`Balanced Accuracy`, 0.8333333,
tolerance = 1e-3
)
expect_equal(Vbinom$Accuracy, 0.8888889,
tolerance = 1e-3
)
expect_equal(
colnames(Vbinom),
c(
"Fixed", "Balanced Accuracy", "Accuracy", "F1", "Sensitivity", "Specificity",
"Pos Pred Value", "Neg Pred Value", "AUC", "Upper CI", "Kappa",
"MCC", "Detection Rate", "Detection Prevalence", "Prevalence",
"Predictions", "ROC", "Confusion Matrix", "Coefficients", "Convergence Warnings",
"Singular Fit Messages", "Other Warnings", "Warnings and Messages",
"Process", "Model", "Dependent"
)
)
})
test_that("binomial mixed model work with validate()", {
# skip_test_if_old_R_version()
# Load data and fold it
xpectr::set_test_seed(7)
dat <- groupdata2::partition(
participant.scores,
p = 0.7,
cat_col = "diagnosis",
id_col = "participant",
list_out = FALSE
)
# Making sure the partitioning is not the error
expect_equal(
dat$.partitions,
factor(c(2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2)))
Vbinom <-
validate(
train_data = dat,
formulas = "diagnosis~score + (1|session)",
test_data = NULL,
partitions_col = ".partitions",
family = "binomial",
REML = FALSE,
verbose = FALSE,
positive = 1
)
expect_equal(Vbinom$AUC, 0.764, tolerance = 1e-3)
expect_equal(Vbinom$`Lower CI`, 0.475, tolerance = 1e-3)
expect_equal(Vbinom$`Upper CI`, 1, tolerance = 1e-3)
expect_equal(Vbinom$Kappa, 0.167, tolerance = 1e-3)
expect_equal(Vbinom$Sensitivity, 0.5, tolerance = 1e-3)
expect_equal(Vbinom$Specificity, 0.667, tolerance = 1e-3)
expect_equal(Vbinom$`Pos Pred Value`, 0.6, tolerance = 1e-3)
expect_equal(Vbinom$`Neg Pred Value`, 0.571, tolerance = 1e-3)
expect_equal(Vbinom$F1, 0.545, tolerance = 1e-3)
expect_equal(Vbinom$Prevalence, 0.5, tolerance = 1e-3)
expect_equal(Vbinom$`Detection Rate`, 0.25, tolerance = 1e-3)
expect_equal(Vbinom$`Detection Prevalence`,
0.417,
tolerance = 1e-3
)
expect_equal(Vbinom$`Balanced Accuracy`, 0.583,
tolerance = 1e-3
)
expect_equal(Vbinom$`Convergence Warnings`, 0)
expect_equal(Vbinom$`Singular Fit Messages`, 0)
expect_equal(Vbinom$Dependent, "diagnosis")
expect_equal(Vbinom$Fixed, "score")
expect_equal(Vbinom$Random, "(1|session)")
# Enter sub tibbles
expect_is(Vbinom$Predictions[[1]], "tbl_df")
expect_is(Vbinom$ROC[[1]], "roc")
expect_equal(
colnames(Vbinom$Predictions[[1]]),
c("Observation", "Target", "Prediction", "Predicted Class")
)
expect_equal(nrow(Vbinom$Predictions[[1]]), 12)
expect_equal(
names(Vbinom$ROC[[1]]),
c("percent", "sensitivities", "specificities", "thresholds",
"direction", "cases", "controls", "fun.sesp", "auc", "call",
"original.predictor", "original.response", "predictor", "response",
"levels"
)
)
expect_equal(
Vbinom$ROC[[1]]$direction,
">"
)
expect_equal(
Vbinom$ROC[[1]]$thresholds,
c(Inf, 0.99999933823515, 0.999619864886364, 0.998594470992238,
0.983056382137284, 0.833659423893193, 0.349577298215006, 3.80808821466656e-07,
1.13438806464474e-07, 2.9859423313853e-08, 5.26142227038134e-11,
-Inf),
tolerance = 1e-5
)
expect_equal(
Vbinom$ROC[[1]]$sensitivities,
c(1, 1, 1, 0.833333333333333, 0.833333333333333, 0.666666666666667,
0.5, 0.5, 0.333333333333333, 0.333333333333333, 0.166666666666667,
0),
tolerance = 1e-5
)
expect_equal(
Vbinom$ROC[[1]]$specificities,
c(0, 0.166666666666667, 0.333333333333333, 0.5, 0.666666666666667,
0.666666666666667, 0.666666666666667, 0.833333333333333, 0.833333333333333,
1, 1, 1),
tolerance = 1e-5
)
expect_equal(as.numeric(Vbinom$ROC[[1]]$auc),
0.763888888888889,
tolerance = 1e-5
)
# Test Process
expect_true(
as.character(Vbinom$Process[[1]]) %in%
paste0("---\nProcess Information\n---\nTarget column: target\nPredi",
"ction column: prediction\nFamily / type: Binomial\nClasses: ",
"0, 1\nPositive class: 0\nCutoff: 0.5\nProbabilities are of c",
"lass: 1\nProbabilities < 0.5 are considered: 0\nProbabilitie",
"s >= 0.5 are considered: 1\nLocale used when sorting class l",
"evels (LC_ALL): \n ",
c("en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8",
"C/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8",
Sys.getlocale()),
"\nTarget counts: total=12, 0=6, 1=6\nPro",
"bability summary: mean: 0.555, median: 0.834, range: [0, 1], ",
"SD: 0.497, IQR: 0.999\n---"))
})
test_that("binomial model work with test_data in validate()", {
testthat::skip_on_cran()
# Load data and partition it
xpectr::set_test_seed(1)
dat <- groupdata2::partition(
participant.scores,
p = 0.8,
cat_col = "diagnosis",
id_col = "participant",
list_out = TRUE
)
Vbinom <-
validate(
train_data = dat[[1]],
formulas = "diagnosis~score",
test_data = dat[[2]],
family = "binomial",
REML = FALSE,
verbose = FALSE,
positive = 1
)
expect_equal(Vbinom$AUC, 0.944, tolerance = 1e-3)
expect_equal(Vbinom$`Lower CI`, 0.79, tolerance = 1e-3)
expect_equal(Vbinom$`Upper CI`, 1, tolerance = 1e-3)
expect_equal(Vbinom$Kappa, 0.7272727, tolerance = 1e-3)
expect_equal(Vbinom$Sensitivity, 0.6666667, tolerance = 1e-3)
expect_equal(Vbinom$Specificity, 1, tolerance = 1e-3)
expect_equal(Vbinom$`Pos Pred Value`, 1, tolerance = 1e-3)
expect_equal(Vbinom$`Neg Pred Value`, 0.8571429, tolerance = 1e-3)
expect_equal(Vbinom$F1, 0.8, tolerance = 1e-3)
expect_equal(Vbinom$Prevalence, 0.3333333, tolerance = 1e-3)
expect_equal(Vbinom$`Detection Rate`, 0.2222222, tolerance = 1e-3)
expect_equal(Vbinom$`Detection Prevalence`,
0.2222222,
tolerance = 1e-3
)
expect_equal(Vbinom$`Balanced Accuracy`, 0.8333333,
tolerance =
1e-3
)
expect_equal(Vbinom$`Convergence Warnings`, 0)
expect_equal(Vbinom$Dependent, "diagnosis")
expect_equal(Vbinom$Fixed, "score")
# Enter sub tibbles
expect_is(Vbinom$Predictions[[1]], "tbl_df")
expect_is(Vbinom$ROC[[1]], "roc")
expect_equal(
colnames(Vbinom$Predictions[[1]]),
c("Observation", "Target", "Prediction", "Predicted Class")
)
expect_equal(nrow(Vbinom$Predictions[[1]]), 9)
expect_equal(length(Vbinom$ROC), 1)
expect_equal(length(Vbinom$ROC[[1]]$sensitivities), 9)
expect_equal(
Vbinom$ROC[[1]]$sensitivities,
c(1, 1, 1, 1, 1, 0.666666666666667, 0.666666666666667, 0.333333333333333, 0),
tolerance = 1e-5
)
expect_equal(
Vbinom$ROC[[1]]$specificities,
c(0, 0.166666666666667, 0.333333333333333, 0.5, 0.833333333333333,
0.833333333333333, 1, 1, 1),
tolerance = 1e-5
)
expect_equal(
Vbinom$ROC[[1]]$thresholds,
c(Inf, 0.848041386220925, 0.802650625978057, 0.734648936984941,
0.679450474597164, 0.618752367349243, 0.520681562211535, 0.305300064306695,
-Inf),
tolerance = 1e-5
)
})
test_that("gaussian model with validate()", {
# skip_test_if_old_R_version()
# Load data and fold it
xpectr::set_test_seed(4)
dat <- groupdata2::partition(
participant.scores,
p = 0.7,
cat_col = "diagnosis",
id_col = "participant",
list_out = FALSE
)
Vgauss <-
validate(
train_data = dat,
formulas = "score~diagnosis+(1|session)",
test_data = NULL,
partitions_col = ".partitions",
family = "gaussian",
metrics = list("r2m" = TRUE, "r2c" = TRUE),
REML = FALSE,
verbose = FALSE
)
expect_equal(Vgauss$RMSE, 7.75, tolerance = 1e-3)
expect_equal(Vgauss$r2m, 0.305, tolerance = 1e-3)
expect_equal(Vgauss$r2c, 0.749, tolerance = 1e-3)
expect_equal(Vgauss$AIC, 149, tolerance = 1e-3)
expect_equal(Vgauss$AICc, 152, tolerance = 1e-3)
expect_equal(Vgauss$BIC, 152.5377, tolerance = 1e-3)
expect_equal(Vgauss$`Convergence Warnings`, 0)
expect_equal(Vgauss$`Singular Fit Messages`, 0)
expect_equal(Vgauss$Dependent, "score")
expect_equal(Vgauss$Fixed, "diagnosis")
expect_equal(Vgauss$Random, "(1|session)")
expect_true(
as.character(Vgauss$Process[[1]]) %in%
paste0("---\nProcess Information\n---\nTarget column: target\nPredi",
"ction column: prediction\nFamily / type: Gaussian\nTarget su",
"mmary: mean: 37.417, median: 37.5, range: [10, 67], SD: 18.7",
"01, IQR: 23\nPrediction summary: mean: 43.417, median: 42.80",
"7, range: [16.173, 69.441], SD: 17.635, IQR: 22.5\nLocale (L",
"C_ALL): \n ",
c("en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8",
"C/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8",
Sys.getlocale()),
"\n---"))
})
test_that("gaussian model with metrics list works with validate()", {
testthat::skip_on_cran()
# Load data and fold it
xpectr::set_test_seed(4)
dat <- groupdata2::partition(
participant.scores,
p = 0.7,
cat_col = "diagnosis",
id_col = "participant",
list_out = FALSE
)
Vgauss <-
validate(
train_data = dat,
formulas = "score~diagnosis+(1|session)",
test_data = NULL,
partitions_col = ".partitions",
family = "gaussian",
REML = FALSE,
metrics = list(
"RMSE" = FALSE,
"r2m" = TRUE
),
verbose = FALSE
)
expect_equal(Vgauss$r2m, 0.305, tolerance = 1e-3)
expect_equal(
colnames(Vgauss),
c("Fixed", "MAE", "NRMSE(IQR)", "RRSE", "RAE", "RMSLE", "r2m",
"AIC", "AICc", "BIC", "Predictions", "Coefficients", "Convergence Warnings",
"Singular Fit Messages", "Other Warnings", "Warnings and Messages",
"Process", "Model", "Dependent", "Random")
)
})
test_that("Right glm model used in validate()", {
# skip_test_if_old_R_version()
# Create data that should be easy to model
xpectr::set_test_seed(7)
dat <- groupdata2::partition(
participant.scores,
p = 0.8,
cat_col = "diagnosis",
id_col = "participant",
list_out = FALSE
)
validated <-
validate(
train_data = dat,
formulas = "diagnosis~score",
partitions_col = ".partitions",
family = "binomial",
positive = 1
)
same_model <-
glm(diagnosis ~ score, data = dat[dat$.partitions == 1, ], family = "binomial")
expect_equal(validated$Model[[1]]$coefficients,
same_model$coefficients,
tolerance = 1e-3
)
expect_equal(validated$Model[[1]]$residuals,
same_model$residuals,
tolerance = 1e-3
)
expect_equal(validated$Model[[1]]$aic, same_model$aic, tolerance = 1e-3)
expect_equal(validated$Model[[1]]$effects, same_model$effects,
tolerance =
1e-3
)
})
test_that("Right glmer model used in validate()", {
# skip_test_if_old_R_version()
# Create data that should be easy to model
xpectr::set_test_seed(7)
dat <- groupdata2::partition(
participant.scores,
p = 0.8,
cat_col = "diagnosis",
id_col = "participant",
list_out = FALSE
)
validated <-
validate(
train_data = dat,
formulas = "diagnosis~score+(1|session)",
partitions_col = ".partitions",
family = "binomial",
positive = 1
)
same_model <-
lme4::glmer(diagnosis ~ score + (1 | session),
data = dat[dat$.partitions == 1, ],
family = "binomial"
)
expect_equal(validated$Model[[1]]@resp, same_model@resp, tolerance = 1e-3)
# expect_equal(validated$Model[[1]]@call, same_model@call, tolerance = 1e-3) # TODO: not working?
expect_equal(validated$Model[[1]]@optinfo$val,
same_model@optinfo$val,
tolerance = 1e-3
)
expect_equal(validated$Model[[1]]@beta, same_model@beta, tolerance = 1e-3)
expect_equal(
validated$Predictions[[1]]$Target,
c(0, 0, 0, 1, 1, 1, 1, 1, 1)
)
})
test_that("model using dot in formula ( y ~ . ) works with validate()", {
# skip_test_if_old_R_version()
# We wish to test if using the dot "y~." method in the model formula
# correctly leaves out the partitions column (.partitions).
# Create data that should be easy to model
xpectr::set_test_seed(7)
dat <- groupdata2::partition(
participant.scores,
p = 0.8,
cat_col = "diagnosis",
id_col = "participant",
list_out = FALSE
) %>%
dplyr::select(-c(participant, session))
# Expect no warnings
# https://stackoverflow.com/questions/22003306/is-there-something-in-testthat-like-expect-no-warnings
expect_warning(validate(dat,
formulas = c("diagnosis~."),
family = "binomial",
partitions_col = ".partitions",
REML = FALSE, verbose = FALSE
),
regexp = NA
)
# Expect no warnings
# https://stackoverflow.com/questions/22003306/is-there-something-in-testthat-like-expect-no-warnings
expect_warning(validate(dat,
formulas = c("score~."),
partitions_col = ".partitions",
family = "gaussian",
REML = FALSE, verbose = FALSE
),
regexp = NA
)
})
test_that("Singular fit messages counted in validate()", {
# skip_test_if_old_R_version()
# Create data that should be easy to model
xpectr::set_test_seed(7)
dat <- groupdata2::partition(
participant.scores,
p = 0.8,
cat_col = "diagnosis",
id_col = "participant",
list_out = FALSE
)
expect_message(validated <-
validate(
train_data = dat,
formulas = "diagnosis~score+(1|session)+(1|participant)",
partitions_col = ".partitions",
family = "binomial"
), "Boundary \\(Singular\\) Fit Message")
expect_equal(validated$`Singular Fit Messages`, 1)
})
test_that("the expected errors are thrown by validate()", {
# Load data and fold it
xpectr::set_test_seed(1)
dat <- participant.scores
expect_error(
xpectr::strip_msg(validate(dat, dat,
formulas = c("diagnosis~score", "diagnosis~age"),
family = "fdsfs",
REML = FALSE, verbose = FALSE,
positive = 1
)),
xpectr::strip(paste0(
"1 assertions failed:\n * Variable 'family': Must be element",
" of set\n * {'gaussian','binomial','multinomial'}, but is 'f",
"dsfs'."
)),
fixed = TRUE
)
expect_error(suppressWarnings(
validate(
train_data = dat,
test_data = dplyr::sample_frac(dat, 0.2),
formulas = c("diagnosis~score*age+(1|session)"),
family = "gaussian",
REML = FALSE,
verbose = FALSE,
control = lme4::lmerControl(
optimizer = "bobyqa",
optCtrl = list(maxfun = 10)
),
err_nc = TRUE
)
),
"Model did not converge.",
fixed = TRUE
)
})
test_that("verbose reports the correct model functions in validate()", {
testthat::skip_on_cran()
# Load data and fold it
xpectr::set_test_seed(1)
dat <- groupdata2::partition(participant.scores,
p = .75,
cat_col = "diagnosis",
id_col = "participant"
)
if (!is_tibble_v2() && is_newer_lme4()){
# Test the list of verbose messages
# glm()
## Testing 'validate(dat[[1]], dat[[2]], formulas = c(...' ####
## Initially generated by xpectr
xpectr::set_test_seed(42)
# Testing side effects
# Assigning side effects
side_effects_12059 <- xpectr::capture_side_effects(validate(dat[[1]], dat[[2]],
formulas = c("diagnosis~score"),
family = "binomial",
REML = FALSE, verbose = TRUE,
positive = 1
), reset_seed = TRUE)
expect_equal(
xpectr::strip(side_effects_12059[['warnings']]),
xpectr::strip(character(0)),
fixed = TRUE)
expect_equal(
xpectr::strip(side_effects_12059[['messages']]),
xpectr::strip(c("Will validate 1 models.\n", "---\nvalidate(): cross_validate(): Used glm() to fit the model.'\nFor:\nFormula: diagnosis~score\nFold column: .partitions\nFold: 2\nHyperparameters: REML : FALSE, control : list(list(optimizer = c(\"bobyqa\", \"Nelder_Mead\"), restart_edge = FALSE, boundary.tol = 1e-05, calc.derivs = TRUE, use.last.params = FALSE, checkControl = list(check.nobs.vs.rankZ = \"ignore\", check.nobs.vs.nlev = \"stop\", check.nlev.gtreq.5 = \"ignore\", check.nlev.gtr.1 = \"stop\", check.nobs.vs.nRE = \"stop\", check.rankX = \"message+drop.cols\", check.scaleX = \"warning\", check.formula.LHS = \"stop\", check.response.not.const = \"stop\"), checkConv = list(check.conv.grad = list(action = \"warning\", tol = 0.002, \n relTol = NULL), check.conv.singular = list(action = \"message\", tol = 1e-04), check.conv.hess = list(action = \"warning\", tol = 1e-06)), optCtrl = list(), tolPwrss = 1e-07, compDev = TRUE, nAGQ0initStep = TRUE)), model_verbose : TRUE, family : binomial, is_special_fn : TRUE\n")),
fixed = TRUE)
# Assigning output
output_12059 <- xpectr::suppress_mw(validate(dat[[1]], dat[[2]],
formulas = c("diagnosis~score"),
family = "binomial",
REML = FALSE, verbose = TRUE,
positive = 1
))
# Testing class
expect_equal(
class(output_12059),
c("tbl_df", "tbl", "data.frame"),
fixed = TRUE)
# Testing column values
expect_equal(
output_12059[["Fixed"]],
"score",
fixed = TRUE)
expect_equal(
output_12059[["Balanced Accuracy"]],
0.83333,
tolerance = 1e-4)
expect_equal(
output_12059[["F1"]],
0.8,
tolerance = 1e-4)
expect_equal(
output_12059[["Sensitivity"]],
0.66667,
tolerance = 1e-4)
expect_equal(
output_12059[["Specificity"]],
1,
tolerance = 1e-4)
expect_equal(
output_12059[["Pos Pred Value"]],
1,
tolerance = 1e-4)
expect_equal(
output_12059[["Neg Pred Value"]],
0.85714,
tolerance = 1e-4)
expect_equal(
output_12059[["AUC"]],
0.94444,
tolerance = 1e-4)
expect_equal(
output_12059[["Lower CI"]],
0.79046,
tolerance = 1e-4)
expect_equal(
output_12059[["Upper CI"]],
1,
tolerance = 1e-4)
expect_equal(
output_12059[["Kappa"]],
0.72727,
tolerance = 1e-4)
expect_equal(
output_12059[["MCC"]],
0.75593,
tolerance = 1e-4)
expect_equal(
output_12059[["Detection Rate"]],
0.22222,
tolerance = 1e-4)
expect_equal(
output_12059[["Detection Prevalence"]],
0.22222,
tolerance = 1e-4)
expect_equal(
output_12059[["Prevalence"]],
0.33333,
tolerance = 1e-4)
expect_equal(
output_12059[["Convergence Warnings"]],
0,
tolerance = 1e-4)
expect_equal(
output_12059[["Singular Fit Messages"]],
0,
tolerance = 1e-4)
expect_equal(
output_12059[["Other Warnings"]],
0,
tolerance = 1e-4)
expect_equal(
output_12059[["Process"]][[1]][["Positive Class"]],
"0",
fixed = TRUE)
expect_equal(
output_12059[["Dependent"]],
"diagnosis",
fixed = TRUE)
# Testing column names
expect_equal(
names(output_12059),
c("Fixed", "Balanced Accuracy", "F1", "Sensitivity", "Specificity",
"Pos Pred Value", "Neg Pred Value", "AUC", "Lower CI", "Upper CI",
"Kappa", "MCC", "Detection Rate", "Detection Prevalence", "Prevalence",
"Predictions", "ROC", "Confusion Matrix", "Coefficients", "Convergence Warnings",
"Singular Fit Messages", "Other Warnings", "Warnings and Messages",
"Process", "Model", "Dependent"),
fixed = TRUE)
# Testing column classes
expect_equal(
xpectr::element_classes(output_12059),
c("character", "numeric", "numeric", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "list", "list", "list", "list",
"integer", "integer", "integer", "list", "list",
"list", "character"),
fixed = TRUE)
# Testing column types
expect_equal(
xpectr::element_types(output_12059),
c("character", "double", "double", "double", "double", "double",
"double", "double", "double", "double", "double", "double",
"double", "double", "double", "list", "list", "list", "list",
"integer", "integer", "integer", "list", "list",
"list", "character"),
fixed = TRUE)
# Testing dimensions
expect_equal(
dim(output_12059),
c(1L, 26L))
# Testing group keys
expect_equal(
colnames(dplyr::group_keys(output_12059)),
character(0),
fixed = TRUE)
## Finished testing 'validate(dat[[1]], dat[[2]], formulas = c(...' ####
}
if (!is_tibble_v2() && is_newer_lme4()){
# glmer
## Testing 'validate(dat[[1]], dat[[2]], formulas = c(...' ####
## Initially generated by xpectr
xpectr::set_test_seed(42)
# Testing side effects
# Assigning side effects
side_effects_19148 <- xpectr::capture_side_effects(validate(dat[[1]], dat[[2]],
formulas = c("diagnosis~score+(1|session)"),
family = "binomial",
REML = FALSE, verbose = TRUE,
positive = 1
), reset_seed = TRUE)
expect_equal(
xpectr::strip(side_effects_19148[['warnings']]),
xpectr::strip(c("ci.auc() of a ROC curve with AUC == 1 is always 1-1 and can be misleading.",
"ci.auc() of a ROC curve with AUC == 1 is always 1-1 and can be misleading.")),
fixed = TRUE)
expect_equal(
xpectr::strip(side_effects_19148[['messages']]),
xpectr::strip(c("Will validate 1 models.\n", "---\nvalidate(): cross_validate(): Used lme4::glmer() to fit the model.'\nFor:\nFormula: diagnosis~score+(1|session)\nFold column: .partitions\nFold: 2\nHyperparameters: REML : FALSE, control : list(list(optimizer = c(\"bobyqa\", \"Nelder_Mead\"), restart_edge = FALSE, boundary.tol = 1e-05, calc.derivs = TRUE, use.last.params = FALSE, checkControl = list(check.nobs.vs.rankZ = \"ignore\", check.nobs.vs.nlev = \"stop\", check.nlev.gtreq.5 = \"ignore\", check.nlev.gtr.1 = \"stop\", check.nobs.vs.nRE = \"stop\", check.rankX = \"message+drop.cols\", check.scaleX = \"warning\", check.formula.LHS = \"stop\", check.response.not.const = \"stop\"), checkConv = list(check.conv.grad = list(action = \"warning\", tol = 0.002, \n relTol = NULL), check.conv.singular = list(action = \"message\", tol = 1e-04), check.conv.hess = list(action = \"warning\", tol = 1e-06)), optCtrl = list(), tolPwrss = 1e-07, compDev = TRUE, nAGQ0initStep = TRUE)), model_verbose : TRUE, family : binomial, is_special_fn : TRUE\n")),
fixed = TRUE)
# Assigning output
output_19148 <- xpectr::suppress_mw(validate(dat[[1]], dat[[2]],
formulas = c("diagnosis~score+(1|session)"),
family = "binomial",
REML = FALSE, verbose = TRUE,
positive = 1
))
# Testing class
expect_equal(
class(output_19148),
c("tbl_df", "tbl", "data.frame"),
fixed = TRUE)
# Testing column values
expect_equal(
output_19148[["Fixed"]],
"score",
fixed = TRUE)
expect_equal(
output_19148[["Balanced Accuracy"]],
1,
tolerance = 1e-4)
expect_equal(
output_19148[["F1"]],
1,
tolerance = 1e-4)
expect_equal(
output_19148[["Sensitivity"]],
1,
tolerance = 1e-4)
expect_equal(
output_19148[["Specificity"]],
1,
tolerance = 1e-4)
expect_equal(
output_19148[["Pos Pred Value"]],
1,
tolerance = 1e-4)
expect_equal(
output_19148[["Neg Pred Value"]],
1,
tolerance = 1e-4)
expect_equal(
output_19148[["AUC"]],
1,
tolerance = 1e-4)
expect_equal(
output_19148[["Lower CI"]],
1,
tolerance = 1e-4)
expect_equal(
output_19148[["Upper CI"]],
1,
tolerance = 1e-4)
expect_equal(
output_19148[["Kappa"]],
1,
tolerance = 1e-4)
expect_equal(
output_19148[["MCC"]],
1,
tolerance = 1e-4)
expect_equal(
output_19148[["Detection Rate"]],
0.33333,
tolerance = 1e-4)
expect_equal(
output_19148[["Detection Prevalence"]],
0.33333,
tolerance = 1e-4)
expect_equal(
output_19148[["Prevalence"]],
0.33333,
tolerance = 1e-4)
expect_equal(
output_19148[["Convergence Warnings"]],
0,
tolerance = 1e-4)
expect_equal(
output_19148[["Singular Fit Messages"]],
0,
tolerance = 1e-4)
expect_equal(
output_19148[["Other Warnings"]],
0,
tolerance = 1e-4)
expect_equal(
output_19148[["Process"]][[1]][["Positive Class"]],
"0",
fixed = TRUE)
expect_equal(
output_19148[["Process"]][[1]][["Family"]],
"Binomial",
fixed = TRUE)
expect_equal(
output_19148[["Dependent"]],
"diagnosis",
fixed = TRUE)
expect_equal(
output_19148[["Random"]],
"(1|session)",
fixed = TRUE)
# Testing column names
expect_equal(
names(output_19148),
c("Fixed", "Balanced Accuracy", "F1", "Sensitivity", "Specificity",
"Pos Pred Value", "Neg Pred Value", "AUC", "Lower CI", "Upper CI",
"Kappa", "MCC", "Detection Rate", "Detection Prevalence", "Prevalence",
"Predictions", "ROC", "Confusion Matrix", "Coefficients", "Convergence Warnings",
"Singular Fit Messages", "Other Warnings", "Warnings and Messages",
"Process", "Model", "Dependent", "Random"),
fixed = TRUE)
# Testing column classes
expect_equal(
xpectr::element_classes(output_19148),
c("character", "numeric", "numeric", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "list", "list", "list", "list",
"integer", "integer", "integer", "list", "list",
"list", "character", "character"),
fixed = TRUE)
# Testing column types
expect_equal(
xpectr::element_types(output_19148),
c("character", "double", "double", "double", "double", "double",
"double", "double", "double", "double", "double", "double",
"double", "double", "double", "list", "list", "list", "list",
"integer", "integer", "integer", "list", "list",
"list", "character", "character"),
fixed = TRUE)
# Testing dimensions
expect_equal(
dim(output_19148),
c(1L, 27L))
# Testing group keys
expect_equal(
colnames(dplyr::group_keys(output_19148)),
character(0),
fixed = TRUE)
## Finished testing 'validate(dat[[1]], dat[[2]], formulas = c(...' ####
}
# lm
## Testing 'validate(dat[[1]], dat[[2]], formulas = c("s...' ####
## Initially generated by xpectr
xpectr::set_test_seed(42)
# Testing side effects
# Assigning side effects
side_effects_19148 <- xpectr::capture_side_effects(validate(dat[[1]], dat[[2]],
formulas = c("score~diagnosis"),
family = "gaussian",
REML = FALSE, verbose = TRUE,
positive = 1
), reset_seed = TRUE)
expect_equal(
xpectr::strip(side_effects_19148[['warnings']]),
xpectr::strip(character(0)),
fixed = TRUE)
expect_equal(
xpectr::strip(side_effects_19148[['messages']]),
xpectr::strip(c("Will validate 1 models.\n", "---\nvalidate(): cross_validate(): Used lm() to fit the model.'\nFor:\nFormula: score~diagnosis\nFold column: .partitions\nFold: 2\nHyperparameters: REML : FALSE, control : list(list(optimizer = \"nloptwrap\", restart_edge = TRUE, boundary.tol = 1e-05, calc.derivs = TRUE, use.last.params = FALSE, checkControl = list(check.nobs.vs.rankZ = \"ignore\", check.nobs.vs.nlev = \"stop\", check.nlev.gtreq.5 = \"ignore\", check.nlev.gtr.1 = \"stop\", check.nobs.vs.nRE = \"stop\", check.rankX = \"message+drop.cols\", check.scaleX = \"warning\", check.formula.LHS = \"stop\"), checkConv = list(check.conv.grad = list(action = \"warning\", tol = 0.002, relTol = NULL), check.conv.singular = list(action = \"message\", \n tol = 1e-04), check.conv.hess = list(action = \"warning\", tol = 1e-06)), optCtrl = list())), model_verbose : TRUE, family : gaussian, is_special_fn : TRUE\n")),
fixed = TRUE)
# Assigning output
output_19148 <- xpectr::suppress_mw(validate(dat[[1]], dat[[2]],
formulas = c("score~diagnosis"),
family = "gaussian",
REML = FALSE, verbose = TRUE,
positive = 1
))
# Testing class
expect_equal(
class(output_19148),
c("tbl_df", "tbl", "data.frame"),
fixed = TRUE)
# Testing column values
expect_equal(
output_19148[["Fixed"]],
"diagnosis",
fixed = TRUE)
expect_equal(
output_19148[["RMSE"]],
14.32077,
tolerance = 1e-4)
expect_equal(
output_19148[["MAE"]],
11.32099,
tolerance = 1e-4)
expect_equal(
output_19148[["NRMSE(IQR)"]],
0.95472,
tolerance = 1e-4)
expect_equal(
output_19148[["RRSE"]],
0.77293,
tolerance = 1e-4)
expect_equal(
output_19148[["RAE"]],
0.81729,
tolerance = 1e-4)
expect_equal(
output_19148[["RMSLE"]],
0.4338,
tolerance = 1e-4)
expect_equal(
output_19148[["AIC"]],
184.78402,
tolerance = 1e-4)
expect_equal(
output_19148[["AICc"]],
186.19579,
tolerance = 1e-4)
expect_equal(
output_19148[["BIC"]],
187.91759,
tolerance = 1e-4)
expect_equal(
output_19148[["Convergence Warnings"]],
0,
tolerance = 1e-4)
expect_equal(
output_19148[["Singular Fit Messages"]],
0,
tolerance = 1e-4)
expect_equal(
output_19148[["Other Warnings"]],
0,
tolerance = 1e-4)
expect_equal(
output_19148[["Process"]][[1]][["Family"]],
"Gaussian",
fixed = TRUE)
expect_equal(
output_19148[["Dependent"]],
"score",
fixed = TRUE)
# Testing column names
expect_equal(
names(output_19148),
c("Fixed", "RMSE", "MAE", "NRMSE(IQR)", "RRSE", "RAE", "RMSLE",
"AIC", "AICc", "BIC", "Predictions", "Coefficients", "Convergence Warnings",
"Singular Fit Messages", "Other Warnings", "Warnings and Messages",
"Process", "Model", "Dependent"),
fixed = TRUE)
# Testing column classes
expect_equal(
xpectr::element_classes(output_19148),
c("character", "numeric", "numeric", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "numeric", "list", "list",
"integer", "integer", "integer", "list", "list", "list",
"character"),
fixed = TRUE)
# Testing column types
expect_equal(
xpectr::element_types(output_19148),
c("character", "double", "double", "double", "double", "double",
"double", "double", "double", "double", "list", "list", "integer",
"integer", "integer", "list", "list", "list", "character"),
fixed = TRUE)
# Testing dimensions
expect_equal(
dim(output_19148),
c(1L, 19L))
# Testing group keys
expect_equal(
colnames(dplyr::group_keys(output_19148)),
character(0),
fixed = TRUE)
## Finished testing 'validate(dat[[1]], dat[[2]], formulas = c("s...' ####
# lmer
## Testing 'validate(dat[[1]], dat[[2]], formulas = c("s...' ####
## Initially generated by xpectr
xpectr::set_test_seed(42)
# Testing side effects
# Assigning side effects
side_effects_19148 <- xpectr::capture_side_effects(validate(dat[[1]], dat[[2]],
formulas = c("score~diagnosis+(1|session)"),
family = "gaussian",
REML = FALSE, verbose = TRUE,
positive = 1
), reset_seed = TRUE)
expect_equal(
xpectr::strip(side_effects_19148[['warnings']]),
xpectr::strip(character(0)),
fixed = TRUE)
expect_equal(
xpectr::strip(side_effects_19148[['messages']]),
xpectr::strip(c("Will validate 1 models.\n", "---\nvalidate(): cross_validate(): Used lme4::lmer() to fit the model.'\nFor:\nFormula: score~diagnosis+(1|session)\nFold column: .partitions\nFold: 2\nHyperparameters: REML : FALSE, control : list(list(optimizer = \"nloptwrap\", restart_edge = TRUE, boundary.tol = 1e-05, calc.derivs = TRUE, use.last.params = FALSE, checkControl = list(check.nobs.vs.rankZ = \"ignore\", check.nobs.vs.nlev = \"stop\", check.nlev.gtreq.5 = \"ignore\", check.nlev.gtr.1 = \"stop\", check.nobs.vs.nRE = \"stop\", check.rankX = \"message+drop.cols\", check.scaleX = \"warning\", check.formula.LHS = \"stop\"), checkConv = list(check.conv.grad = list(action = \"warning\", tol = 0.002, relTol = NULL), check.conv.singular = list(action = \"message\", \n tol = 1e-04), check.conv.hess = list(action = \"warning\", tol = 1e-06)), optCtrl = list())), model_verbose : TRUE, family : gaussian, is_special_fn : TRUE\n")),
fixed = TRUE)
# Assigning output
output_19148 <- xpectr::suppress_mw(validate(dat[[1]], dat[[2]],
formulas = c("score~diagnosis+(1|session)"),
family = "gaussian",
REML = FALSE, verbose = TRUE,
positive = 1
))
# Testing class
expect_equal(
class(output_19148),
c("tbl_df", "tbl", "data.frame"),
fixed = TRUE)
# Testing column values
expect_equal(
output_19148[["Fixed"]],
"diagnosis",
fixed = TRUE)
expect_equal(
output_19148[["RMSE"]],
9.20986,
tolerance = 1e-4)
expect_equal(
output_19148[["MAE"]],
6.85731,
tolerance = 1e-4)
expect_equal(
output_19148[["NRMSE(IQR)"]],
0.61399,
tolerance = 1e-4)
expect_equal(
output_19148[["RRSE"]],
0.49708,
tolerance = 1e-4)
expect_equal(
output_19148[["RAE"]],
0.49505,
tolerance = 1e-4)
expect_equal(
output_19148[["RMSLE"]],
0.22504,
tolerance = 1e-4)
expect_equal(
output_19148[["AIC"]],
166.88262,
tolerance = 1e-4)
expect_equal(
output_19148[["AICc"]],
169.38262,
tolerance = 1e-4)
expect_equal(
output_19148[["BIC"]],
171.06071,
tolerance = 1e-4)
expect_equal(
output_19148[["Convergence Warnings"]],
0,
tolerance = 1e-4)
expect_equal(
output_19148[["Singular Fit Messages"]],
0,
tolerance = 1e-4)
expect_equal(
output_19148[["Other Warnings"]],
0,
tolerance = 1e-4)
expect_equal(
output_19148[["Process"]][[1]][["Family"]],
"Gaussian",
fixed = TRUE)
expect_equal(
output_19148[["Dependent"]],
"score",
fixed = TRUE)
expect_equal(
output_19148[["Random"]],
"(1|session)",
fixed = TRUE)
# Testing column names
expect_equal(
names(output_19148),
c("Fixed", "RMSE", "MAE", "NRMSE(IQR)", "RRSE", "RAE", "RMSLE",
"AIC", "AICc", "BIC", "Predictions", "Coefficients", "Convergence Warnings",
"Singular Fit Messages", "Other Warnings", "Warnings and Messages",
"Process", "Model", "Dependent", "Random"),
fixed = TRUE)
# Testing column classes
expect_equal(
xpectr::element_classes(output_19148),
c("character", "numeric", "numeric", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "numeric", "list", "list",
"integer", "integer", "integer", "list", "list", "list",
"character", "character"),
fixed = TRUE)
# Testing column types
expect_equal(
xpectr::element_types(output_19148),
c("character", "double", "double", "double", "double", "double",
"double", "double", "double", "double", "list", "list", "integer",
"integer", "integer", "list", "list", "list", "character",
"character"),
fixed = TRUE)
# Testing dimensions
expect_equal(
dim(output_19148),
c(1L, 20L))
# Testing group keys
expect_equal(
colnames(dplyr::group_keys(output_19148)),
character(0),
fixed = TRUE)
## Finished testing 'validate(dat[[1]], dat[[2]], formulas = c("s...' ####
})
path: /tests/testthat/test_validate.R | license_type: permissive | repo_name: LudvigOlsen/cvms | language: R | is_vendor: false | is_generated: false | length_bytes: 40,498 | extension: r
"numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "list", "list", "list", "list",
"integer", "integer", "integer", "list", "list",
"list", "character"),
fixed = TRUE)
# Testing column types
expect_equal(
xpectr::element_types(output_12059),
c("character", "double", "double", "double", "double", "double",
"double", "double", "double", "double", "double", "double",
"double", "double", "double", "list", "list", "list", "list",
"integer", "integer", "integer", "list", "list",
"list", "character"),
fixed = TRUE)
# Testing dimensions
expect_equal(
dim(output_12059),
c(1L, 26L))
# Testing group keys
expect_equal(
colnames(dplyr::group_keys(output_12059)),
character(0),
fixed = TRUE)
## Finished testing 'validate(dat[[1]], dat[[2]], formulas = c(...' ####
}
if (!is_tibble_v2() && is_newer_lme4()){
# glmer
## Testing 'validate(dat[[1]], dat[[2]], formulas = c(...' ####
## Initially generated by xpectr
xpectr::set_test_seed(42)
# Testing side effects
# Assigning side effects
side_effects_19148 <- xpectr::capture_side_effects(validate(dat[[1]], dat[[2]],
formulas = c("diagnosis~score+(1|session)"),
family = "binomial",
REML = FALSE, verbose = TRUE,
positive = 1
), reset_seed = TRUE)
expect_equal(
xpectr::strip(side_effects_19148[['warnings']]),
xpectr::strip(c("ci.auc() of a ROC curve with AUC == 1 is always 1-1 and can be misleading.",
"ci.auc() of a ROC curve with AUC == 1 is always 1-1 and can be misleading.")),
fixed = TRUE)
expect_equal(
xpectr::strip(side_effects_19148[['messages']]),
xpectr::strip(c("Will validate 1 models.\n", "---\nvalidate(): cross_validate(): Used lme4::glmer() to fit the model.'\nFor:\nFormula: diagnosis~score+(1|session)\nFold column: .partitions\nFold: 2\nHyperparameters: REML : FALSE, control : list(list(optimizer = c(\"bobyqa\", \"Nelder_Mead\"), restart_edge = FALSE, boundary.tol = 1e-05, calc.derivs = TRUE, use.last.params = FALSE, checkControl = list(check.nobs.vs.rankZ = \"ignore\", check.nobs.vs.nlev = \"stop\", check.nlev.gtreq.5 = \"ignore\", check.nlev.gtr.1 = \"stop\", check.nobs.vs.nRE = \"stop\", check.rankX = \"message+drop.cols\", check.scaleX = \"warning\", check.formula.LHS = \"stop\", check.response.not.const = \"stop\"), checkConv = list(check.conv.grad = list(action = \"warning\", tol = 0.002, \n relTol = NULL), check.conv.singular = list(action = \"message\", tol = 1e-04), check.conv.hess = list(action = \"warning\", tol = 1e-06)), optCtrl = list(), tolPwrss = 1e-07, compDev = TRUE, nAGQ0initStep = TRUE)), model_verbose : TRUE, family : binomial, is_special_fn : TRUE\n")),
fixed = TRUE)
# Assigning output
output_19148 <- xpectr::suppress_mw(validate(dat[[1]], dat[[2]],
formulas = c("diagnosis~score+(1|session)"),
family = "binomial",
REML = FALSE, verbose = TRUE,
positive = 1
))
# Testing class
expect_equal(
class(output_19148),
c("tbl_df", "tbl", "data.frame"),
fixed = TRUE)
# Testing column values
expect_equal(
output_19148[["Fixed"]],
"score",
fixed = TRUE)
expect_equal(
output_19148[["Balanced Accuracy"]],
1,
tolerance = 1e-4)
expect_equal(
output_19148[["F1"]],
1,
tolerance = 1e-4)
expect_equal(
output_19148[["Sensitivity"]],
1,
tolerance = 1e-4)
expect_equal(
output_19148[["Specificity"]],
1,
tolerance = 1e-4)
expect_equal(
output_19148[["Pos Pred Value"]],
1,
tolerance = 1e-4)
expect_equal(
output_19148[["Neg Pred Value"]],
1,
tolerance = 1e-4)
expect_equal(
output_19148[["AUC"]],
1,
tolerance = 1e-4)
expect_equal(
output_19148[["Lower CI"]],
1,
tolerance = 1e-4)
expect_equal(
output_19148[["Upper CI"]],
1,
tolerance = 1e-4)
expect_equal(
output_19148[["Kappa"]],
1,
tolerance = 1e-4)
expect_equal(
output_19148[["MCC"]],
1,
tolerance = 1e-4)
expect_equal(
output_19148[["Detection Rate"]],
0.33333,
tolerance = 1e-4)
expect_equal(
output_19148[["Detection Prevalence"]],
0.33333,
tolerance = 1e-4)
expect_equal(
output_19148[["Prevalence"]],
0.33333,
tolerance = 1e-4)
expect_equal(
output_19148[["Convergence Warnings"]],
0,
tolerance = 1e-4)
expect_equal(
output_19148[["Singular Fit Messages"]],
0,
tolerance = 1e-4)
expect_equal(
output_19148[["Other Warnings"]],
0,
tolerance = 1e-4)
expect_equal(
output_19148[["Process"]][[1]][["Positive Class"]],
"0",
fixed = TRUE)
expect_equal(
output_19148[["Process"]][[1]][["Family"]],
"Binomial",
fixed = TRUE)
expect_equal(
output_19148[["Dependent"]],
"diagnosis",
fixed = TRUE)
expect_equal(
output_19148[["Random"]],
"(1|session)",
fixed = TRUE)
# Testing column names
expect_equal(
names(output_19148),
c("Fixed", "Balanced Accuracy", "F1", "Sensitivity", "Specificity",
"Pos Pred Value", "Neg Pred Value", "AUC", "Lower CI", "Upper CI",
"Kappa", "MCC", "Detection Rate", "Detection Prevalence", "Prevalence",
"Predictions", "ROC", "Confusion Matrix", "Coefficients", "Convergence Warnings",
"Singular Fit Messages", "Other Warnings", "Warnings and Messages",
"Process", "Model", "Dependent", "Random"),
fixed = TRUE)
# Testing column classes
expect_equal(
xpectr::element_classes(output_19148),
c("character", "numeric", "numeric", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "list", "list", "list", "list",
"integer", "integer", "integer", "list", "list",
"list", "character", "character"),
fixed = TRUE)
# Testing column types
expect_equal(
xpectr::element_types(output_19148),
c("character", "double", "double", "double", "double", "double",
"double", "double", "double", "double", "double", "double",
"double", "double", "double", "list", "list", "list", "list",
"integer", "integer", "integer", "list", "list",
"list", "character", "character"),
fixed = TRUE)
# Testing dimensions
expect_equal(
dim(output_19148),
c(1L, 27L))
# Testing group keys
expect_equal(
colnames(dplyr::group_keys(output_19148)),
character(0),
fixed = TRUE)
## Finished testing 'validate(dat[[1]], dat[[2]], formulas = c(...' ####
}
# lm
## Testing 'validate(dat[[1]], dat[[2]], formulas = c("s...' ####
## Initially generated by xpectr
xpectr::set_test_seed(42)
# Testing side effects
# Assigning side effects
side_effects_19148 <- xpectr::capture_side_effects(validate(dat[[1]], dat[[2]],
formulas = c("score~diagnosis"),
family = "gaussian",
REML = FALSE, verbose = TRUE,
positive = 1
), reset_seed = TRUE)
expect_equal(
xpectr::strip(side_effects_19148[['warnings']]),
xpectr::strip(character(0)),
fixed = TRUE)
expect_equal(
xpectr::strip(side_effects_19148[['messages']]),
xpectr::strip(c("Will validate 1 models.\n", "---\nvalidate(): cross_validate(): Used lm() to fit the model.'\nFor:\nFormula: score~diagnosis\nFold column: .partitions\nFold: 2\nHyperparameters: REML : FALSE, control : list(list(optimizer = \"nloptwrap\", restart_edge = TRUE, boundary.tol = 1e-05, calc.derivs = TRUE, use.last.params = FALSE, checkControl = list(check.nobs.vs.rankZ = \"ignore\", check.nobs.vs.nlev = \"stop\", check.nlev.gtreq.5 = \"ignore\", check.nlev.gtr.1 = \"stop\", check.nobs.vs.nRE = \"stop\", check.rankX = \"message+drop.cols\", check.scaleX = \"warning\", check.formula.LHS = \"stop\"), checkConv = list(check.conv.grad = list(action = \"warning\", tol = 0.002, relTol = NULL), check.conv.singular = list(action = \"message\", \n tol = 1e-04), check.conv.hess = list(action = \"warning\", tol = 1e-06)), optCtrl = list())), model_verbose : TRUE, family : gaussian, is_special_fn : TRUE\n")),
fixed = TRUE)
# Assigning output
output_19148 <- xpectr::suppress_mw(validate(dat[[1]], dat[[2]],
formulas = c("score~diagnosis"),
family = "gaussian",
REML = FALSE, verbose = TRUE,
positive = 1
))
# Testing class
expect_equal(
class(output_19148),
c("tbl_df", "tbl", "data.frame"),
fixed = TRUE)
# Testing column values
expect_equal(
output_19148[["Fixed"]],
"diagnosis",
fixed = TRUE)
expect_equal(
output_19148[["RMSE"]],
14.32077,
tolerance = 1e-4)
expect_equal(
output_19148[["MAE"]],
11.32099,
tolerance = 1e-4)
expect_equal(
output_19148[["NRMSE(IQR)"]],
0.95472,
tolerance = 1e-4)
expect_equal(
output_19148[["RRSE"]],
0.77293,
tolerance = 1e-4)
expect_equal(
output_19148[["RAE"]],
0.81729,
tolerance = 1e-4)
expect_equal(
output_19148[["RMSLE"]],
0.4338,
tolerance = 1e-4)
expect_equal(
output_19148[["AIC"]],
184.78402,
tolerance = 1e-4)
expect_equal(
output_19148[["AICc"]],
186.19579,
tolerance = 1e-4)
expect_equal(
output_19148[["BIC"]],
187.91759,
tolerance = 1e-4)
expect_equal(
output_19148[["Convergence Warnings"]],
0,
tolerance = 1e-4)
expect_equal(
output_19148[["Singular Fit Messages"]],
0,
tolerance = 1e-4)
expect_equal(
output_19148[["Other Warnings"]],
0,
tolerance = 1e-4)
expect_equal(
output_19148[["Process"]][[1]][["Family"]],
"Gaussian",
fixed = TRUE)
expect_equal(
output_19148[["Dependent"]],
"score",
fixed = TRUE)
# Testing column names
expect_equal(
names(output_19148),
c("Fixed", "RMSE", "MAE", "NRMSE(IQR)", "RRSE", "RAE", "RMSLE",
"AIC", "AICc", "BIC", "Predictions", "Coefficients", "Convergence Warnings",
"Singular Fit Messages", "Other Warnings", "Warnings and Messages",
"Process", "Model", "Dependent"),
fixed = TRUE)
# Testing column classes
expect_equal(
xpectr::element_classes(output_19148),
c("character", "numeric", "numeric", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "numeric", "list", "list",
"integer", "integer", "integer", "list", "list", "list",
"character"),
fixed = TRUE)
# Testing column types
expect_equal(
xpectr::element_types(output_19148),
c("character", "double", "double", "double", "double", "double",
"double", "double", "double", "double", "list", "list", "integer",
"integer", "integer", "list", "list", "list", "character"),
fixed = TRUE)
# Testing dimensions
expect_equal(
dim(output_19148),
c(1L, 19L))
# Testing group keys
expect_equal(
colnames(dplyr::group_keys(output_19148)),
character(0),
fixed = TRUE)
## Finished testing 'validate(dat[[1]], dat[[2]], formulas = c("s...' ####
# lmer
## Testing 'validate(dat[[1]], dat[[2]], formulas = c("s...' ####
## Initially generated by xpectr
xpectr::set_test_seed(42)
# Testing side effects
# Assigning side effects
side_effects_19148 <- xpectr::capture_side_effects(validate(dat[[1]], dat[[2]],
formulas = c("score~diagnosis+(1|session)"),
family = "gaussian",
REML = FALSE, verbose = TRUE,
positive = 1
), reset_seed = TRUE)
expect_equal(
xpectr::strip(side_effects_19148[['warnings']]),
xpectr::strip(character(0)),
fixed = TRUE)
expect_equal(
xpectr::strip(side_effects_19148[['messages']]),
xpectr::strip(c("Will validate 1 models.\n", "---\nvalidate(): cross_validate(): Used lme4::lmer() to fit the model.'\nFor:\nFormula: score~diagnosis+(1|session)\nFold column: .partitions\nFold: 2\nHyperparameters: REML : FALSE, control : list(list(optimizer = \"nloptwrap\", restart_edge = TRUE, boundary.tol = 1e-05, calc.derivs = TRUE, use.last.params = FALSE, checkControl = list(check.nobs.vs.rankZ = \"ignore\", check.nobs.vs.nlev = \"stop\", check.nlev.gtreq.5 = \"ignore\", check.nlev.gtr.1 = \"stop\", check.nobs.vs.nRE = \"stop\", check.rankX = \"message+drop.cols\", check.scaleX = \"warning\", check.formula.LHS = \"stop\"), checkConv = list(check.conv.grad = list(action = \"warning\", tol = 0.002, relTol = NULL), check.conv.singular = list(action = \"message\", \n tol = 1e-04), check.conv.hess = list(action = \"warning\", tol = 1e-06)), optCtrl = list())), model_verbose : TRUE, family : gaussian, is_special_fn : TRUE\n")),
fixed = TRUE)
# Assigning output
output_19148 <- xpectr::suppress_mw(validate(dat[[1]], dat[[2]],
formulas = c("score~diagnosis+(1|session)"),
family = "gaussian",
REML = FALSE, verbose = TRUE,
positive = 1
))
# Testing class
expect_equal(
class(output_19148),
c("tbl_df", "tbl", "data.frame"),
fixed = TRUE)
# Testing column values
expect_equal(
output_19148[["Fixed"]],
"diagnosis",
fixed = TRUE)
expect_equal(
output_19148[["RMSE"]],
9.20986,
tolerance = 1e-4)
expect_equal(
output_19148[["MAE"]],
6.85731,
tolerance = 1e-4)
expect_equal(
output_19148[["NRMSE(IQR)"]],
0.61399,
tolerance = 1e-4)
expect_equal(
output_19148[["RRSE"]],
0.49708,
tolerance = 1e-4)
expect_equal(
output_19148[["RAE"]],
0.49505,
tolerance = 1e-4)
expect_equal(
output_19148[["RMSLE"]],
0.22504,
tolerance = 1e-4)
expect_equal(
output_19148[["AIC"]],
166.88262,
tolerance = 1e-4)
expect_equal(
output_19148[["AICc"]],
169.38262,
tolerance = 1e-4)
expect_equal(
output_19148[["BIC"]],
171.06071,
tolerance = 1e-4)
expect_equal(
output_19148[["Convergence Warnings"]],
0,
tolerance = 1e-4)
expect_equal(
output_19148[["Singular Fit Messages"]],
0,
tolerance = 1e-4)
expect_equal(
output_19148[["Other Warnings"]],
0,
tolerance = 1e-4)
expect_equal(
output_19148[["Process"]][[1]][["Family"]],
"Gaussian",
fixed = TRUE)
expect_equal(
output_19148[["Dependent"]],
"score",
fixed = TRUE)
expect_equal(
output_19148[["Random"]],
"(1|session)",
fixed = TRUE)
# Testing column names
expect_equal(
names(output_19148),
c("Fixed", "RMSE", "MAE", "NRMSE(IQR)", "RRSE", "RAE", "RMSLE",
"AIC", "AICc", "BIC", "Predictions", "Coefficients", "Convergence Warnings",
"Singular Fit Messages", "Other Warnings", "Warnings and Messages",
"Process", "Model", "Dependent", "Random"),
fixed = TRUE)
# Testing column classes
expect_equal(
xpectr::element_classes(output_19148),
c("character", "numeric", "numeric", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "numeric", "list", "list",
"integer", "integer", "integer", "list", "list", "list",
"character", "character"),
fixed = TRUE)
# Testing column types
expect_equal(
xpectr::element_types(output_19148),
c("character", "double", "double", "double", "double", "double",
"double", "double", "double", "double", "list", "list", "integer",
"integer", "integer", "list", "list", "list", "character",
"character"),
fixed = TRUE)
# Testing dimensions
expect_equal(
dim(output_19148),
c(1L, 20L))
# Testing group keys
expect_equal(
colnames(dplyr::group_keys(output_19148)),
character(0),
fixed = TRUE)
## Finished testing 'validate(dat[[1]], dat[[2]], formulas = c("s...' ####
})
|
#『Rで始めるデータサイエンス』("R for Data Science", Japanese edition), O'Reilly Japan, ISBN 978-4-87311-814-7.
#https://github.com/hadley/r4ds
################################################################################
library(tidyverse)
library(maps)
library(mapproj) #coord_map
library(nycflights13)
################################################################################
# Preface ####
devtools::install_github("hadley/r4ds")
install.packages("tidyverse")
install.packages(c("nycflights13", "gapminder", "Lahman"))
library(tidyverse)
tidyverse_update()
################################################################################
# Chapter 1: Data visualization with ggplot2 ####
library(tidyverse)
ggplot2::mpg
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy))
# Exercises ####
#1
ggplot(data = mpg)
#2
str(mtcars)
#3
?mpg #the type of drive train, where f = front-wheel drive, r = rear wheel drive, 4 = 4wd
#4
ggplot(data = mpg) +
geom_point(mapping = aes(x = hwy, y = cyl))
#5
ggplot(data = mpg) +
  geom_point(mapping = aes(x = class, y = drv)) # both variables are categorical
# 1.3 Aesthetic mappings ####
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy, color = class))
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy, size = class))
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy, alpha = class))
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy, shape = class))
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy), color = "blue")
# Exercises ####
#1
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy, color = "blue"))
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy), color = "blue")
#2
str(mpg)
?mpg
#3
ggplot(data = mpg) +
geom_point(aes(x = displ, y = hwy, color = manufacturer, size = model, shape = trans))
ggplot(data = mpg) +
geom_point(aes(x = displ, y = hwy, color = year, size = cyl, shape = cty))
ggplot(data = mpg) +
geom_point(aes(x = displ, y = hwy, color = year, size = cyl, shape = fl))
#4
ggplot(data = mpg) +
geom_point(aes(x = displ, y = hwy, color = drv, size = drv, shape = drv))
#5
ggplot(data = mpg) +
geom_point(aes(x = displ, y = hwy, stroke = 0.01))
?geom_point
#6
ggplot(data = mpg) +
geom_point(aes(x = displ, y = hwy, color = displ < 5))
# 1.5 Facets
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy)) +
facet_wrap(~ class, nrow = 2)
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy)) +
facet_wrap(~ class, nrow = 2)
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy)) +
facet_grid(drv ~ cyl)
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy)) +
facet_grid(. ~ cyl)
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy)) +
facet_grid(drv ~ .)
# Exercises ####
#1
str(mpg)
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy)) +
facet_wrap(~ displ)
#2
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy)) +
facet_grid(drv ~ cyl)
ggplot(data = mpg) +
geom_point(mapping = aes(x = drv, y = cyl))
#3
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy)) +
facet_grid(drv ~ .)
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy)) +
facet_grid(. ~ cyl)
#4
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy)) +
facet_wrap( ~ class, nrow = 2)
#5
?facet_wrap
?facet_grid
#6
# a visibility/readability issue
# 1.6 Geometric objects ####
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy))
ggplot(data = mpg) +
geom_smooth(mapping = aes(x = displ, y = hwy))
ggplot(data = mpg) +
geom_smooth(mapping = aes(x = displ, y = hwy, linetype = drv))
ggplot(data = mpg) +
geom_smooth(mapping = aes(x = displ, y = hwy, group = drv))
ggplot(data = mpg) +
geom_smooth(mapping = aes(x = displ, y = hwy, color = drv), show.legend = FALSE)
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy)) +
geom_smooth(mapping = aes(x = displ, y = hwy))
ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) +
geom_point() +
geom_smooth()
ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) +
geom_point(mapping = aes(color = class)) +
geom_smooth()
ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) +
geom_point(mapping = aes(color = class)) +
geom_smooth(data = filter(mpg, class == "subcompact"), se = FALSE) +
geom_step(mapping = aes(color = class))
# Exercises ####
#1
ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) +
geom_line(mapping = aes(color = class))
ggplot(data = mpg, mapping = aes(x = class, y = hwy)) +
geom_boxplot(mapping = aes(color = class))
ggplot(data = mpg, mapping = aes(displ)) +
geom_bar()
ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) +
geom_area(mapping = aes(color = class))
#2
ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) +
geom_point() +
geom_smooth(se = FALSE)
#3
ggplot(data = mpg) +
geom_smooth(mapping = aes(x = displ, y = hwy, color = drv), show.legend = FALSE)
#4
# the standard error (the confidence band around the smooth)
#5
ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) +
geom_point() +
geom_smooth()
ggplot() +
geom_point(data = mpg, mapping = aes(x = displ, y = hwy)) +
geom_smooth(data = mpg, mapping = aes(x = displ, y = hwy))
#6
ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) +
geom_point() +
geom_smooth(se = FALSE)
ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) +
geom_point() +
geom_smooth(mapping = aes(group = drv), se = FALSE)
ggplot(data = mpg, mapping = aes(x = displ, y = hwy, color = drv)) +
geom_point() +
geom_smooth(mapping = aes(group = drv), se = FALSE)
ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) +
geom_point(mapping = aes(color = drv)) +
geom_smooth(se = FALSE)
ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) +
geom_point(mapping = aes(color = drv)) +
geom_smooth(mapping = aes(linetype = drv), se = FALSE)
ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) +
geom_point(shape = 21, color = "white", aes(fill = drv), size = 3, stroke = 2)
# 1.7 Statistical transformations ####
ggplot(data = diamonds) +
geom_bar(mapping = aes(x = cut))
ggplot(data = diamonds) +
stat_count(mapping = aes(x = cut))
demo <- tribble(~a, ~b,
"bar_1", 20,
"bar_2", 30,
"bar_3", 40)
ggplot(data = demo) +
geom_bar(mapping = aes(x = a, y = b), stat = "identity")
ggplot(data = diamonds) +
geom_bar(mapping = aes(x = cut, y = ..prop.., group = 1))
ggplot(data = diamonds) +
stat_summary(mapping = aes(x = cut, y = depth),
fun.ymin = min,
fun.ymax = max,
fun.y = median)
# Exercises ####
#1
ggplot(data = diamonds) +
stat_summary(mapping = aes(x = cut, y = depth))
#2
ggplot(data = diamonds) +
geom_col(mapping = aes(x = cut, y = depth))
#5
ggplot(data = diamonds) +
geom_bar(mapping = aes(x = cut, y = ..prop..))
ggplot(data = diamonds) +
geom_bar(mapping = aes(x = cut, y = ..prop.., group = 1))
ggplot(data = diamonds) +
geom_bar(mapping = aes(x = cut, fill = color, y = ..prop.., group = 1))
ggplot(data = diamonds) +
geom_bar(mapping = aes(x = cut, group = color, fill = color, y = ..prop.., group = 1))
# 1.8 Position adjustments ####
ggplot(data = diamonds) +
geom_bar(mapping = aes(x = cut, color = cut))
ggplot(data = diamonds) +
geom_bar(mapping = aes(x = cut, fill = cut))
ggplot(data = diamonds) +
geom_bar(mapping = aes(x = cut, fill = clarity))
# position = "identity": place each bar exactly where it falls (bars overlap)
ggplot(data = diamonds) +
geom_bar(mapping = aes(x = cut, fill = clarity), position = "identity")
ggplot(data = diamonds, mapping = aes(x = cut, fill = clarity)) +
geom_bar(alpha = 1/5, position = "identity")
ggplot(data = diamonds, mapping = aes(x = cut, color = clarity)) +
geom_bar(fill = NA, position = "identity")
# position = "fill": stacked bars scaled to equal height (proportions)
ggplot(data = diamonds) +
geom_bar(mapping = aes(x = cut, fill = clarity), position = "fill")
# position = "dodge": bars placed side by side
ggplot(data = diamonds) +
geom_bar(mapping = aes(x = cut, fill = clarity), position = "dodge")
# position = "jitter": add small random noise to reduce overplotting
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy), position = "jitter")
ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) +
geom_jitter()
# Exercises ####
#1
ggplot(data = mpg, mapping = aes(x = cty, y = hwy)) +
geom_point()
ggplot(data = mpg, mapping = aes(x = cty, y = hwy)) +
geom_jitter() +
geom_count()
#2
?geom_jitter
#3
ggplot(data = mpg, mapping = aes(x = cty, y = hwy)) +
geom_jitter()
ggplot(data = mpg, mapping = aes(x = cty, y = hwy)) +
geom_count()
#4
?geom_boxplot #dodge2
# 1.9 Coordinate systems
# coord_flip(): swap the x and y axes
ggplot(data = mpg, mapping = aes(x = class, y = hwy)) +
geom_boxplot()
ggplot(data = mpg, mapping = aes(x = class, y = hwy)) +
geom_boxplot() +
coord_flip()
# coord_quickmap(): set the correct aspect ratio for maps
install.packages("maps")
library(maps)
nz <- map_data("nz")
ggplot(nz, aes(long, lat, group = group)) +
geom_polygon(fill = "white", color = "black")
ggplot(nz, aes(long, lat, group = group)) +
geom_polygon(fill = "white", color = "black") +
coord_quickmap()
# coord_polar(): polar coordinates
bar <- ggplot(data = diamonds) +
geom_bar(mapping = aes(x = cut, fill = cut), show.legend = FALSE, width =1) +
theme(aspect.ratio = 1) +
labs(x = NULL, y = NULL)
bar
bar + coord_flip()
bar + coord_polar()
# Exercises ####
#1
bar_ex1 <- ggplot(data = diamonds) +
geom_bar(mapping = aes(x = cut, fill = clarity), show.legend = FALSE, width =1, position = "identity") +
theme(aspect.ratio = 1) +
labs(x = NULL, y = NULL)
bar_ex1 + coord_polar()
#2
?labs()
#3
install.packages("mapproj")
library(mapproj) #coord_map
nz <- map_data("nz")
ggplot(nz, aes(long, lat, group = group)) +
geom_polygon(fill = "white", color = "black") +
coord_quickmap()
ggplot(nz, aes(long, lat, group = group)) +
geom_polygon(fill = "white", color = "black") +
coord_map()
#4
ggplot(data = mpg, mapping = aes(x = cty, y = hwy)) +
geom_point() +
geom_abline() +
coord_fixed()
?geom_abline()
?coord_fixed()
# 1.10 The layered grammar of graphics
#ggplot(data = <DATA>) +
# <GEOM_FUNCTION>(mapping = aes(<MAPPINGS>),
# stat = <STAT>,
# position = <POSITION>) +
# <COORDINATE_FUNCTION> +
# <FACET_FUNCTION>
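# Illustrative sketch (not from the book): the template above with every slot filled in
# with one concrete choice. The particular variables used here are arbitrary assumptions
# picked only for demonstration.
ggplot(data = diamonds) +
  geom_bar(mapping = aes(x = cut, fill = clarity),
           stat = "count",
           position = "dodge") +
  coord_flip() +
  facet_wrap(~ color, nrow = 2)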
################################################################################
# Chapter 2: Workflow basics
# 2.1 Coding basics
#"<-" : "option" + "-"
# 2.3 Calling functions
seq(1,10)
x <- "hello_world"
y <- seq(1, 10, length.out =5)
y
(y <- seq(1, 10, length.out =5))
?seq()
# Exercises ####
#1
my_variable <- 10
my_varlable # intentional typo from the exercise: errors with "object 'my_varlable' not found"
my_variable
#2
library(tidyverse)
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy))
filter(mpg, cyl == 8)
filter(diamonds, carat > 3)
#3
# Alt-Shift-K: show the keyboard shortcut list
################################################################################
# Chapter 3: Data transformation with dplyr
# 3.1.1 Prerequisites ####
library(nycflights13)
library(tidyverse)
#3.1.2nycflights13
?flights
flights
View(flights)
# 3.2 Filter rows with filter()
jan1 <- filter(flights, month == 1, day == 1)
(dec25 <- filter(flights, month ==12, day == 25))
# 3.2.1 Comparisons
sqrt(2) ^ 2 == 2
near(sqrt(2) ^ 2, 2)
1/49 * 49 == 1
near(1/49 * 49, 1)
# 3.2.2 Logical operators
filter(flights, month == 11 | month ==12)
nov_dec <- filter(flights, month %in% c(11,12))
filter(flights, !(arr_delay > 120 | dep_delay > 120))
filter(flights, arr_delay <= 120, dep_delay <= 120)
# 3.2.3 Missing values
df <- tibble(x = c(1, NA, 3))
filter(df, x > 1)
filter(df, is.na(x) | x > 1)
# 3.3 Arrange rows with arrange()
arrange(flights, year, month, day)
arrange(flights, desc(arr_delay))
# 3.4 Select columns with select()
select(flights, year, month, day)
select(flights, year:day)
select(flights, -(year:day))
# Helper functions (used inside select())
select(flights, starts_with("dep"))
select(flights, ends_with("delay"))
select(flights, contains("time"))
select(flights, matches("(.)\\1")) # columns whose names contain a repeated character
# num_range("x", 1:3) would select x1, x2, x3 (flights has no such columns)
#rename
colnames(flights)
rename(flights, tail_num = tailnum)
select(flights, time_hour, air_time, everything())
# 3.5 Add new variables with mutate()
flights_sml <- select(flights, year:day, ends_with("delay"), distance, air_time)
mutate(flights_sml,
gain = arr_delay - dep_delay,
speed = distance / air_time * 60,
hours = air_time / 60,
gain_per_hour = gain / hours)
transmute(flights_sml,
gain = arr_delay - dep_delay,
speed = distance / air_time * 60,
hours = air_time / 60,
gain_per_hour = gain / hours)
# 3.5.1 Useful creation functions
transmute(flights, dep_time,
hour = dep_time %/% 100,
minute = dep_time %% 100)
# Offsets
x <- 1:10
lead(x)
lag(x)
# Cumulative and rolling aggregates
x
cumsum(x)
cummean(x)
cumprod(x)
cummin(x)
cummax(x)
# Ranking
y <- c(1, 2, 2, NA, 3, 4)
min_rank(y)
min_rank(desc(y))
row_number(y)
dense_rank(y)
percent_rank(y)
cume_dist(y)
ntile(y, 5)
# 3.6 Grouped summaries with summarize()
summarize(flights, delay = mean(dep_delay, na.rm = TRUE))
by_day <- group_by(flights, year, month, day)
summarize(by_day, delay = mean(dep_delay, na.rm = TRUE))
# 3.6.1 Combining multiple operations with the pipe
by_dest <- group_by(flights, dest)
delay <- summarize(by_dest,
count = n(),
dist = mean(distance, na.rm = TRUE),
delay = mean(arr_delay, na.rm = TRUE))
delay <- filter(delay, count > 20, dest != "HNL")
ggplot(data = delay, mapping = aes(x = dist, y = delay)) +
geom_point(aes(size = count), alpha = 1/3) +
geom_smooth(se = FALSE)
delay <- flights %>% group_by(dest) %>%
summarize(count = n(),
dist = mean(distance, na.rm = TRUE),
delay = mean(arr_delay, na.rm = TRUE)) %>%
filter(count > 20, dest != "HNL")
# 3.6.2 Missing values
flights %>% group_by(year, month, day) %>%
summarize(mean = mean(dep_delay))
flights %>% group_by(year, month, day) %>%
summarize(mean = mean(dep_delay, na.rm = TRUE))
not_cancelled <- flights %>% filter(!is.na(dep_delay), !is.na(arr_delay))
not_cancelled %>% group_by(year, month, day) %>%
summarize(mean = mean(dep_delay))
# 3.6.3 Counts
delays <- not_cancelled %>% group_by(tailnum) %>%
summarize(delay = mean(arr_delay))
ggplot(data = delays, mapping = aes(x = delay)) +
geom_freqpoly(binwidth = 10)
delays <- not_cancelled %>% group_by(tailnum) %>%
summarize(delay = mean(arr_delay, na.rm = TRUE),
n = n())
ggplot(data = delays, mapping = aes(x = n, y = delay)) +
geom_point(alpha = 1/10)
delays %>% filter(n > 25) %>%
ggplot(mapping = aes(x = n, y = delay)) +
geom_point(alpha = 1/10)
batting <- as_tibble(Lahman::Batting)
batters <- batting %>% group_by(playerID) %>%
summarize(ba = sum(H, na.rm = TRUE) / sum(AB, na.rm = TRUE),
ab = sum(AB, na.rm = TRUE))
batters %>%
filter(ab > 100) %>%
ggplot(mapping = aes(x = ab, y = ba)) +
geom_point() +
geom_smooth(se = FALSE)
batters %>% arrange(desc(ba))
# 3.6.4 Useful summary functions
# Measures of location (central tendency): mean(x), median(x)
not_cancelled %>%
group_by(year, month, day) %>%
summarize(avg_delay1 = mean(arr_delay),
avg_delay2 = mean(arr_delay[arr_delay > 0]))
not_cancelled$arr_delay[not_cancelled$arr_delay > 0]
# Measures of spread: standard deviation sd(x), interquartile range IQR(x), median absolute deviation mad(x)
not_cancelled %>%
group_by(dest) %>%
summarize(distance_sd = sd(distance)) %>%
arrange(desc(distance_sd))
# Measures of rank: min(x), quantile(x, 0.25), max(x)
not_cancelled %>%
group_by(year, month, day) %>%
summarize(first = min(dep_time),
last = max(dep_time))
# Measures of position: first(x) = x[1], nth(x, 2) = x[2], last(x) = x[length(x)]
not_cancelled %>%
group_by(year, month, day) %>%
summarize(first_dep = first(dep_time),
last_dep = last(dep_time))
not_cancelled %>%
group_by(year, month, day) %>%
mutate(r = min_rank(desc(dep_time))) %>%
filter(r %in% range(r))
# Counts: n(), n_distinct(x)
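# Illustrative sketch (assumes the not_cancelled data frame created above):
# count flights and distinct carriers per destination with n() and n_distinct()
not_cancelled %>%
  group_by(dest) %>%
  summarize(flights = n(), carriers = n_distinct(carrier)) %>%
  arrange(desc(carriers))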
################################################################################
################################################################################
|
/script/archive/R_for_Data_Science.R
|
permissive
|
achiral/rbioc
|
R
| false | false | 16,206 |
r
|
# Function that cleans, transforms the predictor variables and splits the input dataset.
# The training, validation and test sets are returned for building the surrogate approximation models.
load_data <- function(file, sample_size) {
##
## Step 1: Read dataset
##
data <- read.csv(file, sep="\t", header=T)
cat("Initial size of total data = ", dim(data), "\n")
  data <- data[sample(nrow(data), sample_size), ] # Randomly sample sample_size rows
cat("Sampled size of total data = ", dim(data), "\n")
data$ID <- NULL # Delete ID column
data$ShortestPath <- NULL # Delete ShortestPath column
data$rxPackets <- NULL # Delete rxPackets column
##
## Step 2: Variable transformation
##
if ( display_graphs == 1) {
# Checking for normality
for(i in 1:length(predictors)){
qqnorm(data[,predictors[i]], main = paste("Predictor Variable = ", predictors_names[i]))
dev.new()
hist(data[,predictors[i]], main = paste("Histogram Predictor Variable = ", predictors_names[i]))
dev.new()
hist(log(data[,predictors[i]]), main = paste("Log Predictor Variable = ", predictors_names[i]))
dev.new()
Sys.sleep(10)
}
}
# Leave untransformed: Orchestrators (10) and Distance (13)
# Moderately positive skewness
data[,7] = log(data[,7])
  # LongestPath = log(LongestPath)
data[,9] = log(data[,9])
# Neighbors = log(Neighbors)
data[,11] = log(data[,11])
# Substantially negatively skewed
data[,8] = sqrt(max(data[,8]) + 1 - data[,8])
# Paths = hist(sqrt(max(Paths) + 1 - Paths))
data[,12] = sqrt(max(data[,12]) + 1 - data[,12])
data$Delay <- NULL # Delete Delay column
data$Latency <- NULL # Delete Latency column
data$txPackets <- NULL # Delete txPackets column
data$LongestPath <- NULL # Delete LongestPath column
data$Neighbors <- NULL # Delete Neighbors column
data$Distance <- NULL # Delete Distance column
data$Paths <- NULL # Delete Paths column
##
## Step 3: Data Splitting
##
# 60%
data_train = data[1:(dim(data)[1] * 0.6), ]
# 20%
from = dim(data)[1] * 0.6 + 1
to = dim(data)[1] * 0.6 + dim(data)[1] * 0.2
data_valid = data[from:to, ]
# 20%
from = dim(data)[1] * 0.6 + dim(data)[1] * 0.2 + 1
to = dim(data)[1]
data_test = data[from:to, ]
return(list(data_train = data_train, data_valid = data_valid, data_test = data_test))
}
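# Hypothetical usage sketch — the file name, the display_graphs flag and the
# predictors/predictors_names globals referenced inside load_data() are assumptions,
# not part of the original script:
# display_graphs <- 0
# splits <- load_data("network_metrics.tsv", sample_size = 1000)
# sapply(splits, nrow) # sizes of the 60/20/20 train/validation/test splits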
|
/load_split_data.R
|
no_license
|
efstathiou/model-building-thesis
|
R
| false | false | 2,967 |
r
|
# remove all data in memory
rm(list=ls())
dev.off()
# libraries
library(dplyr)
library(ggplot2)
library(scales)
library(grid)
library(stringr)
# datapaths
main_path <- "C:/Users/arne/DS_Programming_Courses/Coursera/ExploratoryDataAnalysis/week4"
inpath <- paste(main_path, "exdata_data_NEI_data", sep ="/")
# reading the data
NEI <- readRDS(paste(inpath, "summarySCC_PM25.rds", sep="/"))
SCC <- readRDS(paste(inpath, "Source_Classification_Code.rds", sep="/"))
# quick exploration
# exploring NEI
dim(NEI)
head(NEI)
str(NEI)
unique(NEI$Pollutant)
# exploring SCC
dim(SCC)
names(SCC)
str(SCC)
sort(unique(SCC$SCC.Level.Four))
# subsetting for Baltimore and merging SCC and NEI
df <- NEI[NEI$fips=="24510"|NEI$fips=="06037",]
df$City <- ifelse(df$fips=="24510", "Baltimore",
ifelse(df$fips=="06037", "Los Angeles", NA))
NEI$SCC <- as.factor(NEI$SCC)
df <- merge(df, SCC, by.x = "SCC", by.y = "SCC", all.x = T, all.y = F)
df_org <- df
# checking for rows containing the words "motor" and "vehicle"
motor_string <- "motor"
vehicle_string <- "vehicle"
#
# checking which SCC level to group by to isolate motor-vehicle-related emissions
z <- select(SCC, -SCC, -Map.To, -Last.Inventory.Year, -Created_Date, -Revised_Date, -Short.Name)
z <- z %>%
mutate_all(as.character)
vec_motor_scc <- unlist((apply((z), 1, function(row) any(str_detect(tolower(row), motor_string)))==T))
vec_vehicle_string <- unlist((apply((z), 1, function(row) any(str_detect(tolower(row), vehicle_string)))==T))
y <- SCC[vec_vehicle_string,]
# => SCC.Level.Three
#
# keeping rows containing the words "motor" and "vehicle"
x <- select(df, -SCC, -fips, -Pollutant, -Emissions, -type, -year, -Map.To, -Last.Inventory.Year, -Created_Date, -Revised_Date, -Short.Name)
x <- x %>%
mutate_all(as.character)
vec_motor <- unlist((apply((x), 1, function(row) any(str_detect(tolower(row), motor_string)))==T))
vec_vehicle <- unlist((apply((x), 1, function(row) any(str_detect(tolower(row), vehicle_string)))==T))
# subsetting
df <- df[vec_vehicle,]
exclude <- c("All Processes", "Border Crossings", "Coal Mining, Cleaning, and Material Handling (See 305310)",
"Commercial Equipment", "Compression Ignition Equipment except Rail and Marine",
"Construction and Mining Equipment", "Filling Vehicle Gas Tanks - Stage II",
"Industrial Equipment", "Industrial/Commercial/Institutional",
"Iron Production (See 3-03-015 for Integrated Iron & Steel MACT)",
"Lawn and Garden Equipment","Lime Manufacture", "Logging Equipment", "Motor Vehicle Fires",
"NOT USED - Previously all LDGT (1&2) under M5", "Printing Ink Manufacture", "Residential",
"Road Construction", "Underground Mining Equipment")
df <- df[! df$SCC.Level.Three %in% exclude,]
length(unique(df$SCC.Level.Three))
length(unique(y$SCC.Level.Three))
# plotting
#
# plot6: How have emissions from motor vehicle sources changed from 1999-2008 in Baltimore City vs. Los Angeles?
# mean group emissions by year (using mean to account for number of measurements)
df <- aggregate(list(Emissions=df$Emissions), by=list(year=df$year, City=df$City), FUN = "mean")
avg_baltimore <- mean(df[df$City=="Baltimore","Emissions"])
avg_la <- mean(df[df$City=="Los Angeles","Emissions"])
df[df$City=="Baltimore","Emissions"] <- (df[df$City=="Baltimore","Emissions"]/max(df[df$City=="Baltimore","Emissions"]))
df[df$City=="Los Angeles","Emissions"] <- (df[df$City=="Los Angeles","Emissions"]/max(df[df$City=="Los Angeles","Emissions"]))
#df$Emissions <- percent(df$Emissions)
# create stacked barplot
png(filename = paste(main_path, "/plot6.png", sep =""),
width = 480, height = 480)
g <- ggplot(data=df, aes(x=year, y=Emissions))
g + geom_point() +
geom_smooth(method = "lm") +
scale_x_continuous(breaks = df$year) +
facet_grid(. ~ City) +
scale_y_continuous(labels = scales::percent) +
labs(title="Motor Vehicle PM2.5 Emissions (Indexed)",
subtitle=paste("Avg Emissions Baltimore: ", round(avg_baltimore,2), " - Avg Emissions Los Angeles: ", round(avg_la,2), sep =""))
dev.off()
|
/R/plots/plot6.R
|
no_license
|
agiedke/Snippets
|
R
| false | false | 4,108 |
r
|
# remove all data in memory
rm(list=ls())
dev.off()
# libraries
library(dplyr)
library(ggplot2)
library(scales)
library(grid)
library(stringr)
# datapaths
main_path <- "C:/Users/arne/DS_Programming_Courses/Coursera/ExploratoryDataAnalysis/week4"
inpath <- paste(main_path, "exdata_data_NEI_data", sep ="/")
# reading the data
NEI <- readRDS(paste(inpath, "summarySCC_PM25.rds", sep="/"))
SCC <- readRDS(paste(inpath, "Source_Classification_Code.rds", sep="/"))
# quick exploration
# exploring NEI
dim(NEI)
head(NEI)
str(NEI)
unique(NEI$Pollutant)
# exploring SCC
dim(SCC)
names(SCC)
str(SCC)
sort(unique(SCC$SCC.Level.Four))
# subsetting for Baltimore and merging SCC and NEI
df <- NEI[NEI$fips=="24510"|NEI$fips=="06037",]
df$City <- ifelse(df$fips=="24510", "Baltimore",
ifelse(df$fips=="06037", "Los Angeles", NA))
NEI$SCC <- as.factor(NEI$SCC)
df <- merge(df, SCC, by.x = "SCC", by.y = "SCC", all.x = T, all.y = F)
df_org <- df
# checking for rows containing the words "motor" and "vehicle"
motor_string <- "motor"
vehicle_string <- "vehicle"
#
# checking by what to group to obtain only coal combustion related emissions
z <- select(SCC, -SCC, -Map.To, -Last.Inventory.Year, -Created_Date, -Revised_Date, -Short.Name)
z <- z %>%
mutate_all(as.character)
vec_motor_scc <- unlist((apply((z), 1, function(row) any(str_detect(tolower(row), motor_string)))==T))
vec_vehicle_string <- unlist((apply((z), 1, function(row) any(str_detect(tolower(row), vehicle_string)))==T))
y <- SCC[vec_vehicle_string,]
# => SCC.Level.Three
#
# keeping rows containing the words "motor" and "vehicle"
x <- select(df, -SCC, -fips, -Pollutant, -Emissions, -type, -year, -Map.To, -Last.Inventory.Year, -Created_Date, -Revised_Date, -Short.Name)
x <- x %>%
mutate_all(as.character)
vec_motor <- unlist((apply((x), 1, function(row) any(str_detect(tolower(row), motor_string)))==T))
vec_vehicle <- unlist((apply((x), 1, function(row) any(str_detect(tolower(row), vehicle_string)))==T))
# subsetting
df <- df[vec_vehicle,]
exclude <- c("All Processes", "Border Crossings", "Coal Mining, Cleaning, and Material Handling (See 305310)",
"Commercial Equipment", "Compression Ignition Equipment except Rail and Marine",
"Construction and Mining Equipment", "Filling Vehicle Gas Tanks - Stage II",
"Industrial Equipment", "Industrial/Commercial/Institutional",
"Iron Production (See 3-03-015 for Integrated Iron & Steel MACT)",
"Lawn and Garden Equipment","Lime Manufacture", "Logging Equipment", "Motor Vehicle Fires",
"NOT USED - Previously all LDGT (1&2) under M5", "Printing Ink Manufacture", "Residential",
"Road Construction", "Underground Mining Equipment")
df <- df[! df$SCC.Level.Three %in% exclude,]
length(unique(df$SCC.Level.Three))
length(unique(y$SCC.Level.Three))
# plotting
#
# plot6: How have emissions from motor vehicle sources changed from 1999-2008 in Baltimore City vs. Los Angeles?
# mean group emissions by year (using mean to account for number of measurements)
df <- aggregate(list(Emissions=df$Emissions), by=list(year=df$year, City=df$City), FUN = "mean")
avg_baltimore <- mean(df[df$City=="Baltimore","Emissions"])
avg_la <- mean(df[df$City=="Los Angeles","Emissions"])
df[df$City=="Baltimore","Emissions"] <- (df[df$City=="Baltimore","Emissions"]/max(df[df$City=="Baltimore","Emissions"]))
df[df$City=="Los Angeles","Emissions"] <- (df[df$City=="Los Angeles","Emissions"]/max(df[df$City=="Los Angeles","Emissions"]))
#df$Emissions <- percent(df$Emissions)
# create faceted scatter plot with linear trend lines (one panel per city)
png(filename = paste(main_path, "/plot6.png", sep =""),
width = 480, height = 480)
g <- ggplot(data=df, aes(x=year, y=Emissions))
g + geom_point() +
geom_smooth(method = "lm") +
scale_x_continuous(breaks = df$year) +
facet_grid(. ~ City) +
scale_y_continuous(labels = scales::percent) +
labs(title="Motor Vehicle PM2.5 Emissions (Indexed)",
subtitle=paste("Avg Emissions Baltimore: ", round(avg_baltimore,2), " - Avg Emissions Los Angeles: ", round(avg_la,2), sep =""))
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/verb-pull.R
\name{pull.tbl_sql}
\alias{pull.tbl_sql}
\title{Extract a single column}
\usage{
\method{pull}{tbl_sql}(.data, var = -1)
}
\arguments{
\item{.data}{A lazy data frame backed by a database query.}
\item{var}{A variable specified as:
\itemize{
\item a literal variable name
\item a positive integer, giving the position counting from the left
\item a negative integer, giving the position counting from the right.
}
The default returns the last column (on the assumption that's the
column you've created most recently).
This argument is taken by expression and supports
\link[rlang:nse-force]{quasiquotation} (you can unquote column
names and column locations).}
}
\value{
A vector of data.
}
\description{
This is a method for the dplyr \code{\link[=pull]{pull()}} generic. It evaluates the query
retrieving just the specified column.
}
\examples{
library(dplyr, warn.conflicts = FALSE)
db <- memdb_frame(x = 1:5, y = 5:1)
db \%>\%
mutate(z = x + y * 2) \%>\%
pull()
}
|
/man/pull.tbl_sql.Rd
|
permissive
|
edgararuiz/dbplyr
|
R
| false | true | 1,065 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/verb-pull.R
\name{pull.tbl_sql}
\alias{pull.tbl_sql}
\title{Extract a single column}
\usage{
\method{pull}{tbl_sql}(.data, var = -1)
}
\arguments{
\item{.data}{A lazy data frame backed by a database query.}
\item{var}{A variable specified as:
\itemize{
\item a literal variable name
\item a positive integer, giving the position counting from the left
\item a negative integer, giving the position counting from the right.
}
The default returns the last column (on the assumption that's the
column you've created most recently).
This argument is taken by expression and supports
\link[rlang:nse-force]{quasiquotation} (you can unquote column
names and column locations).}
}
\value{
A vector of data.
}
\description{
This is a method for the dplyr \code{\link[=pull]{pull()}} generic. It evaluates the query
retrieving just the specified column.
}
\examples{
library(dplyr, warn.conflicts = FALSE)
db <- memdb_frame(x = 1:5, y = 5:1)
db \%>\%
mutate(z = x + y * 2) \%>\%
pull()
}
|
data <- read.csv("/Users/achimnyswallow/Documents/Courses/Stats 503/Homework/HW4/blocks.csv")
#transform selected variables and combine into a new dataset
log_data <-log(data[,2:11])
newdata <- data.frame(data[,1],log_data[,1:2],log_data[,4],data[,6:7],log_data[,7],log_data[,10])
colnames(newdata) <- c("block","log height","log length","log eccen", "pblock","psmooth","log meantr","log trans")
head(newdata)
## select 90% of data as training data and the rest as test data
train = sample(1:nrow(newdata),608)
train.data <- newdata[train,]
write.csv(train.data, "/Users/achimnyswallow/Documents/Courses/Stats 503/Homework/HW4/train_data.csv")
test.data <-newdata[-train,]
write.csv(test.data, "/Users/achimnyswallow/Documents/Courses/Stats 503/Homework/HW4/test_data.csv")
train.data <- read.csv("/Users/achimnyswallow/Documents/Courses/Stats 503/Homework/HW4/train_data.csv")
test.data <- read.csv("/Users/achimnyswallow/Documents/Courses/Stats 503/Homework/HW4/test_data.csv")
## use rpart package to build a classification tree
library(rpart)
## grow tree
fit <- rpart(block~., data=train.data)
printcp(fit, digits=3) ## display results
##Classification tree:
##rpart(formula = block ~ ., data = train.data)
##Variables actually used in tree construction:
##[1] log eccen log height log length log meantr log trans pblock
##Root node error: 314/608 = 0.51645
##n= 608
## CP nsplit rel error xerror xstd
##1 0.238854 0 1.00000 1.00000 0.039243
##2 0.216561 1 0.76115 0.80892 0.038729
##3 0.197452 2 0.54459 0.55414 0.035493
##4 0.079618 3 0.34713 0.40446 0.031922
##5 0.060510 4 0.26752 0.32803 0.029457
##6 0.035032 5 0.20701 0.30892 0.028755
##7 0.017516 6 0.17197 0.24522 0.026116
##8 0.010000 8 0.13694 0.21656 0.024750
write.table(printcp(fit, digits=3),"/Users/achimnyswallow/Documents/Courses/Stats 503/Homework/HW4/error_cp_classification_tree.csv", sep=",")
plotcp(fit) #visualize cross-validation results
summary(fit) # detailed summary of splits
##
tree.pred=predict(fit,test.data,type="class")
table(tree.pred, test.data$block)
write.table(table(tree.pred, test.data$block), "/Users/achimnyswallow/Documents/Courses/Stats 503/Homework/HW4/pred_block.csv",sep=",")
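## Optional helper (sketch, not part of the original analysis): the error rates
## quoted in the comments can be reproduced from a prediction/truth pair.
## Assumes pred and truth are equal-length vectors or factors.
err_rate <- function(pred, truth) mean(as.character(pred) != as.character(truth))
## e.g. err_rate(tree.pred, test.data$block)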
##
plot(fit, uniform=TRUE,
main="Classification Tree for block data")
text(fit, use.n=TRUE, all=TRUE, cex=.5)
## beautify the tree
post(fit, file = "/Users/achimnyswallow/Documents/Courses/Stats 503/Homework/HW4/Ctree_blockdata.pdf",
title = "Classification Tree for block data")
## prune the tree
pfit<- prune(fit, cp= fit$cptable[which.min(fit$cptable[,"xerror"]),"CP"])
# plot the pruned tree
plot(pfit, uniform=TRUE,
main="Pruned Classification Tree for block data")
text(pfit, use.n=TRUE, all=TRUE, cex=.5)
post(pfit, file = "/Users/achimnyswallow/Documents/Courses/Stats 503/Homework/HW4/Pruned_Ctree_blockdata.pdf",
title = "Pruned Classification Tree for block data")
## Random Forest
library(randomForest)
train.data <- read.csv("/Users/achimnyswallow/Documents/Courses/Stats 503/Homework/HW4/train_data.csv")
test.data <- read.csv("/Users/achimnyswallow/Documents/Courses/Stats 503/Homework/HW4/test_data.csv")
fit = randomForest(block~., data = train.data, mtry=3, ntree=1000, importance = T)
importance(fit)
varImpPlot(fit)
print(fit)
##Call:
##randomForest(formula = block ~ ., data = train.data, mtry = 3, ntree = 1000, importance = T)
##               Type of random forest: classification
##                     Number of trees: 1000
##No. of variables tried at each split: 3
## OOB estimate of error rate: 0.49%
##Confusion matrix:
## graphic h_line picture text v_line class.error
##graphic 25 0 0 0 0 0.00000000
##h_line 0 294 0 0 0 0.00000000
##picture 0 0 111 0 0 0.00000000
##text 0 1 0 100 0 0.00990099
##v_line 0 1 1 0 75 0.02597403
tree.pred=predict(fit,test.data,type="class")
table(tree.pred, test.data$block)
plot(fit)
## Support Vector Classifier
library(e1071)
svmfit = svm(block~., data=train.data, kernel = "linear", scale=FALSE, cost=10)
pred.svm <- predict(svmfit, train.data, type="class")
table(pred.svm, train.data$block)
## training error is 0%
pred.svm <- predict(svmfit, test.data, type="class")
table(pred.svm, test.data$block)
##Call:
##svm(formula = block ~ ., data = train.data, kernel = "linear", cost = 10, scale = FALSE)
##Parameters:
## SVM-Type: C-classification
## SVM-Kernel: linear
## cost: 10
## gamma: 0.1
##Number of Support Vectors: 100
## ( 27 30 27 4 12 )
##Number of Classes: 5
##Levels:
## graphic h_line picture text v_line
svmfit = svm(block~., data=train.data, kernel = "linear", scale=FALSE, cost=0.01)
##Call:
##svm(formula = block ~ ., data = train.data, kernel = "linear", cost = 0.01, scale = FALSE)
##Parameters:
## SVM-Type: C-classification
## SVM-Kernel: linear
## cost: 0.01
## gamma: 0.1
##Number of Support Vectors: 128
## ( 37 30 37 4 20 )
##Number of Classes: 5
##Levels:
## graphic h_line picture text v_line
## training error
pred.svm <- predict(svmfit, train.data, type="class")
table(pred.svm, train.data$block)
## training error is 0%
pred.svm <- predict(svmfit, test.data, type="class")
table(pred.svm, test.data$block)
## test error is 0%
##another linear kernel
svmfit = svm(block~., data=train.data, kernel = "linear", scale=FALSE, cost=100)
summary(svmfit)
## training error =0%; test error = 0%
## Support Vector Machine
svmfit = svm(block~., data=train.data, kernel = "radial", gamma=1, cost=1)
summary(svmfit)
##Call:
##svm(formula = block ~ ., data = train.data, kernel = "radial", gamma = 1, cost = 1)
##Parameters:
##SVM-Type: C-classification
##SVM-Kernel: radial
## cost: 1
## gamma: 1
##Number of Support Vectors: 327
## ( 62 128 66 25 46 )
##Number of Classes: 5
##Levels:
##graphic h_line picture text v_line
pred.svm <- predict(svmfit, train.data, type="class")
table(pred.svm, train.data$block)
## training error is 0.33%
pred.svm <- predict(svmfit, test.data, type="class")
table(pred.svm, test.data$block)
## test error is 2.94%
svmfit = svm(block~., data=train.data, kernel = "radial", gamma=1, cost=0.01)
summary(svmfit)
##Call:
##svm(formula = block ~ ., data = train.data, kernel = "radial", gamma = 1, cost = 0.01)
##Parameters:
##SVM-Type: C-classification
##SVM-Kernel: radial
## cost: 0.01
## gamma: 1
##Number of Support Vectors: 505
##( 99 193 110 26 77 )
##Number of Classes: 5
##Levels:
##graphic h_line picture text v_line
pred.svm <- predict(svmfit, train.data, type="class")
table(pred.svm, train.data$block)
##training error is 51.64%; predict everything as h_line
##pred.svm graphic h_line picture text v_line
##graphic 0 0 0 0 0
##h_line 25 294 111 101 77
##picture 0 0 0 0 0
##text 0 0 0 0 0
##v_line 0 0 0 0 0
pred.svm <- predict(svmfit, test.data, type="class")
table(pred.svm, test.data$block)
##test error is 52.94%; predict everything as h_line
|
/Tree_RandomForest_SupportVectorMachine.R
|
no_license
|
hhsieh/R_StatisticalLearning
|
R
| false | false | 7,263 |
r
|
data <- read.csv("/Users/achimnyswallow/Documents/Courses/Stats 503/Homework/HW4/blocks.csv")
#transform selected variables and combine into a new dataset
log_data <-log(data[,2:11])
newdata <- data.frame(data[,1],log_data[,1:2],log_data[,4],data[,6:7],log_data[,7],log_data[,10])
colnames(newdata) <- c("block","log height","log length","log eccen", "pblock","psmooth","log meantr","log trans")
head(newdata)
## select 90% of data as training data and the rest as test data
train = sample(1:nrow(newdata),608)
train.data <- newdata[train,]
write.csv(train.data, "/Users/achimnyswallow/Documents/Courses/Stats 503/Homework/HW4/train_data.csv")
test.data <-newdata[-train,]
write.csv(test.data, "/Users/achimnyswallow/Documents/Courses/Stats 503/Homework/HW4/test_data.csv")
train.data <- read.csv("/Users/achimnyswallow/Documents/Courses/Stats 503/Homework/HW4/train_data.csv")
test.data <- read.csv("/Users/achimnyswallow/Documents/Courses/Stats 503/Homework/HW4/test_data.csv")
## use rpart package to build a classification tree
library(rpart)
## grow tree
fit <- rpart(block~., data=train.data)
printcp(fit, digits=3) ## display results
##Classification tree:
##rpart(formula = block ~ ., data = train.data)
##Variables actually used in tree construction:
##[1] log eccen log height log length log meantr log trans pblock
##Root node error: 314/608 = 0.51645
##n= 608
## CP nsplit rel error xerror xstd
##1 0.238854 0 1.00000 1.00000 0.039243
##2 0.216561 1 0.76115 0.80892 0.038729
##3 0.197452 2 0.54459 0.55414 0.035493
##4 0.079618 3 0.34713 0.40446 0.031922
##5 0.060510 4 0.26752 0.32803 0.029457
##6 0.035032 5 0.20701 0.30892 0.028755
##7 0.017516 6 0.17197 0.24522 0.026116
##8 0.010000 8 0.13694 0.21656 0.024750
write.table(printcp(fit, digits=3),"/Users/achimnyswallow/Documents/Courses/Stats 503/Homework/HW4/error_cp_classification_tree.csv", sep=",")
plotcp(fit) #visualize cross-validation results
summary(fit) # detailed summary of splits
##
tree.pred=predict(fit,test.data,type="class")
table(tree.pred, test.data$block)
write.table(table(tree.pred, test.data$block), "/Users/achimnyswallow/Documents/Courses/Stats 503/Homework/HW4/pred_block.csv",sep=",")
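## Optional helper (sketch, not part of the original analysis): the error rates
## quoted in the comments can be reproduced from a prediction/truth pair.
## Assumes pred and truth are equal-length vectors or factors.
err_rate <- function(pred, truth) mean(as.character(pred) != as.character(truth))
## e.g. err_rate(tree.pred, test.data$block)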
##
plot(fit, uniform=TRUE,
main="Classification Tree for block data")
text(fit, use.n=TRUE, all=TRUE, cex=.5)
## beautify the tree
post(fit, file = "/Users/achimnyswallow/Documents/Courses/Stats 503/Homework/HW4/Ctree_blockdata.pdf",
title = "Classification Tree for block data")
## prune the tree
pfit<- prune(fit, cp= fit$cptable[which.min(fit$cptable[,"xerror"]),"CP"])
# plot the pruned tree
plot(pfit, uniform=TRUE,
main="Pruned Classification Tree for block data")
text(pfit, use.n=TRUE, all=TRUE, cex=.5)
post(pfit, file = "/Users/achimnyswallow/Documents/Courses/Stats 503/Homework/HW4/Pruned_Ctree_blockdata.pdf",
title = "Pruned Classification Tree for block data")
## Random Forest
library(randomForest)
train.data <- read.csv("/Users/achimnyswallow/Documents/Courses/Stats 503/Homework/HW4/train_data.csv")
test.data <- read.csv("/Users/achimnyswallow/Documents/Courses/Stats 503/Homework/HW4/test_data.csv")
fit = randomForest(block~., data = train.data, mtry=3, ntree=1000, importance = T)
importance(fit)
varImpPlot(fit)
print(fit)
##Call:
##randomForest(formula = block ~ ., data = train.data, mtry = 3, ntree = 1000, importance = T)
##               Type of random forest: classification
##                     Number of trees: 1000
##No. of variables tried at each split: 3
## OOB estimate of error rate: 0.49%
##Confusion matrix:
## graphic h_line picture text v_line class.error
##graphic 25 0 0 0 0 0.00000000
##h_line 0 294 0 0 0 0.00000000
##picture 0 0 111 0 0 0.00000000
##text 0 1 0 100 0 0.00990099
##v_line 0 1 1 0 75 0.02597403
tree.pred=predict(fit,test.data,type="class")
table(tree.pred, test.data$block)
plot(fit)
## Support Vector Classifier
library(e1071)
svmfit = svm(block~., data=train.data, kernel = "linear", scale=FALSE, cost=10)
pred.svm <- predict(svmfit, train.data, type="class")
table(pred.svm, train.data$block)
## training error is 0%
pred.svm <- predict(svmfit, test.data, type="class")
table(pred.svm, test.data$block)
##Call:
##svm(formula = block ~ ., data = train.data, kernel = "linear", cost = 10, scale = FALSE)
##Parameters:
## SVM-Type: C-classification
## SVM-Kernel: linear
## cost: 10
## gamma: 0.1
##Number of Support Vectors: 100
## ( 27 30 27 4 12 )
##Number of Classes: 5
##Levels:
## graphic h_line picture text v_line
svmfit = svm(block~., data=train.data, kernel = "linear", scale=FALSE, cost=0.01)
##Call:
##svm(formula = block ~ ., data = train.data, kernel = "linear", cost = 0.01, scale = FALSE)
##Parameters:
## SVM-Type: C-classification
## SVM-Kernel: linear
## cost: 0.01
## gamma: 0.1
##Number of Support Vectors: 128
## ( 37 30 37 4 20 )
##Number of Classes: 5
##Levels:
## graphic h_line picture text v_line
## training error
pred.svm <- predict(svmfit, train.data, type="class")
table(pred.svm, train.data$block)
## training error is 0%
pred.svm <- predict(svmfit, test.data, type="class")
table(pred.svm, test.data$block)
## test error is 0%
##another linear kernel
svmfit = svm(block~., data=train.data, kernel = "linear", scale=FALSE, cost=100)
summary(svmfit)
## training error =0%; test error = 0%
## Support Vector Machine
svmfit = svm(block~., data=train.data, kernel = "radial", gamma=1, cost=1)
summary(svmfit)
##Call:
##svm(formula = block ~ ., data = train.data, kernel = "radial", gamma = 1, cost = 1)
##Parameters:
##SVM-Type: C-classification
##SVM-Kernel: radial
## cost: 1
## gamma: 1
##Number of Support Vectors: 327
## ( 62 128 66 25 46 )
##Number of Classes: 5
##Levels:
##graphic h_line picture text v_line
pred.svm <- predict(svmfit, train.data, type="class")
table(pred.svm, train.data$block)
## training error is 0.33%
pred.svm <- predict(svmfit, test.data, type="class")
table(pred.svm, test.data$block)
## test error is 2.94%
svmfit = svm(block~., data=train.data, kernel = "radial", gamma=1, cost=0.01)
summary(svmfit)
##Call:
##svm(formula = block ~ ., data = train.data, kernel = "radial", gamma = 1, cost = 0.01)
##Parameters:
##SVM-Type: C-classification
##SVM-Kernel: radial
## cost: 0.01
## gamma: 1
##Number of Support Vectors: 505
##( 99 193 110 26 77 )
##Number of Classes: 5
##Levels:
##graphic h_line picture text v_line
pred.svm <- predict(svmfit, train.data, type="class")
table(pred.svm, train.data$block)
##training error is 51.64%; predict everything as h_line
##pred.svm graphic h_line picture text v_line
##graphic 0 0 0 0 0
##h_line 25 294 111 101 77
##picture 0 0 0 0 0
##text 0 0 0 0 0
##v_line 0 0 0 0 0
pred.svm <- predict(svmfit, test.data, type="class")
table(pred.svm, test.data$block)
##test error is 52.94%; predict everything as h_line
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TECDF.R
\name{TECDF}
\alias{TECDF}
\title{TECDF}
\usage{
TECDF(initdata = initdata, kernel = kernel, TE, h, max_iter = 100,
simultaneous.inference = TRUE)
}
\arguments{
\item{initdata}{a list with elements Q, a data.frame with columns named QAW, Q1W, Q0W for initial
predictions for the outcome, outcome under A=1 and under A=0, resp. A, a vector of binary
treatment assignments and Y, the outcome and g1W, a vector of propensity scores.}
\item{kernel}{see make_kernel}
\item{h}{the bandwidth}
\item{max_iter}{Maximum number of iteration steps}
\item{simultaneous.inference}{whether to compute simultaneous CIs (see ci_gentmle)}
\item{TE}{a vector of treatment effect value(s).}
}
\description{
computes kernel smoothed treatment effect or Treatment Effect CDF
}
\examples{
data("data_example")
head(data_example$Q)
data_example$Y[1:6]
data_example$A[1:6]
data_example$g1W[1:6]
TE = seq(-0.08,0.3, .06)
# make polynomial kernel of order 6. Note, you can only input even degrees for the kernel which
# will be the highest degree
k=make_kernel(order=6,R=5)
est.info = TECDF(initdata = data_example, kernel = k, TE = TE, h = .1,
max_iter = 1000, simultaneous.inference = TRUE)
plot(TE, est.info$tmleests)
# tmle estimates
est.info$tmleests
# steps to convergence
est.info$steps
# mean of the influence curves
est.info$ED
plot(1:415, est.info$risk)
# for simultaneous inference, default set to 5\%
ci = ci_gentmle(est.info, level = 0.95)
ci
# number of se's used for simultaneous inference at type I error rate of 5\%
(ci[1,4]-ci[1,1])/ci[1,2]
}
|
/man/TECDF.Rd
|
no_license
|
jlstiles/TECDF
|
R
| false | true | 1,671 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TECDF.R
\name{TECDF}
\alias{TECDF}
\title{TECDF}
\usage{
TECDF(initdata = initdata, kernel = kernel, TE, h, max_iter = 100,
simultaneous.inference = TRUE)
}
\arguments{
\item{initdata}{a list with elements Q, a data.frame with columns named QAW, Q1W, Q0W for initial
predictions for the outcome, outcome under A=1 and under A=0, resp. A, a vector of binary
treatment assignments and Y, the outcome and g1W, a vector of propensity scores.}
\item{kernel}{see make_kernel}
\item{h}{the bandwidth}
\item{max_iter}{Maximum number of iteration steps}
\item{simultaneous.inference}{whether to compute simultaneous CIs (see ci_gentmle)}
\item{TE}{a vector of treatment effect value(s).}
}
\description{
computes kernel smoothed treatment effect or Treatment Effect CDF
}
\examples{
data("data_example")
head(data_example$Q)
data_example$Y[1:6]
data_example$A[1:6]
data_example$g1W[1:6]
TE = seq(-0.08,0.3, .06)
# make polynomial kernel of order 6. Note, you can only input even degrees for the kernel which
# will be the highest degree
k=make_kernel(order=6,R=5)
est.info = TECDF(initdata = data_example, kernel = k, TE = TE, h = .1,
max_iter = 1000, simultaneous.inference = TRUE)
plot(TE, est.info$tmleests)
# tmle estimates
est.info$tmleests
# steps to convergence
est.info$steps
# mean of the influence curves
est.info$ED
plot(1:415, est.info$risk)
# for simultaneous inference, default set to 5\%
ci = ci_gentmle(est.info, level = 0.95)
ci
# number of se's used for simultaneous inference at type I error rate of 5\%
(ci[1,4]-ci[1,1])/ci[1,2]
}
|
library(poibin)
probs = read.table("../data/ppoibin_probs.txt", sep = "\t",
fill = FALSE, strip.white = TRUE)
probs = as.vector(probs[, 1])
out <- vector("list", length(probs))
for (i in 1:length(probs)) {
out[i] = 1 - ppoibin(i, probs, method = "DFT-CF", wts = NULL)
}
head(out)
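# Equivalent vectorised form (sketch; assumes ppoibin() accepts a vector of
# quantiles, as documented in the poibin package):
# out_vec <- 1 - ppoibin(seq_along(probs), probs, method = "DFT-CF")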
|
/src/02_PH_Hypothesis2_ppoibin.R
|
permissive
|
nhejazi/stat215a-journal-review
|
R
| false | false | 304 |
r
|
library(poibin)
probs = read.table("../data/ppoibin_probs.txt", sep = "\t",
fill = FALSE, strip.white = TRUE)
probs = as.vector(probs[, 1])
out <- vector("list", length(probs))
for (i in 1:length(probs)) {
out[i] = 1 - ppoibin(i, probs, method = "DFT-CF", wts = NULL)
}
head(out)
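# Equivalent vectorised form (sketch; assumes ppoibin() accepts a vector of
# quantiles, as documented in the poibin package):
# out_vec <- 1 - ppoibin(seq_along(probs), probs, method = "DFT-CF")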
|
# source(file.path(getwd(), "R", "background.R"))
# test_file(file.path(getwd(), "inst", "tests", "test-background.R"))
#### Background Data Class - Test Objects ----
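# Convention in the objects below: values in ".pass" vectors should let
# background_class() construct successfully, while entries of the ".fail" lists
# should make it throw an error.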
days.pass <- c(100,1000,10000)
days.fail <- list(bool = TRUE, char = "string")
name.pass <- c("fac", "FAC", "Fac")
name.fail <- list(bool = TRUE, dec = 100.00, int = 1000)
region.pass <- c("reg", "REG", "Reg")
region.fail <- list(bool = TRUE, dec = 100.00, int = 1000)
strategy.pass <- c("8_REASONS", "REPLENISH", "ORDER_POINT", "8_reasons", "replenish", "order_point", "8_Reasons", "Replenish", "Order_Point")
strategy.fail <- list(bool = TRUE, dec = 100.00, int = 1000)
shipment_size.pass <- c(10, 1000, 10000)
shipment_size.fail <- list(bool = TRUE, char = "string")
orders_per_week.pass <- c(1,3,6)
orders_per_week.fail <- list(num1 = 8, num2 = -1, num3 = 0, bool = TRUE, char = "string")
#### Background Data Class - Loads Properly ----
context("Background Data Class - Loads Properly")
test_that("Background Data Class - Loads Properly",
{
for (d in 1:length(days.pass)) {
expect_that(
background_class(TOTAL_DAYS = days.pass[d], NAME = name.pass[1], REGION = region.pass[1],
                   STRATEGY = strategy.pass[1], SHIPMENT_SIZE = shipment_size.pass[1],
ORDERS_PER_WEEK = orders_per_week.pass[1]),
is_a("background_data")
)
}
for (n in 1:length(name.pass)) {
expect_that(
background_class(TOTAL_DAYS = days.pass[1], NAME = name.pass[n], REGION = region.pass[1],
STRATEGY = strategy.pass[1], SHIPMENT_SIZE = shipment_size.pass[1],
ORDERS_PER_WEEK = orders_per_week.pass[1]),
is_a("background_data")
)
}
for (r in 1:length(region.pass)) {
expect_that(
background_class(TOTAL_DAYS = days.pass[1], NAME = name.pass[1], REGION = region.pass[r],
STRATEGY = strategy.pass[1], SHIPMENT_SIZE = shipment_size.pass[1],
ORDERS_PER_WEEK = orders_per_week.pass[1]),
is_a("background_data")
)
}
for (s in 1:length(strategy.pass)) {
expect_that(
background_class(TOTAL_DAYS = days.pass[1], NAME = name.pass[1], REGION = region.pass[1],
STRATEGY = strategy.pass[s], SHIPMENT_SIZE = shipment_size.pass[1],
ORDERS_PER_WEEK = orders_per_week.pass[1]),
is_a("background_data")
)
}
for (z in 1:length(shipment_size.pass)) {
expect_that(
background_class(TOTAL_DAYS = days.pass[1], NAME = name.pass[1], REGION = region.pass[1],
STRATEGY = strategy.pass[1], SHIPMENT_SIZE = shipment_size.pass[[z]],
ORDERS_PER_WEEK = orders_per_week.pass[1]),
is_a("background_data")
)
}
for (o in 1:length(orders_per_week.pass)) {
expect_that(
background_class(TOTAL_DAYS = days.pass[1], NAME = name.pass[1], REGION = region.pass[1],
STRATEGY = strategy.pass[1], SHIPMENT_SIZE = shipment_size.pass[1],
ORDERS_PER_WEEK = orders_per_week.pass[[o]]),
is_a("background_data")
)
}
})
#### Background Data Class - Fails Properly ----
context("Background Data Class - Fails Properly")
test_that("Background Data Class - Fails Properly",
{
for (d in 1:length(days.fail)) {
expect_that(
background_class(TOTAL_DAYS = days.fail[[d]], NAME = name.pass[1], REGION = region.pass[1],
STRATEGY = strategy.pass[1], SHIPMENT_SIZE = shipment_size.pass[1],
ORDERS_PER_WEEK = orders_per_week.pass[1]),
throws_error())
}
for (n in 1:length(name.fail)) {
expect_that(
background_class(TOTAL_DAYS = days.pass[1], NAME = name.fail[[n]], REGION = region.pass[1],
STRATEGY = strategy.pass[1], SHIPMENT_SIZE = shipment_size.pass[1],
ORDERS_PER_WEEK = orders_per_week.pass[1]),
throws_error())
}
for (r in 1:length(region.fail)) {
expect_that(
background_class(TOTAL_DAYS = days.pass[1], NAME = name.pass[1], REGION = region.fail[[r]],
STRATEGY = strategy.pass[1], SHIPMENT_SIZE = shipment_size.pass[1],
ORDERS_PER_WEEK = orders_per_week.pass[1]),
throws_error())
}
for (s in 1:length(strategy.fail)) {
expect_that(
background_class(TOTAL_DAYS = days.pass[1], NAME = name.pass[1], REGION = region.pass[1],
STRATEGY = strategy.fail[[s]], SHIPMENT_SIZE = shipment_size.pass[1],
ORDERS_PER_WEEK = orders_per_week.pass[1]),
throws_error())
}
for (z in 1:length(shipment_size.fail)) {
expect_that(
background_class(TOTAL_DAYS = days.pass[1], NAME = name.pass[1], REGION = region.pass[1],
STRATEGY = strategy.pass[1], SHIPMENT_SIZE = shipment_size.fail[[z]],
ORDERS_PER_WEEK = orders_per_week.pass[1]),
throws_error())
}
for (o in 1:length(orders_per_week.fail)) {
expect_that(
background_class(TOTAL_DAYS = days.pass[1], NAME = name.pass[1], REGION = region.pass[1],
STRATEGY = strategy.pass[1], SHIPMENT_SIZE = shipment_size.pass[1],
ORDERS_PER_WEEK = orders_per_week.fail[[o]]),
throws_error())
}
})
#### Background Data Class - Methods Load & Return Correctly ----
context("Background Data Class - Methods Load & Return Correctly")
test_that("Background Data Class - Methods Load & Return Correctly",
{
bg <- background_class(TOTAL_DAYS = days.pass[1], NAME = name.pass[1], REGION = region.pass[1],
STRATEGY = strategy.pass[1], SHIPMENT_SIZE = shipment_size.pass[1],
ORDERS_PER_WEEK = orders_per_week.pass[1])
expect_that(bg$getTotalDays(), is_equivalent_to(floor(days.pass[1])))
expect_that(bg$getName(), is_equivalent_to(toupper(name.pass[1])))
expect_that(bg$getRegion(), is_equivalent_to(toupper(region.pass[1])))
expect_that(bg$getStrategy(), is_equivalent_to(toupper(strategy.pass[1])))
expect_that(bg$getShipmentSize(), is_equivalent_to(floor(shipment_size.pass[1])))
expect_that(bg$getOrdersPerWeek(), is_equivalent_to(floor(orders_per_week.pass[1])))
expect_that(bg$getTotalDays(), is_a("numeric"))
expect_that(bg$getName(), is_a("character"))
expect_that(bg$getRegion(), is_a("character"))
expect_that(bg$getStrategy(), is_a("character"))
expect_that(bg$getShipmentSize(), is_a("numeric"))
expect_that(bg$getOrdersPerWeek(), is_a("numeric"))
})
# rm(list=ls())
|
/inst/tests/test-background.R
|
no_license
|
RobertWSmith/SCsim
|
R
| false | false | 6,659 |
r
|
# source(file.path(getwd(), "R", "background.R"))
# test_file(file.path(getwd(), "inst", "tests", "test-background.R"))
#### Background Data Class - Test Objects ----
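# Convention in the objects below: values in ".pass" vectors should let
# background_class() construct successfully, while entries of the ".fail" lists
# should make it throw an error.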
days.pass <- c(100,1000,10000)
days.fail <- list(bool = TRUE, char = "string")
name.pass <- c("fac", "FAC", "Fac")
name.fail <- list(bool = TRUE, dec = 100.00, int = 1000)
region.pass <- c("reg", "REG", "Reg")
region.fail <- list(bool = TRUE, dec = 100.00, int = 1000)
strategy.pass <- c("8_REASONS", "REPLENISH", "ORDER_POINT", "8_reasons", "replenish", "order_point", "8_Reasons", "Replenish", "Order_Point")
strategy.fail <- list(bool = TRUE, dec = 100.00, int = 1000)
shipment_size.pass <- c(10, 1000, 10000)
shipment_size.fail <- list(bool = TRUE, char = "string")
orders_per_week.pass <- c(1,3,6)
orders_per_week.fail <- list(num1 = 8, num2 = -1, num3 = 0, bool = TRUE, char = "string")
#### Background Data Class - Loads Properly ----
context("Background Data Class - Loads Properly")
test_that("Background Data Class - Loads Properly",
{
for (d in 1:length(days.pass)) {
expect_that(
background_class(TOTAL_DAYS = days.pass[d], NAME = name.pass[1], REGION = region.pass[1],
                   STRATEGY = strategy.pass[1], SHIPMENT_SIZE = shipment_size.pass[1],
ORDERS_PER_WEEK = orders_per_week.pass[1]),
is_a("background_data")
)
}
for (n in 1:length(name.pass)) {
expect_that(
background_class(TOTAL_DAYS = days.pass[1], NAME = name.pass[n], REGION = region.pass[1],
STRATEGY = strategy.pass[1], SHIPMENT_SIZE = shipment_size.pass[1],
ORDERS_PER_WEEK = orders_per_week.pass[1]),
is_a("background_data")
)
}
for (r in 1:length(region.pass)) {
expect_that(
background_class(TOTAL_DAYS = days.pass[1], NAME = name.pass[1], REGION = region.pass[r],
STRATEGY = strategy.pass[1], SHIPMENT_SIZE = shipment_size.pass[1],
ORDERS_PER_WEEK = orders_per_week.pass[1]),
is_a("background_data")
)
}
for (s in 1:length(strategy.pass)) {
expect_that(
background_class(TOTAL_DAYS = days.pass[1], NAME = name.pass[1], REGION = region.pass[1],
STRATEGY = strategy.pass[s], SHIPMENT_SIZE = shipment_size.pass[1],
ORDERS_PER_WEEK = orders_per_week.pass[1]),
is_a("background_data")
)
}
for (z in 1:length(shipment_size.pass)) {
expect_that(
background_class(TOTAL_DAYS = days.pass[1], NAME = name.pass[1], REGION = region.pass[1],
STRATEGY = strategy.pass[1], SHIPMENT_SIZE = shipment_size.pass[[z]],
ORDERS_PER_WEEK = orders_per_week.pass[1]),
is_a("background_data")
)
}
for (o in 1:length(orders_per_week.pass)) {
expect_that(
background_class(TOTAL_DAYS = days.pass[1], NAME = name.pass[1], REGION = region.pass[1],
STRATEGY = strategy.pass[1], SHIPMENT_SIZE = shipment_size.pass[1],
ORDERS_PER_WEEK = orders_per_week.pass[[o]]),
is_a("background_data")
)
}
})
#### Background Data Class - Fails Properly ----
context("Background Data Class - Fails Properly")
test_that("Background Data Class - Fails Properly",
{
for (d in 1:length(days.fail)) {
expect_that(
background_class(TOTAL_DAYS = days.fail[[d]], NAME = name.pass[1], REGION = region.pass[1],
STRATEGY = strategy.pass[1], SHIPMENT_SIZE = shipment_size.pass[1],
ORDERS_PER_WEEK = orders_per_week.pass[1]),
throws_error())
}
for (n in 1:length(name.fail)) {
expect_that(
background_class(TOTAL_DAYS = days.pass[1], NAME = name.fail[[n]], REGION = region.pass[1],
STRATEGY = strategy.pass[1], SHIPMENT_SIZE = shipment_size.pass[1],
ORDERS_PER_WEEK = orders_per_week.pass[1]),
throws_error())
}
for (r in 1:length(region.fail)) {
expect_that(
background_class(TOTAL_DAYS = days.pass[1], NAME = name.pass[1], REGION = region.fail[[r]],
STRATEGY = strategy.pass[1], SHIPMENT_SIZE = shipment_size.pass[1],
ORDERS_PER_WEEK = orders_per_week.pass[1]),
throws_error())
}
for (s in 1:length(strategy.fail)) {
expect_that(
background_class(TOTAL_DAYS = days.pass[1], NAME = name.pass[1], REGION = region.pass[1],
STRATEGY = strategy.fail[[s]], SHIPMENT_SIZE = shipment_size.pass[1],
ORDERS_PER_WEEK = orders_per_week.pass[1]),
throws_error())
}
for (z in 1:length(shipment_size.fail)) {
expect_that(
background_class(TOTAL_DAYS = days.pass[1], NAME = name.pass[1], REGION = region.pass[1],
STRATEGY = strategy.pass[1], SHIPMENT_SIZE = shipment_size.fail[[z]],
ORDERS_PER_WEEK = orders_per_week.pass[1]),
throws_error())
}
for (o in 1:length(orders_per_week.fail)) {
expect_that(
background_class(TOTAL_DAYS = days.pass[1], NAME = name.pass[1], REGION = region.pass[1],
STRATEGY = strategy.pass[1], SHIPMENT_SIZE = shipment_size.pass[1],
ORDERS_PER_WEEK = orders_per_week.fail[[o]]),
throws_error())
}
})
#### Background Data Class - Methods Load & Return Correctly ----
context("Background Data Class - Methods Load & Return Correctly")
test_that("Background Data Class - Methods Load & Return Correctly",
{
bg <- background_class(TOTAL_DAYS = days.pass[1], NAME = name.pass[1], REGION = region.pass[1],
STRATEGY = strategy.pass[1], SHIPMENT_SIZE = shipment_size.pass[1],
ORDERS_PER_WEEK = orders_per_week.pass[1])
expect_that(bg$getTotalDays(), is_equivalent_to(floor(days.pass[1])))
expect_that(bg$getName(), is_equivalent_to(toupper(name.pass[1])))
expect_that(bg$getRegion(), is_equivalent_to(toupper(region.pass[1])))
expect_that(bg$getStrategy(), is_equivalent_to(toupper(strategy.pass[1])))
expect_that(bg$getShipmentSize(), is_equivalent_to(floor(shipment_size.pass[1])))
expect_that(bg$getOrdersPerWeek(), is_equivalent_to(floor(orders_per_week.pass[1])))
expect_that(bg$getTotalDays(), is_a("numeric"))
expect_that(bg$getName(), is_a("character"))
expect_that(bg$getRegion(), is_a("character"))
expect_that(bg$getStrategy(), is_a("character"))
expect_that(bg$getShipmentSize(), is_a("numeric"))
expect_that(bg$getOrdersPerWeek(), is_a("numeric"))
})
# rm(list=ls())
|
## aeam :: C*K*M*E**1*3*2 :: Assignment 3
boxplot(mtcars$mpg~mtcars$cyl, main="Mpg by Cylinder Count", xlab="mpg", ylab=" Number of Cylinders", horizontal=TRUE, col=c("green", "red", "blue"))
allmeans <- tapply(mtcars$mpg, mtcars$cyl, mean)
points(allmeans,c(1,2,3), col="yellow", pch=18, lwd=3)
|
/assign3.R
|
no_license
|
aeamaea/132assign3
|
R
| false | false | 301 |
r
|
## aeam :: C*K*M*E**1*3*2 :: Assignment 3
boxplot(mtcars$mpg~mtcars$cyl, main="Mpg by Cylinder Count", xlab="mpg", ylab=" Number of Cylinders", horizontal=TRUE, col=c("green", "red", "blue"))
allmeans <- tapply(mtcars$mpg, mtcars$cyl, mean)
points(allmeans,c(1,2,3), col="yellow", pch=18, lwd=3)
|
loadData("atacazo.data")
addResultsIso()
# Figure 25.6
multiplePerPage(3,nrow=1,ncol=3,title=NULL)
Plate(1)
binary("Rb","Ni",log="xy",xmin=3,ymin=3,xmax=70,ymax=70,new=F)
Plate(2)
binary("Ba","Cr",log="xy",xmin=5,ymin=5,xmax=1100,ymax=1100,new=F)
Plate(3)
binary("La","Yb",log="xy",xmin=0.3,ymin=0.3,xmax=15,ymax=15,new=F)
plateCexLab(1.6)
plateCex(2)
|
/Janousek_et_al_2015_modelling_Springer/Part_6/Code/Figs/fig_25.6_atacazo_incomp_comp.r
|
no_license
|
nghia1991ad/GCDkit_book_R
|
R
| false | false | 367 |
r
|
loadData("atacazo.data")
addResultsIso()
# Figure 25.6
multiplePerPage(3,nrow=1,ncol=3,title=NULL)
Plate(1)
binary("Rb","Ni",log="xy",xmin=3,ymin=3,xmax=70,ymax=70,new=F)
Plate(2)
binary("Ba","Cr",log="xy",xmin=5,ymin=5,xmax=1100,ymax=1100,new=F)
Plate(3)
binary("La","Yb",log="xy",xmin=0.3,ymin=0.3,xmax=15,ymax=15,new=F)
plateCexLab(1.6)
plateCex(2)
|
/Practica 2/TrabajoPracticas2.R
|
no_license
|
MiguelLopezCampos/Aprendizaje-Automatico
|
R
| false | false | 23,484 |
r
| ||
#Page number--16.14
#Example number--16.7
n=10
u=100
#H0::Null Hypothesis ------> mean IQ of 100 in the population u=100
#H1::Alternative Hypothesis--->u!=100
x=c(70,120,110,101,88,83,95,98,107,100)
m=sum(x)/n #Mean
m
a=x-m
b=a^2
data.frame(x,a,b)
s2=sum(b)/9
s2
t=abs((m-u)/sqrt(s2/n))
t
sprintf("H0 is accepted")
|
/Fundamentals_Of_Mathematical_Statistics_by_S.c._Gupta,_V.k._Kapoor/CH16/EX16.7/EX16_7.R
|
permissive
|
FOSSEE/R_TBC_Uploads
|
R
| false | false | 348 |
r
|
#Page number--16.14
#Example number--16.7
n=10
u=100
#H0::Null Hypothesis ------> mean IQ of 100 in the population u=100
#H1::Alternative Hypothesis--->u!=100
x=c(70,120,110,101,88,83,95,98,107,100)
m=sum(x)/n #Mean
m
a=x-m
b=a^2
data.frame(x,a,b)
s2=sum(b)/9
s2
t=abs((m-u)/sqrt(s2/n))
t
sprintf("H0 is accepted")
|
98e1bb5534888bd3f64dfe1ce1f515c0 query71_query51_1344n.qdimacs 2277 8384
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query71_query51_1344n/query71_query51_1344n.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false | false | 72 |
r
|
98e1bb5534888bd3f64dfe1ce1f515c0 query71_query51_1344n.qdimacs 2277 8384
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fixest-tidiers.R
\name{glance.fixest}
\alias{glance.fixest}
\title{Glance at a(n) fixest object}
\usage{
\method{glance}{fixest}(x, ...)
}
\arguments{
\item{x}{A \code{fixest} object returned from any of the \code{fixest} estimators}
\item{...}{Additional arguments passed to \code{summary} and \code{confint}. Important
arguments are \code{se} and \code{cluster}. Other arguments are \code{dof}, \code{exact_dof},
\code{forceCovariance}, and \code{keepBounded}.
See \code{\link[fixest:summary.fixest]{summary.fixest}}.}
}
\description{
Glance accepts a model object and returns a \code{\link[tibble:tibble]{tibble::tibble()}}
with exactly one row of model summaries. The summaries are typically
goodness of fit measures, p-values for hypothesis tests on residuals,
or model convergence information.
Glance never returns information from the original call to the modeling
function. This includes the name of the modeling function or any
arguments passed to the modeling function.
Glance does not calculate summary measures. Rather, it farms out these
computations to appropriate methods and gathers the results together.
Sometimes a goodness of fit measure will be undefined. In these cases
the measure will be reported as \code{NA}.
Glance returns the same number of columns regardless of whether the
model matrix is rank-deficient or not. If so, entries in columns
that no longer have a well-defined value are filled in with an \code{NA}
of the appropriate type.
}
\note{
All columns listed below will be returned, but some will be \code{NA},
depending on the type of model estimated. \code{sigma}, \code{r.squared},
\code{adj.r.squared}, and \code{within.r.squared} will be NA for any model other than
\code{feols}. \code{pseudo.r.squared} will be NA for \code{feols}.
}
\examples{
\dontshow{if (rlang::is_installed("fixest")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
# load libraries for models and data
library(fixest)
gravity <-
feols(
log(Euros) ~ log(dist_km) | Origin + Destination + Product + Year, trade
)
tidy(gravity)
glance(gravity)
augment(gravity, trade)
# to get robust or clustered SEs, users can either:
# 1) specify the arguments directly in the `tidy()` call
tidy(gravity, conf.int = TRUE, cluster = c("Product", "Year"))
tidy(gravity, conf.int = TRUE, se = "threeway")
# 2) or, feed tidy() a summary.fixest object that has already accepted
# these arguments
gravity_summ <- summary(gravity, cluster = c("Product", "Year"))
tidy(gravity_summ, conf.int = TRUE)
# approach (1) is preferred.
\dontshow{\}) # examplesIf}
}
\value{
A \code{\link[tibble:tibble]{tibble::tibble()}} with exactly one row and columns:
\item{adj.r.squared}{Adjusted R squared statistic, which is like the R squared statistic except taking degrees of freedom into account.}
\item{AIC}{Akaike's Information Criterion for the model.}
\item{BIC}{Bayesian Information Criterion for the model.}
\item{logLik}{The log-likelihood of the model. [stats::logLik()] may be a useful reference.}
\item{nobs}{Number of observations used.}
\item{pseudo.r.squared}{Like the R squared statistic, but for situations when the R squared statistic isn't defined.}
\item{r.squared}{R squared statistic, or the percent of variation explained by the model. Also known as the coefficient of determination.}
\item{sigma}{Estimated standard error of the residuals.}
\item{within.r.squared}{R squared within fixed-effect groups.}
}
|
/man/glance.fixest.Rd
|
permissive
|
gregmacfarlane/broom
|
R
| false | true | 3,548 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fixest-tidiers.R
\name{glance.fixest}
\alias{glance.fixest}
\title{Glance at a(n) fixest object}
\usage{
\method{glance}{fixest}(x, ...)
}
\arguments{
\item{x}{A \code{fixest} object returned from any of the \code{fixest} estimators}
\item{...}{Additional arguments passed to \code{summary} and \code{confint}. Important
arguments are \code{se} and \code{cluster}. Other arguments are \code{dof}, \code{exact_dof},
\code{forceCovariance}, and \code{keepBounded}.
See \code{\link[fixest:summary.fixest]{summary.fixest}}.}
}
\description{
Glance accepts a model object and returns a \code{\link[tibble:tibble]{tibble::tibble()}}
with exactly one row of model summaries. The summaries are typically
goodness of fit measures, p-values for hypothesis tests on residuals,
or model convergence information.
Glance never returns information from the original call to the modeling
function. This includes the name of the modeling function or any
arguments passed to the modeling function.
Glance does not calculate summary measures. Rather, it farms out these
computations to appropriate methods and gathers the results together.
Sometimes a goodness of fit measure will be undefined. In these cases
the measure will be reported as \code{NA}.
Glance returns the same number of columns regardless of whether the
model matrix is rank-deficient or not. If so, entries in columns
that no longer have a well-defined value are filled in with an \code{NA}
of the appropriate type.
}
\note{
All columns listed below will be returned, but some will be \code{NA},
depending on the type of model estimated. \code{sigma}, \code{r.squared},
\code{adj.r.squared}, and \code{within.r.squared} will be NA for any model other than
\code{feols}. \code{pseudo.r.squared} will be NA for \code{feols}.
}
\examples{
\dontshow{if (rlang::is_installed("fixest")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
# load libraries for models and data
library(fixest)
gravity <-
feols(
log(Euros) ~ log(dist_km) | Origin + Destination + Product + Year, trade
)
tidy(gravity)
glance(gravity)
augment(gravity, trade)
# to get robust or clustered SEs, users can either:
# 1) specify the arguments directly in the `tidy()` call
tidy(gravity, conf.int = TRUE, cluster = c("Product", "Year"))
tidy(gravity, conf.int = TRUE, se = "threeway")
# 2) or, feed tidy() a summary.fixest object that has already accepted
# these arguments
gravity_summ <- summary(gravity, cluster = c("Product", "Year"))
tidy(gravity_summ, conf.int = TRUE)
# approach (1) is preferred.
\dontshow{\}) # examplesIf}
}
\value{
A \code{\link[tibble:tibble]{tibble::tibble()}} with exactly one row and columns:
\item{adj.r.squared}{Adjusted R squared statistic, which is like the R squared statistic except taking degrees of freedom into account.}
\item{AIC}{Akaike's Information Criterion for the model.}
\item{BIC}{Bayesian Information Criterion for the model.}
\item{logLik}{The log-likelihood of the model. [stats::logLik()] may be a useful reference.}
\item{nobs}{Number of observations used.}
\item{pseudo.r.squared}{Like the R squared statistic, but for situations when the R squared statistic isn't defined.}
\item{r.squared}{R squared statistic, or the percent of variation explained by the model. Also known as the coefficient of determination.}
\item{sigma}{Estimated standard error of the residuals.}
\item{within.r.squared}{R squared within fixed-effect groups.}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/monitoring_functions.R
\name{projects.metricDescriptors.list}
\alias{projects.metricDescriptors.list}
\title{Lists metric descriptors that match a filter. This method does not require a Stackdriver account.}
\usage{
projects.metricDescriptors.list(name, pageToken = NULL, pageSize = NULL,
filter = NULL)
}
\arguments{
\item{name}{The project on which to execute the request}
\item{pageToken}{If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method}
\item{pageSize}{A positive number that is the maximum number of results to return}
\item{filter}{If this field is empty, all custom and system-defined metric descriptors are returned}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
\item https://www.googleapis.com/auth/monitoring
\item https://www.googleapis.com/auth/monitoring.read
\item https://www.googleapis.com/auth/monitoring.write
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/monitoring, https://www.googleapis.com/auth/monitoring.read, https://www.googleapis.com/auth/monitoring.write))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://cloud.google.com/monitoring/api/}{Google Documentation}
}
|
/googlemonitoringv3.auto/man/projects.metricDescriptors.list.Rd
|
permissive
|
GVersteeg/autoGoogleAPI
|
R
| false | true | 1,583 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/monitoring_functions.R
\name{projects.metricDescriptors.list}
\alias{projects.metricDescriptors.list}
\title{Lists metric descriptors that match a filter. This method does not require a Stackdriver account.}
\usage{
projects.metricDescriptors.list(name, pageToken = NULL, pageSize = NULL,
filter = NULL)
}
\arguments{
\item{name}{The project on which to execute the request}
\item{pageToken}{If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method}
\item{pageSize}{A positive number that is the maximum number of results to return}
\item{filter}{If this field is empty, all custom and system-defined metric descriptors are returned}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
\item https://www.googleapis.com/auth/monitoring
\item https://www.googleapis.com/auth/monitoring.read
\item https://www.googleapis.com/auth/monitoring.write
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/monitoring, https://www.googleapis.com/auth/monitoring.read, https://www.googleapis.com/auth/monitoring.write))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://cloud.google.com/monitoring/api/}{Google Documentation}
}
|
#' @export
#' @title Normalize Image Values
#' @param image the image matrix to modify.
#' @param medianNew a new median value to scale the image values against
#' @description Normalize a given image matrix based on median. The new median
#' value should be between 0 and 1, and will typically be between 0.2 and 0.6.
#' @note There are no hard-coded parameters in this function.
#' @return An image of the same dimensions.
flow_equalizePhase <- function(image, medianNew) {
# Set darkest value to 0
imageMin <- min(image)
image <- image - imageMin
# Scale image values so median matches new median
scaling <- medianNew / median(image)
image <- image * scaling
if (getRunOptions('verbose')) {
cat(paste0('\timageMin = ',round(imageMin,3),', scaling = ',round(scaling,3),'\n'))
}
return(image)
}
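# Example use (sketch; `img` is a hypothetical image matrix and getRunOptions()
# must be available in the calling environment):
# img_eq <- flow_equalizePhase(img, medianNew = 0.4)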
|
/R/flow_equalizePhase.R
|
no_license
|
MazamaScience/TBCellGrowth
|
R
| false | false | 838 |
r
|
#' @export
#' @title Normalize Image Values
#' @param image the image matrix to modify.
#' @param medianNew a new median value to scale the image values against
#' @description Normalize a given image matrix based on median. The new median
#' value should be between 0 and 1, and will typically be between 0.2 and 0.6.
#' @note There are no hard-coded parameters in this function.
#' @return An image of the same dimensions.
flow_equalizePhase <- function(image, medianNew) {
# Set darkest value to 0
imageMin <- min(image)
image <- image - imageMin
# Scale image values so median matches new median
scaling <- medianNew / median(image)
image <- image * scaling
if (getRunOptions('verbose')) {
cat(paste0('\timageMin = ',round(imageMin,3),', scaling = ',round(scaling,3),'\n'))
}
return(image)
}
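# Example use (sketch; `img` is a hypothetical image matrix and getRunOptions()
# must be available in the calling environment):
# img_eq <- flow_equalizePhase(img, medianNew = 0.4)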
|
context("Bayesian fit with Stan")
test_that("Data that cannot be fitted with nls_list/nlme work with stan_fit", {
# with this seed, cf[10] does not fit with nls_list
library(breathtestcore)
# library(rstan)
# library(dplyr)
# library(rstan)
# library(stringr)
# library(testthat)
# library(breathteststan)
chains = 1
student_t_df = 10
dose = 100
iter = 100
sample_minutes = 15
data = cleanup_data(simulate_breathtest_data(seed = 100)$data)
comment(data) = "comment"
fit = stan_fit(data, dose = dose, student_t_df = student_t_df,
chains = chains, iter = iter )
expect_is(fit, "breathtestfit")
expect_is(fit, "breathteststanfit")
expect_is(fit$stan_fit, "stanfit" )
expect_identical(names(fit), c("coef", "data", "stan_fit", "coef_chain"))
expect_equal(names(fit$data), names(data))
expect_gt(sigma(fit), 0.9)
expect_identical(comment(fit), "comment")
cf = fit$coef
expect_identical(unique(cf$group), "A")
expect_identical(unique(cf$parameter), c("beta", "k", "m", "t50", "tlag"))
expect_identical(unique(cf$stat), c("estimate", "q_0275", "q_25", "q_75", "q_975"))
expect_equal(nrow(cf), 395)
expect_equal(ncol(cf), 6)
cf = coef(fit) # This is the "mean" group only
expect_identical(unique(cf$group), "A")
expect_identical(unique(cf$parameter), c("beta", "k", "m", "t50", "tlag"))
expect_equal(nrow(cf), 79)
expect_equal(ncol(cf), 5)
})
|
/tests/testthat/test_stan_fit_2.R
|
no_license
|
bgoodri/breathteststan
|
R
| false | false | 1,421 |
r
|
context("Bayesian fit with Stan")
test_that("Data that cannot be fitted with nls_list/nlme work with stan_fit", {
# with this seed, cf[10] does not fit with nls_list
library(breathtestcore)
# library(rstan)
# library(dplyr)
# library(rstan)
# library(stringr)
# library(testthat)
# library(breathteststan)
chains = 1
student_t_df = 10
dose = 100
iter = 100
sample_minutes = 15
data = cleanup_data(simulate_breathtest_data(seed = 100)$data)
comment(data) = "comment"
fit = stan_fit(data, dose = dose, student_t_df = student_t_df,
chains = chains, iter = iter )
expect_is(fit, "breathtestfit")
expect_is(fit, "breathteststanfit")
expect_is(fit$stan_fit, "stanfit" )
expect_identical(names(fit), c("coef", "data", "stan_fit", "coef_chain"))
expect_equal(names(fit$data), names(data))
expect_gt(sigma(fit), 0.9)
expect_identical(comment(fit), "comment")
cf = fit$coef
expect_identical(unique(cf$group), "A")
expect_identical(unique(cf$parameter), c("beta", "k", "m", "t50", "tlag"))
expect_identical(unique(cf$stat), c("estimate", "q_0275", "q_25", "q_75", "q_975"))
expect_equal(nrow(cf), 395)
expect_equal(ncol(cf), 6)
cf = coef(fit) # This is the "mean" group only
expect_identical(unique(cf$group), "A")
expect_identical(unique(cf$parameter), c("beta", "k", "m", "t50", "tlag"))
expect_equal(nrow(cf), 79)
expect_equal(ncol(cf), 5)
})
|
/QCA2BIS.R
|
no_license
|
LeaBuns/These-Lea-Bunnens
|
R
| false | false | 6,172 |
r
| ||
library(shiny)
# devtools::install_github('ayayron/shinydnd')
library(shinyDND)
library(shinyBS)
# source("Matching.R")
student_preferences <- readRDS("student_preferences.RDS")
faculty_preferences <- readRDS("faculty_preferences.RDS")
assignments <- read.csv("Assignments.csv", as.is=TRUE)
demand <- read.csv("Demand.csv", as.is=TRUE)
# Make this list be all the unassigned and assigned people
students <- names(student_preferences)
unassigned <- students[!students %in% assignments$student]
courses <- c(unique(demand$course), "unassigned")
course_assignments <- vector("list", length(courses))
names(course_assignments) <- courses
for (course in courses) {
course_assignments[[course]] <- assignments[assignments$course == course,"student"]
}
course_assignments[["unassigned"]] <- unassigned
# for clicking purposes later
s <- rep(list(FALSE), length(students))
c <- rep(list(FALSE), length(courses))
names(c) <- courses
names(s) <- students
ui <- shinyUI(
fluidPage(
# HTML formatting for notification
tags$head(
tags$style(
HTML(".shiny-notification {
position:fixed;
top: calc(30%);
left: calc(50%);
width: calc(20%);
}",
"th, td {
padding-right: 12px;
}"
)
)
),
sidebarLayout(
sidebarPanel(
h2("Courses"),
fluidRow(
column(6,
lapply(courses[1:ceiling((length(courses)-1)/2)], function(x) {
list(uiOutput(x),
actionLink(paste0(x, "_show"), "Preferences"),
uiOutput(paste0(x, "_prefs")),
br(),
uiOutput(paste0(x, "_list")),
br()
)
})
),
column(6,
lapply(courses[(ceiling((length(courses)-1)/2)+1) : (length(courses)-1)], function(x) {
list(uiOutput(x),
actionLink(inputId= paste0(x, "_show"), "Preferences"),
uiOutput(paste0(x, "_prefs")),
br(),
uiOutput(paste0(x, "_list")),
br()
)
})
)
)
),
mainPanel(
h2("Students"),
"To unassign a student, press on their button and hit the unassign button. To assign a student to a course, click their name and then click on the course name.",
br(),
br(),
"To view a student's preferences in order, click on their name.",
br(),
br(),
"To view a professor's preferences in order, click on 'preferences' below the course title. Students in blue are already assigned to that class, and crossed out students are assigned to another class. Bold students have not yet been assigned to a class and are willing to ULA the class.",
br(),
br(),
fluidRow(
column(6, br(), br(), actionButton(inputId= "unassigned", label="Unassign", style = "background-color: dodgerblue")),
column(6, img(src="Legend.png", width="200px"))
),
br(),
br(),
uiOutput(paste0("unassigned_list")),
br(),
hr(),
actionButton("submit", "Submit new assignments")
)
)
)
)
# server with reactive for observing reactive drop event
server <- shinyServer(function(input, output,session) {
clicked <- reactiveValues(s = s, c = c, change = FALSE)
# Record whether students have been clicked
lapply(students, function(x)
observeEvent(input[[x]],
clicked$s[[x]] <- !clicked$s[[x]]
)
)
# Record whether courses have been clicked
lapply(courses, function(x)
observeEvent(input[[paste0(x, "_show")]],
clicked$c[[x]] <- !clicked$c[[x]]
)
)
lapply(courses, function(x) {
output[[paste0(x, "_prefs")]] <- renderUI(
if(clicked$c[[x]]) {
text <- paste0(unlist(
lapply(faculty_preferences[[x]], function(y) {
if (y %in% course_assignments[["unassigned"]]) {
return(paste0("<b>", y, "</b></br>"))
}
else if (y %in% course_assignments[[x]]) {
return(paste0("<font color='blue'>", y, "</font></br>"))
}
else {return(paste0("<del>", y, "</del></br>"))}
})))
HTML(text)
}
)
})
# Render buttons for students that change color when clicked
lapply(students, function(x) {
most_desired <- student_preferences[[x]]$Title[student_preferences[[x]]$Rank < 3]
desired <- student_preferences[[x]]$Title[student_preferences[[x]]$Rank %in% c(3,4)]
not_desired <- student_preferences[[x]]$Title[student_preferences[[x]]$Rank > 4]
output[[paste0(x, "_prefs")]] <- renderUI(
if(clicked$s[[x]]) {
HTML(paste0(c("<table style='width=100%'> <tr> <th> Course </th> <th> Taken </th><th> Grade </th><th> Reason </th><th> </tr><tr> <td>",
unlist(lapply(order(student_preferences[[x]]$Rank), function(y) {
c(paste0(student_preferences[[x]][y,c("Title", "Taken", "Grade", "Suitable")], "</td> <td>"), "</td> </tr> <tr> <td>")
})), "</td></tr></table>")))
}
)
output[[x]] <- renderUI({
if(clicked$s[[x]]) {
actionButton(inputId= x, label=x, style = "border-color:red")
} else if (x %in% course_assignments[["unassigned"]] ) {
actionButton(inputId= x, label=x)
} else if(x %in% course_assignments[[most_desired[1]]] | x %in% course_assignments[[most_desired[2]]]) {
actionButton(inputId= x, label=x, style = "background-color:rgba(66, 244, 78, .6)")
} else if(x %in% course_assignments[[desired[1]]] | x %in% course_assignments[[desired[2]]]) {
actionButton(inputId= x, label=x, style = "background-color:rgba(244, 241, 65, .6)")
} else if(x %in% course_assignments[[not_desired[1]]] | x %in% course_assignments[[not_desired[2]]] | x %in% course_assignments[[not_desired[3]]] | x %in% course_assignments[[not_desired[4]]]){
actionButton(inputId= x, label=x, style = "background-color:rgba(244, 65, 65, .4)")
} else {actionButton(inputId= x, label=x, style = "background-color:grey")}
# actionButton(inputId= x, label=x, style = "background-color:grey")
})
})
# Make buttons for the courses
lapply(courses[-length(courses)], function(x)
output[[x]] <- renderUI({
actionButton(inputId= x, label=x, style = "background-color: dodgerblue")
})
)
# Render the lists of students in each course
observeEvent(clicked$change, {
lapply(courses[-length(courses)], function(x)
output[[paste0(x, "_list")]] <- renderUI({
if(length(course_assignments[[x]]) != demand$desired[demand$course == x]) {
HTML(c("<font color=red>", demand$desired[demand$course == x], " ULAs desired <hr> </font>", paste0(course_assignments[[x]], "</br>")))
}
else {
HTML(c("<font color=black>", demand$desired[demand$course == x], " ULAs desired <hr> </font>", paste0(course_assignments[[x]], "</br>")))
}
})
)
output[['unassigned_list']] <- renderUI(
fluidRow(
column(6,
                 lapply(students[1:ceiling(length(students)/2)], function(x) list(uiOutput(x), uiOutput(paste0(x, "_prefs")), br()))
),
column(6,
                 lapply(students[(ceiling(length(students)/2)+1):length(students)], function(x) list(uiOutput(x), uiOutput(paste0(x, "_prefs")), br()))
)
)
)
})
# Move people around when course titles get clicked
remove_students <- function(students) {
for (student in students) {
for(course in courses) {
if(student %in% course_assignments[[course]]) {
course_assignments[[course]] <<- course_assignments[[course]][course_assignments[[course]] != student]
}
}
}
}
lapply(courses, function(x) {
observeEvent(input[[x]], {
changed_students <- students[which(unlist(clicked$s))]
remove_students(changed_students)
course_assignments[[x]] <<- c(course_assignments[[x]], changed_students)
for (student in changed_students) {
clicked$s[[student]] <- FALSE
}
clicked$change <- !clicked$change
})
})
observeEvent(input$submit, {
final_assignments <- data.frame("student" = students, "course" = rep(NA, length(students)))
for (course in courses) {
for (student in course_assignments[[course]]) {
final_assignments$course[final_assignments$student == student] <- course
}
}
write.csv(final_assignments, file="Final_assignments.csv")
showNotification("Submission successful!", duration=5, type="message")
})
})
shinyApp(ui, server)
|
/Archived/Admin.R
|
no_license
|
kkbrum/ULA-app
|
R
| false | false | 8,961 |
r
|
#Loading Libraries
library(tidyverse)
library(dplyr)
library(DataExplorer)
library(data.table)
library(ggplot2)
library(vcd)
library(rpart)
library(mice)
library(randomForest)
library(plyr)
library(DMwR)
#install.packages("Hmisc")
library(Hmisc)
library(caret)
#install.packages("caretEnsemble")
library(caretEnsemble)
#Reading training and Testing Data
bigmart_train<- read.csv("train.csv",stringsAsFactors = TRUE)
bigmart_test <- read.csv("test.csv")
# Both train and test datasets have near-identical columns except for the outcome variable Item_Outlet_Sales
#Size of Datasets
object.size(bigmart_train)/10^6
#0.63 MB of Dataset
#Profile of Datasets
str(bigmart_train)
str(bigmart_test)
#Identifying NAs in both train and test Datasets
bigmart_train %>%
select(everything()) %>%
summarise_all(funs(sum(is.na(.))))
bigmart_test %>%
select(everything()) %>%
summarise_all(funs(sum(is.na(.))))
#Item_Weight has 1463 NAs in train and 976 NAs in test data
###--- Cleansing each and every Discrete variable in both datasets
# Starting with Item_Fat_Content
prop.table(table(bigmart_train$Item_Fat_Content))
prop.table(table(bigmart_test$Item_Fat_Content))
levels(bigmart_train$Item_Fat_Content)
str(bigmart_train$Item_Fat_Content)
revalue(bigmart_train$Item_Fat_Content,c("LF"="Low Fat", "low fat" = "Low Fat",
"reg" = "Regular")) -> bigmart_train$Item_Fat_Content
#Cleaning the Test table
levels(bigmart_test$Item_Fat_Content)
revalue(bigmart_test$Item_Fat_Content,c("LF"="Low Fat", "low fat" = "Low Fat",
"reg" = "Regular")) -> bigmart_test$Item_Fat_Content
# Both train and test table have Low Fat: Regular in 65:35 split
ggplot(data=bigmart_train, mapping = aes(x=Item_Fat_Content, fill = Item_Fat_Content)) +
geom_bar()
# Analyzing Item_Type for both Train and test data
prop.table(table(bigmart_train$Item_Type))
prop.table(table(bigmart_test$Item_Type))
levels(bigmart_train$Item_Type)
levels(bigmart_test$Item_Type)
ggplot(data=bigmart_train, mapping = aes(x=Item_Type, fill = Item_Type)) +
geom_bar()
ggplot(data=bigmart_test, mapping = aes(x=Item_Type, fill = Item_Type)) +
geom_bar() + facet_grid(~Item_Fat_Content)
#Analyzing Item_Visibility, detecting outliers and replacing them with NAs. Finally imputing
# the NAs with the column mean
hist(bigmart_train$Item_Visibility)
boxplot(bigmart_train$Item_Visibility)
boxplot(bigmart_test$Item_Visibility, main =" Item visibility Test")
# Outlier detected in the train and test datasets
bigmart_train$Item_Visibility <- ifelse(bigmart_train$Item_Visibility %in%
boxplot.stats(bigmart_train$Item_Visibility)$out, NA,
bigmart_train$Item_Visibility)
bigmart_test$Item_Visibility <- ifelse(bigmart_test$Item_Visibility %in%
boxplot.stats(bigmart_test$Item_Visibility)$out, NA,
bigmart_test$Item_Visibility)
#Imputing Outliers
bigmart_train$Item_Visibility[is.na(bigmart_train$Item_Visibility)] <- mean(bigmart_train$Item_Visibility, na.rm = T)
bigmart_test$Item_Visibility[is.na(bigmart_test$Item_Visibility)] <- mean(bigmart_test$Item_Visibility, na.rm = T)
#Analyzing Outlet Identifier variable
str(bigmart_train$Outlet_Identifier)
levels(bigmart_train$Outlet_Identifier)
prop.table(table(bigmart_train$Outlet_Identifier))
q<-ggplot(data=bigmart_train, mapping = aes(x=Outlet_Identifier, fill = Outlet_Identifier)) + geom_bar()
q+ theme(axis.text.x= element_text(angle=45, hjust=1))
p<- ggplot(data=bigmart_test, mapping = aes(x=Outlet_Identifier, fill = Outlet_Identifier)) + geom_bar()
p + theme(axis.text.x= element_text(angle=45, hjust=1))
#Except for Out10 and Out19, every other outlet has a similar contribution
# Analyzing the year of establishment
prop.table(table(bigmart_train$Outlet_Establishment_Year))
p<- ggplot(data=bigmart_test, mapping = aes(x=Outlet_Establishment_Year, fill = Outlet_Establishment_Year)) + geom_bar()
p + theme(axis.text.x= element_text(angle=45, hjust=1))
str(bigmart_train$Outlet_Establishment_Year)
bigmart_train$Outlet_Establishment_Year <- as.factor(bigmart_train$Outlet_Establishment_Year)
bigmart_test$Outlet_Establishment_Year <- as.factor(bigmart_test$Outlet_Establishment_Year)
#Maximum outlets were started in 1985 with minimum being 1998
# Analyzing Outlet size
str(bigmart_train$Outlet_Size)
levels(bigmart_train$Outlet_Size)
bigmart_train$Outlet_Size <- ifelse(bigmart_train$Outlet_Size== "", NA, bigmart_train$Outlet_Size)
levels(bigmart_test$Outlet_Size)
bigmart_test$Outlet_Size <- ifelse(bigmart_test$Outlet_Size== "", NA, bigmart_test$Outlet_Size)
#Imputing NAs in Categorical variable
fit <- rpart(Outlet_Size ~ Item_Visibility + Item_Fat_Content+Item_MRP,
data = bigmart_train[!is.na(bigmart_train$Outlet_Size),],
method = "anova")
bigmart_train$Outlet_Size[is.na(bigmart_train$Outlet_Size)] <-
predict(fit, bigmart_train[is.na(bigmart_train$Outlet_Size),])
bigmart_train$Outlet_Size <- as.factor(bigmart_train$Outlet_Size)
prop.table(table(bigmart_train$Outlet_Size))
revalue(bigmart_train$Outlet_Size,c("2"="High", "3" = "Medium", "3.23818092589563" = "Medium",
"4" = "Small")) -> bigmart_train$Outlet_Size
prop.table(table(bigmart_test$Outlet_Size))
bigmart_test$Outlet_Size <- as.factor(bigmart_test$Outlet_Size)
revalue(bigmart_test$Outlet_Size,c("2"="High", "3" = "Medium",
"4" = "Small")) -> bigmart_test$Outlet_Size
#Medium Size outlets dominate the store size
#Analyzing outlet location type
prop.table(table(bigmart_train$Outlet_Location_Type))
prop.table(table(bigmart_test$Outlet_Location_Type))
#Tier 3 contributes maximum of the location types
prop.table(table(bigmart_train$Outlet_Type))
prop.table(table(bigmart_test$Outlet_Type))
str(bigmart_train$Outlet_Type)
# Supermarket type1 contributes maximum to the store type
# Analyzing Sales figures w.r.to each and every Categorical variable
ggplot(data=bigmart_train,mapping =aes(x=Item_Fat_Content , y= Item_Outlet_Sales, fill = Item_Fat_Content)) +
geom_bar(stat = "identity")
#Sales of low fat is much higher than that of Regular
ggplot(data=bigmart_train,mapping =aes(x=Item_Visibility , y= Item_Outlet_Sales)) +
geom_point(stat = "identity")
#High visibility doesn't actually convert to high item sales
ggplot(data=bigmart_train,mapping =aes(x= Item_Type , y= Item_Outlet_Sales, fill = Item_Type)) +
geom_bar(stat = "identity") +
theme(axis.text.x= element_text(angle=45, hjust=1))
#Fruits and Vegetables and Snack foods contribute maximum to the Sales
ggplot(data=bigmart_train,mapping =aes(x= Outlet_Identifier, y= Item_Outlet_Sales, fill = Outlet_Identifier)) +
geom_bar(stat = "identity") +
theme(axis.text.x= element_text(angle=45, hjust=1))
#Outlet 27 is the maximum selling outlet
ggplot(data=bigmart_train,mapping =aes(x= Outlet_Establishment_Year , y= Item_Outlet_Sales)) +
geom_bar(stat = "identity") +
theme(axis.text.x= element_text(angle=45, hjust=1))
#1985 achieved the highest Sales in all years
ggplot(data=bigmart_train,mapping =aes(x= Outlet_Size , y= Item_Outlet_Sales)) +
geom_bar(stat = "identity") +
theme(axis.text.x= element_text(angle=45, hjust=1))
#Medium size outlet do the maximum business for Bigmart
ggplot(data=bigmart_train,mapping =aes(x= Outlet_Location_Type , y= Item_Outlet_Sales, fill= Outlet_Location_Type)) +
geom_bar(stat = "identity") +
theme(axis.text.x= element_text(angle=45, hjust=1))
#Tier 3 cities produce the maximum sales
ggplot(data=bigmart_train,mapping =aes(x= Outlet_Type, y= Item_Outlet_Sales, fill= Outlet_Type)) +
geom_bar(stat = "identity") +
theme(axis.text.x= element_text(angle=45, hjust=1))
#Supermarket Type 1 contributes maximum Sales
ggplot(data=bigmart_train,mapping =aes(x= Item_MRP , y= Item_Outlet_Sales)) + geom_point() +
geom_smooth(method = "lm")
str(bigmart_train$Item_MRP)
levels(bigmart_train$Item_Fat_Content)
levels(bigmart_train$Item_Type)
levels(bigmart_train$Outlet_Identifier)
levels(bigmart_train$Outlet_Establishment_Year)
levels(bigmart_train$Outlet_Size)
levels(bigmart_train$Outlet_Location_Type)
levels(bigmart_train$Outlet_Type)
levels(bigmart_train$Item_Outlet_Sales)
bigmart_train$test <- factor(bigmart_train$Item_Type,levels = unique(c(bigmart_train$Item_Type)))
str(bigmart_train$test)
levels(bigmart_train$test)
bigmart_train$test <-as.character(bigmart_train$Item_Type)
str(bigmart_train$test)
trimws(bigmart_train$test)
bigmart_train$test <- as.factor(bigmart_train$test)
bigmart_train$Item_Type <- bigmart_train$test
str(bigmart_train$Item_Type)
cor(bigmart_train$Item_MRP,bigmart_train$Item_Outlet_Sales)
bigmart_train <- select(bigmart_train,-c(test))
# Building model
big_train_model <- lm(Item_Outlet_Sales ~ Item_Fat_Content + Item_Type +
                        Item_MRP + Outlet_Identifier , data = bigmart_train)
summary(big_train_model)
bigmart_sub <- bigmart_train %>%
select(-Item_Identifier,-Outlet_Identifier)
set.seed(366284)
inTrain <- createDataPartition(y=bigmart_sub$Item_Outlet_Sales,p=0.7,list=FALSE)
train <- bigmart_sub[inTrain,]
test <- bigmart_sub[-inTrain,]
# Building Models
control <- trainControl(method = "repeatedcv",number = 10,repeats = 3, savePredictions = TRUE,classProbs = TRUE)
algorithmList <- c('glm','glmnet','lm','ranger','treebag','gbm')
models <- caretList(Item_Outlet_Sales~., train,trControl=control,methodList = algorithmList)
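# A possible follow-up (sketch, not part of the original script): once 'models'
# has finished training, caret::resamples() can summarise the cross-validated
# metrics so the algorithms can be compared; the object name below is illustrative.
# model_comparison <- resamples(models)
# summary(model_comparison)
# dotplot(model_comparison, metric = "RMSE")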
|
/BigMartSales.R
|
no_license
|
AjitDevasia/BigmartSalesR
|
R
| false | false | 10,326 |
r
|
#' 'LIFX' light color
#'
#' change the state of 'LIFX' lamps
#'
#' @template param_colors
#' @template param_duration
#' @template param_infrared
#' @template param_color_name
#' @template param_fast
#' @param delta if set to TRUE, color values (hue, saturation, brightness, kelvin, infrared) are added to the lights' current values. Cannot be used in combination with `color_name`
#' @template param_selector
#' @template param_power
#' @template param_token
#' @return an 'httr' response object (see \code{\link[httr]{response}})
#' @examples
#' \dontrun{
#' lx_color(hue = 200)
#' lx_color(saturation = 0.8)
#' lx_color(hue = 200, saturation = 0.5, brightness = 0.5)
#' lx_color(color_name = 'cyan', brightness = 1)
#' lx_color(kelvin = 5000, fast = TRUE)
#' lx_color(brightness = -0.3, delta = TRUE)
#' }
#' @export
lx_color <- function(hue = NULL, saturation = NULL, brightness = NULL, kelvin = NULL, duration = NULL, infrared = NULL, color_name = NULL,
fast = FALSE, delta = FALSE, selector = "all", power = NULL, token = lx_get_token()) {
# check inputs
if (!is.null(color_name)) {
if (delta) {
stop("can not use parameter color_name when delta is set to TRUE")
}
}
assertthat::assert_that(is.logical(fast))
if (fast & delta) {
warning("fast = TRUE has no effect if delta = TRUE")
}
# assertthat::assert_that(is.logical(delta))
if (!is.null(hue)) {
assertthat::assert_that(is.numeric(hue))
}
if (!is.null(saturation)) {
assertthat::assert_that(is.numeric(saturation))
}
if (!is.null(brightness)) {
assertthat::assert_that(is.numeric(brightness))
}
if (!is.null(kelvin)) {
assertthat::assert_that(is.numeric(kelvin))
}
if (!is.null(duration)) {
assertthat::assert_that(is.numeric(duration))
}
if (!is.null(infrared)) {
assertthat::assert_that(is.numeric(infrared))
}
  # if delta, don't check color (because delta values can be invalid colors, such as negative numbers)
check <- !delta
if (!all(sapply(list(hue, saturation, brightness, kelvin, color_name), is.null))) {
color_name <- lx_color_name(color_name = color_name, saturation = saturation, hue = hue, brightness = brightness, kelvin = kelvin,check = check)
}
if (!delta) {
# set the state using 'lx_state' function (which mirrors the 'LIFX' api /state endpoint):
response <- lx_state(power = power, color_name = color_name, brightness = brightness, infrared = infrared, duration = duration,
selector = selector, fast = fast, token = token)
}
if (delta) {
# set the state using 'lx_state' function (which mirrors the 'LIFX' api /state/delta endpoint):
response <- lx_delta(hue = hue, saturation = saturation, brightness = brightness, kelvin = kelvin, duration = duration, infrared = infrared,
power = power, selector = selector, token = token)
}
return(invisible(response))
}
|
/R/color.R
|
no_license
|
cran/lifx
|
R
| false | false | 3,011 |
r
|
nzlist <- function(blk,At,par){
spdensity <- par$spdensity
smallblkdim <- par$smallblkdim
m <- par$numcolAt
##
numblk <- nrow(as.matrix(blk))
isspA <- matrix(0, numblk,m)
nzlistA <- matrix(list(),numblk,2)
nzlistAsum <- matrix(list(),numblk,2)
isspAy <- rep(0,numblk)
nzlistAy <- matrix(list(),numblk,1)
##
for(p in 1:nrow(blk)){
if(blk[[p,1]] == "s" && (max(blk[[p,2]]) > smallblkdim || max(dim(as.matrix(blk[[p,2]]))) <= 10)){
numblk <- max(dim(as.matrix(blk[[p,2]])))
n <- sum(blk[[p,2]])
n2 <- sum(blk[[p,2]] * blk[[p,2]])
if(numblk == 1){
nztol <- spdensity*n
nztol2 <- spdensity*n2/2
}else{
nztol <- spdensity*n/2
nztol2 <- spdensity*n2/4
}
nzlist1 <- matrix(0,1,m+1)
nzlist2 <- c()
nzlist3 <- matrix(0,1,m+1)
nzlist4 <- c()
breakyes <- rep(FALSE,2)
Asum <- Matrix(0,n,n, sparse=TRUE)
##
m1 <- ncol(At[[p,1]])
if(is.null(m1)){
m1 <- 0
}
if(m1 > 0){
for(k in 1:m1){
#Ak <- smat(blk,p,At[[p]][,k],0) #changed from mexsmat
Ak <- mexsmat(blk,At,0,p,k)
nnzAk <- length(which(Ak != 0))
isspA[p,k] <- (nnzAk < spdensity*n2 | numblk > 1)
if(!all(breakyes)){
out <- which(abs(Ak) > 0, arr.ind = TRUE)
I <- out[,1]
J <- out[,2]
#
          # Nonzero elements of Ak
#
if(breakyes[1] == 0){
if(nnzAk <= nztol){
idx <- which(I <= J)
nzlist1[k+1] <- nzlist1[k] + length(idx)
nzlist2 <- rbind(nzlist2,cbind(I[idx], J[idx]))
}else{
nzlist1[(k+1):(m+1)] <- rep(Inf,m-k+1)
breakyes[1] <- TRUE
}
}
#
#nonzero elements of A1 + ... + Ak
#
if(breakyes[2] == 0){
nztmp <- rep(0,length(I))
if(length(I) > 0){
for(t in 1:length(I)){
i <- I[t]
j <- J[t]
nztmp[t] <- Asum[i,j]
}
}
#Find new nonzero positions when Ak is added to Asum
idx <- which(nztmp == 0)
nzlist3[k+1] <- nzlist3[k] + length(idx)
if(nzlist3[k+1] < nztol2){
nzlist4 <- rbind(nzlist4,cbind(I[idx], J[idx]))
}else{
nzlist3[(k+1):(m+1)] <- rep(Inf,m-k+1)
breakyes[2] <- TRUE
}
Asum <- Asum + abs(Ak)
}
}
}
}
if(numblk == 1){
isspAy[p] <- (is.finite(nzlist1[m+1]) | is.finite(nzlist3[m+1]))
}else{
isspAy[p] <- 1
}
if(!is.null(nzlist1)) nzlistA[[p,1]] <- nzlist1
if(!is.null(nzlist2)) nzlistA[[p,2]] <- nzlist2
if(!is.null(nzlist3)) nzlistAsum[[p,1]] <- nzlist3
if(!is.null(nzlist4)) nzlistAsum[[p,2]] <- nzlist4
#
# nonzero elements of (A1*y1 ... Am*ym)
#
if(is.finite(nzlist3[m+1])){
if(ncol(blk) > 2){
m2 <- length(blk[[p,3]])
len <- sum(blk[[p,3]])
DD <- matrix(0,len,len)
for(t in 1:nrow(At[[p,3]])){
DD[At[[p,3]][t,2], At[[p,3]][t,3]] <- At[[p,3]][t,4]
}
Asum <- Asum + abs(At[[p,2]] %*% DD %*% t(At[[p,2]]))
}
out <- which(Asum > 0, arr.ind = TRUE)
I <- out[,1]
J <- out[,2]
if(length(I) < nztol2){
nzlistAy[[p,1]] <- cbind(I,J)
}else{
nzlistAy[[p,1]] <- Inf
}
}else{
nzlistAy[[p,1]] <- Inf
}
}
}
return(list(isspA = isspA, nzlistA = nzlistA, nzlistAsum=nzlistAsum, isspAy=isspAy, nzlistAy=nzlistAy))
}
|
/R/nzlist.R
|
no_license
|
cran/sdpt3r
|
R
| false | false | 3,952 |
r
|
yrates = c(c(120, 60, 30, 20, 15, 12, 10), yratesEXT)
yrates = sort(c(yrates,-yrates))
ytimes = sort(1000/yrates)
ybreaks = sort(c(round(ytimes, 2), 0))
ms2FPS = function(DATA, r = 0) round(1000/DATA, r)
labelBreak = function(breaks, SEC = FALSE) {
if (!app.BREAK) return(breaks)
BREAK = c("", "\n")
if (is.numeric(breaks) & 0 %in% breaks) if ((which(breaks %in% 0) %% 2) == 0) BREAK = rev(BREAK)
if (!SEC) return( paste0(rep(BREAK, length.out = length(breaks)), breaks) )
if (SEC) return( paste0(breaks, rep(BREAK, length.out = length(breaks))) )
}
# can be disabled by setting app.BREAK to FALSE
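# Illustrative example (assuming app.BREAK is TRUE): every second label gains a
# leading newline so crowded tick labels do not overlap, e.g.
# labelBreak(c(10, 20, 30, 40)) returns "10" "\n20" "30" "\n40"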
# labelRound = function(breaks) sprintf("%.1f", breaks)
labelRound = function(breaks) round(breaks, 1)
labelRoundB = function(breaks) labelBreak(labelRound(breaks))
ms2FPS.lab = function(breaks) labelBreak(ms2FPS(breaks), SEC = TRUE)
labelBreakQQ= function(breaks) labelBreak(paste0(pnorm(breaks) * 100, "%"))
labelDisp = function(breaks) round(breaks * 60/1000, 1)
labelDispB = function(breaks) labelBreak(labelDisp(breaks))
meanMS = function(DATA) {
out = c(mean(DATA), median(DATA))
names(out) = c("Mean", "Median")
return(out)
}
meanGEO = function(DATA) {
out = exp(mean(log(DATA)))
names(out) = "ms"
return(out)
}
normGEO = function(DATA) {
out = DATA / max(DATA) * 100
names(out) = "Normalized (%)"
return(out)
}
# this should only be used with special AGGREGATE functions with different GROUPS than normally used
# for example, just GROUP by GPU to compare them, or by GPU and API to compare them
# normGEO is for normalizing the performance based on the maximum/longest frame time
# should be used on the AGGREGATE, not within it, and will require passing the specific column to the function
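# A minimal sketch of such a special aggregate (illustrative only; assumes the
# 'results' data frame is already loaded and uses a GPU-only grouping):
# geo = aggregate(list(ms = results$MsBetweenPresents), list(GPU = results$GPU), meanGEO)
# geo$Normalized = normGEO(geo$ms)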
percMS = function(DATA, listPERC = c(0.1, 1, 99, 99.9)) {
if (max(listPERC) > 1) listPERC = listPERC/100
out = quantile(DATA, listPERC)
names(out) = paste0(listPERC * 100, "%")
return(out)
}
ecdfFPS = function(DATA, listFPS = NULL, r = 2) {
default = c(60, 50, 30, 20, 15)
listFPS = unique(sort(c(default, listFPS), decreasing = TRUE))
out = 100 * (1 - ecdf(DATA)(1000 / listFPS))
names(out) = paste0(listFPS, " FPS")
return(round(out, r))
}
statMS = function(DATA, r = 2) {
out = c(mean(DATA), sd(DATA), sd(DATA)/mean(DATA) * 100, skewness(DATA), kurtosis(DATA))
names(out) = c("Mean (ms)", "StDev (ms)", "CoV (%)", "Skew", "Kurtosis")
return(round(out, r))
}
BoxPerc = function (DATA) {
out = quantile(DATA, c(0.001, 0.01, 0.5, 0.99, 0.999))
names(out) = c("ymin", "lower", "middle", "upper", "ymax")
return(out)
}
# by using this with stat_summary I can have custom quantiles for the boxplot
qqslope = function (DATA, r = 2, quan = QUAN) {
y = quantile(DATA, quan)
#x = qnorm(quan)
x = 100 * quan
# to make this be in percentile instead of Z-score
slope = diff(y)/diff(x)
return(round(slope, r))
}
statGRAPH = function(DATA, ...) {
out = c(mean(DATA), median(DATA), median(diff(DATA)), qqslope(DATA, ...), quantile(DATA, c(0.1, 1, 99, 99.9)/100))
names(out) = c("Mean", "Median", "DiffMedian", "Slope", "0.1", "1", "99", "99.9")
return(out)
}
# DiffMedian can be used in graphDIFF to apply a Median-Median cross on the plots
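# Example of pulling those per-group graph statistics (a sketch; assumes 'results'
# and the globals used by AGG are already defined):
# STATS = AGG("MsBetweenPresents", statGRAPH)
# STATS[, c("GPU", "Median", "DiffMedian", "Slope")]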
sepCOL = function(aggOUT) {
matCOL = sapply(aggOUT, is.matrix)
out = cbind(aggOUT[, !matCOL], as.data.frame(aggOUT[, matCOL]))
return(out)
}
AGG = function(datatype, FUN, ..., COL = NULL, ITEM = NULL, DATA = results) {
if (!is.null(COL) & !is.null(ITEM)) DATA = DATA[DATA[, COL] == ITEM, ]
GROUPS = list(GPU = DATA$GPU, API = DATA$API, Quality = DATA$Quality, Location = DATA$Location)
if (!testAPI) GROUPS$API = NULL
if (!testQUA) GROUPS$Quality = NULL
return(sepCOL(aggregate(DATA[, datatype], GROUPS, FUN, ...)))
}
addFPS = function(DATA, r = 2) {
numCOL = sapply(DATA, is.numeric)
dataFPS = cbind(list("Unit" = "FPS"), round(1000/DATA[, numCOL], r))
dataMS = cbind(list("Unit" = "ms"), round(DATA[, numCOL], r))
out = cbind(DATA[, !numCOL], rbind(dataFPS, dataMS))
colnames(out)[grep("Unit", colnames(out))] = ""
return(out)
}
compTAB = function(MEAN, PERC, ECDF) {
if (is.null(listFPS)) {
listECDF = grep("60 FPS", colnames(ECDF))
} else {
begECDF = grep(paste0(max(c(listFPS, 60)), " FPS"), colnames(ECDF))
endECDF = grep(paste0(min(c(listFPS, 60)), " FPS"), colnames(ECDF))
listECDF = begECDF:endECDF
}
compECDF = as.data.frame(ECDF[, listECDF])
names(compECDF) = colnames(ECDF)[listECDF]
out = cbind(
MEAN,
PERC[, sapply(PERC, is.numeric)],
compECDF
)
colnames(out)[grep("Var.", colnames(out))] = ""
return(out)
}
dataSEL = function(datatype, COL = NULL, ITEM = NULL) {
if (datatype == "MsBetweenPresents") descs = c("Frame Time", "data")
if (datatype == "MsBetweenDisplayChange") descs = c("Display Time", "disp")
if (datatype == "MsUntilRenderComplete") descs = c("Render Time", "rend")
if (datatype == "MsEstimatedDriverLag") descs = c("Driver Lag", "driv")
type <<- descs[1]; typeSHORT <<- descs[2]
MEAN <<- AGG(datatype, meanMS, COL = COL, ITEM = ITEM)
PERC <<- AGG(datatype, percMS, COL = COL, ITEM = ITEM)
ECDF <<- AGG(datatype, ecdfFPS, listFPS, COL = COL, ITEM = ITEM)
STAT <<- AGG(datatype, statMS, COL = COL, ITEM = ITEM)
}
dataSEL.rm = function() {
rm(type, typeSHORT, MEAN, PERC, ECDF, STAT, envir = .GlobalEnv)
}
# used for removing the objects dataSEL makes, in case you need to clear them for troubleshooting purposes
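# Illustrative troubleshooting flow: populate the shared objects for one data type,
# inspect them, then clear them again.
# dataSEL("MsBetweenPresents")
# head(MEAN); head(STAT)
# dataSEL.rm()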
sinkTXT = function(datatype, COL = NULL, ITEM = NULL) {
options(width = 1000)
subSTR = ""
if (!is.null(ITEM)) subSTR = paste0(" - ", ITEM, " - ")
filePath = paste0(gameGAQF, " ", subSTR, type, ".txt")
if (!is.null(COL)) if (COL == "GPU") filePath = paste0(ITEM, "\\", filePath)
sink(filePath, split = TRUE)
writeLines(gameGAQ)
writeLines(type)
writeLines("\nMean")
print(addFPS(MEAN), row.names = FALSE)
writeLines("\nPercentiles")
print(addFPS(PERC), row.names = FALSE)
writeLines("\nPercentile of FPS")
print(ECDF, row.names = FALSE)
writeLines("\nDistribution Stats")
print(STAT, row.names = FALSE)
sink()
}
library(tableHTML)
OCCHTML = function(DATA) {
tableHTML(DATA, rownames = FALSE, class="OCC") %>%
replace_html('style="border-collapse:collapse;" class=OCC border=1', 'align="center" border="1" cellpadding="1" cellspacing="1" style="width: 90%;"') %>%
replace_html(' id=\"tableHTML_header_\\d\"', '', replace_all = TRUE) %>%
replace_html(' id=\"tableHTML_header_\\d\\d\"', '', replace_all = TRUE) %>%
replace_html(' id=\"tableHTML_column_\\d\"', '', replace_all = TRUE) %>%
replace_html(' id=\"tableHTML_column_\\d\\d\"', '', replace_all = TRUE)
}
writeOCC = function(DATA, dataNAME, name=gameGAQF, fold = "") {
filePath = paste0(name, " - ", dataNAME,".html")
if (fold != "") filePath = paste0(fold, "\\", filePath)
write_tableHTML(OCCHTML(DATA), file = filePath)
}
sinkHTML = function(datatype, COL = NULL, ITEM = NULL) {
ITEM.f = ""
if (!is.null(COL) & !is.null(ITEM)) ITEM.f = paste0(ITEM, " - ")
FOLD = ""
if (!is.null(COL)) if (COL == "GPU") FOLD = ITEM
writeOCC(addFPS(MEAN), dataNAME = paste0(ITEM.f, typeSHORT, "MEAN"), fold = FOLD)
writeOCC(addFPS(PERC), dataNAME = paste0(ITEM.f, typeSHORT, "PERC"), fold = FOLD)
writeOCC(ECDF, dataNAME = paste0(ITEM.f, typeSHORT, "ECDF"), fold = FOLD)
writeOCC(STAT, dataNAME = paste0(ITEM.f, typeSHORT, "STAT"), fold = FOLD)
writeOCC(compTAB(addFPS(MEAN), addFPS(PERC), ECDF), dataNAME = paste0(ITEM.f, typeSHORT, "COMP"), fold = FOLD)
}
sinkOUT = function(datatype) {
dataSEL(datatype)
sinkTXT(datatype)
if (HTMLOUT) sinkHTML(datatype)
if (perGPU) { for (GPU in listGPU) { if (!file.exists(GPU)) next
dataSEL(datatype, COL = "GPU", ITEM = GPU)
sinkTXT(datatype, COL = "GPU", ITEM = GPU)
if (HTMLOUT) sinkHTML(datatype, COL = "GPU", ITEM = GPU)
} }
if (textAPI) { for (API in listAPI) {
dataSEL(datatype, COL = "API", ITEM = API)
sinkTXT(datatype, COL = "API", ITEM = API)
if (HTMLOUT) sinkHTML(datatype, COL = "API", ITEM = API)
} }
if (textLOC) { for (Location in listLOC) {
dataSEL(datatype, COL = "Location", ITEM = Location)
sinkTXT(datatype, COL = "Location", ITEM = Location)
if (HTMLOUT) sinkHTML(datatype, COL = "Location", ITEM = Location)
} }
}
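# Typical top-level calls once the data and options above are defined (illustrative):
# sinkOUT("MsBetweenPresents")
# sinkOUT("MsBetweenDisplayChange")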
customSave = function(type="", plot = last_plot(), device=ggdevice, width=gWIDTH, height=gHEIGH, dpi=DPI) {
if (device == "png" | device == "both") {
ggsave(filename=paste0(gameGAQF, " - ", type, ".png"), plot = plot, device="png", width=width, height=height, dpi=dpi)
}
if (device == "pdf" | device == "both") {
ggsave(filename=paste0(gameGAQF, " - ", type, ".pdf"), plot = plot, device="pdf", width=width, height=height)
}
}
graphOUT = function(datatype, graphtype, OUT = TRUE, SHOW = FALSE, diffLim = NULL, ...) {
if (datatype == "MsBetweenPresents") dataNAME = "Frame Time"
if (datatype == "MsBetweenDisplayChange") dataNAME = "Display Time"
if (datatype == "MsUntilRenderComplete") dataNAME = "Render Time"
if (datatype == "MsEstimatedDriverLag") dataNAME = "Driver Lag"
if (substitute(graphtype) == "graphMEANS") graphNAME = "Means"
if (substitute(graphtype) == "graphCOURSE") graphNAME = "Course"
if (substitute(graphtype) == "graphFREQ") graphNAME = "Freq"
if (substitute(graphtype) == "graphQQ") graphNAME = "QQ"
if (substitute(graphtype) == "graphDIFF") graphNAME = "Diff"
if (substitute(graphtype) == "graphMEANSbox") graphNAME = "Means Labeled"
PLOT = graphtype(datatype)
if (graphNAME == "Diff" & !is.null(diffLim)) {
PLOT = graphtype(datatype, diffLim)
graphNAME = paste0(graphNAME, " EXT")
}
message(paste0(graphNAME, " - ", dataNAME))
if (OUT) customSave(paste0("@", graphNAME, " - ", dataNAME), plot = PLOT, ...)
if (SHOW) PLOT
}
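# Example calls (a sketch; assumes 'results' and the output options such as ggdevice,
# gWIDTH, and gHEIGH are defined by the input script):
# graphOUT("MsBetweenPresents", graphMEANS)
# graphOUT("MsBetweenPresents", graphDIFF, diffLim = 1000/30) # saved as "Diff EXT"
# graphOUT("MsUntilRenderComplete", graphFREQ, OUT = FALSE, SHOW = TRUE)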
levels.rev = function(DATA, COL) {
DATA = as.data.frame(DATA)
ordered(DATA[, COL], levels = rev(levels(DATA[, COL])))
}
levels.short = function(DATA, COL, LIST, LEVS) {
DATA = as.data.frame(DATA)
DATA[, COL] = ordered(DATA[, COL], levels = LIST)
levels(DATA[, COL]) = LEVS
return(DATA[, COL])
}
data.short = function(DATA) {
if (!is.null(shortLOC)) DATA[, "Location"] = levels.short(DATA, "Location", listLOC, levsLOC)
if (!is.null(shortAPI) & testAPI) DATA[, "API"] = levels.short(DATA, "API", listAPI, levsAPI)
return(DATA)
}
graph.rev = function(DATA, rev.LOC = FALSE, rev.API = FALSE) {
if (rev.LOC) DATA$Location = levels.rev(DATA, "Location")
if (rev.API & testAPI) DATA$API = levels.rev(DATA, "API")
return(DATA)
}
# spacing between facet panels can be set with theme(panel.spacing.x = unit(1, "lines"))
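# For example (hypothetical tweak, added to a finished plot rather than to the facet object):
# graphMEANS("MsBetweenPresents") + theme(panel.spacing.x = grid::unit(1, "lines"))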
FACET = function(graphtype) {
if (any(substitute(graphtype) == c("graphMEANS"))) {
if (testAPI & !testQUA) return(facet_grid(rows = vars(API), cols = vars(Location), switch = "y"))
if (!testAPI & testQUA) return(facet_grid(rows = vars(Quality), cols = vars(Location), switch = "y"))
if (testAPI & testQUA) return(facet_grid(rows = vars(API, Quality), cols = vars(Location), switch = "y"))
return(facet_grid(cols = vars(Location), switch = "y"))
}
if (any(substitute(graphtype) == c("graphCOURSE", "graphFREQ", "graphQQ", "graphDIFF"))) {
if (multiGPU) {
if (testAPI & !testQUA) return(facet_grid(rows = vars(Location, API), cols = vars(GPU), switch = "y"))
if (!testAPI & testQUA) return(facet_grid(rows = vars(Location, Quality), cols = vars(GPU), switch = "y"))
if (testAPI & testQUA) return(facet_grid(rows = vars(Location, API, Quality), cols = vars(GPU), switch = "y"))
} else {
if (testAPI & !testQUA) return(facet_grid(rows = vars(API), cols = vars(Location, GPU), switch = "y"))
if (!testAPI & testQUA) return(facet_grid(rows = vars(Quality), cols = vars(Location, GPU), switch = "y"))
if (testAPI & testQUA) return(facet_grid(rows = vars(API, Quality), cols = vars(Location, GPU), switch = "y"))
}
return(facet_grid(rows = vars(Location), cols = vars(GPU), switch = "y"))
}
}
graphMEANS = function(datatype) {
if (datatype == "MsBetweenPresents") {
scale_Y = scale_y_continuous(
name = "Frame Time (ms)",
breaks = ybreaks, labels = labelRound, expand = c(0.02, 0),
sec.axis = dup_axis(
name = "Frame Rate (FPS)",
labels = ms2FPS
)
)
}
if (datatype == "MsBetweenDisplayChange") {
scale_Y = scale_y_continuous(
name = "Refresh Cycles Later (1/60 s)",
breaks = ybreaks, labels = labelDisp, expand = c(0.02, 0),
sec.axis = dup_axis(
name = "Display Time (ms)",
labels = ybreaks
)
)
}
if (datatype == "MsUntilRenderComplete") {
scale_Y = scale_y_continuous(
name = "Render Time (ms)",
breaks = ybreaks, labels = labelRound, expand = c(0.02, 0),
sec.axis = dup_axis(
name = "Render Rate (FPS)",
labels = ms2FPS
)
)
}
if (datatype == "MsEstimatedDriverLag") {
scale_Y = scale_y_continuous(
name = "Estimated Driver Lag (ms)",
breaks = ybreaks, labels = labelRound, expand = c(0.02, 0),
sec.axis = dup_axis()
)
}
# if (useSHORT) results = data.short(results)
results = graph.rev(results, rev.LOC, rev.API)
# if (useSHORT) STATS = data.short(STATS) ; STATS = graph.rev(STATS, rev.LOC, rev.API)
ggplot(data = results, aes(x = GPU, y = get(datatype))) +
ggtitle(gameQ, subtitle = paste0(datatype, " - Means, Medians, and Percentiles")) + labsGPU +
geom_hline(yintercept = 1000/60, color = "red") +
# geom_boxplot(outlier.alpha = 0) +
stat_summary(fun.data = BoxPerc, geom = "boxplot", width = 0.6) +
geom_bar(aes(fill = GPU), stat = "summary", fun = mean) + scale_fill_hue() +
stat_summary(fun.data = BoxPerc, geom = "boxplot", alpha = 0.25, width = 0.6) +
# geom_boxplot(alpha = 0.50, outlier.alpha = 0.1) +
FACET(graphMEANS) +
scale_x_discrete(labels = labelBreak) +
scale_Y + coord_cartesian(ylim = c(0, FtimeLimit)) +
guides(fill = guide_legend(nrow = 1)) + theme(legend.position = "bottom")
}
boxLABS = function(datatype) {
STATS = AGG(datatype, statGRAPH)
ALPHA = 0.65
nudOUT = 0.50
nudIN = 0.40
nudMED = 0.55
list(
geom_label(data = STATS, aes(x = GPU, y = STATS[, "99.9"], label = round(STATS[, "99.9"], 2)), alpha = ALPHA,
vjust = 0, nudge_y = nudOUT),
geom_label(data = STATS, aes(x = GPU, y = STATS[, "0.1"], label = round(STATS[, "0.1"], 2)), alpha = ALPHA,
vjust = 1, nudge_y = -nudOUT),
# 0.1% and 99.9%
geom_label(data = STATS, aes(x = GPU, y = STATS[, "99"], label = round(STATS[, "99"], 2)), alpha = ALPHA,
hjust = 1, nudge_x = nudIN, vjust = 0),
geom_label(data = STATS, aes(x = GPU, y = STATS[, "1"], label = round(STATS[, "1"], 2)), alpha = ALPHA,
hjust = 1, nudge_x = nudIN, vjust = 1),
# 1% and 99%
geom_label(data = STATS, aes(x = GPU, y = Median, label = round(Median, 2)), alpha = ALPHA,
hjust = 1, nudge_x = nudMED),
geom_text(data = STATS, aes(x = GPU, y = Mean, label = round(Mean, 2)),
hjust = 0, nudge_x = -0.55, vjust = 0)
# median and mean
)
}
graphMEANSbox = function(datatype) graphMEANS(datatype) + boxLABS(datatype)
graphCOURSE = function(datatype) {
if (datatype == "MsBetweenPresents") {
scale_Y = scale_y_continuous(
name = "Frame Time (ms)",
breaks = ybreaks, labels = labelRound, expand = c(0.02, 0),
sec.axis = dup_axis(
name = "Frame Rate (FPS)",
labels = ms2FPS
)
)
}
if (datatype == "MsBetweenDisplayChange") {
scale_Y = scale_y_continuous(
name = "Refresh Cycles Later (1/60 s)",
breaks = ybreaks, labels = labelDisp, expand = c(0.02, 0),
sec.axis = dup_axis(
name = "Display Time (ms)",
labels = ybreaks
)
)
}
if (datatype == "MsUntilRenderComplete") {
scale_Y = scale_y_continuous(
name = "Render Time (ms)",
breaks = ybreaks, labels = labelRound, expand = c(0.02, 0),
sec.axis = dup_axis(
name = "Render Rate (FPS)",
labels = ms2FPS
)
)
}
if (datatype == "MsEstimatedDriverLag") {
scale_Y = scale_y_continuous(
name = "Estimated Driver Lag (ms)",
breaks = ybreaks, labels = labelRound, expand = c(0.02, 0),
sec.axis = dup_axis()
)
}
if (useSHORT) results = data.short(results)
results = graph.rev(results, rev.LOC, rev.API)
# if (useSHORT) STATS = data.short(STATS) ; STATS = graph.rev(STATS, rev.LOC, rev.API)
ALPHA = 0.05
if (length(unique(results$Location)) == 1) ALPHA = 1
ggplot(data = results, aes(x = TimeInSeconds, y = get(datatype))) +
ggtitle(gameQ, subtitle = paste0(datatype, " - Course")) + labsGPU +
geom_hline(yintercept = 1000/60, color = "red") +
geom_point(alpha = ALPHA) +
geom_smooth(method="gam", formula= y ~ s(x, bs = "cs")) +
FACET(graphCOURSE) +
scale_x_continuous(name="Time (s)", breaks=seq(from=0, to=max(results$TimeInSeconds), by=60), labels = labelBreak, expand=c(0.02, 0)) +
scale_Y + coord_cartesian(ylim = c(0, FtimeLimit))
}
graphFREQ = function(datatype) {
STATS = AGG(datatype, statGRAPH)
if (datatype == "MsBetweenPresents") {
scale_X = scale_x_continuous(
name = "Frame Time (ms)",
breaks = ybreaks, labels = labelRoundB, expand = c(0.02, 0),
sec.axis = dup_axis(
name = "Frame Rate (FPS)",
labels = ms2FPS.lab
)
)
}
if (datatype == "MsBetweenDisplayChange") {
scale_X = scale_x_continuous(
name = "Refresh Cycles Later (1/60 s)",
breaks = ybreaks, labels = labelDispB, expand = c(0.02, 0),
sec.axis = dup_axis(
name = "Display Time (ms)",
labels = ybreaks
)
)
}
if (datatype == "MsUntilRenderComplete") {
scale_X = scale_x_continuous(
name = "Render Time (ms)",
breaks = ybreaks, labels = labelRoundB, expand = c(0.02, 0),
sec.axis = dup_axis(
name = "Render Rate (FPS)",
labels = ms2FPS.lab
)
)
}
if (datatype == "MsEstimatedDriverLag") {
scale_X = scale_x_continuous(
name = "Estimated Driver Lag (ms)",
breaks = ybreaks, labels = labelRoundB, expand = c(0.02, 0),
sec.axis = dup_axis()
)
}
if (useSHORT) results = data.short(results)
results = graph.rev(results, rev.LOC, rev.API)
  if (useSHORT) STATS = data.short(STATS)
  STATS = graph.rev(STATS, rev.LOC, rev.API)
ggplot(results, aes(get(x = datatype))) +
ggtitle(gameQ, subtitle=paste0(datatype, " - Frequency Plot")) + labsGPU +
geom_vline(xintercept = 1000/60, color = "red") +
geom_freqpoly(binwidth=0.03, size=0) +
geom_vline(data = STATS, aes(xintercept = Mean), color = "darkgreen") +
geom_vline(data = STATS, aes(xintercept = Median), color = "darkcyan", linetype="dotdash") +
FACET(graphFREQ) +
scale_X + coord_cartesian(xlim = c(0, FtimeLimit)) +
scale_y_continuous(name="Count", expand=c(0.02, 0))
}
graphQQ = function(datatype, PERCS = c(.001, .01, .5, .99, .999)) {
PERCS = sort(unique(c(PERCS, QUAN)))
STATS = AGG(datatype, statGRAPH)
if (datatype == "MsBetweenPresents") {
scale_Y = scale_y_continuous(
name = "Frame Time (ms)",
breaks = ybreaks, labels = labelRound, expand = c(0.02, 0),
sec.axis = dup_axis(
name = "Frame Rate (FPS)",
labels = ms2FPS
)
)
}
if (datatype == "MsBetweenDisplayChange") {
scale_Y = scale_y_continuous(
name = "Refresh Cycles Later (1/60 s)",
breaks = ybreaks, labels = labelDisp, expand = c(0.02, 0),
sec.axis = dup_axis(
name = "Display Time (ms)",
labels = ybreaks
)
)
}
if (datatype == "MsUntilRenderComplete") {
scale_Y = scale_y_continuous(
name = "Render Time (ms)",
breaks = ybreaks, labels = labelRound, expand = c(0.02, 0),
sec.axis = dup_axis(
name = "Render Rate (FPS)",
labels = ms2FPS
)
)
}
if (datatype == "MsEstimatedDriverLag") {
scale_Y = scale_y_continuous(
name = "Estimated Driver Lag (ms)",
breaks = ybreaks, labels = labelRound, expand = c(0.02, 0)
)
}
if (useSHORT) results = data.short(results)
results = graph.rev(results, rev.LOC, rev.API)
if (useSHORT) STATS = data.short(STATS) ; STATS = graph.rev(STATS, rev.LOC, rev.API)
# sec.axis = sec_axis(~.,
# breaks = STATS[c("0.1", "1", "Median", "99", "99.9")],
# labels = paste0(round(STATS[c("0.1", "1", "Median", "99", "99.9")], 2), c(" (0.1%)", " (1%)", " (50%)", " (99%)", " (99.9%)"))
# )
# this can be used to add a secondary axis that shows the values for the percentiles
# code remains for reference as now Rates are shown for the second axis
ggplot(data = STATS, aes(ymin = -Inf, xmin = -Inf)) +
ggtitle(gameQ, subtitle = paste0(datatype, " - QQ Distribution")) + labsGPU +
geom_hline(yintercept = 1000/60, color = "red") +
geom_rect(aes(ymax = get("0.1"), xmax = qnorm(.001)), alpha=0.1, fill=c("blue"), color = "grey") +
geom_rect(aes(ymax = get("1"), xmax = qnorm(.010)), alpha=0.1, fill=c("blue"), color = "grey") +
geom_rect(aes(ymax = get("Median"), xmax = qnorm(.500)), alpha=0.1, fill=c("blue"), color = "grey") +
geom_rect(aes(ymax = get("99"), xmax = qnorm(.990)), alpha=0.1, fill=c("red"), color = "grey") +
geom_rect(aes(ymax = get("99.9"), xmax = qnorm(.999)), alpha=0.1, fill=c("red"), color = "grey") +
stat_qq_line(data = results, aes(sample=get(datatype)), line.p = QUAN, color = "green", size = 1.1, linetype = "dotted") +
stat_qq(data = results, aes(sample=get(datatype))) +
stat_qq_line(data = results, aes(sample=get(datatype)), line.p = QUAN, color = "green", alpha = 0.5, size = 1.1, linetype = "dotted") +
geom_label(data = STATS, aes(x = Inf, y = -Inf, label = paste0("Slope: ", Slope)), parse = TRUE, hjust="right", vjust="bottom", fill = "darkgrey", color = "green") +
FACET(graphQQ) +
scale_Y + coord_cartesian(ylim = c(0, FtimeLimit)) +
scale_x_continuous(name = "Percentile", breaks = qnorm(PERCS), labels = labelBreakQQ, minor_breaks = NULL, expand = c(0.02, 0))
}
graphDIFF = function(datatype, diffLim = 1000/50) {
if (datatype == "MsBetweenPresents") {
scale_X = scale_x_continuous(
name = "Frame Time (ms)",
breaks = ybreaks, labels = labelRoundB, limits = c(0, FtimeLimit),
expand = c(0.02, 0)
)
scale_Y = scale_y_continuous(
name = "Consecutive Frame Time Difference (ms)",
breaks = ybreaks, labels = labelRound, limits = c(-diffLim, diffLim),
expand = c(0, 0)
)
}
if (datatype == "MsBetweenDisplayChange") {
scale_X = scale_x_continuous(
name = "Refresh Cycles Later (1/60 s)",
breaks = ybreaks, labels = labelDisp, limits = c(0, FtimeLimit),
expand = c(0.02, 0)
)
scale_Y = scale_y_continuous(
name = "Consecutive Display Time Difference (ms)",
breaks = ybreaks,
limits = c(-diffLim, diffLim),
expand = c(0, 0)
)
}
if (datatype == "MsUntilRenderComplete") {
scale_X = scale_x_continuous(
name = "Render Time (ms)",
breaks = ybreaks, labels = labelRoundB, limits = c(0, FtimeLimit),
expand = c(0.02, 0)
)
scale_Y = scale_y_continuous(
name = "Consecutive Render Time Difference (ms)",
breaks = ybreaks, labels = labelRound, limits = c(-diffLim, diffLim),
expand = c(0, 0)
)
}
if (datatype == "MsEstimatedDriverLag") {
scale_X = scale_x_continuous(
name = "Estimated Driver Lag (ms)",
breaks = ybreaks, labels = labelRoundB, limits = c(0, FtimeLimit),
expand = c(0.02, 0)
)
scale_Y = scale_y_continuous(
name = "Consecutive Lag Difference (ms)",
breaks = ybreaks, labels = labelRound, limits = c(-diffLim, diffLim),
expand = c(0, 0)
)
}
if (useSHORT) results = data.short(results)
results = graph.rev(results, rev.LOC, rev.API)
# if (useSHORT) STATS = data.short(STATS) ; STATS = graph.rev(STATS, rev.LOC, rev.API)
# ggplot(data = results, aes(x = get(datatype), y = c(diff(as.data.frame(results)[, datatype]), 0)) ) +
ggplot(data = results, aes(x = get(datatype), y = diff.CONS(get(datatype))) ) +
ggtitle(gameQ, subtitle=paste0(datatype, " Consecutive Differences")) + labsGPU +
geom_point(alpha = 0.1) +
stat_density_2d(geom = "polygon", aes(fill = stat(nlevel)), show.legend = FALSE) + scale_fill_viridis_c() +
# stat_density_2d(geom = "polygon", aes(fill = stat(nlevel), alpha = stat(nlevel)), show.legend = FALSE) + scale_fill_viridis_c() +
FACET(graphDIFF) +
scale_X +
scale_Y
}
# using coord_cartesian to apply limits breaks the heatmap for some reason
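# Added note (not from the original script): in ggplot2, limits set on a scale,
# e.g. scale_x_continuous(limits = c(0, FtimeLimit)), drop observations outside
# the range before stats such as stat_density_2d are computed, whereas
# coord_cartesian(xlim = c(0, FtimeLimit)) only zooms the view without dropping
# data; graphDIFF above therefore sets its limits inside scale_X/scale_Y.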
# text outputs
if (textOUT) {
if (textFRAM) sinkOUT("MsBetweenPresents")
if (textDISP) sinkOUT("MsBetweenDisplayChange")
if (textREND) sinkOUT("MsUntilRenderComplete")
if (textDRIV) sinkOUT("MsEstimatedDriverLag")
message("")
}
if (graphs) {
rev.LOC = FALSE ; rev.API = TRUE
#Means
if (graphFRAM) graphOUT("MsBetweenPresents", graphMEANS)
if (graphDISP) graphOUT("MsBetweenDisplayChange", graphMEANS)
if (graphREND) graphOUT("MsUntilRenderComplete", graphMEANS)
if (graphDRIV) graphOUT("MsEstimatedDriverLag", graphMEANS)
	#Means with Boxplot Labels
# graphOUT("MsBetweenPresents", graphMEANSbox)
rev.LOC = TRUE ; rev.API = TRUE
#Course
if (graphFRAM) graphOUT("MsBetweenPresents", graphCOURSE)
if (graphDISP) graphOUT("MsBetweenDisplayChange", graphCOURSE)
if (graphREND) graphOUT("MsUntilRenderComplete", graphCOURSE)
if (graphDRIV) graphOUT("MsEstimatedDriverLag", graphCOURSE)
#Frequency
if (graphFRAM) graphOUT("MsBetweenPresents", graphFREQ)
if (graphDISP) graphOUT("MsBetweenDisplayChange", graphFREQ)
if (graphREND) graphOUT("MsUntilRenderComplete", graphFREQ)
if (graphDRIV) graphOUT("MsEstimatedDriverLag", graphFREQ)
#QQ
if (graphFRAM) graphOUT("MsBetweenPresents", graphQQ)
if (graphDISP) graphOUT("MsBetweenDisplayChange", graphQQ)
if (graphREND) graphOUT("MsUntilRenderComplete", graphQQ)
if (graphDRIV) graphOUT("MsEstimatedDriverLag", graphQQ)
#Difference
if (graphFRAM) graphOUT("MsBetweenPresents", graphDIFF)
if (graphDISP) graphOUT("MsBetweenDisplayChange", graphDIFF)
if (graphREND) graphOUT("MsUntilRenderComplete", graphDIFF)
if (graphDRIV) graphOUT("MsEstimatedDriverLag", graphDIFF)
#Difference - Extended
if (!is.null(diffLim)) {
if (graphFRAM) graphOUT("MsBetweenPresents", graphDIFF, diffLim = diffLim)
if (graphDISP) graphOUT("MsBetweenDisplayChange", graphDIFF, diffLim = diffLim)
if (graphREND) graphOUT("MsUntilRenderComplete", graphDIFF, diffLim = diffLim)
if (graphDRIV) graphOUT("MsEstimatedDriverLag", graphDIFF, diffLim = diffLim)
}
}
|
/@LaTeX/Scripts/OCAT - Combined - Output.r
|
no_license
|
GuestJim/Serious-Statistics-Reprocessed
|
R
| false | false | 26,004 |
r
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
shinyUI(fluidPage(
titlePanel("Gradient Explained"),
sidebarLayout(
sidebarPanel(
sliderInput("grad",
"Select a gradient:",
min = 0,
max = 10,
value = 5)
),
mainPanel(
p("This app is intended to demo how the gradient affects the shape of a linear plot by moving the slider."),
p("The gradient of a linear plot is defined as the change Y-values against an increase in 1 unit of X."),
p("Move the slider and observe how the slope of line changes."),
plotOutput("LinearPlot"),
p("Observe how as you increase the gradient the line becomes steeper and as you decrease the gradient the line becomes shallower. When the gradient is Zero the line is completely flat.")
)
)
))
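# A minimal matching server sketch (added for illustration; the original
# server.R is not shown in this file). It assumes only the "grad" input and
# "LinearPlot" output ids defined in the UI above, and draws a line through
# the origin with the selected gradient. Kept commented out so this ui.R
# file remains valid on its own.
# library(shiny)
# shinyServer(function(input, output) {
#   output$LinearPlot <- renderPlot({
#     x <- 0:10
#     plot(x, input$grad * x, type = "l",
#          xlab = "X", ylab = "Y", ylim = c(0, 100),
#          main = paste("y =", input$grad, "* x"))
#   })
# })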
|
/9.DevelopingDataProdcts/FinalCourseWork/GradientExplained/ui.R
|
no_license
|
omarmn/DataScience_JH_Coursera_Assignments
|
R
| false | false | 1,059 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_interface.R
\name{spark_read_libsvm}
\alias{spark_read_libsvm}
\title{Read libsvm file into a Spark DataFrame.}
\usage{
spark_read_libsvm(sc, name, path, repartition = 0, memory = TRUE,
overwrite = TRUE, ...)
}
\arguments{
\item{sc}{A \code{spark_connection}.}
\item{name}{The name to assign to the newly generated table.}
\item{path}{The path to the file. Needs to be accessible from the cluster.
Supports the \samp{"hdfs://"}, \samp{"s3a://"} and \samp{"file://"} protocols.}
\item{repartition}{The number of partitions used to distribute the
generated table. Use 0 (the default) to avoid partitioning.}
\item{memory}{Boolean; should the data be loaded eagerly into memory? (That
is, should the table be cached?)}
\item{overwrite}{Boolean; overwrite the table with the given name if it
already exists?}
\item{...}{Optional arguments; currently unused.}
}
\description{
Read libsvm file into a Spark DataFrame.
}
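% Example added for illustration; it is not part of the generated Rd, and the
% connection settings and file path are placeholders.
\examples{
\dontrun{
library(sparklyr)
sc <- spark_connect(master = "local")
sample_tbl <- spark_read_libsvm(sc, name = "sample_data",
                                path = "hdfs://path/to/data.libsvm")
spark_disconnect(sc)
}
}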
\seealso{
Other Spark serialization routines: \code{\link{spark_load_table}},
\code{\link{spark_read_csv}},
\code{\link{spark_read_jdbc}},
\code{\link{spark_read_json}},
\code{\link{spark_read_orc}},
\code{\link{spark_read_parquet}},
\code{\link{spark_read_source}},
\code{\link{spark_read_table}},
\code{\link{spark_read_text}},
\code{\link{spark_save_table}},
\code{\link{spark_write_csv}},
\code{\link{spark_write_jdbc}},
\code{\link{spark_write_json}},
\code{\link{spark_write_orc}},
\code{\link{spark_write_parquet}},
\code{\link{spark_write_source}},
\code{\link{spark_write_table}},
\code{\link{spark_write_text}}
}
\concept{Spark serialization routines}
|
/man/spark_read_libsvm.Rd
|
permissive
|
awblocker/sparklyr
|
R
| false | true | 1,701 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ml_feature_dct.R
\name{ft_dct}
\alias{ft_dct}
\alias{ft_discrete_cosine_transform}
\title{Feature Transformation -- Discrete Cosine Transform (DCT) (Transformer)}
\usage{
ft_dct(x, input_col = NULL, output_col = NULL, inverse = FALSE,
uid = random_string("dct_"), ...)
ft_discrete_cosine_transform(x, input_col, output_col, inverse = FALSE,
uid = random_string("dct_"), ...)
}
\arguments{
\item{x}{A \code{spark_connection}, \code{ml_pipeline}, or a \code{tbl_spark}.}
\item{input_col}{The name of the input column.}
\item{output_col}{The name of the output column.}
\item{inverse}{Indicates whether to perform the inverse DCT (TRUE) or forward DCT (FALSE).}
\item{uid}{A character string used to uniquely identify the feature transformer.}
\item{...}{Optional arguments; currently unused.}
}
\value{
The object returned depends on the class of \code{x}.
\itemize{
\item \code{spark_connection}: When \code{x} is a \code{spark_connection}, the function returns a \code{ml_transformer},
a \code{ml_estimator}, or one of their subclasses. The object contains a pointer to
a Spark \code{Transformer} or \code{Estimator} object and can be used to compose
\code{Pipeline} objects.
\item \code{ml_pipeline}: When \code{x} is a \code{ml_pipeline}, the function returns a \code{ml_pipeline} with
the transformer or estimator appended to the pipeline.
\item \code{tbl_spark}: When \code{x} is a \code{tbl_spark}, a transformer is constructed then
immediately applied to the input \code{tbl_spark}, returning a \code{tbl_spark}
}
}
\description{
A feature transformer that takes the 1D discrete cosine transform of a real
vector. No zero padding is performed on the input vector. It returns a real
vector of the same length representing the DCT. The return vector is scaled
such that the transform matrix is unitary (aka scaled DCT-II).
}
\details{
\code{ft_discrete_cosine_transform()} is an alias for \code{ft_dct} for backwards compatibility.
}
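% Example added for illustration; it is not part of the generated Rd. The
% connection, column names, and toy data are placeholders.
\examples{
\dontrun{
library(sparklyr)
sc <- spark_connect(master = "local")
df <- copy_to(sc, data.frame(V1 = 0, V2 = 1, V3 = -2, V4 = 3))
df <- ft_vector_assembler(df, input_cols = c("V1", "V2", "V3", "V4"),
                          output_col = "features")
ft_dct(df, input_col = "features", output_col = "features_dct")
}
}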
\seealso{
See \url{http://spark.apache.org/docs/latest/ml-features.html} for
more information on the set of transformations available for DataFrame
columns in Spark.
Other feature transformers: \code{\link{ft_binarizer}},
\code{\link{ft_bucketizer}},
\code{\link{ft_chisq_selector}},
\code{\link{ft_count_vectorizer}},
\code{\link{ft_elementwise_product}},
\code{\link{ft_feature_hasher}},
\code{\link{ft_hashing_tf}}, \code{\link{ft_idf}},
\code{\link{ft_imputer}},
\code{\link{ft_index_to_string}},
\code{\link{ft_interaction}}, \code{\link{ft_lsh}},
\code{\link{ft_max_abs_scaler}},
\code{\link{ft_min_max_scaler}}, \code{\link{ft_ngram}},
\code{\link{ft_normalizer}},
\code{\link{ft_one_hot_encoder_estimator}},
\code{\link{ft_one_hot_encoder}}, \code{\link{ft_pca}},
\code{\link{ft_polynomial_expansion}},
\code{\link{ft_quantile_discretizer}},
\code{\link{ft_r_formula}},
\code{\link{ft_regex_tokenizer}},
\code{\link{ft_sql_transformer}},
\code{\link{ft_standard_scaler}},
\code{\link{ft_stop_words_remover}},
\code{\link{ft_string_indexer}},
\code{\link{ft_tokenizer}},
\code{\link{ft_vector_assembler}},
\code{\link{ft_vector_indexer}},
\code{\link{ft_vector_slicer}}, \code{\link{ft_word2vec}}
}
\concept{feature transformers}
|
/man/ft_dct.Rd
|
permissive
|
lu-wang-dl/sparklyr
|
R
| false | true | 3,345 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sim.R
\name{sdmTMB_sim}
\alias{sdmTMB_sim}
\title{Simulate from a spatial/spatiotemporal model}
\usage{
sdmTMB_sim(
mesh,
x,
y,
range,
time_steps = 1L,
X = NULL,
betas = NULL,
family = gaussian(link = "identity"),
rho = 0,
sigma_O = 0.1,
sigma_E = 0,
sigma_V = rep(0, length(betas)),
phi = 0.01,
thetaf = 1.5,
df = 3,
seed = sample.int(1e+06, 1),
list = FALSE,
size = NULL
)
}
\arguments{
\item{mesh}{Output from \code{\link[=make_mesh]{make_mesh()}} or a mesh directly from INLA.}
\item{x}{A vector of x coordinates. Should match \code{mesh}.}
\item{y}{A vector of y coordinates. Should match \code{mesh}.}
\item{range}{Parameter that controls the decay of spatial correlation.}
\item{time_steps}{The number of time steps.}
\item{X}{An optional covariate design matrix formatted as a list with each
element of the list representing a slice in time. If omitted and \code{betas}
is not \code{NULL}, it will be set to standard normal.}
\item{betas}{A vector of beta values (design-matrix fixed-effect coefficient
values). If a random walk (\code{sigma_V > 0}), these are the starting values.}
\item{family}{Family as in \code{\link[=sdmTMB]{sdmTMB()}}.}
\item{rho}{Spatiotemporal correlation between years;
should be between -1 and 1.}
\item{sigma_O}{SD of spatial process (Omega).}
\item{sigma_E}{SD of spatiotemporal process (Epsilon). Can be scalar or
vector for time-varying model.}
\item{sigma_V}{A vector of standard deviations of time-varying random walk on
parameters. Set to 0 for parameters that should not vary through time.}
\item{phi}{Observation error scale parameter.}
\item{thetaf}{Tweedie p (power) parameter; between 1 and 2.}
\item{df}{Student-t degrees of freedom.}
\item{seed}{A value with which to set the random seed.}
\item{list}{Logical for whether output is in list format. If \code{TRUE},
data is in list element 1 and input values in element 2.}
\item{size}{Specific to the binomial family: a vector giving the binomial N (number of trials). If not included, defaults to 1 (Bernoulli).}
}
\value{
A data frame where:
\itemize{
\item \code{omega_s} represents the simulated spatial random effects.
\item \code{epsilon_st} represents the simulated spatiotemporal random effects.
\item \code{eta} is the true value in link space
\item \code{mu} is the true value in inverse link space.
\item \code{observed} represents the simulated process with observation error.
\item \code{b_...} contain the beta values for each covariate used to simulate each time slice.
\item \code{cov_...} covariate values for each observation.
}
}
\description{
Simulate from a spatial/spatiotemporal model
}
\examples{
\donttest{
set.seed(42)
x <- runif(50, -1, 1)
y <- runif(50, -1, 1)
N <- length(x)
time_steps <- 6
X <- model.matrix(~ x1, data.frame(x1 = rnorm(N * time_steps)))
loc <- data.frame(x = x, y = y)
mesh <- make_mesh(loc, xy_cols = c("x", "y"), cutoff = 0.1)
s <- sdmTMB_sim(
x = x, y = y, mesh = mesh, X = X,
betas = c(0.5, 0.7), time_steps = time_steps, rho = 0.5,
phi = 0.2, range = 0.8, sigma_O = 0, sigma_E = 0.3,
seed = 123, family = gaussian()
)
mesh <- make_mesh(s, xy_cols = c("x", "y"), cutoff = 0.1)
m <- sdmTMB(
data = s, formula = observed ~ x1,
time = "time", spde = mesh,
ar1_fields = TRUE, include_spatial = FALSE
)
tidy(m, conf.int = TRUE)
tidy(m, "ran_pars", conf.int = TRUE)
# example with time-varying sigma_E (spatiotemporal variation)
s <- sdmTMB_sim(
x = x, y = y, mesh = mesh, X = X,
betas = c(0.5, 0.7), time_steps = time_steps, rho = 0,
phi = 0.2, range = 0.8, sigma_O = 0,
sigma_E = seq(0.2,1,length.out=time_steps),
seed = 123, family = gaussian())
}
}
|
/man/sdmTMB_sim.Rd
|
no_license
|
Kotkot/sdmTMB
|
R
| false | true | 3,727 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zz_help_files.R
\name{zstd_compress_raw}
\alias{zstd_compress_raw}
\title{Zstd compression}
\usage{
zstd_compress_raw(x, compress_level)
}
\arguments{
\item{x}{The object to serialize.}
\item{compress_level}{The compression level used (default \code{4}). A number between \code{-50} and \code{22} (higher is more compressed). Due to the format of qs, there is
very little benefit to compression levels > 5 or so.}
}
\value{
The compressed data as a raw vector.
}
\description{
Compresses to a raw vector using the zstd algorithm. Exports the main zstd compression function.
}
\examples{
x <- 1:1e6
xserialized <- serialize(x, connection=NULL)
xcompressed <- zstd_compress_raw(xserialized, compress_level = 1)
xrecovered <- unserialize(zstd_decompress_raw(xcompressed))
}
|
/man/zstd_compress_raw.Rd
|
no_license
|
traversc/qs
|
R
| false | true | 849 |
rd
|
#Put both the source code (MUE_code.r) and the example data file (example_B_CVs.csv) in the same folder
Dir<-"C:/path/" #Change this to match the path of the folder where you put the files
source(paste0(Dir,'MUE_code.r'), echo=TRUE)
dat.in<-read.csv(paste0(Dir,"example_B_CVs.csv"),header=T)
#Split biomass from coefficient of variations (CVs)
index<-dat.in[,2:(((ncol(dat.in)-1)/2)+1)]
CVs<-dat.in[,(((ncol(dat.in)-1)/2)+2):ncol(dat.in)]
years<-dat.in[,1]
#RUN clusters
#Hubert's gamma for the assignment clusters
spp.Hg<-CPUE.sims.SPP(index,1000,rep(1,length(index)),CVs,19,colnames(index),cutoff=1,op.type=c(0,1,0,1,1,1,0,0),k.max.m=2,Z_score=T)
#Make silhouette plot
plot(pam(spp.Hg$D.matrix,2,diss=TRUE), main="HUBERT's GAMMA used for cluster assignment")
abline(v=c(0.25,0.5,0.75),col="red",lwd=c(1,2,3))
#Silhouette for the assignment clusters
spp.Sil<-CPUE.sims.SPP(index,1000,rep(1,length(index)),CVs,19,colnames(index),cutoff=1,op.type=c(0,1,0,1,1,1,0,0),k.max.m=1,Z_score=T)
#Make silhouette plot
plot(pam(spp.Sil$D.matrix,2,diss=TRUE), main="SILHOUETTE used for cluster assignment")
abline(v=c(0.25,0.5,0.75),col="red",lwd=c(1,2,3))
|
/MUE_run_code.r
|
no_license
|
shcaba/MUE
|
R
| false | false | 1,063 |
r
|
load("finished13.RDA")
jj=1:82
mavsaway=subset(finished13,away_team_abbrev=="DAL")
install.packages("TTR")
library(TTR)
nm=sample(1:100, 100, replace=T)
mavsaway$home_team_fgm_WMA=WMA(mavsaway$home_team_fgm, n=10, wilder=FALSE, ratio=NULL)
mavsaway$home_team_fgm_WMA=c(NA,mavsaway$home_team_fgm_WMA[1:69])
home_team_fgm_EMA
new111=data.frame(newdata$game_id,home_team_fgm_EMA)
names(new111)=c("game_id","home_team_fgm_EMA")
woo=merge(finished13, new111, by="game_id")
|
/r code for nba 6.R
|
no_license
|
garretthill/NBA
|
R
| false | false | 510 |
r
|
load("finished13.RDA")
jj=1:82
mavsaway=subset(finished13,away_team_abbrev=="DAL")
install.packages("TTR")
library(TTR)
nm=sample(1:100, 100, replace=T)
mavsaway$home_team_fgm_WMA=WMA(mavsaway$home_team_fgm, n=10, wilder=FALSE, ratio=NULL)
mavsaway$home_team_fgm_WMA=c("na",mavsaway$home_team_fgm_WMA[1:69])
home_team_fgm_EMA
new111=data.frame(newdata$game_id,home_team_fgm_EMA)
names(new111)=c("game_id","home_team_fgm_EMA")
woo=merge(finished13, new111, by="game_id")
|
#Sys.setenv(SPARK_HOME="/usr/hdp/current/spark-client") # working
#Sys.setenv(SPARK_HOME="/home/rstudio/spark/spark-1.5.1-bin-hadoop2.6")
source("~/iwr/R/init.R")
source("~/iwr/R/mongo_api.R")
source("~/iwr/R/myhelper.R")
source("~/iwr/R/spark_api.R")
.libPaths(c(file.path(Sys.getenv("SPARK_HOME"), "R", "lib"), .libPaths()))
library(SparkR)
sc <- sparkR.init("local")
sqlContext = sparkRSQL.init()
hivecontext <- sparkRHive.init(sc)
df <- loadDF( hivecontext, "/data/ingest/sparktest1/56878a17e4b09bf0793b93ad/merged/orc/*/*", "orc")
print(df$ziw_row_id) # prints the Column object; use head(select(df, "ziw_row_id")) to see values
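# A small follow-on sketch (not in the original file): register the DataFrame as a temporary
# table and query it through the HiveContext created above. The table name is illustrative.
registerTempTable(df, "merged_orc")
sample_rows <- sql(hivecontext, "SELECT ziw_row_id FROM merged_orc LIMIT 5")
head(collect(sample_rows))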
|
/extras/readOrcFileFromOrc.R
|
no_license
|
Infoworks/R
|
R
| false | false | 563 |
r
|
# header ----------------------------------------------------------------------
# Course: R Programming
# Programming Assignment 3: Hospital Quality
# Author: Sunil Pereira
# Created Date: May 1, 2015
# rankall() ------------------------------------------------------------
# This function takes two arguments: an outcome name (outcome) and a hospital
# ranking (num). The function reads the outcome-of-care-measures.csv file and
# returns a 2-column data frame containing the hospital in each state that has
# the ranking specified in num. For example the function call rankall("heart
# attack", "best") would return a data frame containing the names of the
# hospitals that are the best in their respective states for 30-day heart attack
# death rates. The function should return a value for every state (some may be
# NA). The first column in the data frame is named hospital, which contains the
# hospital name, and the second column is named state, which contains the
# 2-character abbreviation for the state name. Hospitals that do not have data
# on a particular outcome should be excluded from the set of hospitals when
# deciding the rankings.
# rankall <- function(outcome, num = "best") {
# ## Read outcome data
# data.file <- "./data/outcome-of-care-measures.csv"
# data <- read.csv(data.file, colClasses="character")
#
# ## Get the col number
# coln <- if(outcome == "heart attack") {
# 11
# } else if(outcome == "heart failure") {
# 17
# } else if(outcome == "pneumonia") {
# 23
# } else {
# stop("invalid outcome")
# }
#
# ## Remove NAs
# data <- data[complete.cases(data[,coln]),]
#
# ## Sort by state, rate and name
# data <- data[order(data[7], data[coln], data[2]), ]
#
# ## Split by state
# s <- split(data, data[7])
#
# hospitals <- character(0)
# states <- character(0)
#
# for(name in names(s)){
# hospName <- if(num == "best"){
# s[[name]][1, 2]
# } else if(num == "worst"){
# tail(s[[name]], 1)[1, 2]
# } else {
# s[[name]][num, 2]
# }
# hospitals <- append(hospitals, hospName)
# states <- append(states, name)
# }
#
# data.frame(hospital=hospitals, state=states)
# }
rankall <- function(outcome, num = "best") {
## Read outcome data
data.file <- "./data/outcome-of-care-measures.csv"
outcome.data <- read.csv(data.file, colClasses="character")
## Get the col number
coln <- if(outcome == "heart attack") {
11
} else if(outcome == "heart failure") {
17
} else if(outcome == "pneumonia") {
23
} else {
stop("invalid outcome")
}
states <- unique(outcome.data$State)
rankMatrix <- sapply(states, function(state){
## Filter DF on state
outcome.data <- outcome.data[outcome.data$State==state,]
## Convert using as.numeric, supressing coercian warnings
suppressWarnings(outcome.data[, coln] <- as.numeric(outcome.data[, coln]))
if (class(num) == "numeric") { ## Order DF on outcomeCol, and then name
outcome.data <- outcome.data[order(outcome.data[,coln], outcome.data[,2]), ]
return (c(state, outcome.data[num,2]))
}
if (num=="best") { ## Order DF on outcomeCol, and then name
outcome.data <- outcome.data[order(outcome.data[,coln], outcome.data[,2]), ]
      return (c(state, outcome.data[1,2]))
}
if (num=="worst") { ## Order DF on reverse of outcomeCol, and then name
outcome.data <- outcome.data[order(-outcome.data[,coln], outcome.data[,2]), ]
return (c(state, outcome.data[1,2]))
}
})
result <- data.frame(rankMatrix[2,], rankMatrix[1,]) # Insert values into DF
colnames(result) <- c("hospital", "state")
rownames(result) <- result$state
result <- result[order(result$state),]
return(result)
}
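## Example calls (assuming outcome-of-care-measures.csv is available under ./data/ as above):
## head(rankall("heart attack", 20), 10)       # 20th-ranked hospital per state, first 10 states
## tail(rankall("pneumonia", "worst"), 3)      # worst-ranked pneumonia hospitals, last 3 states
## rankall("heart failure")[c("TX", "WA"), ]   # best heart-failure hospital in two states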
|
/c2.r-programming/ProgrammingAssignment3/rankall.R
|
no_license
|
sunil9/datasciencecoursera
|
R
| false | false | 4,039 |
r
|
# @name betas_restr
# @rdname betas_restr
#
# @title betas restricted
#
# @description
# Restricted estimation of beta coefficients
#
# @param results : An object create with \code{\link{spsurml}}.
# @param R : Coefficient matrix for betas.
# @param r : Vector of independent terms.
#
# @details
# ¿?¿?
#
# @return
# betas restricted and covariance matrix
#
# @references
#
# TODO: update these references
# J. LeSage and R.K. Pace. \emph{Introduction to Spatial Econometrics}, CRC Press, chapter 10.1.6, 2009
# Mur, J., López, F., & Herrera, M. (2010). Testing for spatial effects in seemingly unrelated regressions. \emph{Spatial Economic Analysis}, 5(4), 399-440.
# \cr
# \cr
# López, F.A., Mur, J., & Angulo, A. (2014). Spatial model selection strategies in a SUR framework. The case of regional productivity in EU. \emph{Annals of Regional Science}, 53(1), 197-220.
# \cr
# \cr
# López, F.A., Martínez-Ortiz, P.J., & Cegarra-Navarro, J.G. (2017). Spatial spillovers in public expenditure on a municipal level in Spain. \emph{Annals of Regional Science}, 58(1), 39-65.
#
# @author
# \tabular{ll}{
# Fernando Lopez \tab \email{fernando.lopez@@upct.es} \cr
# Roman Minguez \tab \email{roman.minguez@@uclm.es} \cr
# Jesus Mur \tab \email{jmur@@unizar.es} \cr
# }
# @seealso
# \code{\link{spsur}}
# @examples
# data(spc)
# Tformula <- WAGE83 | WAGE81 ~ UN83 + NMR83 + SMSA | UN80 + NMR80 + SMSA
# ## Estimate SUR-SLM model
# spcsur.slm <-spsurml(Form=Tformula,data=spc,type="slm",W=Wspc)
# summary(spcsur.slm)
# ## H_0: equality between SMSA coefficients in both equations.
# R1 <- matrix(c(0,0,0,1,0,0,0,-1),nrow=1)
# r1 <- matrix(0,ncol=1)
# wald_betas(results=spcsur.slm,R=R1,r=r1)
# betas_rest1 <- betas_restr(spcsur.slm,R=R1,r=r1)
# ## Estimate SUR-SEM model
# spcsur.sem <-spsurml(Form=Tformula,data=spc,type="sem",W=Wspc)
# summary(spcsur.sem)
# ## H_0: equality between intercepts and SMSA coefficients in both equations.
# R2 <- matrix(c(1,0,0,0,-1,0,0,0,0,0,0,1,0,0,0,-1),nrow=2,ncol=8,byrow=TRUE)
# r2 <- matrix(c(0,0),ncol=1)
# res2 <- wald_betas(results=spcsur.sem,R=R2,r=r2)
# betas_rest2 <- betas_restr(spcsur.sem,R=R2,r=r2)
# ## Estimate SUR-SARAR model
# spcsur.sarar <-spsurml(Form=Tformula,data=spc,type="sarar",W=Wspc)
# summary(spcsur.sarar)
# ## H_0: equality between SMSA coefficients in both equations.
# R3 <- matrix(c(0,0,0,1,0,0,0,-1),nrow=1)
# r3 <- matrix(0,ncol=1)
# wald_betas(results=spcsur.sarar,R=R3,r=r3)
# betas_rest3 <- betas_restr(spcsur.sarar,R=R3,r=r3)
betas_restr <- function(results , R , r){
  z <- results # object that contains the spsurml() estimation
betas <- Matrix::Matrix(matrix(z$betas,ncol=1))
rownames(betas) <- names(z$betas)
cov_betas <- Matrix::Matrix(z$cov[rownames(betas),rownames(betas)])
R <- Matrix::Matrix(R)
colnames(R) <- rownames(betas)
r <- Matrix::Matrix(matrix(r,ncol=1))
holg <- R %*% betas - r
q <- nrow(R)
X <- Matrix::Matrix(z$X)
W <- Matrix::Matrix(z$W)
Sigma <- Matrix::Matrix(z$Sigma)
Sigma_inv <- Matrix::solve(Sigma)
Tm <- z$Tm
N <- z$N
G <- z$G
IT <- Matrix::Diagonal(Tm)
IR <- Matrix::Diagonal(N)
IGR <- Matrix::Diagonal(G*N)
OME <- kronecker(IT,kronecker(Sigma,IR))
OMEinv <- kronecker(IT,kronecker(Sigma_inv,IR))
type <- z$type
deltas <- Matrix::Matrix(matrix(z$deltas,ncol=1))
rownames(deltas) <- names(z$deltas)
cov_deltas <- Matrix::Matrix(z$cov[rownames(deltas),rownames(deltas)])
if (type=="sem" || type=="sdem" || type=="sarar")
{
rhos <- deltas[grepl("rho",rownames(deltas))]
rho_matrix <- Matrix::Matrix(diag(as.vector(rhos)))
B <- kronecker(IT,(IGR - kronecker(rho_matrix,W)))
X <- B %*% X
}
XtOMEinvX <- Matrix::t(X) %*% (OMEinv %*% X)
rownames(XtOMEinvX) <- rownames(betas)
colnames(XtOMEinvX) <- rownames(betas)
XtOMEinvX_inv <- Matrix::solve(XtOMEinvX)
rownames(XtOMEinvX_inv) <- rownames(betas)
colnames(XtOMEinvX_inv) <- rownames(betas)
betas_r <- betas + XtOMEinvX_inv %*% (Matrix::t(R) %*%
Matrix::solve(R %*% XtOMEinvX_inv %*%
Matrix::t(R),-holg))
cov_betas_r <- XtOMEinvX_inv - XtOMEinvX_inv %*%
(Matrix::t(R) %*%
Matrix::solve(R %*% XtOMEinvX_inv %*%
Matrix::t(R),R %*% XtOMEinvX_inv))
rownames(cov_betas_r) <- rownames(betas_r)
colnames(cov_betas_r) <- rownames(betas_r)
se_betas_r <- sqrt(Matrix::diag(cov_betas_r))
t_betas_r <- betas_r / se_betas_r
##### Print Table of beta restricted
p <- z$p
rdf <- z$df.residual+q
coef_table <- list(NULL)
# Build coefficients table by Equation
for (i in 1:G)
{
if(i==1){
coef_table[[i]] <- cbind(betas_r[1:p[i]], se_betas_r[1:p[i]],
t_betas_r[1:p[i]],
2 * pt(abs(t_betas_r[1:p[i]]),rdf,
lower.tail = FALSE))
colnames(coef_table[[i]]) <- c("Estimate", "Std. Error",
"t value", "Pr(>|t|)")
rownames(coef_table[[i]]) <- rownames(betas_r)[1:p[i]]
} else {
coef_table[[i]] <- cbind(betas_r[(cumsum(p)[i-1]+1):cumsum(p)[i]],
se_betas_r[(cumsum(p)[i-1]+1):cumsum(p)[i]],
t_betas_r[(cumsum(p)[i-1]+1):cumsum(p)[i]],
2 * pt(abs(t_betas_r[(cumsum(p)[i-1]+1):cumsum(p)[i]]),rdf,
lower.tail = FALSE))
rownames(coef_table[[i]]) <- rownames(betas_r)[(cumsum(p)[i-1]+1):cumsum(p)[i]]
}
colnames(coef_table[[i]]) <- c("Estimate", "Std. Error",
"t value", "Pr(>|t|)")
}
digits <- max(3L, getOption("digits") - 3L)
cat("\n Betas Restricted: \n\n")
for (i in 1:length(coef_table)){
cat("Equation ",i,"\n")
printCoefmat(coef_table[[i]], P.values = TRUE, has.Pvalue = TRUE)
}
res <- list(betas_restr = as.matrix(betas_r),
cov_betas_restr = as.matrix(cov_betas_r),
betas_unrestr = as.matrix(betas),
cov_betas_unrestr = as.matrix(cov_betas))
}
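# Quick sanity check on the output (sketch; assumes the roxygen example above was run so that
# spcsur.slm, R1 and r1 exist): the restricted coefficients should satisfy R %*% beta = r up to
# numerical precision, and the restricted covariance should vanish along the restricted direction.
# br <- betas_restr(spcsur.slm, R = R1, r = r1)
# max(abs(R1 %*% br$betas_restr - r1))        # ~ 0
# R1 %*% br$cov_betas_restr %*% t(R1)         # ~ 0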
|
/R/betas_restr.R
|
no_license
|
rsbivand/spsur
|
R
| false | false | 6,347 |
r
|
# Four-panel summary plot of a simulation run: conversion X (%), polymer mass, temperature,
# and log10 number-/weight-average molecular weights, all versus time.
PlotBasic <-function(out,pars){
dev.new()
op<-par(mfrow=c(2,2),pty = "s",mar=c(4,3,1,1),cex.axis=0.8,tck=-0.01,cex.lab=0.8,font=2,mgp=c(2,1,0))
plot(out$time,out$X*100,type='l',xlab='Time(min)',ylab='X(%)',col='red');grid()
plot(out$time,out$M*pars['MWM']/1000*out$Vl,type='l',xlab='Time(min)',ylab='M(Kg)',col='red');grid()
  plot(out$time,out$T-273.15,type='l',xlab='Time(min)',ylab='Temperature(C)',col='red');grid() # Kelvin to Celsius
lgMn<-log10(out$Mnm)
lgMw<-log10(out$Mwm)
plot(out$time,lgMn,type='l',xlab='Time(min)',ylab='log Mn(red), log Mw(blue) in (g/mol)',col='red',
ylim=c(min(lgMn[-1],lgMw[-1]),max(lgMn[-1],lgMw[-1])));grid()
lines(out$time,lgMw,col='blue')
par(op)
}
|
/R/PlotBasic.R
|
no_license
|
timhockswender/HomoPolymer
|
R
| false | false | 675 |
r
|
#' Candidate object
#'
#' Objects of class \code{Candidate} are created using the \code{createCandidate} function
#'
#' An object of the class \code{Candidate} has the following slots:
#' \itemize{
#' \item \code{name} Name of the candidate
#' \item \code{party} Candidate's party, either "Democratic" or "Republican"
#' \item \code{delegatesWon} Number of delegates the candidate has won so far
#' \item \code{delegatesNeeded} Number of additional delegates needed to secure nomination
#' }
#'
#' @author Jacob H. Hample: \email{jacob.hample@@wustl.edu}
#' @aliases Candidate-class initialize,Candidate-method show,Candidate-method
#' @rdname Candidate
#' @export
setClass(Class = "Candidate",
slots = c(name = "character",
party = "character",
delegatesWon = "numeric",
delegatesNeeded = "numeric"),
prototype = prototype(
name = character(),
party = character(),
delegatesWon = numeric(),
delegatesNeeded = numeric()
)
)
#' @export
setMethod("initialize", "Candidate",
function(.Object, name, party, delegatesWon) {
.Object@name <- name
.Object@party <- party
.Object@delegatesWon <- delegatesWon
if(party == "Republican") {
.Object@delegatesNeeded <- 1237 - .Object@delegatesWon
}
else if(party == "Democratic") {
.Object@delegatesNeeded <- 2383 - .Object@delegatesWon
}
else {
stop("You have not chosen a valid party. Please specify either 'Democratic' or 'Republican'")
}
value = callNextMethod()
return(value)
}
)
#' @export
# Show method
setMethod(f = "show",
signature = "Candidate",
definition = function(object) {
show.dataframe <- data.frame(object@name, object@party, object@delegatesWon, object@delegatesNeeded)
colnames(show.dataframe) <- c("Name", "Party", "Delegates Won", "Delegates Needed")
print(show.dataframe)
}
)
#' @export
# Print method
setMethod(f = "print",
signature = "Candidate",
definition = function(x) {
paste("So far", x@name, "has won", x@delegatesWon, "delegates in the",
x@party, "primary and therefore needs", x@delegatesNeeded,
"more delegates in order to secure the nomination.", sep = " ")
}
)
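# Minimal usage sketch (not package code; the createCandidate() helper mentioned in the docs is
# not defined in this file, so the object is built directly with new() as an assumption):
# sanders <- new("Candidate", name = "Bernie Sanders", party = "Democratic", delegatesWon = 1846)
# sanders          # dispatches the show() method
# print(sanders)   # one-sentence summary from the print() method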
|
/MyPackage/R/Candidate.R
|
no_license
|
jacobhample/PS6
|
R
| false | false | 2,558 |
r
|
setOldClass(c("xml_document", "xml_node"))
.PXDataset <- setClass("PXDataset",
slots = list(
## attributes
id = "character",
formatVersion = "character",
## Nodes
Data = "xml_document"))
setMethod("show", "PXDataset",
function(object) {
cat("Object of class \"", class(object), "\"\n", sep = "")
fls <- pxfiles(object)
fls <- paste0("'", fls, "'")
n <- length(fls)
cat(" Id:", object@id, "with ")
cat(n, "files\n")
cat(" ")
if (n < 3) {
cat(paste(fls, collapse = ", "), "\n")
} else {
cat("[1]", paste(fls[1], collapse = ", "))
cat(" ... ")
cat("[", n, "] ", paste(fls[n], collapse = ", "),
"\n", sep = "")
cat(" Use 'pxfiles(.)' to see all files.\n")
}
})
## ##' Returns the node names of the underliyng XML content of an
## ##' \code{PXDataset} object, available in the \code{Data} slot. This
## ##' function is meant to be used if additional parsing of the XML
## ##' structure is needed.
## ##'
## ##' @title Return the nodes of a \code{PXDataset}
## ##' @param pxdata An instance of class \code{PXDataset}.
## ##' @param name The name of a node.
## ##' @param all Should node from all levels be returned. Default is
## ##' \code{FALSE}.
## ##' @return A \code{character} with XML node names.
## ##' @author Laurent Gatto
## pxnodes <- function(pxdata, name, all = FALSE) {
## stopifnot(inherits(pxdata, "PXDataset"))
## stop("Not available for new version")
## if (all) {
## ans <- names(unlist(pxdata@Data))
## ans <- ans[grep("children", ans)]
## ans <- gsub("\\.", "/", ans)
## ans <- gsub("children", "", ans)
## return(ans)
## }
## if (missing(name)) ans <- names(names(pxdata@Data))
## else ans <- names(xmlChildren(pxdata@Data[[name]]))
## ans
## }
pxid <- function(object) object@id
pxurl <- function(object) {
stopifnot(inherits(object, "PXDataset"))
p <- "//cvParam[@accession = 'PRIDE:0000411']"
url <- xml_attr(xml_find_all(object@Data, p), "value")
names(url) <- NULL
url
}
pxtax <- function(object) {
p <- "//cvParam[@accession = 'MS:1001469']"
tax <- xml_attr(xml_find_all(object@Data, p), "value")
names(tax) <- NULL
tax
}
pxref <- function(object) {
p <- "//cvParam[@accession = 'PRIDE:0000400']"
q <- "//cvParam[@accession = 'PRIDE:0000432']"
ref <- xml_attr(xml_find_all(object@Data, p), "value")
pendingref <- xml_attr(xml_find_all(object@Data, q), "value")
c(ref, pendingref)
}
pxfiles <- function(object) {
stopifnot(inherits(object, "PXDataset"))
ftpdir <- paste0(pxurl(object), "/")
ans <- strsplit(getURL(ftpdir, dirlistonly = TRUE), "\n")[[1]]
if (Sys.info()['sysname'] == "Windows")
ans <- sub("\r$", "", ans)
ans
}
pxget <- function(object, list, force = FALSE, destdir = getwd(), ...) {
fls <- pxfiles(object)
url <- pxurl(object)
if (missing(list))
list <- menu(fls, FALSE, paste0("Files for ", object@id))
if (length(list) == 1 && list == "all") {
toget <- fls
} else {
if (is.character(list)) {
toget <- fls[fls %in% list]
} else toget <- fls[list]
}
if (length(toget) < 1)
stop("No files to download.")
    urls <- gsub(" ", "\\ ", paste0(url, "/", toget)) ## escape spaces in the remote file names
toget <- file.path(destdir, toget)
message("Downloading ", length(urls), " file",
ifelse(length(urls) > 1, "s", ""))
for (i in 1:length(urls)) {
if (file.exists(toget[i]) && !force)
message(toget[i], " already present.")
else download.file(urls[i], toget[i], ...)
}
invisible(toget)
}
## ns10 <- "https://raw.githubusercontent.com/proteomexchange/proteomecentral/master/lib/schemas/proteomeXchange-1.0.xsd"
## ns11 <- "https://raw.githubusercontent.com/proteomexchange/proteomecentral/master/lib/schemas/proteomeXchange-1.1.0.xsd"
## ns12 <- "https://raw.githubusercontent.com/proteomexchange/proteomecentral/master/lib/schemas/proteomeXchange-1.2.0.xsd"
## ns13 <- "https://raw.githubusercontent.com/proteomexchange/proteomecentral/master/lib/schemas/proteomeXchange-1.3.0.xsd"
## constructor
PXDataset <- function(id) {
url <- paste0(
"http://proteomecentral.proteomexchange.org/cgi/GetDataset?ID=",
id, "&outputMode=XML&test=no")
x <- readLines(url)
if (length(grep("ERROR", x)) > 0) {
x <- x[grep("message=", x)]
x <- sub("message=", "", x)
stop(x)
}
x <- x[x != ""]
v <- sub("\".+$", "", sub("^.+formatVersion=\"", "", x[2]))
x <- read_xml(url)
.formatVersion <- xml_attr(x, "formatVersion")
.id <- xml_attr(x, "id")
if (length(.id) != 1)
stop("Got ", length(.id), " identifiers: ",
paste(.id, collapse = ", "), ".")
if (id != .id)
warning("Identifier '", id, "' not found. Retrieved '",
.id, "' instead.")
if (v != .formatVersion)
warning("Format version does not match. Got '",
.formatVersion, "' instead of '", v, "'.")
.PXDataset(id = .id,
formatVersion = .formatVersion,
Data = x)
}
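## Usage sketch (not part of the package source; requires network access to ProteomeXchange):
## px <- PXDataset("PXD000001")
## pxfiles(px)                   ## list the files available for the dataset
## pxtax(px); pxref(px)          ## species and associated reference(s)
## fl <- pxget(px, "README.txt") ## download one file into the working directory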
|
/R/px.R
|
no_license
|
rintukutum/rpx
|
R
| false | false | 5,541 |
r
|
library(plyr)
library(dplyr)
library(ggplot2)
library(reshape2) # melt() is used for the count and score tables below
setwd("/home/vanessa/Documents/Dropbox/Code/Python/brainmeta/ontological_comparison/cluster/ranges-vs-binary/analysisPriorspt5")
# Reading in the result data
ri_ranges = read.csv("data/reverse_inference_scores_ranges.tsv",sep="\t",stringsAsFactors=FALSE,row.names=1) # reverse inference ranges scores
ri_binary = read.csv("data/reverse_inference_scores_binary.tsv",sep="\t",stringsAsFactors=FALSE,row.names=1) # binary scores
ri_priors_in = read.csv("data/reverse_inference_priors_in.tsv",sep="\t",stringsAsFactors=FALSE,row.names=1) # priors for in and out of node sets
ri_priors_out = read.csv("data/reverse_inference_priors_out.tsv",sep="\t",stringsAsFactors=FALSE,row.names=1)
bayes_in_ranges = read.csv("data/reverse_inference_bayes_in_ranges",sep="\t",stringsAsFactors=FALSE,row.names=1) # bayes for query images, ranges in
bayes_out_ranges = read.csv("data/reverse_inference_bayes_out_ranges.tsv",sep="\t",stringsAsFactors=FALSE,row.names=1) # ranges out
bayes_in_bin = read.csv("data/reverse_inference_bayes_in_binary.tsv",sep="\t",stringsAsFactors=FALSE,row.names=1) # bayes binary bin
bayes_out_bin = read.csv("data/reverse_inference_bayes_out_binary.tsv",sep="\t",stringsAsFactors=FALSE,row.names=1) # bayes binary out
# Read in all groups
groups = read.csv("data/groups/all_groups.tsv",sep="\t",stringsAsFactors=FALSE)
image_ids = c()
for (image in groups$image){
image = strsplit(image,"/")[[1]]
image = as.numeric(strsplit(image[length(image)],"[.]")[[1]][1])
image_ids = c(image_ids,image)
}
groups$image_ids = image_ids
count_bin = matrix(0,nrow=ncol(bayes_in_bin),ncol=2)
rownames(count_bin) = colnames(bayes_in_bin)
colnames(count_bin) = c("for","against")
count_range = matrix(0,nrow=ncol(bayes_in_ranges),ncol=2)
rownames(count_range) = colnames(bayes_in_ranges)
colnames(count_range) = c("for","against")
# Make a lookup table for the node name
nodes = unique(groups$group)
node_lookup = c()
for (node in nodes){
node_name = unique(groups$name[groups$group==node])
node_lookup = c(node_lookup,node_name)
}
stopifnot(length(node_lookup) == length(nodes)) # sanity check: one name per node
names(node_lookup) = nodes
# Write a function to generate evidence "for" or "against" a concept
count_for_against = function(image,node,bayesin,bayesout,count_df,direction){
image_id = strsplit(image,"/")[[1]]
image_id = as.character(as.numeric(sub(".nii.gz","",image_id[length(image_id)])))
score_in = bayesin[image_id,node]
score_out = bayesout[image_id,node]
if (!is.na(score_in) && (!is.na(score_out))) {
if (direction=="gt"){
if (score_in > score_out){
count_df[node,"for"] = count_df[node,"for"] + 1
} else {
count_df[node,"against"] = count_df[node,"against"] + 1
}
} else {
if (score_in < score_out){
count_df[node,"for"] = count_df[node,"for"] + 1
} else {
count_df[node,"against"] = count_df[node,"against"] + 1
}
}
}
return(count_df)
}
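# Example of a single tally update (the image path below is hypothetical, for illustration only):
# count_bin <- count_for_against("data/images/0003.nii.gz", nodes[1],
#                                bayes_in_bin, bayes_out_bin, count_bin, "gt")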
# Count evidence for (meaning bayes_in > bayes_out or against (bayes_out > bayes_in)) each concept
# for each of ranges and bin data
for (node in nodes){
cat("Parsing",node,"\n")
# Find in group
group = groups[groups$group==node,]
in_group = group$image[which(group$direction=="in")]
out_group = group$image[which(group$direction=="out")]
# Look at bayes for range and bin given "in" group
for (image in in_group){
count_range = count_for_against(image,node,bayes_in_ranges,bayes_out_ranges,count_range,"gt")
count_bin = count_for_against(image,node,bayes_in_bin,bayes_out_bin,count_bin,"gt")
}
for (image in out_group){
count_range = count_for_against(image,node,bayes_in_ranges,bayes_out_ranges,count_range,"lt")
count_bin = count_for_against(image,node,bayes_in_bin,bayes_out_bin,count_bin,"lt")
}
}
### STEP 1: VISUALIZATION #########################################################################
# Note - this does not completely coincide with order of google site
cr = melt(count_bin)
colnames(cr) = c("node","direction","value")
# Evidence for and against the concepts, not normalized
pdf("img/evidence_for_concepts.pdf")
for (node in nodes){
subset = cr[cr$node==node,]
p = ggplot(subset,aes(x=direction,y=value,fill=direction)) +
geom_histogram(alpha=0.25,stat="identity",binwidth=1) +
labs(title = paste("Evidence for/against",node_lookup[node]))
print(p)
}
dev.off()
# Idea 1: If the "in" group images provide evidence for the concept, on the level of the node
# Evidence for and against the concepts, but now we only want to consider the images that are tagged AT the node:
count_bin_in = matrix(0,nrow=ncol(bayes_in_bin),ncol=2)
rownames(count_bin_in) = colnames(bayes_in_bin)
colnames(count_bin_in) = c("for","against")
for (node in nodes){
cat("Parsing",node,"\n")
# Find in group
group = groups[groups$group==node,]
in_group = group$image[which(group$direction=="in")]
# Look at bayes for range and bin given "in" group
for (image in in_group){
count_bin_in = count_for_against(image,node,bayes_in_bin,bayes_out_bin,count_bin_in,"gt")
}
}
crin = melt(count_bin_in)
colnames(crin) = c("node","direction","value")
pdf("img/evidence_for_concepts_ins.pdf")
for (node in nodes){
subset = crin[crin$node==node,]
if (sum(subset$value)>0) {
p = ggplot(subset,aes(x=direction,y=value,fill=direction)) +
geom_histogram(alpha=0.25,stat="identity",binwidth=1) +
scale_y_continuous(limits = c(0, 93)) +
labs(title = paste("Evidence for/against",node_lookup[node]))
print(p)
}
}
dev.off()
# Basic visualization of reverse inference scores
# rownames of ri_ranges / ri_binary already carry the image ids, so plain numeric matrices suffice
riranges = as.matrix(ri_ranges)
ribinary = as.matrix(ri_binary)
par(mfrow=c(1,2))
hist(riranges,col="purple",main="Reverse Inference Scores, Range Method")
hist(ribinary,col="blue",main="Reverse Inference Scores, Binary Method")
# They are essentially equivalent, but we will look at them in detail anyway
# Reverse inference scores by task
rir = melt(riranges)
colnames(rir) = c("image","task","value")
rib = melt(ribinary)
colnames(rib) = c("image","task","value")
rir$task = as.character(rir$task)
rir$image = as.character(rir$image)
rir$value = as.numeric(rir$value)
rib$task = as.character(rib$task)
rib$image = as.character(rib$image)
rib$value = as.numeric(rib$value)
# Let's look at the distributions individually for each concept!
# we will write to pdf
# We will also save a data frame with number in, number out, and average RI scores
# (not not used)
df=c("countin","countout","meanin","meanout")
par(mfrow=c(1,1))
pdf(file="img/node-ri-scores.pdf")
for (node in nodes){
subset1 = rir$value[rir$task==node]
subset2 = rib$value[rib$task==node]
minvalue = min(min(subset1),min(subset2))
maxvalue = max(max(subset1),max(subset2))
#hist(subset,col="purple",main="RI Scores",xlim=c(minvalue,maxvalue),xlab="ranges method")
#hist(subset,col="blue",main=as.character(node_lookup[node]),xlim=c(minvalue,maxvalue),xlab="binary method")
# Now count the in vs out group
group = groups[groups$group == node,]
  # match each image to its in/out label for this node
  rir$direction = group$direction[match(rir$image, as.character(group$image_ids))]
  rib$direction = group$direction[match(rib$image, as.character(group$image_ids))]
ingroup = length(group$image[group$direction=="in"])
outgroup = length(group$image[group$direction=="out"])
meanin = mean(rib$value[rib$direction=="in"])
meanout = mean(rib$value[rib$direction=="out"])
df = rbind(df,c(ingroup,outgroup,meanin,meanout))
p = ggplot(rib,aes(x=value,fill=direction)) +
geom_histogram(alpha=0.25,stat="bin",binwidth=0.1) +
facet_wrap(~direction) +
labs(title = node_lookup[node])
print(p)
}
dev.off()
# Function to calculate confidence intervals
get_ci = function(dat,direction="upper"){
error = qnorm(0.975)*sd(dat)/sqrt(length(dat))
if (direction=="upper"){
return(mean(dat)+error)
} else {
return(mean(dat)-error)
}
}
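# e.g., 95% CI bounds for the binary-method scores of one concept (illustrative):
# get_ci(rib$value[rib$task == nodes[1]], "lower"); get_ci(rib$value[rib$task == nodes[1]], "upper")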
# Let's look at mean RI scores for each concept node
ribsum = ddply(rib, c("task"), summarise, mscore = mean(value))
ribsum$name = as.character(node_lookup[ribsum$task])
# Sort by meanscore
tmp = ribsum[with(ribsum, order(-mscore)), ]
rownames(tmp) = seq(1,nrow(tmp))
tmp$sort = as.numeric(rownames(tmp))
ggplot(tmp, aes(x=sort,y=mscore,task=task,colour=mscore)) +
geom_bar(stat="identity") +
xlab("concept") +
ylab(paste("mean reverse inference score")) +
scale_x_discrete(limits=tmp$sort,labels=tmp$name) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
countin = c()
countout = c()
for (t in tmp$task){
subset= groups[groups$group==t,]
countin = c(countin,nrow(subset[subset$direction=="in",]))
countout = c(countout,nrow(subset[subset$direction=="out",]))
}
tmp$countin = countin
tmp$countout = countout
# Look at overall mean reverse inference scores
ggplot(tmp, aes(x=sort,y=mscore,task=task,countin=countin,countout=countout,colour=mscore)) +
geom_bar(stat="identity") +
xlab("concept") +
ylab(paste("mean reverse inference score")) +
scale_x_discrete(limits=tmp$sort,labels=tmp$name) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
# Look at count in and out of set
ggplot(tmp, aes(x=sort,task=task,countin=countin,countout=countout)) +
geom_point(aes(y = countin,colour='pink')) +
geom_point(aes(y = countout)) +
scale_x_discrete(limits=tmp$sort,labels=tmp$name) +
xlab("concept") +
ylab(paste("count in (pink) and out")) +
theme(axis.text.x = element_text(angle = 90, hjust = 1),legend.position="none")
# Idea 2: Can reverse inference scores be used to predict image labels
# First make a matrix of images by labels, with 1 if the image is in the "in group"
unique_ids = unique(image_ids)
labels = array(0,dim=c(length(unique_ids),length(nodes)))
rownames(labels) = unique_ids
colnames(labels) = nodes
for (node in nodes){
subset = groups[groups$group==node,]
ingroup = subset$image_ids[subset$direction=="in"]
labels[which(rownames(labels)%in%ingroup),node] = 1
}
# Let's first be stupid and see if we can cluster concepts based on image scores
library(pheatmap)
ribname = ri_binary
colnames(ribname) = as.character(node_lookup[colnames(ribname)])
disty = dist(ribname)
dmat = as.matrix(disty)
# Correlation of concepts by image scores
corr = cor(ribname)
pheatmap(corr,fontsize_row=8)
# Correlation of images by image scores
corr = cor(t(ribname))
pheatmap(corr,fontsize_row=8)
# Get contrast names for the images
images = read.csv("/home/vanessa/Documents/Work/BRAINMETA/reverse_inference/contrast_defined_images.tsv",sep="\t")
contrast_names = as.character(images$cognitive_contrast_cogatlas[as.character(images$image_id)%in%rownames(ribname)])
contrast_names = strtrim(contrast_names, 25)
rownames(corr) = contrast_names
colnames(corr) = contrast_names
pheatmap(corr,fontsize_row=8)
# NEXT: What we would want to do is look at the change in bayes score as we add concepts KNOWN to be in the set.
# want to get feedback on this first before doing it
|
/ontological_comparison/cluster/ranges-vs-binary/analysisPriorspt5/0.data_exploration.R
|
no_license
|
nagyistge/brainmeta
|
R
| false | false | 11,097 |
r
|
library(shiny)
library(DT)
shinyUI(
fluidPage(
titlePanel(
fluidRow(
column(4, h3(strong("CPRD Database")),offset = 1 ),
column(4, offset = 7, img(src='CNODES_logo.png', align = "right", height = "25px"))
)
),
sidebarPanel(
fluidPage(
br(),
fluidRow(column(12,fileInput('file1','Choose Variable',
accept = c(".xlsx")), align="center")),
fluidRow(column(6, actionButton(inputId = "submitSearch", label = "Search") , align='center'),
column(4, actionButton(inputId = "submitEdit", label = "Edit Variable") , align='center')))),
mainPanel(
tabsetPanel(
id = "tabs",
tabPanel(
title = "Dashboard: CPRD variable definition log",
fluidRow(dataTableOutput(outputId= "dash"))),
tabPanel("Search", fluidRow(dataTableOutput(outputId= "search"))),
tabPanel("Edit/add", fluidRow(dataTableOutput(outputId= "contents"))),
tabPanel("About", textOutput("about.txt"))
)
)
)
)
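# Companion sketch (not part of ui.R): a minimal server.R would have to supply the four outputs
# referenced above -- "dash", "search", "contents" and "about.txt". For example (the data objects
# and the readxl call are assumptions, not code from this repository):
# shinyServer(function(input, output) {
#   vars <- eventReactive(input$submitEdit, readxl::read_excel(input$file1$datapath))
#   output$dash <- renderDataTable(variable_log)        # pre-loaded CPRD variable definition log
#   output$search <- renderDataTable(search_results())  # reactive built from input$submitSearch
#   output$contents <- renderDataTable(vars())
#   output$about.txt <- renderText("About the CPRD variable definition log.")
# })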
|
/ui.R
|
no_license
|
LaminJuwara/crpd
|
R
| false | false | 1,108 |
r
|
library(tidyverse)
library(DT)
mpg$cty %>% summary()
ctyPercentiles <- mpg$cty %>% quantile(c(.25,.75))
mpg %>%
datatable(rownames = F) %>%
formatCurrency("displ",currency = "£",digits = 2) %>%
formatStyle("cty",
backgroundColor = styleInterval(ctyPercentiles,c("green","yellow","red")),
color = styleInterval(ctyPercentiles,c("white","blue","white"))) %>%
formatStyle("hwy",background = styleColorBar(mpg$hwy,"steelblue"))
|
/excel_r_dplyr/Visualizations/DataTables.R
|
no_license
|
giorgioottolina94/Excel-R-Python-Projects
|
R
| false | false | 480 |
r
|
library(data.table)
library(plyr)
library(ggplot2)
library(dplyr)
library(lubridate)
setwd(dirname(path.expand("~")))
DataPath <- file.path(paste(getwd(),"Dropbox/Customs Evasion/Raw Data/Comtrade/yearly", sep="/"))
#Michael's computer datapath for saving subsets of data, to keep files from getting too big
DataPath2 <- file.path(paste(getwd(),"Documents/hs2012", sep="/"))
#####REMOVE REPEATED VARIABLES#####
load(paste(DataPath,"y_hs12/y_2012_hs12.Rda", sep = "/"))
y_2012_hs12[ ,`:=`(Classification = NULL, Year = NULL,
`Period Desc.` = NULL, `Is Leaf Code` = NULL,
`Reporter ISO` = NULL, `Partner ISO` = NULL,
`Qty Unit Code` = NULL, Flag = NULL
)]
load(paste(DataPath,"y_hs12/y_2013_hs12.Rda", sep = "/"))
y_2013_hs12 <- as.data.table(y_2013_hs12)
y_2013_hs12[ ,`:=`(Classification = NULL, Year = NULL,
`Period Desc.` = NULL, `Is Leaf Code` = NULL,
`Reporter ISO` = NULL, `Partner ISO` = NULL,
`Qty Unit Code` = NULL, Flag = NULL
)]
hs12_1213 <- do.call("rbind", list(y_2012_hs12, y_2013_hs12))
load(paste(DataPath,"y_hs12/y_2014_hs12.Rda", sep = "/"))
y_2014_hs12 <- as.data.table(y_2014_hs12)
y_2014_hs12[ ,`:=`(Classification = NULL, Year = NULL,
`Period Desc.` = NULL, `Is Leaf Code` = NULL,
`Reporter ISO` = NULL, `Partner ISO` = NULL,
`Qty Unit Code` = NULL, Flag = NULL
)]
hs12_1213 <- do.call("rbind", list(hs12_1213, y_2014_hs12))
save(hs12_1213,file = paste(DataPath2, "hs12_12-14.Rda", sep = "/"))
load(paste(DataPath,"y_hs12/y_2015_hs12.Rda", sep = "/"))
y_2015_hs12 <- as.data.table(y_2015_hs12)
y_2015_hs12[ ,`:=`(Classification = NULL, Year = NULL,
`Period Desc.` = NULL, `Is Leaf Code` = NULL,
`Reporter ISO` = NULL, `Partner ISO` = NULL,
`Qty Unit Code` = NULL, Flag = NULL
)]
load(paste(DataPath,"y_hs12/y_2016_hs12.Rda", sep = "/"))
y_2016_hs12 <- as.data.table(y_2016_hs12)
y_2016_hs12[ ,`:=`(Classification = NULL, Year = NULL,
`Period Desc.` = NULL, `Is Leaf Code` = NULL,
`Reporter ISO` = NULL, `Partner ISO` = NULL,
`Qty Unit Code` = NULL, Flag = NULL
)]
hs12_15_16 <- do.call("rbind", list(y_2015_hs12, y_2016_hs12))
Imports_1516 <- hs12_15_16[`Trade Flow Code`==1]
Exports_1516 <- hs12_15_16[`Trade Flow Code`==2]
rm(hs12_15_16)
save(Imports_1516,file = paste(DataPath2,"Imports1516.Rda", sep = "/"))
rm(Imports_1516)
save(Exports_1516,file = paste(DataPath2,"Exports1516.Rda", sep = "/"))
rm(Exports_1516)
#####CREATE DATASET OF ALL IMPORTS/EXPORTS FOR HS12#####
load(paste(DataPath,"Analysis Data/hs12_12-14.Rda", sep = "/"))
Imports_1214 <- hs12_1213[`Trade Flow Code`==1]
save(Imports_1214,file = paste(DataPath2,"Imports1214.Rda", sep = "/"))
rm(Imports_1214)
Exports_1214 <- hs12_1213[`Trade Flow Code`==2]
save(Exports_1214,file = paste(DataPath2,"Exports1214.Rda", sep = "/"))
rm(Exports_1214)
rm(hs12_1213)
load(paste(DataPath2,"Exports1214.Rda", sep = "/"))
load(paste(DataPath2,"Exports1516.Rda", sep = "/"))
exports <- do.call("rbind", list(Exports_1214, Exports_1516))
save(exports,file = paste(DataPath,"Analysis Data","exports_full12.Rda", sep = "/"))
load(paste(DataPath,"Analysis Data/Imports1214.Rda", sep = "/"))
load(paste(DataPath,"Analysis Data/Imports1516.Rda", sep = "/"))
imports_full12 <- do.call("rbind", list(Imports_1214, Imports_1516))
save(imports_full12,file = paste(DataPath,"Analysis Data","imports_full12.Rda", sep = "/"))
#####SPLIT IMPORTS/EXPORTS FOR EACH YEAR OF HS12 DATA#####
load(paste(DataPath,"Analysis Data/exports_full12.Rda", sep = "/"))
exports <- rename(exports, "Export Value" = "Trade Value (US$)")
exports <- rename(exports, "Export Qty Unit" = "Qty Unit")
exports <- rename(exports, "Export Qty" = "Qty")
exports <- rename(exports, "Export Netweight (kg)" = "Netweight (kg)")
exports2012 <- exports[Period=="2012", ]
save(exports2012,file = paste(DataPath2,"exports2012.Rda", sep = "/"))
rm(exports2012)
exports2013 <- exports[Period=="2013", ]
save(exports2013,file = paste(DataPath2,"exports2013.Rda", sep = "/"))
rm(exports2013)
exports2014 <- exports[Period=="2014", ]
save(exports2014,file = paste(DataPath2,"exports2014.Rda", sep = "/"))
rm(exports2014)
exports2015 <- exports[Period=="2015", ]
save(exports2015,file = paste(DataPath2,"exports2015.Rda", sep = "/"))
rm(exports2015)
exports2016 <- exports[Period=="2016", ]
save(exports2016,file = paste(DataPath2, "exports2016.Rda", sep = "/"))
rm(exports2016)
rm(exports)
load(paste(DataPath,"imports_full12.Rda", sep = "/"))
imports2012 <- imports_full12[Period=="2012", ]
save(imports2012,file = paste(DataPath2, "imports2012.Rda", sep = "/"))
rm(imports2012)
imports2013 <- imports_full12[Period=="2013", ]
save(imports2013,file = paste(DataPath2, "imports2013.Rda", sep = "/"))
rm(imports2013)
imports2014 <- imports_full12[Period=="2014", ]
save(imports2014,file = paste(DataPath2, "imports2014.Rda", sep = "/"))
rm(imports2014)
imports2015 <- imports_full12[Period=="2015", ]
save(imports2015,file = paste(DataPath2, "imports2015.Rda", sep = "/"))
rm(imports2015)
imports2016 <- imports_full12[Period=="2016", ]
save(imports2016,file = paste(DataPath2, "imports2016.Rda", sep = "/"))
rm(imports2016)
#####MERGE IMPORTS/EXPORTS FOR EACH YEAR OF HS12 DATA#####
#2012
load(paste(DataPath2,"imports2012.Rda", sep = "/"))
load(paste(DataPath2,"exports2012.Rda", sep = "/"))
hs12_12 <- merge(imports2012, exports2012,
by.x=c("Period", "Aggregate Level",
"Reporter Code", "Reporter", "Partner Code", "Partner",
"Commodity Code", "Commodity"),
by.y=c("Period", "Aggregate Level",
"Partner Code", "Partner", "Reporter Code", "Reporter",
"Commodity Code", "Commodity"),all=TRUE)
hs12_12 <- rename(hs12_12, "Importer" = "Reporter")
hs12_12 <- rename(hs12_12, "Exporter" = "Partner")
hs12_12$Raw_gap = hs12_12$`Export Value` - hs12_12$`Import Value`
hs12_12$Log_gap = log(hs12_12$`Export Value`) - log(hs12_12$`Import Value`)
hs12_12$Gap_ratio = hs12_12$`Raw_gap`/(hs12_12$`Import Value` + hs12_12$`Export Value`)
hs12_12$`Export Netweight (kg)` <- as.numeric(hs12_12$`Export Netweight (kg)`)
hs12_12$`Import Netweight (kg)` <- as.numeric(hs12_12$`Import Netweight (kg)`)
hs12_12$Qty_raw_gap = hs12_12$`Export Netweight (kg)` - hs12_12$`Import Netweight (kg)`
hs12_12$Qty_log_gap = log(hs12_12$`Export Netweight (kg)`) - log(hs12_12$`Import Netweight (kg)`)
hs12_12$Qty_gap_ratio = hs12_12$`Qty_raw_gap`/(hs12_12$`Export Netweight (kg)` + hs12_12$`Import Netweight (kg)`)
hs12_12 <- hs12_12[`Commodity Code`!="TOTAL", ]
save(hs12_12,file = paste(DataPath, "Analysis Data", "hs12_12.Rda", sep = "/"))
rm(hs12_12, exports2012, imports2012)
#2013
load(paste(DataPath2,"imports2013.Rda", sep = "/"))
load(paste(DataPath2,"exports2013.Rda", sep = "/"))
hs12_13 <- merge(imports2013, exports2013,
by.x=c("Period", "Aggregate Level",
"Reporter Code", "Reporter", "Partner Code", "Partner",
"Commodity Code", "Commodity"),
by.y=c("Period", "Aggregate Level",
"Partner Code", "Partner", "Reporter Code", "Reporter",
"Commodity Code", "Commodity"),all=TRUE)
hs12_13 <- rename(hs12_13, "Importer" = "Reporter")
hs12_13 <- rename(hs12_13, "Exporter" = "Partner")
hs12_13$Raw_gap = hs12_13$`Export Value` - hs12_13$`Import Value`
hs12_13$Log_gap = log(hs12_13$`Export Value`) - log(hs12_13$`Import Value`)
hs12_13$Gap_ratio = hs12_13$`Raw_gap`/(hs12_13$`Import Value` + hs12_13$`Export Value`)
hs12_13$`Export Netweight (kg)` <- as.numeric(hs12_13$`Export Netweight (kg)`)
hs12_13$`Import Netweight (kg)` <- as.numeric(hs12_13$`Import Netweight (kg)`)
hs12_13$Qty_raw_gap = hs12_13$`Export Netweight (kg)` - hs12_13$`Import Netweight (kg)`
hs12_13$Qty_log_gap = log(hs12_13$`Export Netweight (kg)`) - log(hs12_13$`Import Netweight (kg)`)
hs12_13$Qty_gap_ratio = hs12_13$`Qty_raw_gap`/(hs12_13$`Export Netweight (kg)` + hs12_13$`Import Netweight (kg)`)
hs12_13 <- hs12_13[`Commodity Code`!="TOTAL", ]
save(hs12_13,file = paste(DataPath, "Analysis Data", "hs12_13.Rda", sep = "/"))
rm(hs12_13, exports2013, imports2013)
#2014
load(paste(DataPath2,"imports2014.Rda", sep = "/"))
load(paste(DataPath2,"exports2014.Rda", sep = "/"))
hs12_14 <- merge(imports2014, exports2014,
by.x=c("Period", "Aggregate Level",
"Reporter Code", "Reporter", "Partner Code", "Partner",
"Commodity Code", "Commodity"),
by.y=c("Period", "Aggregate Level",
"Partner Code", "Partner", "Reporter Code", "Reporter",
"Commodity Code", "Commodity"),all=TRUE)
hs12_14 <- rename(hs12_14, "Importer" = "Reporter")
hs12_14 <- rename(hs12_14, "Exporter" = "Partner")
hs12_14$Raw_gap = hs12_14$`Export Value` - hs12_14$`Import Value`
hs12_14$Log_gap = log(hs12_14$`Export Value`) - log(hs12_14$`Import Value`)
hs12_14$Gap_ratio = hs12_14$`Raw_gap`/(hs12_14$`Import Value` + hs12_14$`Export Value`)
hs12_14$`Export Netweight (kg)` <- as.numeric(hs12_14$`Export Netweight (kg)`)
hs12_14$`Import Netweight (kg)` <- as.numeric(hs12_14$`Import Netweight (kg)`)
hs12_14$Qty_raw_gap = hs12_14$`Export Netweight (kg)` - hs12_14$`Import Netweight (kg)`
hs12_14$Qty_log_gap = log(hs12_14$`Export Netweight (kg)`) - log(hs12_14$`Import Netweight (kg)`)
hs12_14$Qty_gap_ratio = hs12_14$`Qty_raw_gap`/(hs12_14$`Export Netweight (kg)` + hs12_14$`Import Netweight (kg)`)
hs12_14 <- hs12_14[`Commodity Code`!="TOTAL", ]
save(hs12_14,file = paste(DataPath, "Analysis Data", "hs12_14.Rda", sep = "/"))
rm(hs12_14, exports2014, imports2014)
#2015
load(paste(DataPath2,"imports2015.Rda", sep = "/"))
load(paste(DataPath2,"exports2015.Rda", sep = "/"))
hs12_15 <- merge(imports2015, exports2015,
by.x=c("Period", "Aggregate Level",
"Reporter Code", "Reporter", "Partner Code", "Partner",
"Commodity Code", "Commodity"),
by.y=c("Period", "Aggregate Level",
"Partner Code", "Partner", "Reporter Code", "Reporter",
"Commodity Code", "Commodity"),all=TRUE)
hs12_15 <- rename(hs12_15, "Importer" = "Reporter")
hs12_15 <- rename(hs12_15, "Exporter" = "Partner")
hs12_15$Raw_gap = hs12_15$`Export Value` - hs12_15$`Import Value`
hs12_15$Log_gap = log(hs12_15$`Export Value`) - log(hs12_15$`Import Value`)
hs12_15$Gap_ratio = hs12_15$`Raw_gap`/(hs12_15$`Import Value` + hs12_15$`Export Value`)
hs12_15$`Export Netweight (kg)` <- as.numeric(hs12_15$`Export Netweight (kg)`)
hs12_15$`Import Netweight (kg)` <- as.numeric(hs12_15$`Import Netweight (kg)`)
hs12_15$Qty_raw_gap = hs12_15$`Export Netweight (kg)` - hs12_15$`Import Netweight (kg)`
hs12_15$Qty_log_gap = log(hs12_15$`Export Netweight (kg)`) - log(hs12_15$`Import Netweight (kg)`)
hs12_15$Qty_gap_ratio = hs12_15$`Qty_raw_gap`/(hs12_15$`Export Netweight (kg)` + hs12_15$`Import Netweight (kg)`)
hs12_15 <- hs12_15[`Commodity Code`!="TOTAL", ]
save(hs12_15,file = paste(DataPath, "Analysis Data", "hs12_15.Rda", sep = "/"))
rm(hs12_15, exports2015, imports2015)
#2016
load(paste(DataPath2,"imports2016.Rda", sep = "/"))
load(paste(DataPath2,"exports2016.Rda", sep = "/"))
hs12_16 <- merge(imports2016, exports2016,
by.x=c("Period", "Aggregate Level",
"Reporter Code", "Reporter", "Partner Code", "Partner",
"Commodity Code", "Commodity"),
by.y=c("Period", "Aggregate Level",
"Partner Code", "Partner", "Reporter Code", "Reporter",
"Commodity Code", "Commodity"),all=TRUE)
hs12_16 <- rename(hs12_16, "Importer" = "Reporter")
hs12_16 <- rename(hs12_16, "Exporter" = "Partner")
hs12_16$Raw_gap = hs12_16$`Export Value` - hs12_16$`Import Value`
hs12_16$Log_gap = log(hs12_16$`Export Value`) - log(hs12_16$`Import Value`)
hs12_16$Gap_ratio = hs12_16$`Raw_gap`/(hs12_16$`Import Value` + hs12_16$`Export Value`)
hs12_16$`Export Netweight (kg)` <- as.numeric(hs12_16$`Export Netweight (kg)`)
hs12_16$`Import Netweight (kg)` <- as.numeric(hs12_16$`Import Netweight (kg)`)
hs12_16$Qty_raw_gap = hs12_16$`Export Netweight (kg)` - hs12_16$`Import Netweight (kg)`
hs12_16$Qty_log_gap = log(hs12_16$`Export Netweight (kg)`) - log(hs12_16$`Import Netweight (kg)`)
hs12_16$Qty_gap_ratio = hs12_16$`Qty_raw_gap`/(hs12_16$`Export Netweight (kg)` + hs12_16$`Import Netweight (kg)`)
hs12_16 <- hs12_16[`Commodity Code`!="TOTAL", ]
save(hs12_16,file = paste(DataPath, "Analysis Data", "hs12_16.Rda", sep = "/"))
rm(hs12_16, exports2016, imports2016)
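# The five per-year blocks above repeat the same merge and gap construction.
# Below is a sketch of a helper that does one year in a single call; it is
# illustrative only (merge_year is not part of this project) and assumes the
# per-year import/export files follow the naming pattern used above. Unlike the
# hs12_* objects above, it saves the result under the name `dt`.
merge_year <- function(year, in_path = DataPath2,
                       out_path = paste(DataPath, "Analysis Data", sep = "/")) {
  load(paste(in_path, paste0("imports", year, ".Rda"), sep = "/"))  # loads importsYYYY
  load(paste(in_path, paste0("exports", year, ".Rda"), sep = "/"))  # loads exportsYYYY
  imp_dt <- get(paste0("imports", year))
  exp_dt <- get(paste0("exports", year))
  key_x <- c("Period", "Aggregate Level", "Reporter Code", "Reporter",
             "Partner Code", "Partner", "Commodity Code", "Commodity")
  key_y <- c("Period", "Aggregate Level", "Partner Code", "Partner",
             "Reporter Code", "Reporter", "Commodity Code", "Commodity")
  dt <- merge(imp_dt, exp_dt, by.x = key_x, by.y = key_y, all = TRUE)
  dt <- rename(dt, "Importer" = "Reporter", "Exporter" = "Partner")
  dt$Raw_gap   <- dt$`Export Value` - dt$`Import Value`
  dt$Log_gap   <- log(dt$`Export Value`) - log(dt$`Import Value`)
  dt$Gap_ratio <- dt$Raw_gap / (dt$`Import Value` + dt$`Export Value`)
  dt$`Export Netweight (kg)` <- as.numeric(dt$`Export Netweight (kg)`)
  dt$`Import Netweight (kg)` <- as.numeric(dt$`Import Netweight (kg)`)
  dt$Qty_raw_gap   <- dt$`Export Netweight (kg)` - dt$`Import Netweight (kg)`
  dt$Qty_log_gap   <- log(dt$`Export Netweight (kg)`) - log(dt$`Import Netweight (kg)`)
  dt$Qty_gap_ratio <- dt$Qty_raw_gap / (dt$`Export Netweight (kg)` + dt$`Import Netweight (kg)`)
  dt <- dt[dt$`Commodity Code` != "TOTAL", ]
  save(dt, file = paste(out_path, paste0("hs12_", substr(year, 3, 4), ".Rda"), sep = "/"))
  invisible(dt)
}
# for (yr in 2012:2016) merge_year(yr)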
|
/descriptive analysis/comtrade_clean.R
|
no_license
|
michaelcbest/customsevasion
|
R
| false | false | 13,020 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_documentation.R
\docType{data}
\name{USA.presidents}
\alias{USA.presidents}
\title{First 44 US Presidents}
\source{
Based on
\url{http://thedatahub.org/api/data/ba0cdb03-c0f0-45ff-a21f-63fdf6ce1a89/}
}
\description{
List of presidents. Includes date of inauguration, party and home state.
}
\keyword{data}
|
/man/USA.presidents.Rd
|
no_license
|
LudvigOlsen/transfrr
|
R
| false | true | 390 |
rd
|
# This preliminary analysis evaluate the weight of different PCs
library(dslabs)
library(tidyverse)
data("tissue_gene_expression")  # load the dataset from dslabs into the workspace
save(tissue_gene_expression, file = 'rdas/tissue_gene_expression.rda')
load('rdas/tissue_gene_expression.rda')
pca <- prcomp(tissue_gene_expression$x)
pc <- 1:nrow(tissue_gene_expression$x) # with fewer observations than predictors, the number of PCs equals the number of observations
p <- qplot(pc, pca$sdev) + # to see the importance of each PC
  xlab('PCA number') + ylab('PCA standard deviation')
p
ggsave('fig/PC-sd.png', plot = p)
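# A possible follow-up (sketch, not part of the original script): the cumulative
# share of total variance captured by the leading PCs, from the same prcomp object.
var_explained <- pca$sdev^2 / sum(pca$sdev^2)
cum_var <- cumsum(var_explained)
p_cum <- qplot(pc, cum_var) +
  xlab('PCA number') + ylab('Cumulative proportion of variance')
# e.g. which(cum_var >= 0.9)[1] gives the number of PCs needed for 90% of the variance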
|
/pre-analysis.R
|
no_license
|
ferilab/tissue-gene-analysis
|
R
| false | false | 573 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cormatrix.R
\name{cormatrix_r_p}
\alias{cormatrix_r_p}
\title{Correlations of all pairs of variables.}
\usage{
cormatrix_r_p(numericdatanona, columnpos = NULL, filename = NULL,
method = "pearson")
}
\arguments{
\item{numericdatanona}{A numeric data frame}
\item{columnpos}{In case you want to compute correlations of a subset of columns. Default NULL}
\item{filename}{Optional parameter, if provided a string, it will output the plot in a pdf in the working directory. Default NULL}
\item{method}{the methods inherited from rcorr. Default "pearson". Other possibility is "spearman".}
}
\value{
If a file name is indicated, an output pdf plot. Otherwise, prints to the graphic device.
A matrix with lower triangle R pearson correlations, and upper triangle the p-values
}
\description{
Correlations of all pairs of variables.
}
\examples{
data(mtcars)
cormatrix_r_p(na.omit(mtcars))
}
|
/man/cormatrix_r_p.Rd
|
no_license
|
MoisesExpositoAlonso/moiR
|
R
| false | true | 994 |
rd
|
library(jomo)
### Name: jomo2com.MCMCchain
### Title: JM Imputation of 2-level data assuming a common level-1
### covariance matrix across level-2 units - A tool to check convergence
### of the MCMC
### Aliases: jomo2com.MCMCchain
### ** Examples
attach(tldata)
nburn=20
#now we run the imputation function. Note that we would typically use a higher
#number of nburn iterations in real applications (at least 100)
imp<-jomo2com.MCMCchain(Y.con=data.frame(measure.a),
Y2.cat=data.frame(big.city), Y2.numcat=c(2), clus=city,nburn=nburn)
#We can check the convergence of the first element of beta:
plot(c(1:nburn),imp$collectbeta[1,1,1:nburn],type="l")
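#A similar trace plot could be drawn for other collected parameters, e.g. an
#element of the level-1 covariance matrix -- sketch only, since the component
#names of the returned list depend on the jomo version and the model fitted:
#plot(c(1:nburn),imp$collectomega[1,1,1:nburn],type="l")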
|
/data/genthat_extracted_code/jomo/examples/jomo2com.MCMCchain.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 689 |
r
|
#' Deviation Diverging Lollipop Chart.
#'
#' divlollipop function will draw a Diverging Lollipop Chart for deviation analysis.
#' @param data input data.frame
#' @param div.var diverging value variable
#' @param method "mean" or "median"
#' @param title main title
#' @param subtitle subtitle
#' @param xtitle x axis title
#' @param ytitle y axis title
#' @param caption caption
#' @return An object of class \code{ggplot}
#' @examples
#' plot<- divlollipop(data=mtcars,div.var="mpg",method="mean")
#' plot
#'
#' @import ggplot2
#' @import scales
#' @import reshape2
#' @import ggthemes
#' @import gganimate
#' @import gapminder
#' @import ggalt
#' @import ggExtra
#' @import ggcorrplot
#' @import dplyr
#' @import treemapify
#' @import ggfortify
#' @import zoo
#' @import ggdendro
#' @export
divlollipop<-function(data,div.var, method="mean",
title=NULL,subtitle=NULL,xtitle=NULL,ytitle=NULL,caption=NULL){
df<- data
div.var<- div.var
if(method=="mean"){
df$row<-rownames(df)
df$z<-round((df[,div.var] - mean(df[,div.var])), 2)
df$z_type <- ifelse(df$z < 0, "below", "above")
df <- df[order(df$z), ]
df$row <- factor(df$row, levels = df$row)
}
if(method=="median"){
df$row<-rownames(df)
df$z<-round((df[,div.var] - median(df[,div.var])), 2)
df$z_type <- ifelse(df$z < 0, "below", "above")
df <- df[order(df$z), ]
df$row <- factor(df$row, levels = df$row)
}
p<-ggplot(df, aes_string(x="row", y="z", label="z")) +
geom_point(stat='identity', fill="black", size=6) +
geom_segment(aes_string(y = 0,
x = "row",
yend = "z",
xend = "row"),
color = "black") +
geom_text(color="white", size=2) +
theme_fivethirtyeight() +
theme(axis.title = element_text(),
legend.title = element_text(face = 4,size = 10),
legend.direction = "horizontal", legend.box = "horizontal")+
labs(title=title,
subtitle=subtitle,
x=xtitle,
y=ytitle,
caption=caption) +
coord_flip()
return(p)
}
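# Usage sketch (illustrative, not part of the exported examples): the same chart
# centred on the median instead of the mean; "mpg" is a column of mtcars.
# p_med <- divlollipop(data = mtcars, div.var = "mpg", method = "median",
#                      title = "Mileage", ytitle = "Deviation from median")
# p_med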
|
/R/divlolipop.r
|
permissive
|
HeeseokMoon/ggedachart
|
R
| false | false | 2,101 |
r
|
#' Plot a Venn diagram as an independent function. It supports both data frame and list as input.
#'
#' @name ggvenn
#' @param data A data.frame or a list as input data.
#' @param columns A character vector used as an index to select columns/elements.
#' @param show_elements Show set elements instead of count/percentage.
#' @param show_percentage Show percentage for each set.
#' @param digits The desired number of digits after the decimal point
#' @param fill_color Filling colors in circles.
#' @param fill_alpha Transparency for filling circles.
#' @param stroke_color Stroke color for drawing circles.
#' @param stroke_alpha Transparency for drawing circles.
#' @param stroke_size Stroke size for drawing circles.
#' @param stroke_linetype Line type for drawing circles.
#' @param set_name_color Text color for set names.
#' @param set_name_size Text size for set names.
#' @param text_color Text color for intersect contents.
#' @param text_size Text size for intersect contents.
#' @param label_sep Separator character for displaying elements.
#' @param count_column Specify column for element repeat count.
#' @param show_outside Show outside elements (not belongs to any set).
#' @param auto_scale Allow automatically resizing circles according to element counts.
#' @return The ggplot object to print or save to file.
#' @examples
#' library(ggvenn)
#'
#' # use list as input
#' a <- list(`Set 1` = c(1, 3, 5, 7),
#' `Set 2` = c(1, 5, 9),
#' `Set 3` = c(1, 2, 8),
#' `Set 4` = c(6, 7))
#' ggvenn(a, c("Set 1", "Set 2"))
#' ggvenn(a, c("Set 1", "Set 2", "Set 3"))
#' ggvenn(a)
#'
#' # use data.frame as input
#' d <- tibble(value = c(1, 2, 3, 5, 6, 7, 8, 9),
#' `Set 1` = c(TRUE, FALSE, TRUE, TRUE, FALSE, TRUE, FALSE, TRUE),
#' `Set 2` = c(TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, TRUE),
#' `Set 3` = c(TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE),
#' `Set 4` = c(FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE))
#' ggvenn(d, c("Set 1", "Set 2"))
#' ggvenn(d, c("Set 1", "Set 2", "Set 3"))
#' ggvenn(d)
#'
#' # set fill color
#' ggvenn(d, c("Set 1", "Set 2"), fill_color = c("red", "blue"))
#'
#' # hide percentage
#' ggvenn(d, c("Set 1", "Set 2"), show_percentage = FALSE)
#'
#' # change precision of percentages
#' ggvenn(d, c("Set 1", "Set 2"), digits = 2)
#'
#' # show elements instead of count/percentage
#' ggvenn(a, show_elements = TRUE)
#' ggvenn(d, show_elements = "value")
#' @seealso geom_venn
#' @importFrom dplyr tibble tribble as_tibble %>% select_if mutate count filter inner_join
#' @importFrom ggplot2 ggplot aes geom_polygon geom_segment geom_text scale_x_continuous scale_y_continuous scale_fill_manual guides coord_fixed theme_void layer
#' @importFrom stats na.omit
#' @export
ggvenn <- function(data, columns = NULL,
show_elements = FALSE,
show_percentage = TRUE,
digits = 1,
fill_color = c("blue", "yellow", "green", "red"),
fill_alpha = .5,
stroke_color = "black",
stroke_alpha = 1,
stroke_size = 1,
stroke_linetype = "solid",
set_name_color = "black",
set_name_size = 6,
text_color = "black",
text_size = 4,
label_sep = ",",
count_column = NULL,
show_outside = c("auto", "none", "always"),
auto_scale = FALSE) {
show_outside <- match.arg(show_outside)
venn <- prepare_venn_data(data, columns, show_elements, show_percentage, digits,
label_sep, count_column, show_outside, auto_scale)
g <- venn$shapes %>%
mutate(group = LETTERS[group]) %>%
ggplot() +
geom_polygon(aes(x = x, y = y, group = group, fill = group),
alpha = fill_alpha) +
geom_polygon(aes(x = x, y = y, group = group),
fill = NA,
color = stroke_color,
size = stroke_size,
alpha = stroke_alpha,
linetype = stroke_linetype)
if (nrow(venn$labels) > 0) {
g <- g +
geom_text(data = venn$labels,
aes(x = x, y = y, label = text, hjust = hjust, vjust = vjust),
color = set_name_color,
size = set_name_size)
}
if (nrow(venn$texts) > 0) {
g <- g +
geom_text(data = venn$texts,
aes(x = x, y = y, label = text, hjust = hjust, vjust = vjust),
color = text_color,
size = text_size)
}
if (nrow(venn$segs) > 0) {
g <- g +
geom_segment(data = venn$segs,
aes(x = x, y = y, xend = xend, yend = yend),
color = text_color,
size = 0.5)
}
g <- g +
scale_fill_manual(values = fill_color) +
guides(fill = "none") +
coord_fixed() +
theme_void()
return(g)
}
gen_element_df_2 <- function() {
df <- tribble(~name, ~A, ~B,
"A", TRUE, FALSE,
"B", FALSE, TRUE,
"AB", TRUE, TRUE,
"-", FALSE, FALSE)
stopifnot(all((df %>% dplyr::count(A, B) %>% with(n)) == 1))
return(df %>% mutate(n = 0, text = ""))
}
gen_element_df_3 <- function() {
df <- tribble(~name, ~A, ~B, ~C,
"A", TRUE, FALSE, FALSE,
"B", FALSE, TRUE, FALSE,
"C", FALSE, FALSE, TRUE,
"AB", TRUE, TRUE, FALSE,
"AC", TRUE, FALSE, TRUE,
"BC", FALSE, TRUE, TRUE,
"ABC", TRUE, TRUE, TRUE,
"-", FALSE, FALSE, FALSE)
stopifnot(all((df %>% dplyr::count(A, B, C) %>% with(n)) == 1))
return(df %>% mutate(n = 0, text = ""))
}
gen_element_df_4 <- function() {
df <- tribble(~name, ~A, ~B, ~C, ~D,
"A", TRUE, FALSE, FALSE, FALSE,
"B", FALSE, TRUE, FALSE, FALSE,
"C", FALSE, FALSE, TRUE, FALSE,
"D", FALSE, FALSE, FALSE, TRUE,
"AB", TRUE, TRUE, FALSE, FALSE,
"BC", FALSE, TRUE, TRUE, FALSE,
"CD", FALSE, FALSE, TRUE, TRUE,
"AC", TRUE, FALSE, TRUE, FALSE,
"BD", FALSE, TRUE, FALSE, TRUE,
"AD", TRUE, FALSE, FALSE, TRUE,
"ABC", TRUE, TRUE, TRUE, FALSE,
"BCD", FALSE, TRUE, TRUE, TRUE,
"ACD", TRUE, FALSE, TRUE, TRUE,
"ABD", TRUE, TRUE, FALSE, TRUE,
"ABCD",TRUE, TRUE, TRUE, TRUE,
"-", FALSE, FALSE, FALSE, FALSE)
stopifnot(all((df %>% dplyr::count(A, B, C, D) %>% with(n)) == 1))
return(df %>% mutate(n = 0, text = ""))
}
gen_circle <- function(group, x_offset = 0, y_offset = 0, radius = 1,
radius_b = radius, theta_offset = 0, length.out = 100) {
tibble(group = group,
theta = seq(0, 2 * pi, length.out = length.out)) %>%
mutate(x_raw = radius * cos(theta),
y_raw = radius_b * sin(theta),
x = x_offset + x_raw * cos(theta_offset) - y_raw * sin(theta_offset),
y = y_offset + x_raw * sin(theta_offset) + y_raw * cos(theta_offset))
}
calc_scale_info_2 <- function(auto_scale, n_sets, max_scale_diff = 5) {
if (auto_scale) {
stopifnot(length(n_sets) == 4)
if (n_sets[[1]] == 0 && n_sets[[2]] == 0 && n_sets[[3]] == 0) { # both sets are empty
a_radius <- 1
b_radius <- 1
overlap_size <- -0.2
} else if (n_sets[[1]] + n_sets[[3]] == 0) { # set A is empty
a_radius <- 1 / max_scale_diff
b_radius <- 1
overlap_size <- -0.2
} else if (n_sets[[2]] + n_sets[[3]] == 0) { # set B is empty
a_radius <- 1
b_radius <- 1 / max_scale_diff
overlap_size <- -0.2
} else if (n_sets[[1]] >= n_sets[[2]]) { # set A is larger than or equal to set B
a_radius <- 1
b_radius <- (n_sets[[2]] + n_sets[[3]]) / (n_sets[[1]] + n_sets[[3]])
overlap_size <- ifelse(n_sets[[3]] == 0, -0.2, n_sets[[3]] / (n_sets[[1]] + n_sets[[3]]))
if (b_radius < 1 / max_scale_diff) {
b_radius <- 1 / max_scale_diff
if (overlap_size > 0) {
overlap_size <- b_radius * (n_sets[[3]] / (n_sets[[2]] + n_sets[[3]]))
}
}
} else { # set A is smaller than set B
a_radius <- (n_sets[[1]] + n_sets[[3]]) / (n_sets[[2]] + n_sets[[3]])
b_radius <- 1
overlap_size <- ifelse(n_sets[[3]] == 0, -0.2, n_sets[[3]] / (n_sets[[2]] + n_sets[[3]]))
if (a_radius < 1 / max_scale_diff) {
a_radius <- 1 / max_scale_diff
if (overlap_size > 0) {
overlap_size <- a_radius * (n_sets[[3]] / (n_sets[[1]] + n_sets[[3]]))
}
}
}
} else {
a_radius = 1
b_radius = 1
overlap_size = 1/3
}
return(c(auto_scale = auto_scale,
a_radius = a_radius,
b_radius = b_radius,
overlap_size = overlap_size))
}
calc_scale_info_3 <- function(auto_scale, n_sets, max_scale_diff = 5) {
if (auto_scale) {
stop("Error: 'auto_scale' parameter is supported for only two set venn so far.")
}
return(NULL)
}
calc_scale_info_4 <- function(auto_scale, n_sets, max_scale_diff = 5) {
if (auto_scale) {
stop("Error: 'auto_scale' parameter is supported for only two set venn so far.")
}
return(NULL)
}
min_overlap_for_text <- 0.2
gen_circle_2 <- function(scale_info) {
x_dist <- (scale_info['a_radius'] + scale_info['b_radius'] - scale_info['overlap_size'] * 2) / 2
rbind(gen_circle(1L, -x_dist, 0, scale_info['a_radius']),
gen_circle(2L, x_dist, 0, scale_info['b_radius']))
}
gen_text_pos_2 <- function(scale_info) {
df <- tribble(~name, ~x, ~y, ~hjust, ~vjust,
"A", -0.8, 0, 0.5, 0.5,
"B", 0.8, 0, 0.5, 0.5,
"AB", 0, 0, 0.5, 0.5,
"-", 0, -1.2, 0.5, 0.5)
if (scale_info['auto_scale']) {
x_dist <- (scale_info['a_radius'] + scale_info['b_radius'] - scale_info['overlap_size'] * 2) / 2
if (scale_info['overlap_size'] <= 0) {
df$x[[1]] <- -x_dist
df$x[[2]] <- x_dist
df <- df %>% filter(name != "AB")
} else {
if (scale_info['overlap_size'] < min_overlap_for_text) {
df$x[[1]] <- -x_dist - scale_info['overlap_size']
df$x[[2]] <- x_dist + scale_info['overlap_size']
if (scale_info['a_radius'] < min_overlap_for_text) {
df$x[[3]] <- -x_dist + (scale_info['a_radius'] - scale_info['overlap_size']) / 2
df$y[[3]] <- -1.5 * scale_info['a_radius']
} else if (scale_info['b_radius'] < min_overlap_for_text) {
df$x[[3]] <- x_dist - (scale_info['a_radius'] - scale_info['overlap_size']) / 2
df$y[[3]] <- -1.5 * scale_info['b_radius']
} else {
df$x[[3]] <- -x_dist + scale_info['a_radius'] - scale_info['overlap_size']
df$y[[3]] <- -1.2
}
df$x[[4]] <- -x_dist - scale_info['a_radius']
df$y[[4]] <- -1.6
df$hjust[[4]] <- 0
} else {
df$x[[1]] <- -x_dist - scale_info['overlap_size']
df$x[[2]] <- x_dist + scale_info['overlap_size']
df$x[[3]] <- -x_dist + scale_info['a_radius'] - scale_info['overlap_size']
}
if (scale_info['a_radius'] <= scale_info['overlap_size']) {
df <- df %>% filter(name != "A")
} else if (scale_info['b_radius'] <= scale_info['overlap_size']) {
df <- df %>% filter(name != "B")
}
}
}
return(df)
}
gen_seg_pos_2 <- function(scale_info) {
df <- tibble(x = 0, y = 0, xend = 0, yend = 0)[-1,]
if (scale_info['overlap_size'] > 0 && scale_info['auto_scale']) {
x_dist <- (scale_info['a_radius'] + scale_info['b_radius'] - scale_info['overlap_size'] * 2) / 2
if (scale_info['overlap_size'] < min_overlap_for_text) {
x_pos <- -x_dist + scale_info['a_radius'] - scale_info['overlap_size']
if (scale_info['a_radius'] < min_overlap_for_text) {
x2_pos <- -x_dist + 1.2 * (scale_info['a_radius'] - scale_info['overlap_size']) / 2
df <- tibble(x = x_pos, y = 0, xend = x2_pos, yend = -1.2 * scale_info['a_radius'])
} else if (scale_info['b_radius'] < min_overlap_for_text) {
x2_pos <- x_dist - 1.2 * (scale_info['a_radius'] - scale_info['overlap_size']) / 2
df <- tibble(x = x_pos, y = 0, xend = x2_pos, yend = -1.2 * scale_info['a_radius'])
} else {
df <- tibble(x = x_pos, y = 0, xend = x_pos, yend = -1)
}
}
}
return(df)
}
gen_label_pos_2 <- function(scale_info) {
df <- tribble(~name, ~x, ~y, ~hjust, ~vjust,
"A", -0.8, 1.2, 0.5, 0,
"B", 0.8, 1.2, 0.5, 0)
if (scale_info['auto_scale']) {
}
return(df)
}
gen_circle_3 <- function() {
rbind(gen_circle(1L, -2/3, (sqrt(3) + 2) / 6, 1),
gen_circle(2L, 2/3,(sqrt(3) + 2) / 6, 1),
gen_circle(3L, 0, -(sqrt(3) + 2) / 6, 1))
}
gen_text_pos_3 <- function() {
tribble(~name, ~x, ~y, ~hjust, ~vjust,
"A", -0.8, 0.62, 0.5, 0.5,
"B", 0.8, 0.62, 0.5, 0.5,
"C", 0, -0.62, 0.5, 0.5,
"AB", 0, 0.8, 0.5, 0.5,
"AC", -0.5, 0, 0.5, 0.5,
"BC", 0.5, 0, 0.5, 0.5,
"ABC", 0, 0.2, 0.5, 0.5,
"-", 1.2, -0.8, 0, 0.5)
}
gen_seg_pos_3 <- function(scale_info) {
df <- tibble(x = 0, y = 0, xend = 0, yend = 0)[-1,]
return(df)
}
gen_label_pos_3 <- function() {
tribble(~name, ~x, ~y, ~hjust, ~vjust,
"A", -0.8, 1.8, 0.5, 0,
"B", 0.8, 1.8, 0.5, 0,
"C", 0, -1.8, 0.5, 1)
}
gen_circle_4 <- function() {
rbind(gen_circle(1L, -.7, -1/2, .75, 1.5, pi/4),
gen_circle(2L, -.72+2/3, -1/6, .75, 1.5, pi/4),
gen_circle(3L, .72-2/3, -1/6, .75, 1.5, -pi/4),
gen_circle(4L, .7, -1/2, .75, 1.5, -pi/4))
}
gen_text_pos_4 <- function() {
tribble(~name, ~x, ~y, ~hjust, ~vjust,
"A", -1.5, 0, 0.5, 0.5,
"B", -0.6, 0.7, 0.5, 0.5,
"C", 0.6, 0.7, 0.5, 0.5,
"D", 1.5, 0, 0.5, 0.5,
"AB", -0.9, 0.3, 0.5, 0.5,
"BC", 0, 0.4, 0.5, 0.5,
"CD", 0.9, 0.3, 0.5, 0.5,
"AC", -0.8, -0.9, 0.5, 0.5,
"BD", 0.8, -0.9, 0.5, 0.5,
"AD", 0, -1.4, 0.5, 0.5,
"ABC", -0.5, -0.2, 0.5, 0.5,
"BCD", 0.5, -0.2, 0.5, 0.5,
"ACD", -0.3, -1.1, 0.5, 0.5,
"ABD", 0.3, -1.1, 0.5, 0.5,
"ABCD", 0, -0.7, 0.5, 0.5,
"-", 0, -1.9, 0.5, 0.5)
}
gen_seg_pos_4 <- function(scale_info) {
df <- tibble(x = 0, y = 0, xend = 0, yend = 0)[-1,]
return(df)
}
gen_label_pos_4 <- function() {
tribble(~name, ~x, ~y, ~hjust, ~vjust,
"A", -1.5, -1.3, 1, 1,
"B", -0.8, 1.2, 0.5, 0,
"C", 0.8, 1.2, 0.5, 0,
"D", 1.5, -1.3, 0, 1)
}
prepare_venn_data <- function(data, columns = NULL,
show_elements = FALSE, show_percentage = TRUE, digits = 1,
label_sep = ",", count_column = NULL,
show_outside = c("auto", "none", "always"),
auto_scale = FALSE) {
show_outside <- match.arg(show_outside)
if (is.data.frame(data)) {
if (is.null(columns)) {
columns = data %>% select_if(is.logical) %>% names
}
if (!identical(show_elements, FALSE)) {
if (!{
if (is.character(show_elements)) {
show_elements <- show_elements[[1]]
show_elements %in% names(data)
} else { FALSE }}) {
stop("Value ", deparse(show_elements),
" in `show_elements` does not correspond to any column name of the data frame.",
call. = FALSE)
}
}
if (length(columns) == 2) {
stopifnot(is.logical(as_tibble(data)[,columns[[1]], drop = TRUE]))
stopifnot(is.logical(as_tibble(data)[,columns[[2]], drop = TRUE]))
df_element <- gen_element_df_2()
for (i in 1:nrow(df_element)) {
idx <- ((!xor(df_element$A[[i]], as_tibble(data)[,columns[[1]]])) &
(!xor(df_element$B[[i]], as_tibble(data)[,columns[[2]]])))
if (is.null(count_column)) {
df_element$n[[i]] <- sum(idx)
} else {
df_element$n[[i]] <- sum(as_tibble(data)[,count_column][idx,])
}
if (!identical(show_elements, FALSE)) {
df_element$text[[i]] <- paste(unlist(as_tibble(data)[idx,show_elements]), collapse = label_sep)
}
}
scale_info <- calc_scale_info_2(auto_scale, df_element$n)
df_shape <- gen_circle_2(scale_info)
df_text <- gen_text_pos_2(scale_info) %>% inner_join(df_element, by = "name")
df_label <- gen_label_pos_2(scale_info)
df_seg <- gen_seg_pos_2(scale_info)
} else if (length(columns) == 3) {
stopifnot(is.logical(as_tibble(data)[,columns[[1]], drop = TRUE]))
stopifnot(is.logical(as_tibble(data)[,columns[[2]], drop = TRUE]))
stopifnot(is.logical(as_tibble(data)[,columns[[3]], drop = TRUE]))
df_element <- gen_element_df_3()
for (i in 1:nrow(df_element)) {
idx <- ((!xor(df_element$A[[i]], as_tibble(data)[,columns[[1]]])) &
(!xor(df_element$B[[i]], as_tibble(data)[,columns[[2]]])) &
(!xor(df_element$C[[i]], as_tibble(data)[,columns[[3]]])))
if (is.null(count_column)) {
df_element$n[[i]] <- sum(idx)
} else {
df_element$n[[i]] <- sum(as_tibble(data)[,count_column][idx,])
}
if (!identical(show_elements, FALSE)) {
df_element$text[[i]] <- paste(unlist(as_tibble(data)[idx,show_elements]), collapse = label_sep)
}
}
scale_info <- calc_scale_info_3(auto_scale, df_element$n)
df_shape <- gen_circle_3()
df_text <- gen_text_pos_3() %>% inner_join(df_element, by = "name")
df_label <- gen_label_pos_3()
df_seg <- gen_seg_pos_3(scale_info)
} else if (length(columns) == 4) {
stopifnot(is.logical(as_tibble(data)[,columns[[1]], drop = TRUE]))
stopifnot(is.logical(as_tibble(data)[,columns[[2]], drop = TRUE]))
stopifnot(is.logical(as_tibble(data)[,columns[[3]], drop = TRUE]))
stopifnot(is.logical(as_tibble(data)[,columns[[4]], drop = TRUE]))
df_element <- gen_element_df_4()
for (i in 1:nrow(df_element)) {
idx <- ((df_element$A[[i]] == as_tibble(data)[,columns[[1]], drop = TRUE]) &
(df_element$B[[i]] == as_tibble(data)[,columns[[2]], drop = TRUE]) &
(df_element$C[[i]] == as_tibble(data)[,columns[[3]], drop = TRUE]) &
(df_element$D[[i]] == as_tibble(data)[,columns[[4]], drop = TRUE]))
if (is.null(count_column)) {
df_element$n[[i]] <- sum(idx)
} else {
df_element$n[[i]] <- sum(as_tibble(data)[,count_column][idx,])
}
if (!identical(show_elements, FALSE)) {
df_element$text[[i]] <- paste(unlist(as_tibble(data)[idx,show_elements]), collapse = label_sep)
}
}
scale_info <- calc_scale_info_4(auto_scale, df_element$n)
df_shape <- gen_circle_4()
df_text <- gen_text_pos_4() %>% inner_join(df_element, by = "name")
df_label <- gen_label_pos_4()
df_seg <- gen_seg_pos_4(scale_info)
} else {
stop("logical columns in data.frame `data` or vector `columns` should be length between 2 and 4")
}
df_label <- df_label %>% mutate(text = columns)
show_elements <- !identical(show_elements, FALSE)
} else if (is.list(data)) {
if (is.null(columns)) {
columns <- names(data) %>% head(4)
}
a2 <- na.omit(unique(unlist(data[columns])))
if (length(columns) == 2) {
df_element <- gen_element_df_2()
for (i in 1:nrow(df_element)) {
idx <- ((!xor(df_element$A[[i]], a2 %in% data[[columns[[1]]]])) &
(!xor(df_element$B[[i]], a2 %in% data[[columns[[2]]]])))
df_element$n[[i]] <- sum(idx)
df_element$text[[i]] <- paste(a2[idx], collapse = label_sep)
}
scale_info <- calc_scale_info_2(auto_scale, df_element$n)
df_shape <- gen_circle_2(scale_info)
df_text <- gen_text_pos_2(scale_info) %>% inner_join(df_element, by = "name")
df_label <- gen_label_pos_2(scale_info)
df_seg <- gen_seg_pos_2(scale_info)
} else if (length(columns) == 3) {
df_element <- gen_element_df_3()
for (i in 1:nrow(df_element)) {
idx <- ((!xor(df_element$A[[i]], a2 %in% data[[columns[[1]]]])) &
(!xor(df_element$B[[i]], a2 %in% data[[columns[[2]]]])) &
(!xor(df_element$C[[i]], a2 %in% data[[columns[[3]]]])))
df_element$n[[i]] <- sum(idx)
df_element$text[[i]] <- paste(a2[idx], collapse = label_sep)
}
scale_info <- calc_scale_info_3(auto_scale, df_element$n)
df_shape <- gen_circle_3()
df_text <- gen_text_pos_3() %>% inner_join(df_element, by = "name")
df_label <- gen_label_pos_3()
df_seg <- gen_seg_pos_3(scale_info)
} else if (length(columns) == 4) {
df_element <- gen_element_df_4()
for (i in 1:nrow(df_element)) {
idx <- ((!xor(df_element$A[[i]], a2 %in% data[[columns[[1]]]])) &
(!xor(df_element$B[[i]], a2 %in% data[[columns[[2]]]])) &
(!xor(df_element$C[[i]], a2 %in% data[[columns[[3]]]])) &
(!xor(df_element$D[[i]], a2 %in% data[[columns[[4]]]])))
df_element$n[[i]] <- sum(idx)
df_element$text[[i]] <- paste(a2[idx], collapse = label_sep)
}
scale_info <- calc_scale_info_4(auto_scale, df_element$n)
df_shape <- gen_circle_4()
df_text <- gen_text_pos_4() %>% inner_join(df_element, by = "name")
df_label <- gen_label_pos_4()
df_seg <- gen_seg_pos_4(scale_info)
} else {
stop("list `data` or vector `column` should be length between 2 and 4")
}
df_label <- df_label %>% mutate(text = columns)
} else {
stop("`data` should be either a list or a data.frame")
}
if ((show_outside == "none") || (show_outside == "auto" && df_text$n[[nrow(df_text)]] == 0)) {
if (df_text$n[[nrow(df_text)]] > 0)
warning("Although not display in plot, outside elements are still count in percentages.")
df_text <- df_text[-nrow(df_text), ]
}
if (!show_elements) {
fmt <- sprintf("%%d\n(%%.%df%%%%)", digits)
if (show_percentage) {
df_text <- df_text %>% mutate(text = sprintf(fmt, n, 100 * n / sum(n)))
} else {
df_text <- df_text %>% mutate(text = sprintf("%d", n))
}
}
list(shapes = df_shape, texts = df_text, labels = df_label, segs = df_seg)
}
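# Usage sketch (illustrative, not exported with the package): auto_scale resizes
# the two circles according to set sizes; as calc_scale_info_3/4 show, it is
# currently supported only for two-set diagrams.
# sets <- list(Large = 1:100, Small = 95:110)
# ggvenn(sets, c("Large", "Small"), auto_scale = TRUE)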
|
/R/ggvenn.R
|
permissive
|
yanlinlin82/ggvenn
|
R
| false | false | 23,017 |
r
|
if (a_radius < 1 / max_scale_diff) {
a_radius <- 1 / max_scale_diff
if (overlap_size > 0) {
overlap_size <- a_radius * (n_sets[[3]] / (n_sets[[1]] + n_sets[[3]]))
}
}
}
} else {
a_radius = 1
b_radius = 1
overlap_size = 1/3
}
return(c(auto_scale = auto_scale,
a_radius = a_radius,
b_radius = b_radius,
overlap_size = overlap_size))
}
calc_scale_info_3 <- function(auto_scale, n_sets, max_scale_diff = 5) {
if (auto_scale) {
stop("Error: 'auto_scale' parameter is supported for only two set venn so far.")
}
return(NULL)
}
calc_scale_info_4 <- function(auto_scale, n_sets, max_scale_diff = 5) {
if (auto_scale) {
stop("Error: 'auto_scale' parameter is supported for only two set venn so far.")
}
return(NULL)
}
min_overlap_for_text <- 0.2
gen_circle_2 <- function(scale_info) {
x_dist <- (scale_info['a_radius'] + scale_info['b_radius'] - scale_info['overlap_size'] * 2) / 2
rbind(gen_circle(1L, -x_dist, 0, scale_info['a_radius']),
gen_circle(2L, x_dist, 0, scale_info['b_radius']))
}
gen_text_pos_2 <- function(scale_info) {
df <- tribble(~name, ~x, ~y, ~hjust, ~vjust,
"A", -0.8, 0, 0.5, 0.5,
"B", 0.8, 0, 0.5, 0.5,
"AB", 0, 0, 0.5, 0.5,
"-", 0, -1.2, 0.5, 0.5)
if (scale_info['auto_scale']) {
x_dist <- (scale_info['a_radius'] + scale_info['b_radius'] - scale_info['overlap_size'] * 2) / 2
if (scale_info['overlap_size'] <= 0) {
df$x[[1]] <- -x_dist
df$x[[2]] <- x_dist
df <- df %>% filter(name != "AB")
} else {
if (scale_info['overlap_size'] < min_overlap_for_text) {
df$x[[1]] <- -x_dist - scale_info['overlap_size']
df$x[[2]] <- x_dist + scale_info['overlap_size']
if (scale_info['a_radius'] < min_overlap_for_text) {
df$x[[3]] <- -x_dist + (scale_info['a_radius'] - scale_info['overlap_size']) / 2
df$y[[3]] <- -1.5 * scale_info['a_radius']
} else if (scale_info['b_radius'] < min_overlap_for_text) {
df$x[[3]] <- x_dist - (scale_info['a_radius'] - scale_info['overlap_size']) / 2
df$y[[3]] <- -1.5 * scale_info['b_radius']
} else {
df$x[[3]] <- -x_dist + scale_info['a_radius'] - scale_info['overlap_size']
df$y[[3]] <- -1.2
}
df$x[[4]] <- -x_dist - scale_info['a_radius']
df$y[[4]] <- -1.6
df$hjust[[4]] <- 0
} else {
df$x[[1]] <- -x_dist - scale_info['overlap_size']
df$x[[2]] <- x_dist + scale_info['overlap_size']
df$x[[3]] <- -x_dist + scale_info['a_radius'] - scale_info['overlap_size']
}
if (scale_info['a_radius'] <= scale_info['overlap_size']) {
df <- df %>% filter(name != "A")
} else if (scale_info['b_radius'] <= scale_info['overlap_size']) {
df <- df %>% filter(name != "B")
}
}
}
return(df)
}
gen_seg_pos_2 <- function(scale_info) {
df <- tibble(x = 0, y = 0, xend = 0, yend = 0)[-1,]
if (scale_info['overlap_size'] > 0 && scale_info['auto_scale']) {
x_dist <- (scale_info['a_radius'] + scale_info['b_radius'] - scale_info['overlap_size'] * 2) / 2
if (scale_info['overlap_size'] < min_overlap_for_text) {
x_pos <- -x_dist + scale_info['a_radius'] - scale_info['overlap_size']
if (scale_info['a_radius'] < min_overlap_for_text) {
x2_pos <- -x_dist + 1.2 * (scale_info['a_radius'] - scale_info['overlap_size']) / 2
df <- tibble(x = x_pos, y = 0, xend = x2_pos, yend = -1.2 * scale_info['a_radius'])
} else if (scale_info['b_radius'] < min_overlap_for_text) {
x2_pos <- x_dist - 1.2 * (scale_info['a_radius'] - scale_info['overlap_size']) / 2
df <- tibble(x = x_pos, y = 0, xend = x2_pos, yend = -1.2 * scale_info['a_radius'])
} else {
df <- tibble(x = x_pos, y = 0, xend = x_pos, yend = -1)
}
}
}
return(df)
}
gen_label_pos_2 <- function(scale_info) {
df <- tribble(~name, ~x, ~y, ~hjust, ~vjust,
"A", -0.8, 1.2, 0.5, 0,
"B", 0.8, 1.2, 0.5, 0)
if (scale_info['auto_scale']) {
}
return(df)
}
gen_circle_3 <- function() {
rbind(gen_circle(1L, -2/3, (sqrt(3) + 2) / 6, 1),
gen_circle(2L, 2/3,(sqrt(3) + 2) / 6, 1),
gen_circle(3L, 0, -(sqrt(3) + 2) / 6, 1))
}
gen_text_pos_3 <- function() {
tribble(~name, ~x, ~y, ~hjust, ~vjust,
"A", -0.8, 0.62, 0.5, 0.5,
"B", 0.8, 0.62, 0.5, 0.5,
"C", 0, -0.62, 0.5, 0.5,
"AB", 0, 0.8, 0.5, 0.5,
"AC", -0.5, 0, 0.5, 0.5,
"BC", 0.5, 0, 0.5, 0.5,
"ABC", 0, 0.2, 0.5, 0.5,
"-", 1.2, -0.8, 0, 0.5)
}
gen_seg_pos_3 <- function(scale_info) {
df <- tibble(x = 0, y = 0, xend = 0, yend = 0)[-1,]
return(df)
}
gen_label_pos_3 <- function() {
tribble(~name, ~x, ~y, ~hjust, ~vjust,
"A", -0.8, 1.8, 0.5, 0,
"B", 0.8, 1.8, 0.5, 0,
"C", 0, -1.8, 0.5, 1)
}
gen_circle_4 <- function() {
rbind(gen_circle(1L, -.7, -1/2, .75, 1.5, pi/4),
gen_circle(2L, -.72+2/3, -1/6, .75, 1.5, pi/4),
gen_circle(3L, .72-2/3, -1/6, .75, 1.5, -pi/4),
gen_circle(4L, .7, -1/2, .75, 1.5, -pi/4))
}
gen_text_pos_4 <- function() {
tribble(~name, ~x, ~y, ~hjust, ~vjust,
"A", -1.5, 0, 0.5, 0.5,
"B", -0.6, 0.7, 0.5, 0.5,
"C", 0.6, 0.7, 0.5, 0.5,
"D", 1.5, 0, 0.5, 0.5,
"AB", -0.9, 0.3, 0.5, 0.5,
"BC", 0, 0.4, 0.5, 0.5,
"CD", 0.9, 0.3, 0.5, 0.5,
"AC", -0.8, -0.9, 0.5, 0.5,
"BD", 0.8, -0.9, 0.5, 0.5,
"AD", 0, -1.4, 0.5, 0.5,
"ABC", -0.5, -0.2, 0.5, 0.5,
"BCD", 0.5, -0.2, 0.5, 0.5,
"ACD", -0.3, -1.1, 0.5, 0.5,
"ABD", 0.3, -1.1, 0.5, 0.5,
"ABCD", 0, -0.7, 0.5, 0.5,
"-", 0, -1.9, 0.5, 0.5)
}
gen_seg_pos_4 <- function(scale_info) {
df <- tibble(x = 0, y = 0, xend = 0, yend = 0)[-1,]
return(df)
}
gen_label_pos_4 <- function() {
tribble(~name, ~x, ~y, ~hjust, ~vjust,
"A", -1.5, -1.3, 1, 1,
"B", -0.8, 1.2, 0.5, 0,
"C", 0.8, 1.2, 0.5, 0,
"D", 1.5, -1.3, 0, 1)
}
prepare_venn_data <- function(data, columns = NULL,
show_elements = FALSE, show_percentage = TRUE, digits = 1,
label_sep = ",", count_column = NULL,
show_outside = c("auto", "none", "always"),
auto_scale = FALSE) {
show_outside <- match.arg(show_outside)
if (is.data.frame(data)) {
if (is.null(columns)) {
columns = data %>% select_if(is.logical) %>% names
}
if (!identical(show_elements, FALSE)) {
if (!{
if (is.character(show_elements)) {
show_elements <- show_elements[[1]]
show_elements %in% names(data)
} else { FALSE }}) {
stop("Value ", deparse(show_elements),
" in `show_elements` does not correspond to any column name of the data frame.",
call. = FALSE)
}
}
if (length(columns) == 2) {
stopifnot(is.logical(as_tibble(data)[,columns[[1]], drop = TRUE]))
stopifnot(is.logical(as_tibble(data)[,columns[[2]], drop = TRUE]))
df_element <- gen_element_df_2()
for (i in 1:nrow(df_element)) {
idx <- ((!xor(df_element$A[[i]], as_tibble(data)[,columns[[1]]])) &
(!xor(df_element$B[[i]], as_tibble(data)[,columns[[2]]])))
if (is.null(count_column)) {
df_element$n[[i]] <- sum(idx)
} else {
df_element$n[[i]] <- sum(as_tibble(data)[,count_column][idx,])
}
if (!identical(show_elements, FALSE)) {
df_element$text[[i]] <- paste(unlist(as_tibble(data)[idx,show_elements]), collapse = label_sep)
}
}
scale_info <- calc_scale_info_2(auto_scale, df_element$n)
df_shape <- gen_circle_2(scale_info)
df_text <- gen_text_pos_2(scale_info) %>% inner_join(df_element, by = "name")
df_label <- gen_label_pos_2(scale_info)
df_seg <- gen_seg_pos_2(scale_info)
} else if (length(columns) == 3) {
stopifnot(is.logical(as_tibble(data)[,columns[[1]], drop = TRUE]))
stopifnot(is.logical(as_tibble(data)[,columns[[2]], drop = TRUE]))
stopifnot(is.logical(as_tibble(data)[,columns[[3]], drop = TRUE]))
df_element <- gen_element_df_3()
for (i in 1:nrow(df_element)) {
idx <- ((!xor(df_element$A[[i]], as_tibble(data)[,columns[[1]]])) &
(!xor(df_element$B[[i]], as_tibble(data)[,columns[[2]]])) &
(!xor(df_element$C[[i]], as_tibble(data)[,columns[[3]]])))
if (is.null(count_column)) {
df_element$n[[i]] <- sum(idx)
} else {
df_element$n[[i]] <- sum(as_tibble(data)[,count_column][idx,])
}
if (!identical(show_elements, FALSE)) {
df_element$text[[i]] <- paste(unlist(as_tibble(data)[idx,show_elements]), collapse = label_sep)
}
}
scale_info <- calc_scale_info_3(auto_scale, df_element$n)
df_shape <- gen_circle_3()
df_text <- gen_text_pos_3() %>% inner_join(df_element, by = "name")
df_label <- gen_label_pos_3()
df_seg <- gen_seg_pos_3(scale_info)
} else if (length(columns) == 4) {
stopifnot(is.logical(as_tibble(data)[,columns[[1]], drop = TRUE]))
stopifnot(is.logical(as_tibble(data)[,columns[[2]], drop = TRUE]))
stopifnot(is.logical(as_tibble(data)[,columns[[3]], drop = TRUE]))
stopifnot(is.logical(as_tibble(data)[,columns[[4]], drop = TRUE]))
df_element <- gen_element_df_4()
for (i in 1:nrow(df_element)) {
idx <- ((df_element$A[[i]] == as_tibble(data)[,columns[[1]], drop = TRUE]) &
(df_element$B[[i]] == as_tibble(data)[,columns[[2]], drop = TRUE]) &
(df_element$C[[i]] == as_tibble(data)[,columns[[3]], drop = TRUE]) &
(df_element$D[[i]] == as_tibble(data)[,columns[[4]], drop = TRUE]))
if (is.null(count_column)) {
df_element$n[[i]] <- sum(idx)
} else {
df_element$n[[i]] <- sum(as_tibble(data)[,count_column][idx,])
}
if (!identical(show_elements, FALSE)) {
df_element$text[[i]] <- paste(unlist(as_tibble(data)[idx,show_elements]), collapse = label_sep)
}
}
scale_info <- calc_scale_info_4(auto_scale, df_element$n)
df_shape <- gen_circle_4()
df_text <- gen_text_pos_4() %>% inner_join(df_element, by = "name")
df_label <- gen_label_pos_4()
df_seg <- gen_seg_pos_4(scale_info)
} else {
stop("logical columns in data.frame `data` or vector `columns` should be length between 2 and 4")
}
df_label <- df_label %>% mutate(text = columns)
show_elements <- !identical(show_elements, FALSE)
} else if (is.list(data)) {
if (is.null(columns)) {
columns <- names(data) %>% head(4)
}
a2 <- na.omit(unique(unlist(data[columns])))
if (length(columns) == 2) {
df_element <- gen_element_df_2()
for (i in 1:nrow(df_element)) {
idx <- ((!xor(df_element$A[[i]], a2 %in% data[[columns[[1]]]])) &
(!xor(df_element$B[[i]], a2 %in% data[[columns[[2]]]])))
df_element$n[[i]] <- sum(idx)
df_element$text[[i]] <- paste(a2[idx], collapse = label_sep)
}
scale_info <- calc_scale_info_2(auto_scale, df_element$n)
df_shape <- gen_circle_2(scale_info)
df_text <- gen_text_pos_2(scale_info) %>% inner_join(df_element, by = "name")
df_label <- gen_label_pos_2(scale_info)
df_seg <- gen_seg_pos_2(scale_info)
} else if (length(columns) == 3) {
df_element <- gen_element_df_3()
for (i in 1:nrow(df_element)) {
idx <- ((!xor(df_element$A[[i]], a2 %in% data[[columns[[1]]]])) &
(!xor(df_element$B[[i]], a2 %in% data[[columns[[2]]]])) &
(!xor(df_element$C[[i]], a2 %in% data[[columns[[3]]]])))
df_element$n[[i]] <- sum(idx)
df_element$text[[i]] <- paste(a2[idx], collapse = label_sep)
}
scale_info <- calc_scale_info_3(auto_scale, df_element$n)
df_shape <- gen_circle_3()
df_text <- gen_text_pos_3() %>% inner_join(df_element, by = "name")
df_label <- gen_label_pos_3()
df_seg <- gen_seg_pos_3(scale_info)
} else if (length(columns) == 4) {
df_element <- gen_element_df_4()
for (i in 1:nrow(df_element)) {
idx <- ((!xor(df_element$A[[i]], a2 %in% data[[columns[[1]]]])) &
(!xor(df_element$B[[i]], a2 %in% data[[columns[[2]]]])) &
(!xor(df_element$C[[i]], a2 %in% data[[columns[[3]]]])) &
(!xor(df_element$D[[i]], a2 %in% data[[columns[[4]]]])))
df_element$n[[i]] <- sum(idx)
df_element$text[[i]] <- paste(a2[idx], collapse = label_sep)
}
scale_info <- calc_scale_info_4(auto_scale, df_element$n)
df_shape <- gen_circle_4()
df_text <- gen_text_pos_4() %>% inner_join(df_element, by = "name")
df_label <- gen_label_pos_4()
df_seg <- gen_seg_pos_4(scale_info)
} else {
stop("list `data` or vector `column` should be length between 2 and 4")
}
df_label <- df_label %>% mutate(text = columns)
} else {
stop("`data` should be either a list or a data.frame")
}
if ((show_outside == "none") || (show_outside == "auto" && df_text$n[[nrow(df_text)]] == 0)) {
if (df_text$n[[nrow(df_text)]] > 0)
warning("Although not display in plot, outside elements are still count in percentages.")
df_text <- df_text[-nrow(df_text), ]
}
if (!show_elements) {
fmt <- sprintf("%%d\n(%%.%df%%%%)", digits)
if (show_percentage) {
df_text <- df_text %>% mutate(text = sprintf(fmt, n, 100 * n / sum(n)))
} else {
df_text <- df_text %>% mutate(text = sprintf("%d", n))
}
}
list(shapes = df_shape, texts = df_text, labels = df_label, segs = df_seg)
}
|
## ROC curve fitting
## Coded by Qingfei Pan (Qingfei.Pan@stjude.org)
## R-3.6
## 0. configuration
require(NetBID2)
library(pROC)
require(ggplot2)
setwd("./DATA/ClinicalTrial")
## 1. load the eset
load("ClinicalTrial.log2CP50M.genelevel.afterBatchEffectRemoval.eset")
eset <- ClinicalTrial.log2CP50M.genelevel.afterBatchEffectRemoval.eset; rm(ClinicalTrial.log2CP50M.genelevel.afterBatchEffectRemoval.eset)
## 2. read the regulon
regulon.file <- read.table("./HDAC6_Breast_Cancer_Regulon.txt", header = F, sep = "\t", stringsAsFactors = F)
regulon <- data.frame(row.names = regulon.file$V1, target = regulon.file$V1); head(regulon)
geneset <- list(); geneset[[1]] <- regulon; names(geneset) <- "HDAC6regulon"
## 3. calculate the HDAC6 score
exp <- exprs(eset)
hdac6score <- cal.Activity(target_list = geneset, cal_mat = as.matrix(exp),
es.method = 'mean', # 'Weightedmean', 'mean', 'maxmean', 'absmean'
                           std = T, memory_constrain = F # if TRUE, avoid Matrix cross products, which are memory-consuming.
)
hdac6score <- data.frame(t(hdac6score))
# z-normalize the raw HDAC6 score
std <- function(x){
tmp_mean <- mean(x, na.rm = T); tmp_sd <- sd(x, na.rm = T); (x - tmp_mean) / tmp_sd
}
hdac6score$HDAC6regulon <- std(hdac6score$HDAC6regulon)
## 4. prepare the master table
pd <- pData(eset)
master <- merge(pd, hdac6score, by = "row.names", all = T); dim(master)
head(master)
## 5. ROC curve fitting
roc1 <- roc(master$Response2, master$HDAC6regulon, plot=TRUE, legacy.axes = T, #percent = T,
xlab = "1 - Specificity", ylab = "Sensitivity", col="red", lwd=4, print.auc=T, ci = T)
coords(roc1, "all", ret=c("threshold", "sens", "spec", "accuracy")) # accuracy, precision, recall
|
/12.ROC_curve_analysis_of_HDAC6_score_in_the_clinical_trial.R
|
permissive
|
jyyulab/HDAC6-score
|
R
| false | false | 1,782 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pedCreate.R
\name{pedCreate}
\alias{pedCreate}
\alias{nuclearPed}
\alias{cousinsPed}
\alias{halfCousinsPed}
\alias{doubleCousins}
\alias{doubleFirstCousins}
\alias{quadHalfFirstCousins}
\alias{fullSibMating}
\alias{halfSibStack}
\alias{cousinPed}
\alias{halfCousinPed}
\title{Create simple pedigrees}
\usage{
nuclearPed(noffs, sex)
cousinsPed(degree, removal = 0, degree2 = NULL, child = FALSE)
halfCousinsPed(degree, removal = 0, degree2 = NULL, child = FALSE)
doubleCousins(degree1, degree2, removal1 = 0, removal2 = 0, child = FALSE)
doubleFirstCousins()
quadHalfFirstCousins()
fullSibMating(generations)
halfSibStack(generations)
cousinPed(degree)
halfCousinPed(degree)
}
\arguments{
\item{noffs}{A positive integer, the number of offspring in the nuclear
family.}
\item{sex}{A vector of length \code{noffs}; indicating the genders (1=male,
2=female) of the offspring. If missing, all offspring are taken to be
males.}
\item{degree, degree1, degree2}{Non-negative integers, indicating the degree of
cousin-like relationships: 0 = siblings, 1 = first cousins, 2 = second cousins,
and so on. See Details and Examples.}
\item{removal, removal1, removal2}{Non-negative integers, indicating removals
of cousin-like relationships. See Details and Examples.}
\item{child}{A logical: Should an inbred child be added to the two cousins?}
\item{generations}{A positive integer indicating the number of crossings.}
}
\value{
A \code{\link{linkdat}} object.
}
\description{
These are utility functions for creating some common pedigree structures as
\code{linkdat} objects.
}
\details{
All individuals are created as unaffected. Use \code{\link{swapAff}} to edit
this (see Examples). Use \code{\link{swapSex}} to change gender of pedigree
members.
The call \code{cousinsPed(degree=n, removal=k)} creates a pedigree with two
n'th cousins, k times removed. By default, removals are added on the right
side. To override this, the parameter \code{degree2} can be used to indicate
explicitly the number of generations on the right side of the pedigree. When
\code{degree2} is given \code{removal} is ignored. (Similarly for
\code{halfCousinsPed}.)
The function \code{doubleCousins} creates two individuals whose fathers are
cousins (\code{degree1}, \code{removal1}) as well as their mothers
(\code{degree2}, \code{removal2}). For simplicity, a wrapper
\code{doubleFirstCousins} is provided for the most common case, double first
cousins. Finally \code{quadHalfFirstCousins} produces a pedigree with
quadruple half first cousins.
\code{fullSibMating} crosses full sibs continuously for the indicated number
of generations.
\code{halfSibStack} produces a breeding scheme where the two individuals in
the final generation are simultaneously half siblings and half n'th cousins,
where \code{n=1,...,generations}.
\code{cousinPed} and \code{halfCousinPed} (written without the 's') are
deprecated functions kept for backwards compatibility. They create cousin
pedigrees, but without the possibility of removals, and with a different
ordering than their replacements \code{cousinsPed} and \code{halfCousinsPed}.
}
\examples{
# A nuclear family with 2 boys and 3 girls,
# where the father and the two boys are affected.
x = nuclearPed(noffs=5, sex=c(1,1,2,2,2))
x = swapAff(x, ids=c(1,3,4))
# Half sibs:
halfCousinsPed(degree=0)
# Grand aunt:
cousinsPed(degree=0, removal=2)
# Second cousins once removed.
cousinsPed(degree=2, removal=1)
# Again second cousins once removed,
# but with the 'removal' on the left side.
cousinsPed(degree=3, degree2=2)
# A child of first cousin parents.
cousinsPed(degree=1, child=TRUE)
# Consecutive brother-sister matings.
fullSibMating(3)
# Simultaneous half siblings and half first cousins
halfSibStack(2)
# Double first cousins
doubleFirstCousins()
# Quadruple half first cousins
# Weird plotting behaviour for this pedigree.
x = quadHalfFirstCousins()
#plot(x)
}
\seealso{
\code{\link{swapAff}}, \code{\link{swapSex}},
\code{\link{removeIndividuals}}, \code{\link{addOffspring}},
\code{\link{relabel}}
}
|
/man/pedCreate.Rd
|
no_license
|
cran/paramlink
|
R
| false | true | 4,257 |
rd
|
## These functions cache the inverse of a matrix so that the potentially
## costly computation does not have to be repeated.
## makeCacheMatrix creates a special "matrix": a list of getter/setter
## closures that store the matrix and its cached inverse.
makeCacheMatrix <- function(x = matrix()) {
s <- NULL
set <- function(y){
x <<- y
s <<- NULL
}
get <- function() x
setsolve <- function(solve) s <<- solve
getsolve <- function() s
list(set = set, get = get, setsolve = setsolve, getsolve = getsolve)
}
## cachesolve computes the inverse of the special "matrix" returned by
## makeCacheMatrix above; if the inverse has already been calculated,
## the cached value is returned instead of recomputing it.
cachesolve <- function(x) {
s <- x$getsolve()
if(!is.null(s)) {
message("getting cached data")
return(s)
}
data <- x$get()
s <- solve(data)
x$setsolve(s)
s
}
## Return a matrix that is the inverse of 'x'
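## A minimal usage sketch (added for illustration, not part of the original
## assignment): it shows the cache being hit on the second call. The 2x2
## matrix below is an arbitrary invertible example.
m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cachesolve(m) # computes the inverse via solve() and caches it
cachesolve(m) # prints "getting cached data" and returns the cached inverse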
|
/cachematrix.R
|
no_license
|
keitasawamura/ProgrammingAssignment2
|
R
| false | false | 739 |
r
|
fig03x019<-function(){
item<-c("Overall coordination",
"Political affairs",
"International law",
"International cooperation",
"Regional cooperation",
"Human rights",
"Public information",
"Management",
"Internal oversight",
"Administrative",
"Capital",
"Safety & security",
"Development",
"Staff assessment")
#
amount<-c(718555600,626069600,87269400,398449400,
477145600,259227500,184000500,540204300,35997700,
108470900,58782600,197169300,18651300,461366000)
amount<-amount/1000000
#
y1<-1:length(item)
item1<-item[order(amount,decreasing=TRUE)]
amount1<-sort(amount,decreasing=TRUE)
#
graphics.off()
windows(width=4.5,height=4.5,pointsize=12)
par(fin=c(4.45,4.45),pin=c(4.45,4.45),
mai=c(0.875,2.0,0.0,0.25))
#
plot(amount1,y1,type="n",xaxt="n",yaxt="n",
xlim=c(0,800/1.04),ylim=c(0,length(item1)+1),
xlab='Millions of US Dollars',ylab='',xaxs="r",yaxs="i")
#
for (i in 1:14) lines(x=c(0,amount1[i]),y=c(i,i),lty=3)
points(x=amount1,y=1:14,pch=19,cex=1.0)
#
axis(1,at=200*(0:4),labels=TRUE,tick=TRUE,
outer=FALSE)
axis(2,at=1:14+0.25,labels=item1,tick=FALSE,
outer=FALSE,las=2,hadj=1,padj=1)
#
dev.copy2eps(file="fig03x019.eps")
dev.copy2pdf(file="fig03x019.pdf")
}
|
/graphicsforstatistics_2e_figures_scripts_r/Chapter 3/fig03x019.R
|
no_license
|
saqibarfeen/coding_time
|
R
| false | false | 1,215 |
r
|
\name{par.openCR.fit}
\alias{par.openCR.fit}
\title{Fit Multiple openCR Models}
\description{
This function is a wrapper for \code{\link{openCR.fit}}.
}
\usage{
par.openCR.fit (arglist, ncores = 1, seed = 123, trace = FALSE, logfile = NULL,
prefix = "")
}
\arguments{
\item{arglist}{list of argument lists for \code{openCR.fit} or a character
vector naming such lists}
\item{ncores}{ integer number of cores used by parallel::makeClusters() }
\item{seed}{integer pseudorandom number seed}
\item{trace}{logical; if TRUE intermediate output may be logged}
\item{logfile}{character name of file to log progress reports}
\item{prefix}{character prefix for names of output}
}
\details{
In openCR >= 1.5.0, setting ncores > 1 is deprecated and triggers a warning: with multithreading it is usually faster to set ncores = 1 in par.openCR.fit.
\code{trace} overrides any settings in \code{arglist}.
It is convenient to provide the names of the capthist and mask arguments
in each component of arglist as character values (i.e. in quotes); objects thus
named are exported from the workspace to each worker process (see Examples).
Using \code{ncores}>1 is obsolete under the multithreading regime in \pkg{openCR} >= 1.5.0.
It is usually slower than \code{ncores} = 1. If used it has these effects:
-- worker processes are generated using the \pkg{parallel} package,
-- one model is fitted on each worker, and
-- if no logfile name is provided then a temporary file name will be generated in tempdir().
}
\value{
For \code{par.openCR.fit} - openCRlist of model fits (see
\code{\link{openCR.fit}} and \code{\link{openCRlist}}). Names are created by prefixing \code{prefix} to the
names of \code{arglist}. If \code{trace} is TRUE then the total
execution time and finish time are displayed.
}
\seealso{
\code{\link{openCR.fit}},
\link{Parallel},
\code{\link{make.table}},
\code{\link{openCRlist}}
}
\note{
Any attempt in \code{arglist} to set \code{ncores > 1} for a particular
openCR fit was ignored in \pkg{openCR} < 1.5.0. Now it is allowed.
}
\examples{
\dontrun{
m1 <- list(capthist = ovenCH, model = list(p~1, phi~1))
m2 <- list(capthist = ovenCH, model = list(p~session, phi~1))
m3 <- list(capthist = ovenCH, model = list(p~session, phi~session) )
setNumThreads(7) # on quadcore Windows PC
fits <- par.openCR.fit (c('m1','m2','m3'), ncores = 1)
AIC(fits)
}
}
\keyword{ model }
|
/man/par.openCR.fit.Rd
|
no_license
|
MurrayEfford/openCR
|
R
| false | false | 2,450 |
rd
|
library(dplyr)
library(dbplyr)
library(readr)
Automezzo <- read_csv2("new/Automezzo.csv")
#Using ',' as decimal and '.' as grouping mark. Use read_delim() for more control.
##Parsed with column specification:
# cols(
# AutomezzoID = col_double(),
# DataDiAcquisto = col_character(),
# Chilometraggio = col_character(),
# ModelloID = col_double(),
# MarcaID = col_double(),
# Rottamato = col_character(),
# DataRottamazione = col_character(),
# TipoVeicolo = col_double(),
# TipiCarburante_TipoCarburanteID = col_double()
# )
# How to change the column types? read_csv2 guessed only character and double.
# TODO: set explicit types - see the col_types sketch below.
#
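# A possible way to force column types (illustrative sketch only: the date
# format and the integer/logical choices are assumptions about the data, and
# `Automezzo_typed` is a hypothetical name not used elsewhere in this script):
Automezzo_typed <- read_csv2("new/Automezzo.csv",
                             col_types = cols(
                               .default = col_guess(),
                               AutomezzoID = col_integer(),
                               DataDiAcquisto = col_date(format = "%d/%m/%Y"),
                               Rottamato = col_logical()
                             ))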
my_db_file <- "car-database.sqlite"
my_db <- src_sqlite(my_db_file, create = TRUE)
my_db
copy_to(my_db, Automezzo, temporary = FALSE)
my_db
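# Quick sanity check (illustrative): the copied table can be queried lazily.
# copy_to() names the remote table after the data frame, here "Automezzo".
tbl(my_db, "Automezzo")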
|
/CreatingCarSQLiteDB.R
|
no_license
|
CavallucciMartina/R-statistic-Test
|
R
| false | false | 757 |
r
|
require(ggplot2)
require(dplyr)
require(Matrix)
require(cowplot)
require(Seurat)
load(file="/data/tmp/Dovydas/Single_Cell_RNAseq/2020_02_05_YvO_Single_Cell/Seurat/With_old_4_best_mice/Merged.RData")
# Loading cluster information for different cell populations
Initial = read.csv(file = "5kmeans_initial.csv") # Initial 5 k-means
StemTa = read.csv(file = "4kmeans_Stem_TA.csv") # 4 k means of Stem/TA cluster
TuftEE = read.csv(file = "2kmeans_Tuft_EE.csv") # 2 k means of Tuft/EE cluster
## Overlaying the initial 5 k-means clustering info on cells
tenmeans = Initial
tenmeans[,2] = as.factor(tenmeans[,2])
row.names(tenmeans) = tenmeans[,1]
tenmeans = tenmeans[row.names(tenmeans) %in% row.names(SI.integrated@meta.data),]
Idents(SI.integrated) = tenmeans[,2]
levels(SI.integrated@active.ident) = c("Colonocyte_1", "Goblet", "Colonocyte_2", "Tuft-EE", "Stem-TA")
# Generating csv files for number of each cell type in different mice (Initial clustering).
num = cbind(SI.integrated@meta.data, "Cluster" = SI.integrated@active.ident)
info = table(num[,c(5,6,9)])
write.csv(info, "Mouse_Cell_Numbers.csv")
#################################### Re-clustering Tuft/EE ##############################################
Tuft = subset(SI.integrated, idents = "Tuft-EE")
tenmeans = TuftEE
tenmeans[,2] = as.factor(tenmeans[,2])
row.names(tenmeans) = tenmeans[,1]
tenmeans = tenmeans[row.names(tenmeans) %in% row.names(Tuft@meta.data),]
Idents(Tuft) = tenmeans[,2]
levels(Tuft@active.ident) = c("EnteroEndocrine_Cell", "Tuft_Cell")
Tuft = NormalizeData(Tuft, verbose = F)
Tuft = FindVariableFeatures(Tuft, verbose = F)
Tuft = ScaleData(Tuft, features = row.names(SI.Seurat@assays$RNA@data), verbose = F)
Tuft = RunPCA(Tuft, features = row.names(SI.Seurat@assays$RNA@data), verbose = F)
Tuft = RunTSNE(Tuft, dims = 1:30)
pdf(file = "Tuft-EE-tSNE_Reclust.pdf")
DimPlot(Tuft, reduction = "tsne") + theme(legend.position = "bottom")
DimPlot(Tuft, reduction = "tsne", group.by = "Compartment") + theme(legend.position = "bottom")
DimPlot(Tuft, reduction = "tsne", group.by = "Age") + theme(legend.position = "bottom")
FeaturePlot(Tuft, features = c("Dclk1", "Cd24a", "Chga", "Chgb"))
dev.off()
Tuft1 = subset(Tuft, idents = "Tuft_Cell")
Tuft1 = FindNeighbors(Tuft1, dims = 1:10)
Tuft1 = FindClusters(Tuft1, resolution = 0.1)
pdf(file = paste("Graph_based_Tuft1.pdf", sep = "") )
plot = DimPlot(Tuft1, reduction = "tsne")
print(plot)
dev.off()
Tuft1.markers <- FindAllMarkers(Tuft1, only.pos = F, min.pct = 0.1, logfc.threshold = 0.1)
Tuft1.markers %>% group_by(cluster) %>% top_n(n = 5, wt = avg_logFC)
top10 <- Tuft1.markers %>% group_by(cluster) %>% top_n(n = 10, wt = avg_logFC)
write.csv(top10, file= "Tuft1_compartment_genes.csv")
Tuft1@meta.data$Compartment = factor(Tuft1@meta.data$Compartment, levels = c("Cecum", "ProxCol", "DistCol"))
col.plot = c("Red", "Black", "Blue")
pdf(file = "Heatmap_Tuft1.pdf" )
plot = DoHeatmap(Tuft1, features = top10$gene, size=2, angle=0, group.by = "Compartment") + NoLegend() + scale_fill_gradient2( low = "Blue", mid = "Black", high = "Red", midpoint = 0, guide = "colourbar", aesthetics = "fill") +theme(axis.text = element_text(size=5))
print(plot)
dev.off()
EnteroE = subset(Tuft, idents = "EnteroEndocrine_Cell")
EnteroE= NormalizeData(EnteroE, verbose = F)
EnteroE = FindVariableFeatures(EnteroE, verbose = F)
EnteroE = ScaleData(EnteroE, features = row.names(SI.Seurat@assays$RNA@data), verbose = F)
EnteroE = RunPCA(EnteroE, features = row.names(SI.Seurat@assays$RNA@data), verbose = F)
EnteroE = RunTSNE(EnteroE, dims = 1:30)
pdf(file = "EnteroE-tSNE_Reclust.pdf")
DimPlot(EnteroE, reduction = "tsne") + theme(legend.position = "bottom")
DimPlot(EnteroE, reduction = "tsne", group.by = "Compartment") + theme(legend.position = "bottom")
DimPlot(EnteroE, reduction = "tsne", group.by = "Age") + theme(legend.position = "bottom")
dev.off()
pdf(file = "EE-subtype-markers.pdf") ##From Haber paper
FeaturePlot(EnteroE, features = c("Clca4", "Slc12a2", "Sox4", "Dll1"), reduction = "tsne") #Early + Mid progenitor
FeaturePlot(EnteroE, features = c("Neurod1", "Neurod2", "Serpina1c", "Ghrl"), reduction = "tsne") #Late + A progenitor
FeaturePlot(EnteroE, features = c("Cck", "Gal", "Pyy", "Gcg"), reduction = "tsne") #SILA + SIL-P
FeaturePlot(EnteroE, features = c("Bdnf", "Scgn", "Gip", "Fabp5"), reduction = "tsne") #SIK-P + SIK
FeaturePlot(EnteroE, features = c("Sst", "Iapp", "Nts", "Adgrd1"), reduction = "tsne") #SAKD + SIN
FeaturePlot(EnteroE, features = c("Tac1", "Gch1", "Reg4", "Afp"), reduction = "tsne") #EC + EC-Reg4
dev.off()
pdf(file = "Tuft-subtype-markers.pdf")
FeaturePlot(Tuft, features = c("Ptprc", "Dclk1", "Cd24a", "Il25"), reduction = "tsne")
FeaturePlot(Tuft, features = c("Tslp", "Rac2", "Ptgs1", "Irf7"), reduction = "tsne")
FeaturePlot(Tuft, features = c("Nradd", "Gng13", "Nrep", "Rgs2"), reduction = "tsne")
dev.off()
# Generating csv files for number of each cell type in different mice (Tuft).
num = cbind(Tuft@meta.data, "Cluster" = Tuft@active.ident)
info = table(num[,c(5,6,9)])
write.csv(info, "Mouse_Tuft-EE_Numbers.csv")
# Wilcox
Tuft = subset(SI.integrated, idents = "Tuft-EE")
tenmeans = TuftEE
tenmeans[,2] = as.factor(tenmeans[,2])
row.names(tenmeans) = tenmeans[,1]
tenmeans = tenmeans[row.names(tenmeans) %in% row.names(Tuft@meta.data),]
Idents(Tuft) = tenmeans[,2]
levels(Tuft@active.ident) = c("EnteroEndocrine_Cell", "Tuft_Cell")
Tuft@meta.data$Age[which(Tuft@meta.data$Age == "Young_20w")] = "Young"
DE.Tuft = subset(Tuft, cells = which(Tuft@active.ident == "Tuft_Cell"))
a=c()
a = FindMarkers(object = DE.Tuft,test.use = "wilcox", group.by = 'Age', ident.1 = "Young", ident.2 = "Old")
a = na.omit(a[a$p_val_adj <0.05,] )
a = cbind(a, "Gene"=row.names(a))
a$Gene = as.character(a$Gene)
write.csv(x = a ,file = "Tuft_YvODE_wilcox_genes.csv")
for (i in c("Cecum", "ProxCol", "DistCol")){
  DE.Tuft_Comp = subset(DE.Tuft, cells = which(DE.Tuft@meta.data$Compartment == i))
a=c()
a = FindMarkers(object = DE.Tuft_Comp ,test.use = "wilcox", group.by = 'Age', ident.1 = "Young", ident.2 = "Old")
a = na.omit(a[a$p_val_adj <0.05,] )
a = cbind(a, "Gene"=row.names(a))
a$Gene = as.character(a$Gene)
write.csv(x = a ,file = paste(i,"_Tuft_YvODE_wilcox_genes.csv", sep = ""))
}
DE.EE = subset(Tuft, cells = which(Tuft@active.ident == "EnteroEndocrine_Cell"))
a=c()
a = FindMarkers(object = DE.EE,test.use = "wilcox", group.by = 'Age', ident.1 = "Young", ident.2 = "Old")
a = na.omit(a[a$p_val_adj <0.05,] )
a = cbind(a, "Gene"=row.names(a))
a$Gene = as.character(a$Gene)
write.csv(x = a ,file = "EE_YvODE_wilcox_genes.csv")
for (i in c("Cecum", "ProxCol", "DistCol")){
  DE.EE_Comp = subset(DE.EE, cells = which(DE.EE@meta.data$Compartment == i))
  a=c()
  a = FindMarkers(object = DE.EE_Comp ,test.use = "wilcox", group.by = 'Age', ident.1 = "Young", ident.2 = "Old")
a = na.omit(a[a$p_val_adj <0.05,] )
a = cbind(a, "Gene"=row.names(a))
a$Gene = as.character(a$Gene)
write.csv(x = a ,file = paste(i,"_EE_YvODE_wilcox_genes.csv", sep = ""))
}
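## The FindMarkers/filter/write.csv pattern above is repeated for every cell type
## and compartment in the sections that follow; a small helper like this sketch
## could replace those blocks. It is only an illustrative refactor (the function
## name write_yvo_de is new, and FindMarkers defaults are left unchanged), not
## part of the original analysis.
write_yvo_de <- function(obj, out_file) {
  de <- FindMarkers(object = obj, test.use = "wilcox", group.by = "Age",
                    ident.1 = "Young", ident.2 = "Old")
  de <- na.omit(de[de$p_val_adj < 0.05, ]) # keep only significant genes
  de <- cbind(de, "Gene" = rownames(de))
  de$Gene <- as.character(de$Gene)
  write.csv(de, file = out_file)
  invisible(de)
}
# e.g. write_yvo_de(DE.Tuft, "Tuft_YvODE_wilcox_genes.csv")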
#################################### Overlay Tuft-EE on initial ##############################################
reclust = cbind(SI.integrated@meta.data, "reclust" = c(rep(3,nrow(SI.integrated@meta.data))))
levels(reclust$reclust) = c(1:3)
tenmeans = TuftEE
tenmeans[,2] = as.factor(tenmeans[,2])
row.names(tenmeans) = tenmeans[,1]
tenmeans = tenmeans[row.names(tenmeans) %in% row.names(SI.integrated@meta.data),]
for ( i in 1:nrow(tenmeans)) {
a = tenmeans[i,2]
b = which(row.names(reclust) == row.names(tenmeans)[i])
reclust[b,9] = a
}
Idents(SI.integrated) = reclust$reclust
levels(SI.integrated) = c(1:3)
SI.integrated@meta.data = cbind(SI.integrated@meta.data, "cluster" = SI.integrated@active.ident)
levels(SI.integrated@meta.data$cluster) = c("Enteroendocrine", "Tuft", "Other")
pdf(file = "Tuft-EE-tSNE.pdf")
DimPlot(SI.integrated, reduction = "tsne", cols = c("Red", "Blue", "#E9E9E9" ), group.by = "cluster") + theme(legend.position = "bottom")
dev.off()
#################################### Overlay Stem-TA on initial ##############################################
reclust = cbind(SI.integrated@meta.data, "reclust" = c(rep(5,nrow(SI.integrated@meta.data))))
tenmeans = StemTa
tenmeans[,2] = as.factor(tenmeans[,2])
row.names(tenmeans) = tenmeans[,1]
tenmeans = tenmeans[row.names(tenmeans) %in% row.names(SI.integrated@meta.data),]
for ( i in 1:nrow(tenmeans)) {
a = tenmeans[i,2]
b = which(row.names(reclust) == row.names(tenmeans)[i])
reclust[b,10] = a
}
Idents(SI.integrated) = reclust$reclust
levels(SI.integrated) = c(1:5)
### Remove cluster column if already added by Tuft code
##### SI.integrated@meta.data = SI.integrated@meta.data[,-which(colnames(SI.integrated@meta.data) == "cluster")]
SI.integrated@meta.data = cbind(SI.integrated@meta.data, "cluster" = SI.integrated@active.ident)
levels(SI.integrated@meta.data$cluster) = c("Colonocyte_Precursor", "Goblet_Precursor", "Stem_cell", "TA-cell", "Other")
pdf(file = "Stem-TA-tSNE.pdf")
DimPlot(SI.integrated, reduction = "tsne", cols = c("Red", "Blue", "Green", "Magenta", "#E9E9E9" ), group.by = "cluster") + theme(legend.position = "bottom",legend.title = element_text(size=5))
dev.off()
######################## csv with Stem/TA numbers ################################
### Remove cluster column if already added by Tuft code
##### SI.integrated@meta.data = SI.integrated@meta.data[,-which(colnames(SI.integrated@meta.data) == "cluster")]
Stem = subset(SI.integrated, idents = "Stem-TA")
tenmeans = StemTa
tenmeans[,2] = as.factor(tenmeans[,2])
row.names(tenmeans) = tenmeans[,1]
tenmeans = tenmeans[row.names(tenmeans) %in% row.names(Stem@meta.data),]
Idents(Stem) = tenmeans[,2]
levels(Stem@active.ident) = c("Colonocyte_Precursor", "Goblet_Precursor", "Stem_cell", "TA-cell")
num = cbind(Stem@meta.data, "Cluster" = Stem@active.ident)
num$Cluster = droplevels(num$Cluster)
info = table(num[,c(5,6,9)])
write.csv(info, "Mouse_Stem-TA_Numbers.csv")
##### VLN plots ######
levels(Stem@active.ident) = c(1,2,3,4)
cols = c("Red", "Blue", "Green", "Magenta")
pdf(file= "4_mean_Stem-TA_Violin.pdf")
VlnPlot(Stem, features = c("Lgr5", "Sox4", "Slc12a2","Ascl2","Mki67", "Pcna"), pt.size = 0.001, cols = cols)
VlnPlot(Stem, features = c("Lgr5", "Sox4", "Slc12a2","Ascl2","Mki67", "Pcna"), pt.size = 0,cols = cols)
VlnPlot(Stem, features = c("Dll1", "Atoh1", "Reg4","Muc2","Selenbp1", "Lgals3"), pt.size = 0.001, cols = cols)
VlnPlot(Stem, features = c("Dll1", "Atoh1", "Reg4","Muc2","Selenbp1", "Lgals3"), pt.size = 0,cols = cols)
VlnPlot(Stem, features = c("Slc37a2", "Cyp2c55", "Car1", "Aqp4", "Muc3", "Ggh"), pt.size = 0.001, cols = cols)
VlnPlot(Stem, features = c("Slc37a2", "Cyp2c55", "Car1", "Aqp4", "Muc3", "Ggh"), pt.size = 0,cols = cols)
dev.off()
pdf(file="4_mean_Stem-TA_Ridge.pdf")
RidgePlot(Stem, features = c("Lgr5", "Sox4", "Slc12a2","Ascl2","Mki67", "Pcna"), cols = cols)
RidgePlot(Stem, features = c("Dll1", "Atoh1", "Reg4","Muc2","Selenbp1", "Lgals3"), cols = cols)
RidgePlot(Stem, features = c("Slc37a2", "Cyp2c55", "Car1", "Aqp4", "Muc3", "Ggh"), cols = cols)
dev.off()
## Wilcox
Stem@meta.data$Age[which(Stem@meta.data$Age == "Young_20w")] = "Young"
DE.Stem = subset(Stem, cells = which(Stem@active.ident == "Stem_cell"))
a=c()
a = FindMarkers(object = DE.Stem ,test.use = "wilcox", group.by = 'Age', ident.1 = "Young", ident.2 = "Old")
a = na.omit(a[a$p_val_adj <0.05,] )
a = cbind(a, "Gene"=row.names(a))
a$Gene = as.character(a$Gene)
write.csv(x = a ,file = "Stem_YvODE_wilcox_genes.csv")
for (i in c("Cecum", "ProxCol", "DistCol")){
DE.StemComp = subset(DE.Stem, cells = which(DE.Stem@meta.data$Compartment == i))
a=c()
a = FindMarkers(object = DE.StemComp ,test.use = "wilcox", group.by = 'Age', ident.1 = "Young", ident.2 = "Old")
a = na.omit(a[a$p_val_adj <0.05,] )
a = cbind(a, "Gene"=row.names(a))
a$Gene = as.character(a$Gene)
write.csv(x = a ,file = paste(i,"_Stem_YvODE_wilcox_genes.csv", sep = ""))
}
DE.TA_cell = subset(Stem, cells = which(Stem@active.ident == "TA-cell"))
a=c()
a = FindMarkers(object = DE.TA_cell ,test.use = "wilcox", group.by = 'Age', ident.1 = "Young", ident.2 = "Old")
a = na.omit(a[a$p_val_adj <0.05,] )
a = cbind(a, "Gene"=row.names(a))
a$Gene = as.character(a$Gene)
write.csv(x = a ,file = "TA_cell_YvODE_wilcox_genes.csv")
for (i in c("Cecum", "ProxCol", "DistCol")){
DE.TA_Comp = subset(DE.TA_cell, cells = which(DE.TA_cell@meta.data$Compartment == i))
a=c()
a = FindMarkers(object = DE.TA_Comp ,test.use = "wilcox", group.by = 'Age', ident.1 = "Young", ident.2 = "Old")
a = na.omit(a[a$p_val_adj <0.05,] )
a = cbind(a, "Gene"=row.names(a))
a$Gene = as.character(a$Gene)
write.csv(x = a ,file = paste(i,"_TA_YvODE_wilcox_genes.csv", sep = ""))
}
DE.ColProg = subset(Stem, cells = which(Stem@active.ident == "Colonocyte_Precursor"))
a=c()
a = FindMarkers(object = DE.ColProg ,test.use = "wilcox", group.by = 'Age', ident.1 = "Young", ident.2 = "Old")
a = na.omit(a[a$p_val_adj <0.05,] )
a = cbind(a, "Gene"=row.names(a))
a$Gene = as.character(a$Gene)
write.csv(x = a ,file = "ColProg_YvODE_wilcox_genes.csv")
for (i in c("Cecum", "ProxCol", "DistCol")){
DE.ColProg_Comp = subset(DE.ColProg, cells = which(DE.ColProg@meta.data$Compartment == i))
a=c()
a = FindMarkers(object = DE.ColProg_Comp ,test.use = "wilcox", group.by = 'Age', ident.1 = "Young", ident.2 = "Old")
a = na.omit(a[a$p_val_adj <0.05,] )
a = cbind(a, "Gene"=row.names(a))
a$Gene = as.character(a$Gene)
write.csv(x = a ,file = paste(i,"_ColProg_YvODE_wilcox_genes.csv", sep = ""))
}
DE.GobProg = subset(Stem, cells = which(Stem@active.ident == "Goblet_Precursor"))
a=c()
a = FindMarkers(object = DE.GobProg ,test.use = "wilcox", group.by = 'Age', ident.1 = "Young", ident.2 = "Old")
a = na.omit(a[a$p_val_adj <0.05,] )
a = cbind(a, "Gene"=row.names(a))
a$Gene = as.character(a$Gene)
write.csv(x = a ,file = "GobProg_YvODE_wilcox_genes.csv")
for (i in c("Cecum", "ProxCol", "DistCol")){
  DE.GobProg_Comp = subset(DE.GobProg, cells = which(DE.GobProg@meta.data$Compartment == i))
a=c()
a = FindMarkers(object = DE.GobProg_Comp ,test.use = "wilcox", group.by = 'Age', ident.1 = "Young", ident.2 = "Old")
a = na.omit(a[a$p_val_adj <0.05,] )
a = cbind(a, "Gene"=row.names(a))
a$Gene = as.character(a$Gene)
write.csv(x = a ,file = paste(i,"_GobProg_YvODE_wilcox_genes.csv", sep = ""))
}
############ Separate tSNE by sample / age / batch ##################
tenmeans = Initial
tenmeans[,2] = as.factor(tenmeans[,2])
row.names(tenmeans) = tenmeans[,1]
tenmeans = tenmeans[row.names(tenmeans) %in% row.names(SI.integrated@meta.data),]
Idents(SI.integrated) = tenmeans[,2]
levels(SI.integrated@active.ident) = c("Colonocyte_1", "Goblet", "Colonocyte_2", "Tuft-EE", "Stem-TA")
pdf(file = "Separate_Sample_tSNE.pdf")
DimPlot(SI.integrated, reduction = "tsne", split.by = "Sample", ncol = 3, pt.size = 0.01) + theme(legend.position = "bottom")
dev.off()
SI.integrated@meta.data$Age[which(SI.integrated@meta.data$Age == "Young_20w")] = "Young"
pdf(file = "Separate_Age_tSNE.pdf")
DimPlot(SI.integrated, reduction = "tsne", split.by = "Age", ncol = 3, pt.size = 0.01) + theme(legend.position = "bottom")
dev.off()
SI.integrated@meta.data$Batch = 1
SI.integrated@meta.data$Batch[SI.integrated@meta.data$Sample %in% c("Old_1","Old_2", "Old_3", "Young_1", "Young_2", "Young_3")] = 2
pdf(file = "Separate_Experiment_tSNE.pdf")
DimPlot(SI.integrated, reduction = "tsne", split.by = "Batch", ncol = 2, pt.size = 0.01) + theme(legend.position = "bottom", lab) + labs(title = "Batch")
dev.off()
######################## Colonocyte by compartment ################################
Colonocyte = subset(SI.integrated, idents = c("Colonocyte_1","Colonocyte_2"))
Idents(Colonocyte)= Colonocyte@meta.data$Compartment
Colonocyte@meta.data$Compartment = factor(Colonocyte@meta.data$Compartment, levels = c("Cecum", "ProxCol", "DistCol"))
pdf(file = "Colonocyte_TSNE.pdf")
DimPlot(Colonocyte, reduction = "tsne", pt.size = 0.1, group.by = "Compartment")
dev.off()
Colonocyte.markers <- FindAllMarkers(Colonocyte, only.pos = F, min.pct = 0.1, logfc.threshold = 0.1)
Colonocyte.markers %>% group_by(cluster) %>% top_n(n = 5, wt = avg_logFC)
top10 <- Colonocyte.markers %>% group_by(cluster) %>% top_n(n = 10, wt = avg_logFC)
write.csv(top10, file= "Colonocyte_compartment_genes.csv")
Colonocyte@meta.data$Compartment = factor(Colonocyte@meta.data$Compartment, levels = c("Cecum", "ProxCol", "DistCol"))
col.plot = c("Red", "Black", "Blue")
pdf(file = "Heatmap_Colonocyte1.pdf" )
plot = DoHeatmap(Colonocyte, features = top10$gene, size=2, angle=0, group.by = "Compartment") + NoLegend() + scale_fill_gradient2( low = "Blue", mid = "Black", high = "Red", midpoint = 0, guide = "colourbar", aesthetics = "fill") +theme(axis.text = element_text(size=5))
print(plot)
dev.off()
Colonocyte= NormalizeData(Colonocyte, verbose = F)
Colonocyte = FindVariableFeatures(Colonocyte, verbose = F)
Colonocyte = ScaleData(Colonocyte, features = row.names(SI.Seurat@assays$RNA@data), verbose = F)
Colonocyte = RunPCA(Colonocyte, features = row.names(SI.Seurat@assays$RNA@data), verbose = F)
Colonocyte = RunTSNE(Colonocyte, dims = 1:30)
pdf(file = "Colonocyte_TSNE1.pdf")
DimPlot(Colonocyte, reduction = "tsne", pt.size = 0.1, group.by = "Compartment")
dev.off()
Colonocyte = FindNeighbors(Colonocyte, dims = 1:10)
Colonocyte = FindClusters(Colonocyte, resolution = 0.035)
pdf(file = paste("Graph_based_Colonocyte.pdf", sep = "") )
plot = DimPlot(Colonocyte, reduction = "tsne")
print(plot)
dev.off()
Colonocyte.markers <- FindAllMarkers(Colonocyte, only.pos = F, min.pct = 0.1, logfc.threshold = 0.1)
Colonocyte.markers %>% group_by(cluster) %>% top_n(n = 5, wt = avg_logFC)
top10 <- Colonocyte.markers %>% group_by(cluster) %>% top_n(n = 10, wt = avg_logFC)
col.plot = c("Red", "Black", "Blue")
pdf(file = "Heatmap_Colonocyte_Graph.pdf" )
plot = DoHeatmap(Colonocyte, features = top10$gene, size=2, angle=0) + NoLegend() + scale_fill_gradient2( low = "Blue", mid = "Black", high = "Red", midpoint = 0, guide = "colourbar", aesthetics = "fill") +theme(axis.text = element_text(size=5))
print(plot)
dev.off()
Colonocyte_clean = subset(Colonocyte, idents=c(0,1))
pdf(file = "Colonocyte_clean_TSNE.pdf")
DimPlot(Colonocyte_clean, reduction = "tsne", pt.size = 0.1, group.by = "Compartment")
dev.off()
Colonocyte_clean = FindNeighbors(Colonocyte_clean, dims = 1:10)
Colonocyte_clean = FindClusters(Colonocyte_clean, resolution = 0.03)
pdf(file = paste("Graph_based_Colonocyte_clean.pdf", sep = "") )
plot = DimPlot(Colonocyte_clean, reduction = "tsne")
print(plot)
dev.off()
identities = cbind(Colonocyte_clean@meta.data, "Test" = Colonocyte_clean@active.ident)
levels(identities$Test) = c(0,1,"A","B")
Colonocyte_zero = subset(Colonocyte_clean, idents=0)
Colonocyte_zero = FindNeighbors(Colonocyte_zero, dims = 1:10)
Colonocyte_zero = FindClusters(Colonocyte_zero, resolution = 0.03)
levels(Colonocyte_zero@active.ident) = c("A", "B")
pdf(file = paste("Graph_based_Colonocyte_zero.pdf", sep = "") )
plot = DimPlot(Colonocyte_zero, reduction = "tsne")
print(plot)
dev.off()
a = cbind(Colonocyte_zero@meta.data, "test" = Colonocyte_zero@active.ident)
for (i in 1:nrow(a)) {
b = which(row.names(a)[i] == row.names(identities))
identities$Test[b] = a$test[i]
}
identities$Test = droplevels(identities$Test)
Idents(Colonocyte_clean) = identities$Test
pdf(file = paste("Graph_based_Colonocyte_Merge.pdf", sep = "") )
plot = DimPlot(Colonocyte_clean, reduction = "tsne")
print(plot)
dev.off()
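# Marker genes for the three merged clusters; relabel each cluster by the compartment it is enriched in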
Colonocyte_clean.markers <- FindAllMarkers(Colonocyte_clean, only.pos = F, min.pct = 0.1, logfc.threshold = 0.1)
Colonocyte_clean.markers %>% group_by(cluster) %>% top_n(n = 5, wt = avg_logFC)
top10 <- Colonocyte_clean.markers %>% group_by(cluster) %>% top_n(n = 10, wt = avg_logFC)
col.plot = c("Red", "Black", "Blue")
Colonocyte_clean@meta.data = cbind(Colonocyte_clean@meta.data, "Clust" = Colonocyte_clean@active.ident)
levels(Colonocyte_clean@meta.data$Clust) = c("Cecum-Specific","DistCol-Specific", "ProxCol-Specific")
Colonocyte_clean@meta.data$Clust = factor(Colonocyte_clean@meta.data$Clust, levels = c("Cecum-Specific", "ProxCol-Specific","DistCol-Specific"))
genes = top10$gene[c(1:10,21:30,11:20)]
pdf(file = "Heatmap_Colonocyte_Graph_merge.pdf" )
plot = DoHeatmap(Colonocyte_clean, features = genes, size=2, angle=0, group.by = "Clust") + NoLegend() + scale_fill_gradient2( low = "Blue", mid = "Black", high = "Red", midpoint = 0, guide = "colourbar", aesthetics = "fill") +theme(axis.text = element_text(size=5))
print(plot)
dev.off()
pdf("3colonocytes.pdf")
DimPlot(Colonocyte_clean, reduction = "tsne", group.by = "Clust")
DimPlot(Colonocyte_clean, reduction = "tsne", group.by = "Compartment")
dev.off()
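# Export counts of the three compartment-enriched clusters across the recorded metadata groupings, plus each cell's cluster assignment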
write.csv(table(Colonocyte_clean@meta.data[,c(5,6,12)]), file = "3colonocytes_numbers.csv")
a = Colonocyte_clean@meta.data[,c(1,12)]
write.csv(a, file="3colonocytes_each_cell.csv")
######################## Colonocyte Y v O DEG's ##########################
#For Wilcox
Colonocyte_clean@meta.data$Age[which(Colonocyte_clean@meta.data$Age == "Young_20w")] = "Young"
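# Young vs Old differential expression (Wilcoxon) within each compartment's cleaned colonocytes, keeping genes with adjusted p < 0.05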
DE.cecum = subset(Colonocyte_clean, cells = which(Colonocyte_clean@meta.data$Compartment == "Cecum"))
a=c()
a = FindMarkers(object = DE.cecum,test.use = "wilcox", group.by = 'Age', ident.1 = "Young", ident.2 = "Old")
a = na.omit(a[a$p_val_adj <0.05,] )
a = cbind(a, "Gene"=row.names(a))
a$Gene = as.character(a$Gene)
write.csv(x = a ,file = "Cecum_enrich_colonocyte_YvODE_wilcox_genes.csv")
DE.ProxCol = subset(Colonocyte_clean, cells = which(Colonocyte_clean@meta.data$Compartment == "ProxCol"))
a=c()
a = FindMarkers(object = DE.ProxCol,test.use = "wilcox", group.by = 'Age', ident.1 = "Young", ident.2 = "Old")
a = na.omit(a[a$p_val_adj <0.05,] )
a = cbind(a, "Gene"=row.names(a))
a$Gene = as.character(a$Gene)
write.csv(x = a ,file = "ProxCol_enrich_colonocyte_YvODE_wilcox_genes.csv")
DE.DistCol = subset(Colonocyte_clean, cells = which(Colonocyte_clean@meta.data$Compartment == "DistCol"))
a=c()
a = FindMarkers(object = DE.DistCol,test.use = "wilcox", group.by = 'Age', ident.1 = "Young", ident.2 = "Old")
a = na.omit(a[a$p_val_adj <0.05,] )
a = cbind(a, "Gene"=row.names(a))
a$Gene = as.character(a$Gene)
write.csv(x = a ,file = "DistCol_enrich_colonocyte_YvODE_wilcox_genes.csv")
######################## Colonocyte by compartment enrichment - on initial clust################################
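# Project the three colonocyte cluster labels back onto the full-dataset tSNE; every other cell defaults to level 4 ("Other")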
reclust = cbind(SI.integrated@meta.data, "reclust" = c(rep(4,nrow(SI.integrated@meta.data))))
levels(reclust$reclust) = c(1:4)
tenmeans = data.frame("Cluster" = Colonocyte_clean@meta.data$Clust)
row.names(tenmeans) = row.names(Colonocyte_clean@meta.data)
for ( i in 1:nrow(tenmeans)) {
a = tenmeans[i,1]
b = which(row.names(reclust) == row.names(tenmeans)[i])
reclust[b,9] = a
}
Idents(SI.integrated) = reclust$reclust
levels(SI.integrated) = c(1:4)
SI.integrated@meta.data = cbind(SI.integrated@meta.data, "cluster" = SI.integrated@active.ident)
levels(SI.integrated@meta.data$cluster) = c("Cecum-enriched", "ProxCol-enriched", "DistCol-enriched", "Other")
pdf(file = "3colonocyte-on-initial.pdf")
DimPlot(SI.integrated, reduction = "tsne", cols = c("Red","Green", "Blue", "#E9E9E9" ), group.by = "cluster")+ theme(legend.position = "bottom")
dev.off()
########### YvO all cells
a=c()
a = FindMarkers(object = SI.integrated,test.use = "wilcox", group.by = 'Age', ident.1 = "Young", ident.2 = "Old")
a = na.omit(a[a$p_val_adj <0.05,] )
a = cbind(a, "Gene"=row.names(a))
a$Gene = as.character(a$Gene)
write.csv(x = a ,file = "AllCells_YvODE_wilcox_genes.csv")
############# Goblet ridge plots
pdf(file= "Goblet_Ridge.pdf")
RidgePlot(SI.integrated, features = c("Muc2", "Agr2"), group.by="Age", idents = "Goblet")
RidgePlot(SI.integrated, features = c("Muc2"), group.by="Age")
RidgePlot(SI.integrated, features = c("Agr2"), group.by="Age")
dev.off()
SI.comp = subset(SI.integrated, cells = which(SI.integrated@meta.data$Compartment == "DistCol"))
SI.comp@meta.data$Age[which(SI.comp@meta.data$Age == "Young_20w")] = "Young"
pdf(file= "Goblet_Ridge_DistCol.pdf")
VlnPlot(SI.comp, features = c("Muc2", "Agr2"), group.by="Age", idents = "Goblet", pt.size = 1)
dev.off()
############## Colonocytes all together - common markers ########
SI.Colonocyte = SI.integrated
SI.Colonocyte@meta.data = cbind(SI.Colonocyte@meta.data, "Clust" = SI.Colonocyte@active.ident)
SI.Colonocyte@meta.data$Clust[which(SI.Colonocyte@meta.data$Clust == "Colonocyte_2")] = "Colonocyte_1"
SI.Colonocyte@meta.data$Clust = droplevels(SI.Colonocyte@meta.data$Clust)
levels(SI.Colonocyte@meta.data$Clust) = c("Colonocyte", "Goblet", "Tuft-EE", "Stem-TA")
Idents(SI.Colonocyte) = SI.Colonocyte@meta.data$Clust
SI.Colonocyte.markers <- FindAllMarkers(SI.Colonocyte, only.pos = F, min.pct = 0.1, logfc.threshold = 0.1)
SI.Colonocyte.markers %>% group_by(cluster) %>% top_n(n = 5, wt = avg_logFC)
top10 <- SI.Colonocyte.markers %>% group_by(cluster) %>% top_n(n = 10, wt = avg_logFC)
write.csv(top10, file= "Merged_Colonocyte_genes.csv")
col.plot = c("Red", "Black", "Blue")
pdf(file = "Heatmap_Merged_Colonocyte.pdf" )
plot = DoHeatmap(SI.Colonocyte, features = top10$gene, size=2, angle=0) + NoLegend() + scale_fill_gradient2( low = "Blue", mid = "Black", high = "Red", midpoint = 0, guide = "colourbar", aesthetics = "fill") +theme(axis.text = element_text(size=5))
print(plot)
dev.off()
pdf(file= "Colonocyte_Vln.pdf")
VlnPlot(SI.Colonocyte, features = c("Alpi", "Slc2a1", "Car2", "Krt20", "Slc26a3"), group.by="Clust", pt.size = 0.1)
VlnPlot(SI.Colonocyte, features = c("Alpi", "Slc2a1", "Car2", "Krt20", "Slc26a3"), group.by="Clust", pt.size = 0)
dev.off()
pdf(file="Colonocyte_YvO_Vln.pdf")
VlnPlot(SI.Colonocyte, features = c("Alpi", "Slc2a1", "Car2", "Krt20", "Slc26a3"), group.by="Age", idents = "Colonocyte", pt.size = 0)
dev.off()
|
/R-scripts/Epithelium/Batch-2/recalc.R
|
no_license
|
QYD720/Aging_Colon_Atlas
|
R
| false | false | 26,050 |
r
|
|
### This script makes supplementary figures for genome-wide statistics ###
load_pkgs <- function(pkgs){
new_pkgs <- pkgs[!(pkgs %in% installed.packages()[, 'Package'])]
if(length(new_pkgs)) install.packages(new_pkgs)
for(pkg in pkgs){
suppressWarnings(suppressMessages(library(pkg, character.only = T)))
}
}
pkgs <- c('dplyr', 'tidyr', 'ggplot2', 'cowplot')
load_pkgs(pkgs)
options(stringsAsFactors = F, warn = -1, warnings = -1, scipen = 10000)
plot_format <- '.tiff'
data <- read.table('../../processed_data/snv/snv_data_clean.txt',
sep = '\t', header = T)
###############################################################################
# Conservation of in-frame vs. out-of-frame exons (whole genome)
###############################################################################
# release 89 5/17 based on hg38
ensembl <- biomaRt::useMart('ENSEMBL_MART_ENSEMBL', dataset = 'hsapiens_gene_ensembl')
attributes <- c('ensembl_gene_id', 'description', 'chromosome_name',
'start_position', 'end_position', 'strand', 'ensembl_transcript_id',
'transcript_start', 'transcript_end', 'ensembl_exon_id',
'exon_chrom_start', 'exon_chrom_end', 'is_constitutive', 'rank',
'phase', 'end_phase')
all_exon_ids <- biomaRt::getBM(attributes = attributes, mart = ensembl)
# calculate various coordinates necessary to determine relative scaled position
# of SNVs within intron/exon
all_exon_ids <- all_exon_ids %>%
mutate(intron1_end = exon_chrom_start - 1,
intron1_start = intron1_end - 99,
intron2_start = exon_chrom_end + 1,
intron2_end = intron2_start + 99,
upstr_intron_start = ifelse(strand == 1, intron1_start, intron2_start),
upstr_intron_end = ifelse(strand == 1, intron1_end, intron2_end),
downstr_intron_start = ifelse(strand == 1, intron2_start, intron1_start),
downstr_intron_end = ifelse(strand == 1, intron2_end, intron1_end),
upstr_intron_len = upstr_intron_end - upstr_intron_start + 1,
downstr_intron_len = downstr_intron_end - downstr_intron_start + 1,
exon_len = exon_chrom_end - exon_chrom_start + 1)
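# Classify exons by phase: exons that both start and end in phase 0 preserve the reading frame when skipped ("inframe"); any other phase combination is "outframe". Exons with phase -1 (boundary outside the CDS) are dropped.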
inframe_outframe_exons <- all_exon_ids %>%
filter(phase != -1 & end_phase != -1) %>%
mutate(exon_type = case_when(.$phase == 0 & .$end_phase == 0 ~ 'inframe',
TRUE ~ 'outframe')) %>%
distinct(ensembl_exon_id, .keep_all = T)
# let's get upstream intron coordinates first. We will generate a bed file
# containing position of each nucleotide in range so we can get conservation per
# nucleotide
inframe_outframe_exons %>%
filter(chromosome_name %in% c(1:22, 'X', 'Y')) %>%
mutate(chromosome_name = paste0('chr', chromosome_name)) %>%
select(chromosome_name, upstr_intron_start, upstr_intron_end, ensembl_exon_id) %>%
group_by(ensembl_exon_id) %>%
mutate(position = list(seq(upstr_intron_start, upstr_intron_end, by = 1))) %>%
unnest(position) %>%
ungroup() %>%
mutate(id = paste(ensembl_exon_id, position, sep = '_'),
start = position,
end = position + 1) %>%
select(chromosome_name, start, end, id) %>%
write.table(file = '../../processed_data/snv/nat_upstr_intron_positions.bed',
sep = '\t', col.names = F, row.names = F, quote = F)
system(paste('bash',
'../run_phastCons.sh',
'../../processed_data/snv/nat_upstr_intron_positions.bed',
'../../processed_data/snv/nat_upstr_intron_cons_scores_all.bed'))
# downstream intron
inframe_outframe_exons %>%
filter(chromosome_name %in% c(1:22, 'X', 'Y')) %>%
mutate(chromosome_name = paste0('chr', chromosome_name)) %>%
select(chromosome_name, downstr_intron_start, downstr_intron_end, ensembl_exon_id) %>%
group_by(ensembl_exon_id) %>%
mutate(position = list(seq(downstr_intron_start,downstr_intron_end, by = 1))) %>%
unnest(position) %>%
ungroup() %>%
mutate(id = paste(ensembl_exon_id, position, sep = '_'),
start = position,
end = position + 1) %>%
select(chromosome_name, start, end, id) %>%
write.table(file = '../../processed_data/snv/nat_downstr_intron_positions.bed',
sep = '\t', col.names = F, row.names = F, quote = F)
system(paste('bash',
'../run_phastCons.sh',
'../../processed_data/snv/nat_downstr_intron_positions.bed',
'../../processed_data/snv/nat_downstr_intron_cons_scores_all.bed'))
# exon
inframe_outframe_exons %>%
filter(chromosome_name %in% c(1:22, 'X', 'Y')) %>%
mutate(chromosome_name = paste0('chr', chromosome_name)) %>%
select(chromosome_name, exon_chrom_start, exon_chrom_end, ensembl_exon_id) %>%
group_by(ensembl_exon_id) %>%
mutate(position = list(seq(exon_chrom_start,exon_chrom_end, by = 1))) %>%
unnest(position) %>%
ungroup() %>%
mutate(id = paste(ensembl_exon_id, position, sep = '_'),
start = position,
end = position + 1) %>%
select(chromosome_name, start, end, id) %>%
write.table(file = '../../processed_data/snv/nat_exon_positions.bed',
sep = '\t', col.names = F, row.names = F, quote = F)
system(paste('bash',
'../run_phastCons.sh',
'../../processed_data/snv/nat_exon_positions.bed',
'../../processed_data/snv/nat_exon_cons_scores_all.bed'))
# it takes awhile to read in the conservation score files and calculate summary,
# so we'll only do this if they don't already exist
if(!file.exists('../../processed_data/snv/nat_upstr_cons_summary.rds')){
nat_upstr_cons <- read.table('../../processed_data/snv/nat_upstr_intron_cons_scores_all.bed',
sep = '\t', header = F,
col.names = c('name', 'size', 'bases_covered',
'snp_sum', 'mean0', 'mean_cons_score')) %>%
filter(bases_covered != 0) %>%
tidyr::separate(name, into = c('ensembl_id', 'position'), sep = '_', remove = T) %>%
select(-(size:mean0)) %>%
left_join(select(inframe_outframe_exons, ensembl_exon_id, upstr_intron_len,
upstr_intron_start, upstr_intron_end, strand, exon_type),
by = c('ensembl_id' = 'ensembl_exon_id')) %>%
arrange(ensembl_id, position) %>%
mutate(position = as.numeric(position),
rel_position = ifelse(strand == 1,
upstr_intron_end - position,
position - upstr_intron_start),
# upstream intron, keep them all negative,
rel_position = -1 * rel_position,
rel_position_scaled = rel_position / upstr_intron_len,
rel_pos_binned = cut(rel_position_scaled, breaks = seq(-1, 0, 0.01))) %>%
group_by(rel_pos_binned, exon_type) %>%
summarise(mean_cons_per_rel_pos = mean(mean_cons_score, na.rm = T))
# save as RDS so levels in factor variable are saved correctly
saveRDS(nat_upstr_cons, '../../processed_data/snv/nat_upstr_cons_summary.rds')
} else{
nat_upstr_cons <- readRDS('../../processed_data/snv/nat_upstr_cons_summary.rds')
}
if(!file.exists('../../processed_data/snv/nat_downstr_cons_summary.rds')) {
nat_downstr_cons <- read.table('../../processed_data/snv/nat_downstr_intron_cons_scores_all.bed',
sep = '\t', header = F,
col.names = c('name', 'size', 'bases_covered',
'snp_sum', 'mean0', 'mean_cons_score')) %>%
filter(bases_covered != 0) %>%
tidyr::separate(name, into = c('ensembl_id', 'position'), sep = '_', remove = T) %>%
select(-(size:mean0)) %>%
left_join(select(inframe_outframe_exons, ensembl_exon_id, downstr_intron_len,
downstr_intron_start, downstr_intron_end, strand, exon_type),
by = c('ensembl_id' = 'ensembl_exon_id')) %>%
arrange(ensembl_id, position) %>%
mutate(position = as.numeric(position),
rel_position = ifelse(strand == 1,
position - downstr_intron_start,
downstr_intron_end - position),
rel_position_scaled = 1 + (rel_position / downstr_intron_len),
rel_pos_binned = cut(rel_position_scaled, breaks = seq(1, 2, 0.01),
include.lowest = T)) %>%
group_by(rel_pos_binned, exon_type) %>%
summarise(mean_cons_per_rel_pos = mean(mean_cons_score, na.rm = T))
saveRDS(nat_downstr_cons, '../../processed_data/snv/nat_downstr_cons_summary.rds')
} else {
nat_downstr_cons <- readRDS('../../processed_data/snv/nat_downstr_cons_summary.rds')
}
if(!file.exists('../../processed_data/snv/nat_exon_cons_summary.rds')) {
nat_exon_cons <- read.table('../../processed_data/snv/nat_exon_cons_scores_all.bed',
sep = '\t', header = F,
col.names = c('name', 'size', 'bases_covered',
'snp_sum', 'mean0', 'mean_cons_score')) %>%
filter(bases_covered != 0) %>%
tidyr::separate(name, into = c('ensembl_id', 'position'), sep = '_', remove = T) %>%
select(-(size:mean0)) %>%
left_join(select(inframe_outframe_exons, ensembl_exon_id, exon_len,
exon_chrom_start, exon_chrom_end, strand, exon_type),
by = c('ensembl_id' = 'ensembl_exon_id')) %>%
arrange(ensembl_id, position) %>%
mutate(position = as.numeric(position),
rel_position = ifelse(strand == 1,
position - exon_chrom_start,
exon_chrom_end - position),
rel_position_scaled = rel_position / exon_len,
rel_pos_binned = cut(rel_position_scaled, breaks = seq(0, 1, 0.01),
include.lowest = T)) %>%
group_by(rel_pos_binned, exon_type) %>%
summarise(mean_cons_per_rel_pos = mean(mean_cons_score, na.rm = T))
saveRDS(nat_exon_cons, '../../processed_data/snv/nat_exon_cons_summary.rds')
} else {
nat_exon_cons <- readRDS('../../processed_data/snv/nat_exon_cons_summary.rds')
}
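# Combine the upstream-intron, exon and downstream-intron summaries into one track ordered along the scaled position axis (-1..0 upstream intron, 0..1 exon, 1..2 downstream intron)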
nat_cons <- bind_rows(nat_upstr_cons, nat_exon_cons, nat_downstr_cons)
nat_cons$rel_pos_binned <- factor(nat_cons$rel_pos_binned, levels = unique(nat_cons$rel_pos_binned))
nat_cons$exon_type <- factor(nat_cons$exon_type)
levels(nat_cons$exon_type) <- c('phase (0-0)', 'other phases')
# natural conservation between in-frame and out-of-frame exons, genome-wide
ggplot(nat_cons, aes(rel_pos_binned, mean_cons_per_rel_pos, color = exon_type)) +
geom_point() + scale_color_manual(values = c('black', 'red')) +
theme(axis.text.x = element_blank(), axis.ticks.x = element_blank()) +
theme(legend.position = c(0.85, 0.80)) +
labs(x = '',
y = 'average\nphastCons score', color = 'exon phase') +
scale_y_continuous(breaks = c(0, 0.2, 0.4, 0.6, 0.8, 1)) +
coord_cartesian(ylim=c(0, 1)) +
theme(legend.title = element_blank(),
legend.position = c(0.675, 0.7),
axis.title.x = element_text(size = 14),
axis.text.x = element_blank(),
axis.text.y = element_text(size = 10, color = 'grey20'),
axis.ticks.x = element_blank(),
axis.ticks.y = element_line(color = 'grey50'),
axis.line.x = element_line(color = 'grey50'),
axis.line.y = element_line(color = 'grey50'),
legend.text = element_text(size = 12))
ggsave(paste0('../../figs/supplement/SF8_genome_exon_cons_inframe_vs_outframe', plot_format),
height = 4, width = 5, unit = 'in')
save.image("../../processed_data/snv/snv_intron_cons.RData")
|
/scripts/supplement/SF8_inframe_outframe_cons.R
|
no_license
|
KosuriLab/MFASS
|
R
| false | false | 12,069 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compare.R
\name{any_local_behind_lockfile}
\alias{any_local_behind_lockfile}
\title{check if any local packages are behind lockfile}
\usage{
any_local_behind_lockfile(
lockfile_path = "./renv.lock",
dep_source_paths = NULL
)
}
\arguments{
\item{lockfile_path}{a length one character vector path of the lockfile to compare against}
\item{dep_source_paths}{a character vector of file paths to extract
package dependencies from. If NULL (default) the whole local library is compared.}
}
\value{
TRUE if dev packages are behind lockfile, FALSE otherwise.
}
\description{
A wrapper for \link{get_local_behind_lockfile} that returns TRUE if any
dependencies found in \code{dep_source_paths} are behind the lockfile version in
\code{lockfile_path}
}
\seealso{
Other comparisons:
\code{\link{compare_local_to_lockfile}()},
\code{\link{get_local_behind_lockfile}()}
}
\concept{comparisons}
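% Illustrative usage sketch, not roxygen-generated: it assumes a project with an
% renv.lock at the default path, and "./packages.R" is a hypothetical source file.
\examples{
\dontrun{
# TRUE if any locally installed dependency is behind its lockfile version
any_local_behind_lockfile()

# restrict the dependency scan to specific source files
any_local_behind_lockfile(dep_source_paths = "./packages.R")
}
}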
|
/man/any_local_behind_lockfile.Rd
|
permissive
|
MilesMcBain/capsule
|
R
| false | true | 950 |
rd
|
|
context("ld annotation")
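# Ten days of 1-minute observations for one toy individual; the LD annotation layer should yield identical built data whether the x axis uses hours, days or a plain continuous scale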
test_that("annotations work", {
t <- mins(0: 60 * 24 * 10)
dt <- behavr(data.table(id=1, t=t, x=runif(length(t)), key="id"),
data.table(id=1,key="id"))
dt
pl <- ggetho(dt, aes(t,x)) + stat_ld_annotations()
plh <- pl + scale_x_hours()
pld <- pl + scale_x_days()
plc <- pl + scale_x_continuous()
testthat::expect_equal(ggplot_build(pld)$data[[1]], ggplot_build(plh)$data[[1]])
testthat::expect_equal(ggplot_build(plc)$data[[1]], ggplot_build(plh)$data[[1]])
pld
pl <- ggetho(dt, aes(t,x)) + stat_ld_annotations(colour=NA)
pl
})
test_that("annotations work with unbound limits", {
t <- mins(0: 60 * 24 * 10)
dt <- behavr(data.table(id=1, t=t, x=runif(length(t)), key="id"),
data.table(id=1,key="id"))
dt
pl <- ggetho(dt, aes(t,x)) + stat_ld_annotations(x_limits = c(days(2.5), NA)) + geom_point()
pl
pl <- ggetho(dt, aes(t,x)) + stat_ld_annotations(x_limits = c(NA, days(3.4))) + geom_point()
pl
pl <- ggetho(dt, aes(t,x)) + stat_ld_annotations(x_limits = c(days(1), days(3.4))) + geom_point()
pl
pl <- ggetho(dt, aes(t,x)) + stat_ld_annotations(x_limits = c(days(10), days(3.4))) + geom_point()
testthat::expect_error(print(pl), "limits are not in order")
})
#
# library(ggetho)
# metadata <- data.table(id=sprintf("toy_experiment|%02d" , 1:40), region_id=1:40,
# condition=c("A","B"),
# sex=c("M","M", "F", "F"))
# head(metadata)
#
# dt <- toy_activity_data(metadata, seed=107)
#
# pl <- ggetho(dt, aes(x=t, y=moving)) +
# stat_ld_annotations(phase = hours(-16)) +
# stat_pop_etho()
# pl
#
# pl <- ggetho(dt, aes(x=t, y=moving)) +
# stat_ld_annotations(phase = hours(0)) +
# stat_pop_etho()
# pl
#
#
# pl <- ggetho(dt, aes(x=t, y=moving)) +
# stat_ld_annotations(phase = hours(-1)) +
# stat_pop_etho()
# pl
#
#
# pl <- ggetho(dt, aes(x=t, y=moving)) +
# stat_ld_annotations(phase = hours(+1)) +
# stat_pop_etho()
# pl
#
#
# pl <- ggetho(dt, aes(x=t, y=moving)) +
# stat_ld_annotations(l_duration = hours(16)) +
# stat_pop_etho()
# pl
#
|
/tests/testthat/test-stat_ld_annotation.R
|
no_license
|
rethomics/ggetho
|
R
| false | false | 2,117 |
r
|
|
# 1. from the original detailed polygons, create simplified districts, regions and the state
# 2. from the outline of the state, create the large and small grid squares
# 3. the Vltava in Prague & the rivers of the city of Brno
# the internal data are saved for future processing
library(tidyverse)
library(devtools)
library(sf)
library(tidyverse)
library(RCzechia)
# low res polygons
source("./data-raw/lo-res-polygons.R")
# faunistic grid squares (large and small)
source("./data-raw/ctverce-a-ctverecky.R")
# city rivers
source("./data-raw/reky_mesta.R")
# use data - internal (= private)
use_data(okresy_low_res,
kraje_low_res,
republika_low_res,
faunisticke_ctverce,
faunisticke_ctverecky,
reky_brno,
reky_praha,
internal = T,
overwrite = T
)
|
/data-raw/use-data.R
|
no_license
|
gzitzlsb-it4i/RCzechia
|
R
| false | false | 712 |
r
|
# 1. from the original detailed polygons, create simplified districts, regions and the state
# 2. from the outline of the state, create the large and small grid squares
# 3. the Vltava in Prague & the rivers of the city of Brno
# the internal data are saved for future processing
library(tidyverse)
library(devtools)
library(sf)
library(tidyverse)
library(RCzechia)
# low res polygons
source("./data-raw/lo-res-polygons.R")
# faunistic grid squares (large and small)
source("./data-raw/ctverce-a-ctverecky.R")
# city rivers
source("./data-raw/reky_mesta.R")
# use data - internal (= private)
use_data(okresy_low_res,
kraje_low_res,
republika_low_res,
faunisticke_ctverce,
faunisticke_ctverecky,
reky_brno,
reky_praha,
internal = T,
overwrite = T
)
|
## GENERIC METHOD DEFINITIONS
##
## AUTHOR: BRIAN M. BOT
#####
# setGeneric(
# name = "getRepo",
# def = function(repository, ...){
# standardGeneric("getRepo")
# }
# )
|
/R/AllGenerics.R
|
no_license
|
brian-bot/mlbstats
|
R
| false | false | 180 |
r
|
## GENERIC METHOD DEFINITIONS
##
## AUTHOR: BRIAN M. BOT
#####
# setGeneric(
# name = "getRepo",
# def = function(repository, ...){
# standardGeneric("getRepo")
# }
# )
|
library(shiny)
# Define UI for application that draws a histogram
shinyUI(fluidPage(
# Application title
titlePanel("Interactive two-dimensional K-means clustering and the Elbow Method"),
# headerPanel("title"", windowTitle = title)
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
textInput('kmseed', "Set a seed to be able to reproduce your results", value = "123"),
selectInput('xcol', 'Variable one', names(mtcars),
selected = names(mtcars)[[4]]),
selectInput('ycol', 'Variable two', names(mtcars),
selected = names(mtcars)[[7]]),
sliderInput('clusters', 'Number of clusters', min = 1, max = 9,
value = 3, ticks = TRUE,
animate = TRUE, round = TRUE)
),
mainPanel(
tabsetPanel(
tabPanel("Documentation", HTML("This app will enable you to interactively K-means cluster the mtcars dataset,
                                            two dimensions at a time. <br> <br> Below you'll find instructions for the use of
the app and the interpretation of what is shown.<br> <br>
                                            In the <strong>input bar</strong> on the left you can select two dimensions of the mtcars dataset, as
well as a randomisation seed. The app will run a K-means clustering algorithm on the
dataset with just the two selected variables, initialized with the selected seed. The K-means algorithm
                                            is not deterministic by nature. This means that, especially with many clusters, the results
can be different with a different initialization. Preserving the seed enables you to reproduce the
exact same values in two different runs. <br> <br>
In the <strong>Results</strong> tab of this app you'll find a graphical representation of the results of the clustering. <br>
                                            The first plot shows the data with the two selected variables on the x and y axes. The
center positions of the clusters are indicated with big X's. The data points that belong to
the same cluster share the same color. <br>
The second plot is the so-called Elbow plot, which helps to select the right number of clusters for the
                                            data selection at hand. The current number of clusters is indicated with a big red dot. The plot shows
                                            the total within-cluster sum of squares against the number of clusters. This number decreases with the
                                            number of clusters, but at some point, the gain is not 'worth' the increased complexity of the model anymore. This
is where the plot shows an angle, or 'elbow', indicating the ideal number of clusters. As you can see, for most variable
combinations in this data set the ideal number of clusters is 2 or 3.")),
tabPanel("Results", plotOutput("plot1"), plotOutput("plot2"))
)
)
)
))
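# A minimal sketch of how the Elbow-plot data described in the Documentation tab
# could be computed with stats::kmeans (kept as comments so this file still
# returns only the UI object; column names, k range and seed are assumptions):
# elbow_wss <- function(xcol = "hp", ycol = "qsec", k_max = 9, seed = 123) {
#   set.seed(seed)
#   dat <- mtcars[, c(xcol, ycol)]
#   # total within-cluster sum of squares for k = 1..k_max
#   sapply(seq_len(k_max), function(k) kmeans(dat, centers = k)$tot.withinss)
# }
# plot(seq_len(9), elbow_wss(), type = "b",
#      xlab = "Number of clusters", ylab = "Total within-cluster sum of squares")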
|
/ui.R
|
no_license
|
ohartoog/Dataproducts_Week4
|
R
| false | false | 3,422 |
r
|
library(shiny)
# Define UI for application that draws a histogram
shinyUI(fluidPage(
# Application title
titlePanel("Interactive two-dimensional K-means clustering and the Elbow Method"),
# headerPanel("title"", windowTitle = title)
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
textInput('kmseed', "Set a seed to be able to reproduce your results", value = "123"),
selectInput('xcol', 'Variable one', names(mtcars),
selected = names(mtcars)[[4]]),
selectInput('ycol', 'Variable two', names(mtcars),
selected = names(mtcars)[[7]]),
sliderInput('clusters', 'Number of clusters', min = 1, max = 9,
value = 3, ticks = TRUE,
animate = TRUE, round = TRUE)
),
mainPanel(
tabsetPanel(
tabPanel("Documentation", HTML("This app will enable you to interactively K-means cluster the mtcars dataset,
                                            two dimensions at a time. <br> <br> Below you'll find instructions for the use of
the app and the interpretation of what is shown.<br> <br>
                                            In the <strong>input bar</strong> on the left you can select two dimensions of the mtcars dataset, as
well as a randomisation seed. The app will run a K-means clustering algorithm on the
dataset with just the two selected variables, initialized with the selected seed. The K-means algorithm
                                            is not deterministic by nature. This means that, especially with many clusters, the results
can be different with a different initialization. Preserving the seed enables you to reproduce the
exact same values in two different runs. <br> <br>
In the <strong>Results</strong> tab of this app you'll find a graphical representation of the results of the clustering. <br>
                                            The first plot shows the data with the two selected variables on the x and y axes. The
center positions of the clusters are indicated with big X's. The data points that belong to
the same cluster share the same color. <br>
The second plot is the so-called Elbow plot, which helps to select the right number of clusters for the
                                            data selection at hand. The current number of clusters is indicated with a big red dot. The plot shows
                                            the total within-cluster sum of squares against the number of clusters. This number decreases with the
                                            number of clusters, but at some point, the gain is not 'worth' the increased complexity of the model anymore. This
is where the plot shows an angle, or 'elbow', indicating the ideal number of clusters. As you can see, for most variable
combinations in this data set the ideal number of clusters is 2 or 3.")),
tabPanel("Results", plotOutput("plot1"), plotOutput("plot2"))
)
)
)
))
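# A minimal sketch of how the Elbow-plot data described in the Documentation tab
# could be computed with stats::kmeans (kept as comments so this file still
# returns only the UI object; column names, k range and seed are assumptions):
# elbow_wss <- function(xcol = "hp", ycol = "qsec", k_max = 9, seed = 123) {
#   set.seed(seed)
#   dat <- mtcars[, c(xcol, ycol)]
#   # total within-cluster sum of squares for k = 1..k_max
#   sapply(seq_len(k_max), function(k) kmeans(dat, centers = k)$tot.withinss)
# }
# plot(seq_len(9), elbow_wss(), type = "b",
#      xlab = "Number of clusters", ylab = "Total within-cluster sum of squares")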
|
measured <- ts(1:10,start=1.0,frequency=0.5)
measured
lag(measured,2)
|
/book/packt/R.Object-oriented.Programming/6682OS_02_Codes/chapter2/chapter_2_ex18.R
|
no_license
|
xenron/sandbox-da-r
|
R
| false | false | 73 |
r
|
measured <- ts(1:10,start=1.0,frequency=0.5)
measured
lag(measured,2)
|
#' Plot the features in an explanation
#'
#' This function creates a compact visual representation of the explanations
#' for each case and label combination in an explanation. Each extracted feature
#' is shown with its weight, thus giving the importance of the feature in the
#' label prediction.
#'
#' @param explanation A `data.frame` as returned by [explain()].
#'
#' @param ncol The number of columns in the facetted plot
#'
#' @return A `ggplot` object
#'
#' @import ggplot2
#' @export
#'
#' @family explanation plots
#'
#' @examples
#' # Create some explanations
#' library(MASS)
#' iris_test <- iris[1, 1:4]
#' iris_train <- iris[-1, 1:4]
#' iris_lab <- iris[[5]][-1]
#' model <- lda(iris_train, iris_lab)
#' explanation <- lime(iris_train, model)
#' explanations <- explain(iris_test, explanation, n_labels = 1, n_features = 2)
#'
#' # Get an overview with the standard plot
#' plot_features(explanations)
#'
plot_features <- function(explanation, ncol = 2) {
type_pal <- c('Supports', 'Contradicts')
explanation$type <- factor(ifelse(sign(explanation$feature_weight) == 1, type_pal[1], type_pal[2]), levels = type_pal)
description <- paste0(explanation$case, '_', explanation$label)
desc_width <- max(nchar(description)) + 1
description <- paste0(format(description, width = desc_width), explanation$feature_desc)
explanation$description <- factor(description, levels = description[order(abs(explanation$feature_weight))])
explanation$case <- factor(explanation$case, unique(explanation$case))
if (explanation$model_type[1] == 'classification') {
explanation$probability <- format(explanation$label_prob, digits = 2)
p <- ggplot(explanation) +
facet_wrap(~ case + label + probability, labeller = label_both_upper, scales = 'free', ncol = ncol)
} else if (explanation$model_type[1] == 'regression') {
p <- ggplot(explanation) +
facet_wrap(~ case + prediction, labeller = label_both_upper, scales = 'free', ncol = ncol)
}
p +
geom_col(aes_(~description, ~feature_weight, fill = ~type)) +
coord_flip() +
scale_fill_manual(values = c('forestgreen', 'firebrick'), drop = FALSE) +
scale_x_discrete(labels = function(lab) substr(lab, desc_width+1, nchar(lab))) +
labs(y = 'Weight', x = 'Feature', fill = '') +
theme_lime()
}
#' Plot a condensed overview of all explanations
#'
#' This function produces a facetted heatmap visualisation of all
#' case/label/feature combinations. Compared to [plot_features()] it is much
#' more condensed, thus allowing for an overview of many explanations in one
#' plot. On the other hand it is less useful for getting exact numerical
#' statistics of the explanation.
#'
#' @param explanation A `data.frame` as returned by [explain()].
#' @param ... Parameters passed on to [ggplot2::facet_wrap()]
#'
#' @return A `ggplot` object
#'
#' @import ggplot2
#' @export
#'
#' @family explanation plots
#'
#' @examples
#' # Create some explanations
#' library(MASS)
#' iris_test <- iris[1, 1:4]
#' iris_train <- iris[-1, 1:4]
#' iris_lab <- iris[[5]][-1]
#' model <- lda(iris_train, iris_lab)
#' explanation <- lime(iris_train, model)
#' explanations <- explain(iris_test, explanation, n_labels = 1, n_features = 2)
#'
#' # Get an overview with the standard plot
#' plot_explanations(explanations)
plot_explanations <- function(explanation, ...) {
num_cases <- unique(suppressWarnings(as.numeric(explanation$case)))
if (!anyNA(num_cases)) {
explanation$case <- factor(explanation$case, levels = as.character(sort(num_cases)))
}
explanation$feature_desc <- factor(
explanation$feature_desc,
levels = rev(unique(explanation$feature_desc[order(explanation$feature, explanation$feature_value)]))
)
p <- ggplot(explanation, aes_(~case, ~feature_desc)) +
geom_tile(aes_(fill = ~feature_weight)) +
scale_x_discrete('Case', expand = c(0, 0)) +
scale_y_discrete('Feature', expand = c(0, 0)) +
scale_fill_gradient2('Feature\nweight', low = '#8e0152', mid = '#f7f7f7', high = '#276419') +
theme_lime() +
theme(panel.border = element_rect(fill = NA, colour = 'grey60', size = 1),
panel.grid = element_blank(),
legend.position = 'right',
axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))
if (is.null(explanation$label)) {
p
} else {
p + facet_wrap(~label, ...)
}
}
theme_lime <- function(...) {
theme_minimal() +
theme(
strip.text = element_text(face = 'bold', size = 9),
plot.margin = margin(15, 15, 15, 15),
legend.background = element_blank(),
legend.key = element_blank(),
panel.grid.major.y = element_blank(),
panel.grid.minor.y = element_blank(),
axis.ticks = element_blank(),
legend.position = 'bottom',
panel.spacing.y = unit(15, 'pt'),
strip.text.x = element_text(margin = margin(t = 2, b = 2), hjust = 0),
axis.title.y = element_text(margin = margin(r = 10)),
axis.title.x = element_text(margin = margin(t = 10)),
...
)
}
#' @importFrom tools toTitleCase
label_both_upper <- function(labels, multi_line = TRUE, sep = ': ') {
names(labels) <- toTitleCase(names(labels))
label_both(labels, multi_line, sep)
}
|
/R/plot.R
|
no_license
|
satya-dasara/lime
|
R
| false | false | 5,205 |
r
|
#' Plot the features in an explanation
#'
#' This function creates a compact visual representation of the explanations
#' for each case and label combination in an explanation. Each extracted feature
#' is shown with its weight, thus giving the importance of the feature in the
#' label prediction.
#'
#' @param explanation A `data.frame` as returned by [explain()].
#'
#' @param ncol The number of columns in the facetted plot
#'
#' @return A `ggplot` object
#'
#' @import ggplot2
#' @export
#'
#' @family explanation plots
#'
#' @examples
#' # Create some explanations
#' library(MASS)
#' iris_test <- iris[1, 1:4]
#' iris_train <- iris[-1, 1:4]
#' iris_lab <- iris[[5]][-1]
#' model <- lda(iris_train, iris_lab)
#' explanation <- lime(iris_train, model)
#' explanations <- explain(iris_test, explanation, n_labels = 1, n_features = 2)
#'
#' # Get an overview with the standard plot
#' plot_features(explanations)
#'
plot_features <- function(explanation, ncol = 2) {
type_pal <- c('Supports', 'Contradicts')
explanation$type <- factor(ifelse(sign(explanation$feature_weight) == 1, type_pal[1], type_pal[2]), levels = type_pal)
description <- paste0(explanation$case, '_', explanation$label)
desc_width <- max(nchar(description)) + 1
description <- paste0(format(description, width = desc_width), explanation$feature_desc)
explanation$description <- factor(description, levels = description[order(abs(explanation$feature_weight))])
explanation$case <- factor(explanation$case, unique(explanation$case))
if (explanation$model_type[1] == 'classification') {
explanation$probability <- format(explanation$label_prob, digits = 2)
p <- ggplot(explanation) +
facet_wrap(~ case + label + probability, labeller = label_both_upper, scales = 'free', ncol = ncol)
} else if (explanation$model_type[1] == 'regression') {
p <- ggplot(explanation) +
facet_wrap(~ case + prediction, labeller = label_both_upper, scales = 'free', ncol = ncol)
}
p +
geom_col(aes_(~description, ~feature_weight, fill = ~type)) +
coord_flip() +
scale_fill_manual(values = c('forestgreen', 'firebrick'), drop = FALSE) +
scale_x_discrete(labels = function(lab) substr(lab, desc_width+1, nchar(lab))) +
labs(y = 'Weight', x = 'Feature', fill = '') +
theme_lime()
}
#' Plot a condensed overview of all explanations
#'
#' This function produces a facetted heatmap visualisation of all
#' case/label/feature combinations. Compared to [plot_features()] it is much
#' more condensed, thus allowing for an overview of many explanations in one
#' plot. On the other hand it is less useful for getting exact numerical
#' statistics of the explanation.
#'
#' @param explanation A `data.frame` as returned by [explain()].
#' @param ... Parameters passed on to [ggplot2::facet_wrap()]
#'
#' @return A `ggplot` object
#'
#' @import ggplot2
#' @export
#'
#' @family explanation plots
#'
#' @examples
#' # Create some explanations
#' library(MASS)
#' iris_test <- iris[1, 1:4]
#' iris_train <- iris[-1, 1:4]
#' iris_lab <- iris[[5]][-1]
#' model <- lda(iris_train, iris_lab)
#' explanation <- lime(iris_train, model)
#' explanations <- explain(iris_test, explanation, n_labels = 1, n_features = 2)
#'
#' # Get an overview with the standard plot
#' plot_explanations(explanations)
plot_explanations <- function(explanation, ...) {
num_cases <- unique(suppressWarnings(as.numeric(explanation$case)))
if (!anyNA(num_cases)) {
explanation$case <- factor(explanation$case, levels = as.character(sort(num_cases)))
}
explanation$feature_desc <- factor(
explanation$feature_desc,
levels = rev(unique(explanation$feature_desc[order(explanation$feature, explanation$feature_value)]))
)
p <- ggplot(explanation, aes_(~case, ~feature_desc)) +
geom_tile(aes_(fill = ~feature_weight)) +
scale_x_discrete('Case', expand = c(0, 0)) +
scale_y_discrete('Feature', expand = c(0, 0)) +
scale_fill_gradient2('Feature\nweight', low = '#8e0152', mid = '#f7f7f7', high = '#276419') +
theme_lime() +
theme(panel.border = element_rect(fill = NA, colour = 'grey60', size = 1),
panel.grid = element_blank(),
legend.position = 'right',
axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))
if (is.null(explanation$label)) {
p
} else {
p + facet_wrap(~label, ...)
}
}
theme_lime <- function(...) {
theme_minimal() +
theme(
strip.text = element_text(face = 'bold', size = 9),
plot.margin = margin(15, 15, 15, 15),
legend.background = element_blank(),
legend.key = element_blank(),
panel.grid.major.y = element_blank(),
panel.grid.minor.y = element_blank(),
axis.ticks = element_blank(),
legend.position = 'bottom',
panel.spacing.y = unit(15, 'pt'),
strip.text.x = element_text(margin = margin(t = 2, b = 2), hjust = 0),
axis.title.y = element_text(margin = margin(r = 10)),
axis.title.x = element_text(margin = margin(t = 10)),
...
)
}
#' @importFrom tools toTitleCase
label_both_upper <- function(labels, multi_line = TRUE, sep = ': ') {
names(labels) <- toTitleCase(names(labels))
label_both(labels, multi_line, sep)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deprecated.R
\name{deprecated}
\alias{deprecated}
\alias{add_comparison}
\alias{add_global}
\alias{tab_style_bold_p}
\alias{tab_style_bold_labels}
\alias{tab_style_italicize_levels}
\alias{tab_style_italicize_labels}
\alias{tab_style_bold_levels}
\alias{tbl_summary_}
\alias{add_p_}
\title{Deprecated functions}
\usage{
add_comparison(...)
add_global(...)
tab_style_bold_p(...)
tab_style_bold_labels(...)
tab_style_italicize_levels(...)
tab_style_italicize_labels(...)
tab_style_bold_levels(...)
tbl_summary_(...)
add_p_(...)
}
\description{
\Sexpr[results=rd, stage=render]{lifecycle::badge("deprecated")}
Some functions have been deprecated and are no longer being actively
supported.
}
\keyword{internal}
|
/man/deprecated.Rd
|
permissive
|
thomas-neitmann/gtsummary
|
R
| false | true | 794 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deprecated.R
\name{deprecated}
\alias{deprecated}
\alias{add_comparison}
\alias{add_global}
\alias{tab_style_bold_p}
\alias{tab_style_bold_labels}
\alias{tab_style_italicize_levels}
\alias{tab_style_italicize_labels}
\alias{tab_style_bold_levels}
\alias{tbl_summary_}
\alias{add_p_}
\title{Deprecated functions}
\usage{
add_comparison(...)
add_global(...)
tab_style_bold_p(...)
tab_style_bold_labels(...)
tab_style_italicize_levels(...)
tab_style_italicize_labels(...)
tab_style_bold_levels(...)
tbl_summary_(...)
add_p_(...)
}
\description{
\Sexpr[results=rd, stage=render]{lifecycle::badge("deprecated")}
Some functions have been deprecated and are no longer being actively
supported.
}
\keyword{internal}
|
heat.bidirec <- read.csv("210330_012_Cengen medium_GPCR NHR_Spearman rho_heatmap_raw.csv", header = TRUE, row.names = 1, check.names = FALSE)
##reads file for similarity between gene expression for gene pairs across neurons in Cengen data
gpcr.num <- ncol(heat.bidirec)
##identifies total number of genes of 1st category from number of columns
nhr.num <- nrow(heat.bidirec)
##identifies total number of genes of 2nd category from number of rows
uniq.pairs <- data.frame("NHR" = rep(NA, 1000000), "GPCR" = rep(NA, 1000000), "rho" = rep(NA, 1000000))
#creates an empty data frame of three columns and 1000000 rows - CHANGE VALUES HERE
index <- 1
##index identifies which row to fill in output file
##'j' follows genes in 2nd category
for (j in 1:nhr.num){
##'i' follows genes in 1st category
for (i in 1:gpcr.num){
if(heat.bidirec[j , i] >= 0.5){
##Values greater than or equal to 0.5 - CHANGE VALUES HERE
uniq.pairs$NHR[index] <- row.names(heat.bidirec)[j]
##inputs name of gene of 2nd category
uniq.pairs$GPCR[index] <- colnames(heat.bidirec)[i]
##inputs name of gene of 1st category
uniq.pairs$rho[index] <- heat.bidirec[j , i]
##inputs the rho correlation value - CHANGE VECTOR NAME HERE
index <- index + 1
}else {
index <- index
}
}
}
write.csv(uniq.pairs, file = "210401_007_Cengen medium_correl rho greater equal 0.5 nhr gpcr pairs.csv", row.names = FALSE)
##outputs CSV file containing unique pair names
rm(list = c("heat.bidirec", "uniq.pairs", "gpcr.num", "nhr.num", "i", "j", "index"))
##rm(list = ls())
|
/210401_005_Cengen medium_extract greater 0.9_heatmap nhr gpcr.R
|
no_license
|
hobertlab/NHR-GPCR_Sural_2021
|
R
| false | false | 1,778 |
r
|
heat.bidirec <- read.csv("210330_012_Cengen medium_GPCR NHR_Spearman rho_heatmap_raw.csv", header = TRUE, row.names = 1, check.names = FALSE)
##reads file for similarity between gene expression for gene pairs across neurons in Cengen data
gpcr.num <- ncol(heat.bidirec)
##identifies total number of genes of 1st category from number of columns
nhr.num <- nrow(heat.bidirec)
##identifies total number of genes of 2nd category from number of rows
uniq.pairs <- data.frame("NHR" = rep(NA, 1000000), "GPCR" = rep(NA, 1000000), "rho" = rep(NA, 1000000))
#creates an empty data frame of three columns and 1000000 rows - CHANGE VALUES HERE
index <- 1
##index identifies which row to fill in output file
##'j' follows genes in 2nd category
for (j in 1:nhr.num){
##'i' follows genes in 1st category
for (i in 1:gpcr.num){
if(heat.bidirec[j , i] >= 0.5){
##Values greater than or equal to 0.5 - CHANGE VALUES HERE
uniq.pairs$NHR[index] <- row.names(heat.bidirec)[j]
##inputs name of gene of 2nd category
uniq.pairs$GPCR[index] <- colnames(heat.bidirec)[i]
##inputs name of gene of 1st category
uniq.pairs$rho[index] <- heat.bidirec[j , i]
##inputs the rho correlation value - CHANGE VECTOR NAME HERE
index <- index + 1
}else {
index <- index
}
}
}
write.csv(uniq.pairs, file = "210401_007_Cengen medium_correl rho greater equal 0.5 nhr gpcr pairs.csv", row.names = FALSE)
##outputs CSV file containing unique pair names
rm(list = c("heat.bidirec", "uniq.pairs", "gpcr.num", "nhr.num", "i", "j", "index"))
##rm(list = ls())
|
library(ISLR)
library(glmnet)
library(ggplot2)
library(plyr)     # needed for ldply() below
library(reshape2) # needed for melt() below
data(Hitters)
?Hitters
dim(Hitters)
clean <- Hitters[!is.na(Hitters$Salary), ]
dim(clean)
?subset
train.1 <- head(clean, 100)
#salary.1 <- train.1$salary
#train.1$salary <- NULL
train.2 <- head(clean, 200)
#salary.2 <- train.2$salary
#train.2$salary <- NULL
test <- tail(clean, 63)
dim(train.1); dim(train.2); dim(test)
mod.x <- glm(Salary ~ ., data=train.1, family="gaussian")
mod.x$fitted.values
preds.x <- predict(mod.x)
err.x <- mean((preds.x - train.1$Salary)^2)
str(mod.x)
preds.test <- predict(mod.x, newdata=test)
err.test <- mean((preds.test - test$Salary)^2)
curva.ap.1 <- ldply(seq(50, 100, 5), function(i) {
mod.1 <- glm(Salary ~ ., data=train.1[1:i, ], family="gaussian")
error.entrena <- mean((predict(mod.1) - train.1[1:i, ]$Salary) ^ 2)
error.valida <- mean((predict(mod.1, newdata=test) - test$Salary) ^ 2)
data.frame(i = i,
error.entrena = error.entrena,
error.valida = error.valida)
})
curva.ap.1.m <- melt(curva.ap.1, id.var = "i")
ggplot(curva.ap.1.m, aes(x = i, y = value, colour = variable)) +
geom_point() + geom_line() + ylab("ECM")
# Rx: There is still a lot to do, since the 2 curves are very far apart from each other;
# adding more training data will improve the prediction.
# It is worth trying more structured models or more input variables,
# since the variance may be high.
# Ridge
?glmnet
salary.1 <- train.1$Salary
# Remove non-numeric variables
glm.train <- subset(train.1, select=-c(League, Division, NewLeague))
glm.train$Salary <- NULL
#train.1$salary <- NULL
mod.ridge <- glmnet(x=as.matrix(glm.train), y=as.matrix(salary.1),
family="gaussian", alpha=0, intercept=T)
?glmnet
?predict.glmnet
plot(mod.ridge, xvar="lambda")
str(mod.ridge)
mod.ridge[[1]]
predict(mod.ridge, newx=as.matrix(glm.train))
set.seed(840424)
sample(runif(1000), 1)
## Models with 200 training observations
curva.ap.2 <- ldply(seq(50, 200, 10), function(i) {
mod.2 <- glm(Salary ~ ., data=train.2[1:i, ], family="gaussian")
error.entrena <- mean((predict(mod.2) - train.2[1:i, ]$Salary) ^ 2)
error.valida <- mean((predict(mod.2, newdata=test) - test$Salary) ^ 2)
data.frame(i = i,
error.entrena = error.entrena,
error.valida = error.valida)
})
curva.ap.2.m <- melt(curva.ap.2, id.var = "i")
ggplot(curva.ap.2.m, aes(x = i, y = value, colour = variable)) +
geom_point() + geom_line() + ylab("ECM")
|
/l10/hw1.R
|
no_license
|
alfonsokim/machine-learning-class
|
R
| false | false | 2,532 |
r
|
library(ISLR)
library(glmnet)
library(ggplot2)
library(plyr)     # needed for ldply() below
library(reshape2) # needed for melt() below
data(Hitters)
?Hitters
dim(Hitters)
clean <- Hitters[!is.na(Hitters$Salary), ]
dim(clean)
?subset
train.1 <- head(clean, 100)
#salary.1 <- train.1$salary
#train.1$salary <- NULL
train.2 <- head(clean, 200)
#salary.2 <- train.2$salary
#train.2$salary <- NULL
test <- tail(clean, 63)
dim(train.1); dim(train.2); dim(test)
mod.x <- glm(Salary ~ ., data=train.1, family="gaussian")
mod.x$fitted.values
preds.x <- predict(mod.x)
err.x <- mean((preds.x - train.1$Salary)^2)
str(mod.x)
preds.test <- predict(mod.x, newdata=test)
err.test <- mean((preds.test - test$Salary)^2)
curva.ap.1 <- ldply(seq(50, 100, 5), function(i) {
mod.1 <- glm(Salary ~ ., data=train.1[1:i, ], family="gaussian")
error.entrena <- mean((predict(mod.1) - train.1[1:i, ]$Salary) ^ 2)
error.valida <- mean((predict(mod.1, newdata=test) - test$Salary) ^ 2)
data.frame(i = i,
error.entrena = error.entrena,
error.valida = error.valida)
})
curva.ap.1.m <- melt(curva.ap.1, id.var = "i")
ggplot(curva.ap.1.m, aes(x = i, y = value, colour = variable)) +
geom_point() + geom_line() + ylab("ECM")
# Rx: There is still a lot to do, since the 2 curves are very far apart from each other;
# adding more training data will improve the prediction.
# It is worth trying more structured models or more input variables,
# since the variance may be high.
# Ridge
?glmnet
salary.1 <- train.1$Salary
# Remove non-numeric variables
glm.train <- subset(train.1, select=-c(League, Division, NewLeague))
glm.train$Salary <- NULL
#train.1$salary <- NULL
mod.ridge <- glmnet(x=as.matrix(glm.train), y=as.matrix(salary.1),
family="gaussian", alpha=0, intercept=T)
?glmnet
?predict.glmnet
plot(mod.ridge, xvar="lambda")
str(mod.ridge)
mod.ridge[[1]]
predict(mod.ridge, newx=as.matrix(glm.train))
set.seed(840424)
sample(runif(1000), 1)
## Models with 200 training observations
curva.ap.2 <- ldply(seq(50, 200, 10), function(i) {
mod.2 <- glm(Salary ~ ., data=train.2[1:i, ], family="gaussian")
error.entrena <- mean((predict(mod.2) - train.2[1:i, ]$Salary) ^ 2)
error.valida <- mean((predict(mod.2, newdata=test) - test$Salary) ^ 2)
data.frame(i = i,
error.entrena = error.entrena,
error.valida = error.valida)
})
curva.ap.2.m <- melt(curva.ap.2, id.var = "i")
ggplot(curva.ap.2.m, aes(x = i, y = value, colour = variable)) +
geom_point() + geom_line() + ylab("ECM")
|
t <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?", colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'))
t$Date <- as.Date(t$Date, "%d/%m/%Y")
t <- subset(t,Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
t <- t[complete.cases(t),]
dateTime <- paste(t$Date, t$Time)
dateTime <- setNames(dateTime, "DateTime")
t <- t[ ,!(names(t) %in% c("Date","Time"))]
t <- cbind(dateTime, t)
t$dateTime <- as.POSIXct(dateTime)
plot(t$Global_active_power~t$dateTime, type="l", ylab="Global Active Power (kilowatts)", xlab="")
|
/plot2.R
|
no_license
|
mrraul80/ExData_Plotting1
|
R
| false | false | 660 |
r
|
t <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?", colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'))
t$Date <- as.Date(t$Date, "%d/%m/%Y")
t <- subset(t,Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
t <- t[complete.cases(t),]
dateTime <- paste(t$Date, t$Time)
dateTime <- setNames(dateTime, "DateTime")
t <- t[ ,!(names(t) %in% c("Date","Time"))]
t <- cbind(dateTime, t)
t$dateTime <- as.POSIXct(dateTime)
plot(t$Global_active_power~t$dateTime, type="l", ylab="Global Active Power (kilowatts)", xlab="")
|
householdpowerconsumption <- "~/Desktop/Coursera/DataScienceusingRCourse4/Project1/household_power_consumption.txt"
DATA <- read.table(householdpowerconsumption, header = TRUE, sep = ";", stringsAsFactors = FALSE, dec = ".")
SUBDATA <- DATA[DATA$Date %in% c("1/2/2007", "2/2/2007"),]
globalactivepower <- as.numeric(SUBDATA$Global_active_power)
png("plot1.png", width = 480, height = 480)
hist(globalactivepower, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
dev.off()
|
/Plot1.R
|
no_license
|
Bryanplane/ExData_Plotting1
|
R
| false | false | 510 |
r
|
householdpowerconsumption <- "~/Desktop/Coursera/DataScienceusingRCourse4/Project1/household_power_consumption.txt"
DATA <- read.table(householdpowerconsumption, header = TRUE, sep = ";", stringsAsFactors = FALSE, dec = ".")
SUBDATA <- DATA[DATA$Date %in% c("1/2/2007", "2/2/2007"),]
globalactivepower <- as.numeric(SUBDATA$Global_active_power)
png("plot1.png", width = 480, height = 480)
hist(globalactivepower, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
dev.off()
|
\name{ghermite.h.polynomials}
\alias{ghermite.h.polynomials}
\title{ Create list of generalized Hermite polynomials }
\description{
This function returns a list with \eqn{n + 1} elements containing
the order \eqn{k} generalized Hermite polynomials, \eqn{H_k^{\left( \mu \right)} \left( x \right)},
for orders \eqn{k = 0,\;1,\; \ldots ,\;n}.
}
\usage{
ghermite.h.polynomials(n, mu, normalized = FALSE)
}
\arguments{
\item{n}{ integer value for the highest polynomial order }
\item{mu}{ numeric value for the polynomial parameter }
\item{normalized}{ boolean value which, if TRUE, returns recurrence relations for normalized polynomials }
}
\details{
The parameter \eqn{\mu} must be greater than -0.5.
The function \code{ghermite.h.recurrences} produces a data frame with the recurrence relation parameters
for the polynomials. If the \code{normalized} argument is FALSE, the
function \code{orthogonal.polynomials} is used to construct the list of orthogonal polynomial objects.
Otherwise, the function \code{orthonormal.polynomials} is used to construct the
list of orthonormal polynomial objects.
}
\value{
A list of \eqn{n + 1} polynomial objects
\item{1 }{order 0 generalized Hermite polynomial}
\item{2 }{order 1 generalized Hermite polynomial}
...
\item{n+1 }{order \eqn{n} generalized Hermite polynomial}
}
\references{
Alvarez-Nordase, R., M. K. Atakishiyeva and N. M. Atakishiyeva, 2004. A q-extension of
the generalized Hermite polynomials with continuous orthogonality property on R,
\emph{International Journal of Pure and Applied Mathematics}, 10(3), 335-347.
Abramowitz, M. and I. A. Stegun, 1968. \emph{Handbook of Mathematical Functions with
Formulas, Graphs, and Mathematical Tables}, Dover Publications, Inc., New York.
Courant, R., and D. Hilbert, 1989. \emph{Methods of Mathematical Physics},
John Wiley, New York, NY.
Szego, G., 1939. \emph{Orthogonal Polynomials}, 23, American Mathematical Society
Colloquium Publications, Providence, RI.
}
\author{ Frederick Novomestky \email{fnovomes@poly.edu} }
\seealso{
\code{\link{ghermite.h.recurrences}},
\code{\link{orthogonal.polynomials}},
\code{\link{orthonormal.polynomials}}
}
\examples{
###
### generate a list of normalized generalized Hermite polynomials of orders 0 to 10
### polynomial parameter is 1.0
###
normalized.p.list <- ghermite.h.polynomials( 10, 1, normalized=TRUE )
print( normalized.p.list )
###
### generate a list of unnormalized generalized Hermite polynomials of orders 0 to 10
### polynomial parameter is 1.0
###
unnormalized.p.list <- ghermite.h.polynomials( 10, 1, normalized=FALSE )
print( unnormalized.p.list )
}
\keyword{ math }
|
/man/ghermite.h.polynomials.Rd
|
no_license
|
cran/orthopolynom
|
R
| false | false | 2,734 |
rd
|
\name{ghermite.h.polynomials}
\alias{ghermite.h.polynomials}
\title{ Create list of generalized Hermite polynomials }
\description{
This function returns a list with \eqn{n + 1} elements containing
the order \eqn{k} generalized Hermite polynomials, \eqn{H_k^{\left( \mu \right)} \left( x \right)},
for orders \eqn{k = 0,\;1,\; \ldots ,\;n}.
}
\usage{
ghermite.h.polynomials(n, mu, normalized = FALSE)
}
\arguments{
\item{n}{ integer value for the highest polynomial order }
\item{mu}{ numeric value for the polynomial parameter }
\item{normalized}{ boolean value which, if TRUE, returns recurrence relations for normalized polynomials }
}
\details{
The parameter \eqn{\mu} must be greater than -0.5.
The function \code{ghermite.h.recurrences} produces a data frame with the recurrence relation parameters
for the polynomials. If the \code{normalized} argument is FALSE, the
function \code{orthogonal.polynomials} is used to construct the list of orthogonal polynomial objects.
Otherwise, the function \code{orthonormal.polynomials} is used to construct the
list of orthonormal polynomial objects.
}
\value{
A list of \eqn{n + 1} polynomial objects
\item{1 }{order 0 generalized Hermite polynomial}
\item{2 }{order 1 generalized Hermite polynomial}
...
\item{n+1 }{order \eqn{n} generalized Hermite polynomial}
}
\references{
Alvarez-Nordase, R., M. K. Atakishiyeva and N. M. Atakishiyeva, 2004. A q-extension of
the generalized Hermite polynomials with continuous orthogonality property on R,
\emph{International Journal of Pure and Applied Mathematics}, 10(3), 335-347.
Abramowitz, M. and I. A. Stegun, 1968. \emph{Handbook of Mathematical Functions with
Formulas, Graphs, and Mathematical Tables}, Dover Publications, Inc., New York.
Courant, R., and D. Hilbert, 1989. \emph{Methods of Mathematical Physics},
John Wiley, New York, NY.
Szego, G., 1939. \emph{Orthogonal Polynomials}, 23, American Mathematical Society
Colloquium Publications, Providence, RI.
}
\author{ Frederick Novomestky \email{fnovomes@poly.edu} }
\seealso{
\code{\link{ghermite.h.recurrences}},
\code{\link{orthogonal.polynomials}},
\code{\link{orthonormal.polynomials}}
}
\examples{
###
### generate a list of normalized generalized Hermite polynomials of orders 0 to 10
### polynomial parameter is 1.0
###
normalized.p.list <- ghermite.h.polynomials( 10, 1, normalized=TRUE )
print( normalized.p.list )
###
### generate a list of unnormalized generalized Hermite polynomials of orders 0 to 10
### polynomial parameter is 1.0
###
unnormalized.p.list <- ghermite.h.polynomials( 10, 1, normalized=FALSE )
print( unnormalized.p.list )
}
\keyword{ math }
|
\name{h2o.SpeeDRF}
\alias{h2o.SpeeDRF}
\title{
H2O: Single-Node Random Forest
}
\description{
Performs single-node random forest classification on a data set.
}
\usage{
h2o.SpeeDRF(x, y, data, classification = TRUE, nfolds = 0, validation, mtry = -1,
ntree = 50, depth = 50, sample.rate = 2/3, oobee = TRUE, importance = FALSE,
nbins = 1024, seed = -1, stat.type = "ENTROPY", balance.classes = FALSE,
verbose = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
A vector containing the names or indices of the predictor variables to use in building the random forest model.
}
\item{y}{
The name or index of the response variable. If the data does not contain a header, this is the column index, designated by increasing numbers from left to right. (The response must be either an integer or a categorical variable).
}
\item{data}{
An \code{\linkS4class{H2OParsedData}} object containing the variables in the model.
}
\item{classification}{
(Optional) A logical value indicating whether a classification model should be built (as opposed to regression).
}
\item{nfolds}{
(Optional) Number of folds for cross-validation. If \code{nfolds >= 2}, then \code{validation} must remain empty.
}
\item{validation}{
(Optional) An \code{\linkS4class{H2OParsedData}} object indicating the validation dataset used to construct confusion matrix. If left blank, this defaults to the training data when \code{nfolds = 0}.}
\item{mtry}{
(Optional) Number of features to randomly select at each split in the tree. If set to the default of -1, this will be set to \code{sqrt(ncol(data))}, rounded down to the nearest integer.
}
\item{ntree}{
(Optional) Number of trees to grow. (Must be a nonnegative integer).
}
\item{depth}{
(Optional) Maximum depth to grow the tree.
}
\item{sample.rate}{
(Optional) Sampling rate for constructing data from which individual trees are grown.
}
\item{oobee}{
(Optional) A logical value indicating whether to calculate the out of bag error estimate.
}
\item{importance}{
(Optional) A logical value indicating whether to compute variable importance measures. (If set to \code{TRUE}, the algorithm will take longer to finish.)
}
\item{nbins}{
(Optional) Build a histogram of this many bins, then split at best point.
}
\item{seed}{
(Optional) Seed for building the random forest. If \code{seed = -1}, one will automatically be generated by H2O.
}
\item{stat.type}{
(Optional) Type of statistic to use, equal to either "ENTROPY" or "GINI".
}
\item{balance.classes}{
(Optional) A logical value indicating whether classes should be rebalanced. Use for datasets where the levels of the response class are very unbalanced.
}
\item{verbose}{
(Optional) A logical value indicating whether verbose results should be returned.
}
\item{local_mode}{
(Optional) A logical value stating whether to
TRUE) build the random forest on local data to the node in a distributed and parallel fashion -OR-
FALSE) first pull all of the data local to each node and then build a random forest in a distributed and parallel fashion
Note: local_mode typically has poorer predictive performance for smaller datasets, but roughly equivalent performance with non-local mode.
}
}
\details{
IMPORTANT: Currently, you must initialize H2O with the flag \code{beta = TRUE} in \code{h2o.init} in order to use this method!
This method runs random forest model building on a single node, as opposed to the multi-node implementation in \code{\link{h2o.randomForest}}.
}
\value{
An object of class \code{\linkS4class{H2OSpeeDRFModel}} with slots key, data, valid (the validation dataset), and model, where the last is a list of the following components:
\item{params }{Input parameters for building the model.}
\item{ntree }{Number of trees grown.}
\item{depth }{Depth of the trees grown.}
\item{nbins }{Number of bins used in building the histogram.}
\item{classification }{Logical value indicating if the model is classification.}
\item{mse }{Mean-squared error for each tree.}
\item{confusion }{Confusion matrix of the prediction.}
}
\seealso{
\code{\linkS4class{H2OSpeeDRFModel}}, \code{\link{h2o.randomForest}}
}
\examples{
# Currently still in beta, so don't automatically run example
\dontrun{
library(h2o)
localH2O = h2o.init(ip = "localhost", port = 54321, startH2O = TRUE, beta = TRUE)
irisPath = system.file("extdata", "iris.csv", package = "h2o")
iris.hex = h2o.importFile(localH2O, path = irisPath, key = "iris.hex")
h2o.SpeeDRF(x = c(2,3,4), y = 5, data = iris.hex, ntree = 50, depth = 100)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
/R/h2o-package/man/h2o.SpeeDRF.Rd
|
permissive
|
alee-altiscale/h2o
|
R
| false | false | 4,807 |
rd
|
\name{h2o.SpeeDRF}
\alias{h2o.SpeeDRF}
\title{
H2O: Single-Node Random Forest
}
\description{
Performs single-node random forest classification on a data set.
}
\usage{
h2o.SpeeDRF(x, y, data, classification = TRUE, nfolds = 0, validation, mtry = -1,
ntree = 50, depth = 50, sample.rate = 2/3, oobee = TRUE, importance = FALSE,
nbins = 1024, seed = -1, stat.type = "ENTROPY", balance.classes = FALSE,
verbose = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
A vector containing the names or indices of the predictor variables to use in building the random forest model.
}
\item{y}{
The name or index of the response variable. If the data does not contain a header, this is the column index, designated by increasing numbers from left to right. (The response must be either an integer or a categorical variable).
}
\item{data}{
An \code{\linkS4class{H2OParsedData}} object containing the variables in the model.
}
\item{classification}{
(Optional) A logical value indicating whether a classification model should be built (as opposed to regression).
}
\item{nfolds}{
(Optional) Number of folds for cross-validation. If \code{nfolds >= 2}, then \code{validation} must remain empty.
}
\item{validation}{
(Optional) An \code{\linkS4class{H2OParsedData}} object indicating the validation dataset used to construct confusion matrix. If left blank, this defaults to the training data when \code{nfolds = 0}.}
\item{mtry}{
(Optional) Number of features to randomly select at each split in the tree. If set to the default of -1, this will be set to \code{sqrt(ncol(data))}, rounded down to the nearest integer.
}
\item{ntree}{
(Optional) Number of trees to grow. (Must be a nonnegative integer).
}
\item{depth}{
(Optional) Maximum depth to grow the tree.
}
\item{sample.rate}{
(Optional) Sampling rate for constructing data from which individual trees are grown.
}
\item{oobee}{
(Optional) A logical value indicating whether to calculate the out of bag error estimate.
}
\item{importance}{
(Optional) A logical value indicating whether to compute variable importance measures. (If set to \code{TRUE}, the algorithm will take longer to finish.)
}
\item{nbins}{
(Optional) Build a histogram of this many bins, then split at best point.
}
\item{seed}{
(Optional) Seed for building the random forest. If \code{seed = -1}, one will automatically be generated by H2O.
}
\item{stat.type}{
(Optional) Type of statistic to use, equal to either "ENTROPY" or "GINI".
}
\item{balance.classes}{
(Optional) A logical value indicating whether classes should be rebalanced. Use for datasets where the levels of the response class are very unbalanced.
}
\item{verbose}{
(Optional) A logical value indicating whether verbose results should be returned.
}
\item{local_mode}{
(Optional) A logical value stating whether to
TRUE) build the random forest on local data to the node in a distributed and parallel fashion -OR-
FALSE) first pull all of the data local to each node and then build a random forest in a distributed and parallel fashion
Note: local_mode typically has poorer predictive performance for smaller datasets, but roughly equivalent performance with non-local mode.
}
}
\details{
IMPORTANT: Currently, you must initialize H2O with the flag \code{beta = TRUE} in \code{h2o.init} in order to use this method!
This method runs random forest model building on a single node, as opposed to the multi-node implementation in \code{\link{h2o.randomForest}}.
}
\value{
An object of class \code{\linkS4class{H2OSpeeDRFModel}} with slots key, data, valid (the validation dataset), and model, where the last is a list of the following components:
\item{params }{Input parameters for building the model.}
\item{ntree }{Number of trees grown.}
\item{depth }{Depth of the trees grown.}
\item{nbins }{Number of bins used in building the histogram.}
\item{classification }{Logical value indicating if the model is classification.}
\item{mse }{Mean-squared error for each tree.}
\item{confusion }{Confusion matrix of the prediction.}
}
\seealso{
\code{\linkS4class{H2OSpeeDRFModel}}, \code{\link{h2o.randomForest}}
}
\examples{
# Currently still in beta, so don't automatically run example
\dontrun{
library(h2o)
localH2O = h2o.init(ip = "localhost", port = 54321, startH2O = TRUE, beta = TRUE)
irisPath = system.file("extdata", "iris.csv", package = "h2o")
iris.hex = h2o.importFile(localH2O, path = irisPath, key = "iris.hex")
h2o.SpeeDRF(x = c(2,3,4), y = 5, data = iris.hex, ntree = 50, depth = 100)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
dataset <- read.csv("C:/Users/aakar/Desktop/IS/EOD-WMT.csv")
dataset$Date <- as.Date(dataset$Date)
set.seed(7)
rferesult1 <- dataset[,2:5]
#normalizing data
attach(rferesult1)
open_minimum <- min(Open)
open_maximum <- max(Open)
rferesult1 <- within(rferesult1, Open <- (Open - open_minimum)/(open_maximum - open_minimum))
high_minimum <- min(High)
high_maximum <- max(High)
rferesult1 <- within(rferesult1, High <- (High - high_minimum)/(high_maximum - high_minimum))
low_minimum <- min(Low)
low_maximum <- max(Low)
rferesult1 <- within(rferesult1, Low <- (Low - low_minimum)/(low_maximum - low_minimum))
close_minimum <- min(Close)
close_maximum <- max(Close)
rferesult1 <- within(rferesult1, Close <- (Close - close_minimum)/(close_maximum - close_minimum))
rferesult <- rferesult1
#dividing dataset into training and testing randomly
indices <- sample(1:nrow(rferesult),size = 0.2 * nrow(rferesult))
actual_data <- dataset$Close[indices]
traindata <- rferesult[-indices,]
testdata <- rferesult[indices,]
#building recurrent neural network model
library(RSNNS)
fit1 <- elman(traindata[,1:3], traindata[,4], size = 50, maxit = 1000, learnFuncParams = c(0.01))
pred1 <- predict(fit1,testdata[,1:3])
pred1<-as.data.frame(pred1)
result <- pred1[1:349,]
compare<-cbind(testdata[,4],pred1$V1)
colnames(compare)<-c("actual","predicted")
head(compare)
#denormalizing predicted values
dataframe1 <- as.data.frame(compare)
dataframe1<-within(dataframe1,dataframe1$actual <- (dataframe1$actual * (close_maximum - close_minimum)) + close_minimum)
dataframe1<-within(dataframe1,dataframe1$predicted <- (dataframe1$predicted * (close_maximum - close_minimum)) + close_minimum)
head(dataframe1)
library(Metrics)
rmse(testdata[,4], pred1$V1)
|
/recurrenneuralnetworks.R
|
no_license
|
aakarshnadella/Analyzing-Performance-of-various-Machine-Learning-Algorithms-using-Walmart-Stock-Price-Data
|
R
| false | false | 1,748 |
r
|
dataset <- read.csv("C:/Users/aakar/Desktop/IS/EOD-WMT.csv")
dataset$Date <- as.Date(dataset$Date)
set.seed(7)
rferesult1 <- dataset[,2:5]
#normalizing data
attach(rferesult1)
open_minimum <- min(Open)
open_maximum <- max(Open)
rferesult1 <- within(rferesult1, Open <- (Open - open_minimum)/(open_maximum - open_minimum))
high_minimum <- min(High)
high_maximum <- max(High)
rferesult1 <- within(rferesult1, High <- (High - high_minimum)/(high_maximum - high_minimum))
low_minimum <- min(Low)
low_maximum <- max(Low)
rferesult1 <- within(rferesult1, Low <- (Low - low_minimum)/(low_maximum - low_minimum))
close_minimum <- min(Close)
close_maximum <- max(Close)
rferesult1 <- within(rferesult1, Close <- (Close - close_minimum)/(close_maximum - close_minimum))
rferesult <- rferesult1
#dividing dataset into training and testing randomly
indices <- sample(1:nrow(rferesult),size = 0.2 * nrow(rferesult))
actual_data <- dataset$Close[indices]
traindata <- rferesult[-indices,]
testdata <- rferesult[indices,]
#building recurrent neural network model
library(RSNNS)
fit1 <- elman(traindata[,1:3], traindata[,4], size = 50, maxit = 1000, learnFuncParams = c(0.01))
pred1 <- predict(fit1,testdata[,1:3])
pred1<-as.data.frame(pred1)
result <- pred1[1:349,]
compare<-cbind(testdata[,4],pred1$V1)
colnames(compare)<-c("actual","predicted")
head(compare)
#denormalizing predicted values
dataframe1 <- as.data.frame(compare)
dataframe1<-within(dataframe1,dataframe1$actual <- (dataframe1$actual * (close_maximum - close_minimum)) + close_minimum)
dataframe1<-within(dataframe1,dataframe1$predicted <- (dataframe1$predicted * (close_maximum - close_minimum)) + close_minimum)
head(dataframe1)
library(Metrics)
rmse(testdata[,4], pred1$V1)
|
rl_algo = function(job, data, instance, agent.name) {
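  # builds a call string of the form "<agent.name>$test(iter = 1000, sname = '<instance>', render = FALSE)"
  # and evaluates it below; agent.name is assumed to name an agent object available in this scope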
tex = paste0(agent.name, sprintf("$test(iter = 1000, sname = '%s', render = FALSE)", instance))
perf = eval(parse(text = tex))
return(perf = perf) # key for table join
}
|
/benchmark/bt_algorithms.R
|
no_license
|
LinSelina/rlR
|
R
| false | false | 231 |
r
|
rl_algo = function(job, data, instance, agent.name) {
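  # builds a call string of the form "<agent.name>$test(iter = 1000, sname = '<instance>', render = FALSE)"
  # and evaluates it below; agent.name is assumed to name an agent object available in this scope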
tex = paste0(agent.name, sprintf("$test(iter = 1000, sname = '%s', render = FALSE)", instance))
perf = eval(parse(text = tex))
return(perf = perf) # key for table join
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/creation-ops.R, R/gen-namespace-docs.R,
% R/gen-namespace-examples.R
\name{torch_randn}
\alias{torch_randn}
\title{Randn}
\usage{
torch_randn(
...,
names = NULL,
dtype = NULL,
layout = torch_strided(),
device = NULL,
requires_grad = FALSE
)
}
\arguments{
\item{...}{(int...) a sequence of integers defining the shape of the output tensor. Can be a variable number of arguments or a collection like a list or tuple.}
\item{names}{optional names for the dimensions}
\item{dtype}{(\code{torch.dtype}, optional) the desired data type of returned tensor. Default: if \code{NULL}, uses a global default (see \code{torch_set_default_tensor_type}).}
\item{layout}{(\code{torch.layout}, optional) the desired layout of returned Tensor. Default: \code{torch_strided}.}
\item{device}{(\code{torch.device}, optional) the desired device of returned tensor. Default: if \code{NULL}, uses the current device for the default tensor type (see \code{torch_set_default_tensor_type}). \code{device} will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types.}
\item{requires_grad}{(bool, optional) If autograd should record operations on the returned tensor. Default: \code{FALSE}.}
}
\description{
Randn
}
\section{randn(*size, out=NULL, dtype=NULL, layout=torch.strided, device=NULL, requires_grad=False) -> Tensor }{
Returns a tensor filled with random numbers from a normal distribution
with mean \code{0} and variance \code{1} (also called the standard normal
distribution).
\deqn{
\mbox{out}_{i} \sim \mathcal{N}(0, 1)
}
The shape of the tensor is defined by the variable argument \code{size}.
}
\examples{
if (torch_is_installed()) {
torch_randn(c(4))
torch_randn(c(2, 3))
}
}
|
/man/torch_randn.Rd
|
permissive
|
krzjoa/torch
|
R
| false | true | 1,847 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/creation-ops.R, R/gen-namespace-docs.R,
% R/gen-namespace-examples.R
\name{torch_randn}
\alias{torch_randn}
\title{Randn}
\usage{
torch_randn(
...,
names = NULL,
dtype = NULL,
layout = torch_strided(),
device = NULL,
requires_grad = FALSE
)
}
\arguments{
\item{...}{(int...) a sequence of integers defining the shape of the output tensor. Can be a variable number of arguments or a collection like a list or tuple.}
\item{names}{optional names for the dimensions}
\item{dtype}{(\code{torch.dtype}, optional) the desired data type of returned tensor. Default: if \code{NULL}, uses a global default (see \code{torch_set_default_tensor_type}).}
\item{layout}{(\code{torch.layout}, optional) the desired layout of returned Tensor. Default: \code{torch_strided}.}
\item{device}{(\code{torch.device}, optional) the desired device of returned tensor. Default: if \code{NULL}, uses the current device for the default tensor type (see \code{torch_set_default_tensor_type}). \code{device} will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types.}
\item{requires_grad}{(bool, optional) If autograd should record operations on the returned tensor. Default: \code{FALSE}.}
}
\description{
Randn
}
\section{randn(*size, out=NULL, dtype=NULL, layout=torch.strided, device=NULL, requires_grad=False) -> Tensor }{
Returns a tensor filled with random numbers from a normal distribution
with mean \code{0} and variance \code{1} (also called the standard normal
distribution).
\deqn{
\mbox{out}_{i} \sim \mathcal{N}(0, 1)
}
The shape of the tensor is defined by the variable argument \code{size}.
}
\examples{
if (torch_is_installed()) {
torch_randn(c(4))
torch_randn(c(2, 3))
}
}
|
library(mtk)
### Name: mtkResult
### Title: The constructor of the class 'mtkResult'
### Aliases: mtkResult
### ** Examples
## See examples from help(mtkAnalyserResult), help(mtkDesignerResult), help(mtkEvaluatorResult)
|
/data/genthat_extracted_code/mtk/examples/mtkResult.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 231 |
r
|
library(mtk)
### Name: mtkResult
### Title: The constructor of the class 'mtkResult'
### Aliases: mtkResult
### ** Examples
## See examples from help(mtkAnalyserResult), help(mtkDesignerResult), help(mtkEvaluatorResult)
|
## Collating results and comparing accuracy
## Set directory
setwd(dirname(rstudioapi::documentPath()))
## Libraries
library(tidyverse)
## Load models and merge into one frame
load("../Results/baseline_results.RData")
results_1 <- result_export %>%
mutate(model = "Baseline")
rm(result_export)
load("../Results/pp_0.5.RData")
results_2 <- result_export %>%
mutate(model = "pp_0.5")
rm(result_export)
load("../Results/pp_0.25.RData")
results_3 <- result_export %>%
mutate(model = "pp_0.25")
rm(result_export)
load("../Results/pp_0.1.RData")
results_4 <- result_export %>%
mutate(model = "pp_0.1")
rm(result_export)
combined_results <- do.call("rbind", list(results_1, results_2, results_3, results_4))
save(combined_results, file = "../Results/Combined_Results.RData")
## headline Accuracy comparison
combined_results %>%
group_by(model) %>%
summarise(MAE = round(mean(MAE),3),
RMSE = round(mean(RMSE),3))
combined_results %>%
group_by(model) %>%
summarise(mae = round(mean(MAE),4),
            rmse = round(mean(RMSE),4))
|
/Code/04_Accuracy_comparison.R
|
no_license
|
blobo184/powerpriors
|
R
| false | false | 1,104 |
r
|
## Collating results and comparing accuracy
## Set directory
setwd(dirname(rstudioapi::documentPath()))
## Libraries
library(tidyverse)
## Load models and merge into one frame
load("../Results/baseline_results.RData")
results_1 <- result_export %>%
mutate(model = "Baseline")
rm(result_export)
load("../Results/pp_0.5.RData")
results_2 <- result_export %>%
mutate(model = "pp_0.5")
rm(result_export)
load("../Results/pp_0.25.RData")
results_3 <- result_export %>%
mutate(model = "pp_0.25")
rm(result_export)
load("../Results/pp_0.1.RData")
results_4 <- result_export %>%
mutate(model = "pp_0.1")
rm(result_export)
combined_results <- do.call("rbind", list(results_1, results_2, results_3, results_4))
save(combined_results, file = "../Results/Combined_Results.RData")
## headline Accuracy comparison
combined_results %>%
group_by(model) %>%
summarise(MAE = round(mean(MAE),3),
RMSE = round(mean(RMSE),3))
combined_results %>%
group_by(model) %>%
summarise(mae = round(mean(MAE),4),
            rmse = round(mean(RMSE),4))
|
# Generates survival curve data for diagnoses.
library(argparse)
library(data.table)
library(feather)
library(survival)
library(dplyr)
library(dtplyr)
library(stringr)
parser <- ArgumentParser()
parser$add_argument('--input', required = TRUE, help = 'the Feather file to read hazard data from')
parser$add_argument('--output', required = TRUE, help = 'the Feather file to write curve data to')
args <- parser$parse_args()
# Load the data.
message('Loading data')
dt.data <- read_feather(args$input) %>%
setDT
# Generate the data.
message('Generating curve data')
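# Helper: per stratum, repeat each row's values at a time just below (1e-6 before)
# the next event time, so the combined base + shifted rows trace a step function.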
make.shifted.data <- function (X) {
setorder(X, time)
data.table(
time = tail(X$time, -1) - 1e-6,
X %>%
select(-time) %>%
head(-1)
)
}
survfit.result <- survfit(Surv(duration, event_status) ~ rf, dt.data)
dt.base <- data.table(
strata = rep(names(survfit.result$strata), survfit.result$strata),
time = survfit.result$time,
survival = survfit.result$surv,
lower = survfit.result$lower,
upper = survfit.result$upper
)
extracted.strata <- str_match(dt.base$strata, '^rf=(.+)$')
dt.base[, `:=`(
rf = gsub('\\s', '', extracted.strata[, 2]),
strata = NULL
)]
dt.shifted <- dt.base %>%
group_by(rf) %>%
do(make.shifted.data(.))
dt.shifted[, `:=`(
rf = NULL
)]
dt.output <- list(dt.base, dt.shifted) %>%
rbindlist(use.names = TRUE)
# Write the output.
message('Writing output')
dt.output %>%
write_feather(args$output)
|
/scripts/outcomes/time_to_zero/rf/get_curve_data_rf.R
|
permissive
|
morrislab/plos-medicine-joint-patterns
|
R
| false | false | 1,460 |
r
|
# Generates survival curve data by RF status.
library(argparse)
library(data.table)
library(feather)
library(survival)
library(dplyr)
library(dtplyr)
library(stringr)
parser <- ArgumentParser()
parser$add_argument('--input', required = TRUE, help = 'the Feather file to read hazard data from')
parser$add_argument('--output', required = TRUE, help = 'the Feather file to write curve data to')
args <- parser$parse_args()
# Load the data.
message('Loading data')
dt.data <- read_feather(args$input) %>%
setDT
# Generate the data.
message('Generating curve data')
make.shifted.data <- function (X) {
setorder(X, time)
data.table(
time = tail(X$time, -1) - 1e-6,
X %>%
select(-time) %>%
head(-1)
)
}
survfit.result <- survfit(Surv(duration, event_status) ~ rf, dt.data)
dt.base <- data.table(
strata = rep(names(survfit.result$strata), survfit.result$strata),
time = survfit.result$time,
survival = survfit.result$surv,
lower = survfit.result$lower,
upper = survfit.result$upper
)
extracted.strata <- str_match(dt.base$strata, '^rf=(.+)$')
dt.base[, `:=`(
rf = gsub('\\s', '', extracted.strata[, 2]),
strata = NULL
)]
dt.shifted <- dt.base %>%
group_by(rf) %>%
do(make.shifted.data(.))
dt.shifted[, `:=`(
rf = NULL
)]
dt.output <- list(dt.base, dt.shifted) %>%
rbindlist(use.names = TRUE)
# Write the output.
message('Writing output')
dt.output %>%
write_feather(args$output)
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
logreg_cpp <- function(X_, y_, b_, means, sds, lambda) {
.Call('rIsing_logreg_cpp', PACKAGE = 'rIsing', X_, y_, b_, means, sds, lambda)
}
logreg_setup <- function(X, y, scale, regpath, nlambda, lambda_min_ratio) {
.Call('rIsing_logreg_setup', PACKAGE = 'rIsing', X, y, scale, regpath, nlambda, lambda_min_ratio)
}
regpath_ising <- function(Xs_, y_, nlambda, lambda_min_ratio) {
.Call('rIsing_regpath_ising', PACKAGE = 'rIsing', Xs_, y_, nlambda, lambda_min_ratio)
}
logreg_cpp2 <- function(X_, y_, lambda, nlambda, lambda_min_ratio, scale) {
.Call('rIsing_logreg_cpp2', PACKAGE = 'rIsing', X_, y_, lambda, nlambda, lambda_min_ratio, scale)
}
|
/R/RcppExports.R
|
no_license
|
cran/rIsing
|
R
| false | false | 808 |
r
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
logreg_cpp <- function(X_, y_, b_, means, sds, lambda) {
.Call('rIsing_logreg_cpp', PACKAGE = 'rIsing', X_, y_, b_, means, sds, lambda)
}
logreg_setup <- function(X, y, scale, regpath, nlambda, lambda_min_ratio) {
.Call('rIsing_logreg_setup', PACKAGE = 'rIsing', X, y, scale, regpath, nlambda, lambda_min_ratio)
}
regpath_ising <- function(Xs_, y_, nlambda, lambda_min_ratio) {
.Call('rIsing_regpath_ising', PACKAGE = 'rIsing', Xs_, y_, nlambda, lambda_min_ratio)
}
logreg_cpp2 <- function(X_, y_, lambda, nlambda, lambda_min_ratio, scale) {
.Call('rIsing_logreg_cpp2', PACKAGE = 'rIsing', X_, y_, lambda, nlambda, lambda_min_ratio, scale)
}
|
# Regularized Discriminant Analysis
# load the package
library(klaR)
data(iris)
# fit model
fit <- rda(Species~., data=iris, gamma=0.05, lambda=0.01)
# summarize the fit
print(fit)
# make predictions
predictions <- predict(fit, iris[,1:4])$class
# summarize accuracy
table(predictions, iris$Species)
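# A possible additional check (sketch, not part of the original script): overall accuracy
mean(predictions == iris$Species)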
|
/08_Machine_Learning_Mastery_with_R/03_Algorithms/01_Algorithms/5-NonLinearClassiication/regularized_discriminant_analysis.R
|
no_license
|
jggrimesdc-zz/MachineLearningExercises
|
R
| false | false | 301 |
r
|
# Regularized Discriminant Analysis
# load the package
library(klaR)
data(iris)
# fit model
fit <- rda(Species~., data=iris, gamma=0.05, lambda=0.01)
# summarize the fit
print(fit)
# make predictions
predictions <- predict(fit, iris[,1:4])$class
# summarize accuracy
table(predictions, iris$Species)
|
plot <- function(plot_file_name, data_frame, x_column, y_column, x_label, y_label){
ggsave(plot_file_name,ggplot(data_frame, aes_string(x=x_column, y=y_column)) + geom_bar(stat = "identity") + ylab(y_label) + xlab(x_label))
}
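# Possible usage sketch (the data frame and column names below are assumptions, not from this repo):
# plot("category_counts.png", my_df, "category", "count", "Category", "Count")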
|
/common.r
|
no_license
|
RainerBlessing/CakeDFIRScripts
|
R
| false | false | 228 |
r
|
plot <- function(plot_file_name, data_frame, x_column, y_column, x_label, y_label){
ggsave(plot_file_name,ggplot(data_frame, aes_string(x=x_column, y=y_column)) + geom_bar(stat = "identity") + ylab(y_label) + xlab(x_label))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tripleInteraction.R
\name{tripleEquation}
\alias{tripleEquation}
\title{Make equation with triple interaction}
\usage{
tripleEquation(X = NULL, M = NULL, Y = NULL, vars = NULL,
suffix = 0, moderator = list(), covar = NULL, range = TRUE,
mode = 0, data = NULL, rangemode = 1, probs = c(0.16, 0.5, 0.84))
}
\arguments{
\item{X}{Name of independent variable}
\item{M}{Name of mediator}
\item{Y}{Name of dependent variable}
\item{vars}{A list of variables names and sites}
\item{suffix}{A number}
\item{moderator}{A list of moderators}
\item{covar}{A list of covariates}
\item{range}{A logical}
\item{mode}{A number}
\item{data}{A data.frame}
\item{rangemode}{range mode}
\item{probs}{numeric vector of probabilities with values in [0,1]}
}
\description{
Make equation with triple interaction
}
\examples{
X="negemot";M="ideology";Y="govact";suffix=0
cat(tripleEquation(X=X,M=M,Y=Y))
vars=list(name=list(c("sex","age")),site=list(c("a","c")))
vars=list(name=list(c("W","Z"),c("V","Q")),site=list(c("a","b","c"),c("a","b","c")))
X="negemot";Y="govact";suffix=0
moderator=list(name=c("W"),site=list(c("c")))
cat(tripleEquation(X=X,Y=Y,moderator=moderator))
covar=list(name=c("C1","C2","C3"),label=c("ese","sex","tenure"),site=list(c("M","Y"),"Y","Y"))
cat(tripleEquation(X=X,M=M,Y=Y,moderator=moderator,covar=covar))
cat(tripleEquation(X=X,M=M,Y=Y,moderator=moderator,covar=covar,mode=1))
cat(tripleEquation(X=X,M=M,Y=Y,vars=vars))
cat(tripleEquation(X=X,M=M,Y=Y,vars=vars,moderator=moderator,covar=covar))
cat(tripleEquation(X=X,M=M,Y=Y,vars=vars,mode=1))
cat(tripleEquation(X=X,M=M,Y=Y,vars=vars,covar=covar,mode=1))
X="negemot";Y="govact";suffix=0
vars=list(name=list(c("sex","age")),site=list(c("c")))
cat(tripleEquation(X=X,Y=Y,vars=vars))
}
|
/man/tripleEquation.Rd
|
no_license
|
mkim0710/processR
|
R
| false | true | 1,834 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tripleInteraction.R
\name{tripleEquation}
\alias{tripleEquation}
\title{Make equation with triple interaction}
\usage{
tripleEquation(X = NULL, M = NULL, Y = NULL, vars = NULL,
suffix = 0, moderator = list(), covar = NULL, range = TRUE,
mode = 0, data = NULL, rangemode = 1, probs = c(0.16, 0.5, 0.84))
}
\arguments{
\item{X}{Name of independent variable}
\item{M}{Name of mediator}
\item{Y}{Name of dependent variable}
\item{vars}{A list of variables names and sites}
\item{suffix}{A number}
\item{moderator}{A list of moderators}
\item{covar}{A list of covariates}
\item{range}{A logical}
\item{mode}{A number}
\item{data}{A data.frame}
\item{rangemode}{range mode}
\item{probs}{numeric vector of probabilities with values in [0,1]}
}
\description{
Make equation with triple interaction
}
\examples{
X="negemot";M="ideology";Y="govact";suffix=0
cat(tripleEquation(X=X,M=M,Y=Y))
vars=list(name=list(c("sex","age")),site=list(c("a","c")))
vars=list(name=list(c("W","Z"),c("V","Q")),site=list(c("a","b","c"),c("a","b","c")))
X="negemot";Y="govact";suffix=0
moderator=list(name=c("W"),site=list(c("c")))
cat(tripleEquation(X=X,Y=Y,moderator=moderator))
covar=list(name=c("C1","C2","C3"),label=c("ese","sex","tenure"),site=list(c("M","Y"),"Y","Y"))
cat(tripleEquation(X=X,M=M,Y=Y,moderator=moderator,covar=covar))
cat(tripleEquation(X=X,M=M,Y=Y,moderator=moderator,covar=covar,mode=1))
cat(tripleEquation(X=X,M=M,Y=Y,vars=vars))
cat(tripleEquation(X=X,M=M,Y=Y,vars=vars,moderator=moderator,covar=covar))
cat(tripleEquation(X=X,M=M,Y=Y,vars=vars,mode=1))
cat(tripleEquation(X=X,M=M,Y=Y,vars=vars,covar=covar,mode=1))
X="negemot";Y="govact";suffix=0
vars=list(name=list(c("sex","age")),site=list(c("c")))
cat(tripleEquation(X=X,Y=Y,vars=vars))
}
|
# Test file for fast_iasva.R function
library(iasva)
library(SummarizedExperiment)
context(desc = "fast_iasva")
# load test data
counts_file <- system.file("extdata", "iasva_counts_test.Rds", package = "iasva")
counts <- readRDS(counts_file)
anns_file <- system.file("extdata", "iasva_anns_test.Rds", package = "iasva")
anns <- readRDS(anns_file)
Geo_Lib_Size <- colSums(log(counts + 1))
Patient_ID <- anns$Patient_ID
mod <- model.matrix(~Patient_ID + Geo_Lib_Size)
# create summarized experiment object
summ_exp <- SummarizedExperiment(assays = counts)
# test that input is summarized experiment
test_that("correct input format", {
expect_equal(object = class(summ_exp)[1], expected = "SummarizedExperiment")
expect_gt(object = nrow(assay(summ_exp)), expected = 1)
expect_gt(object = ncol(assay(summ_exp)), expected = 1)
})
iasva.res <- fast_iasva(summ_exp, mod[, -1], num.sv = 5)
# test that output is list
test_that("correct output results", {
expect_type(object = iasva.res, type = "list")
expect_equal(object = length(iasva.res), expected = 3)
})
|
/tests/testthat/test_fast_iasva.R
|
no_license
|
UcarLab/iasva
|
R
| false | false | 1,066 |
r
|
# Test file for fast_iasva.R function
library(iasva)
library(SummarizedExperiment)
context(desc = "fast_iasva")
# load test data
counts_file <- system.file("extdata", "iasva_counts_test.Rds", package = "iasva")
counts <- readRDS(counts_file)
anns_file <- system.file("extdata", "iasva_anns_test.Rds", package = "iasva")
anns <- readRDS(anns_file)
Geo_Lib_Size <- colSums(log(counts + 1))
Patient_ID <- anns$Patient_ID
mod <- model.matrix(~Patient_ID + Geo_Lib_Size)
# create summarized experiment object
summ_exp <- SummarizedExperiment(assays = counts)
# test that input is summarized experiment
test_that("correct input format", {
expect_equal(object = class(summ_exp)[1], expected = "SummarizedExperiment")
expect_gt(object = nrow(assay(summ_exp)), expected = 1)
expect_gt(object = ncol(assay(summ_exp)), expected = 1)
})
iasva.res <- fast_iasva(summ_exp, mod[, -1], num.sv = 5)
# test that output is list
test_that("correct output results", {
expect_type(object = iasva.res, type = "list")
expect_equal(object = length(iasva.res), expected = 3)
})
|
# Exercise 2: More ggplot2 Grammar
# Install and load `ggplot2`
# install.packages("ggplot2") # if needed
library("ggplot2")
# For this exercise you will again be working with the `diamonds` data set.
# Use `?diamonds` to review details about this data set
?diamonds
## Statistical Transformations
# Draw a bar chart of the diamonds data, organized by cut
# The height of each bar is based on the "count" (number) of diamonds with that cut
ggplot(data = diamonds) +
geom_bar(mapping = aes(x = cut))
# Use the `stat_count` to apply the statistical transformation "count" to the diamonds
# by cut. You do not need a separate geometry layer!
ggplot(data = diamonds) +
stat_count(mapping = aes(x = cut))
# Use the `stat_summary` function to draw a chart with a summary layer.
# Map the x-position to diamond `cut`, and the y-position to diamond `depth`
# Bonus: use `min` as the function ymin, `max` as the function ymax, and `median` as the function y
ggplot(data = diamonds) +
stat_summary(mapping = aes(x = cut, y = depth),
fun.ymin = min, fun.ymax = max, fun.y = median)
## Position Adjustments
# Draw a bar chart of diamond data organized by cut, with each bar filled by clarity.
# You should see a _stacked_ bar chart.
ggplot(data = diamonds) +
geom_bar(mapping = aes(x = cut, fill = clarity))
# Draw the same chart again, but with each element positioned to "fill" the y axis
ggplot(data = diamonds) +
geom_bar(mapping = aes(x = cut, fill = clarity), position = "fill")
# Draw the same chart again, but with each element positioned to "dodge" each other
ggplot(data = diamonds) +
geom_bar(mapping = aes(x = cut, fill = clarity), position = "dodge")
# Draw a plot with point geometry with the x-position mapped to `cut` and the y-position mapped to `clarity`
# This creates a "grid" grouping the points
ggplot(data = diamonds) +
geom_point(mapping = aes(x = cut, y = clarity))
# Use the "jitter" position adjustment to keep the points from all overlapping!
# (This works a little better with a sample of diamond data, such as from the previous exercise).
ggplot(data = diamonds) +
geom_point(mapping = aes(x = cut, y = clarity), position = "jitter")
## Scales
# Draw a "boxplot" (with `geom_boxplot()`) for the diamond's price (y) by color (x)
# This has a lot of outliers, making it harder to read. To fix this, draw the same plot but
# with a _logarithmic_ scale for the y axis.
# For another version, draw the same plot but with `violin` geometry instead of `boxplot` geometry!
# How does the logarithmic scale change the data presentation?
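# One possible solution (a sketch, not the official answer):
ggplot(data = diamonds) +
  geom_boxplot(mapping = aes(x = color, y = price))
# Same plot with a logarithmic y axis
ggplot(data = diamonds) +
  geom_boxplot(mapping = aes(x = color, y = price)) +
  scale_y_log10()
# Violin version (the log scale spreads out the skewed prices, making distributions comparable)
ggplot(data = diamonds) +
  geom_violin(mapping = aes(x = color, y = price)) +
  scale_y_log10()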
# Another interesting plot: draw a plot of the diamonds price (y) by carat (x), using a heatmap of 2d bins
# (geom_bin2d)
# What happens when you make the x and y channels scale logarithmically?
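# One possible solution (a sketch):
ggplot(data = diamonds) +
  geom_bin2d(mapping = aes(x = carat, y = price))
# With logarithmic x and y scales the relationship looks much more linear
ggplot(data = diamonds) +
  geom_bin2d(mapping = aes(x = carat, y = price)) +
  scale_x_log10() +
  scale_y_log10()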
# Draw a scatter plot for the diamonds price (y) by carat (x). Color each point by the clarity
# (Remember, this will take a while. Use a sample of the diamonds for faster results)
# Change the color of the previous plot using a ColorBrewer scale of your choice. What looks nice?
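# One possible solution (a sketch; `diamonds_sample` is an assumed helper, not defined by the exercise):
diamonds_sample <- diamonds[sample(nrow(diamonds), 3000), ]
ggplot(data = diamonds_sample) +
  geom_point(mapping = aes(x = carat, y = price, color = clarity))
# Same plot with a ColorBrewer palette
ggplot(data = diamonds_sample) +
  geom_point(mapping = aes(x = carat, y = price, color = clarity)) +
  scale_color_brewer(palette = "Spectral")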
## Coordinate Systems
# Draw a bar chart with x-position and fill color BOTH mapped to cut
# For best results, SET the `width` of the geometry to be 1 (fill plot, no space between)
# You can save this to a variable for easier modifications
# Draw the same chart, but with the coordinate system flipped
# Draw the same chart, but in a polar coordinate system. Now you have a Coxcomb chart!
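# One possible solution (a sketch):
cut_bar <- ggplot(data = diamonds) +
  geom_bar(mapping = aes(x = cut, fill = cut), width = 1)
cut_bar
cut_bar + coord_flip()
cut_bar + coord_polar() # Coxcomb chart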
## Facets
# Take the scatter plot of price by carat data (colored by clarity) and add _facets_ based on
# the diamond's `color`
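# One possible solution (a sketch, reusing the assumed `diamonds_sample` from above):
ggplot(data = diamonds_sample) +
  geom_point(mapping = aes(x = carat, y = price, color = clarity)) +
  facet_wrap(~color)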
## Saving Plots
# Use the `ggsave()` function to save one of your plots (the most recent one generated) to disk.
# Name the output file "my-plot.png".
# Make sure you've set the working directory!!
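# One possible solution (a sketch): ggsave() writes the most recently generated plot by default
ggsave("my-plot.png")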
|
/exercise-2/exercise.R
|
permissive
|
magdanat/module13-ggplot2
|
R
| false | false | 3,811 |
r
|
# Exercise 2: More ggplot2 Grammar
# Install and load `ggplot2`
# install.packages("ggplot2") # if needed
library("ggplot2")
# For this exercise you will again be working with the `diamonds` data set.
# Use `?diamonds` to review details about this data set
?diamonds
## Statistical Transformations
# Draw a bar chart of the diamonds data, organized by cut
# The height of each bar is based on the "count" (number) of diamonds with that cut
ggplot(data = diamonds) +
geom_bar(mapping = aes(x = cut))
# Use the `stat_count` to apply the statistical transformation "count" to the diamonds
# by cut. You do not need a separate geometry layer!
ggplot(data = diamonds) +
stat_count(mapping = aes(x = cut))
# Use the `stat_summary` function to draw a chart with a summary layer.
# Map the x-position to diamond `cut`, and the y-position to diamond `depth`
# Bonus: use `min` as the function ymin, `max` as the function ymax, and `median` as the function y
ggplot(data = diamonds) +
stat_summary(mapping = aes(x = cut, y = depth),
fun.ymin = min, fun.ymax = max, fun.y = median)
## Position Adjustments
# Draw a bar chart of diamond data organized by cut, with each bar filled by clarity.
# You should see a _stacked_ bar chart.
ggplot(data = diamonds) +
geom_bar(mapping = aes(x = cut, fill = clarity))
# Draw the same chart again, but with each element positioned to "fill" the y axis
ggplot(data = diamonds) +
geom_bar(mapping = aes(x = cut, fill = clarity), position = "fill")
# Draw the same chart again, but with each element positioned to "dodge" each other
ggplot(data = diamonds) +
geom_bar(mapping = aes(x = cut, fill = clarity), position = "dodge")
# Draw a plot with point geometry with the x-position mapped to `cut` and the y-position mapped to `clarity`
# This creates a "grid" grouping the points
ggplot(data = diamonds) +
geom_point(mapping = aes(x = cut, y = clarity))
# Use the "jitter" position adjustment to keep the points from all overlapping!
# (This works a little better with a sample of diamond data, such as from the previous exercise).
ggplot(data = diamonds) +
geom_point(mapping = aes(x = cut, y = clarity), position = "jitter")
## Scales
# Draw a "boxplot" (with `geom_boxplot()`) for the diamond's price (y) by color (x)
# This has a lot of outliers, making it harder to read. To fix this, draw the same plot but
# with a _logarithmic_ scale for the y axis.
# For another version, draw the same plot but with `violin` geometry instead of `boxplot` geometry!
# How does the logarithmic scale change the data presentation?
# Another interesting plot: draw a plot of the diamonds price (y) by carat (x), using a heatmap of 2d bins
# (geom_bin2d)
# What happens when you make the x and y channels scale logarithmically?
# Draw a scatter plot for the diamonds price (y) by carat (x). Color each point by the clarity
# (Remember, this will take a while. Use a sample of the diamonds for faster results)
# Change the color of the previous plot using a ColorBrewer scale of your choice. What looks nice?
## Coordinate Systems
# Draw a bar chart with x-position and fill color BOTH mapped to cut
# For best results, SET the `width` of the geometry to be 1 (fill plot, no space between)
# You can save this to a variable for easier modifications
# Draw the same chart, but with the coordinate system flipped
# Draw the same chart, but in a polar coordinate system. Now you have a Coxcomb chart!
## Facets
# Take the scatter plot of price by carat data (colored by clarity) and add _facets_ based on
# the diamond's `color`
## Saving Plots
# Use the `ggsave()` function to save one of your plots (the most recent one generated) to disk.
# Name the output file "my-plot.png".
# Make sure you've set the working directory!!
|
<html>
<head>
<meta name="TextLength" content="SENT_NUM:6, WORD_NUM:93">
</head>
<body bgcolor="white">
<a href="#0" id="0">Li Peng issues a stern lecture to student leaders and refuses to discuss their demands.</a>
<a href="#1" id="1">May 31 _ The first of several pro-government rallies is staged by farmers and workers in Beijing suburbs.</a>
<a href="#2" id="2">May 30 _ Students unveil their ``Goddess of Democracy,'' a replica of the Statue of Liberty, on the square.</a>
<a href="#3" id="3">May 9 _ Journalists petition the government for press freedom.</a>
<a href="#4" id="4">``They look like they ,'' said one local resident.</a>
<a href="#5" id="5">Troops first entered the square from the south, when a column trotted toward the student encampment about 1 a.m. (noon EDT Saturday).</a>
</body>
</html>
|
/DUC-Dataset/Summary_m100_R/D104.M.100.html.R
|
no_license
|
Angela7126/SLNSumEval
|
R
| false | false | 813 |
r
|
<html>
<head>
<meta name="TextLength" content="SENT_NUM:6, WORD_NUM:93">
</head>
<body bgcolor="white">
<a href="#0" id="0">Li Peng issues a stern lecture to student leaders and refuses to discuss their demands.</a>
<a href="#1" id="1">May 31 _ The first of several pro-government rallies is staged by farmers and workers in Beijing suburbs.</a>
<a href="#2" id="2">May 30 _ Students unveil their ``Goddess of Democracy,'' a replica of the Statue of Liberty, on the square.</a>
<a href="#3" id="3">May 9 _ Journalists petition the government for press freedom.</a>
<a href="#4" id="4">``They look like they ,'' said one local resident.</a>
<a href="#5" id="5">Troops first entered the square from the south, when a column trotted toward the student encampment about 1 a.m. (noon EDT Saturday).</a>
</body>
</html>
|
#Multiple Linear Regression
#Linear Modeling : DV vs more than 1 IVs
#sales Qty vs price & promotion
#Omni Store
#creating data using Vector
sales= c(4141,3842,3056,3519,4226, 4630,3507,3754, 5000,5120,4011, 5015,1916,675, 3636,3224,2295, 2730,2618,4421, 4113,3746, 3532, 3825,1096, 761,2088,820,2114, 1882,2159,1602,3354,2927)
price = c(59,59,59,59,59,59,59,59,59,59,59,59, 79,79,79,79,79,79,79,79,79, 79,79,79,99,99, 99,99,99,99,99,99,99,99)
promotion= c(200,200,200,200,400,400,400,400, 600,600,600,600,200,200,200,200, 400,400,400,400,600,600,600,600, 200,200,200,200,400,400,400,400,600,600)
omni1 = data.frame(sales, price, promotion)
head(omni1)
str(omni1)
#2nd Method : CSV file
omni2 = read.csv(file.choose())
#3rd Method : gsheet
library(gsheet)
url = "https://docs.google.com/spreadsheets/d/1h7HU0X_Q4T5h5D1Q36qoK40Tplz94x_HZYHOJJC_edU/edit#gid=1595306231"
omni3 = as.data.frame(gsheet::gsheet2tbl(url))
#Make one of data frames active
omni = omni2
?lm #see help of LM
#Simple Linear Model would look like this
slr1 = lm(formula = sales ~ price, data=omni) # sales depend on price of item
slr2 = lm(formula = sales ~ promotion, data=omni) # sales depend on promotion exp
summary(slr1)
summary(slr2)
#MLR Create Multiple Linear Regression
# we want to see how Sales Qty depend on Price and Promotion Values
mlrmodel1 = lm(formula = sales ~ price + promotion, data=omni)
#how to give parameter values in different sequence, use arguments names if in different order
mlrmodel1 = lm( data=omni, formula = sales ~ price + promotion)
range(omni$sales)
summary(mlrmodel1) # summary statistics IMP STEP
#understand values : R2, AdjR2, Fstats pvalue, Coeff, ***, Residuals
coef(mlrmodel1) #coefficients b1, b2
#anova(mlrmodel1) #seeing from anova model
#Predicted Values----
fitted(mlrmodel1)
names(omni)
#create a dataframe of new sample values
(ndata1 = data.frame(price=c(60,70), promotion=c(300,400)))
predict(mlrmodel1, newdata=ndata1)
cbind(ndata1, Predict=predict(mlrmodel1, newdata=ndata1, type='response'))
#R2 and Adjs R2
names(mlrmodel1)
summary(mlrmodel1)
summary(mlrmodel1)$r.squared
#Manual Calculation of Adjs R2
(r2 = summary(mlrmodel1)$r.squared)
k = 2 # no of IVs
(n = nrow(omni)) # sample size
(adjr2 = 1 - ( (1 - r2) * ((n - 1)/ (n - k - 1))))
# Fstatistics
summary(mlrmodel1)$fstatistic[1] # from output of model
(df1 = k) ; (df2 = n-k-1)
qf(.95, df1, df2) # from table wrt df1 & df2
#Model Fstats > table(Fstat)
# Pvalue of Model
fstat = summary(mlrmodel1)$fstatistic
pf(fstat[1], fstat[2], fstat[3], lower.tail=FALSE)
# this is < 0.05 : Significant
#
#Plots of the Model
plot(mlrmodel1,1) # no pattern, equal variance
plot(mlrmodel1,2) # Residues are normally distributed
plot(mlrmodel1,3)
plot(mlrmodel1,4) # tells outliers which affect model
# Confidence Intervals
#Fitted values : Predicting on IVs using model
fitted(mlrmodel1)
residuals(mlrmodel1)
mlrmodel1$residuals
cbind(omni$sales, fitted(mlrmodel1), omni$sales - fitted(mlrmodel1), residuals(mlrmodel1))
#sqrt(sum((residuals(mlrmodel1)^2)))
names(mlrmodel1)
summary(mlrmodel1)
#Diagnostics Test for Checking Assumptions
#Should be Linear relationship between Residuals Vs Ypi, X1i, X2i
cbind(fitted(mlrmodel1), residuals(mlrmodel1))
plot(cbind(fitted(mlrmodel1), residuals(mlrmodel1)))
#not quadratic
plot(cbind(omni$price, residuals(mlrmodel1)))
plot(cbind(omni$promotion, residuals(mlrmodel1)))
#May indicate quadratic term of IVs
#Train and Test Data
# RMSE
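# A possible continuation (sketch; the 70/30 split proportion and object names are assumptions):
set.seed(123)
train_rows = sample(seq_len(nrow(omni)), size = floor(0.7 * nrow(omni)))
train_set = omni[train_rows, ]
test_set = omni[-train_rows, ]
mlr_train = lm(sales ~ price + promotion, data = train_set)
pred_test = predict(mlr_train, newdata = test_set)
(rmse = sqrt(mean((test_set$sales - pred_test)^2))) # RMSE on held-out data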
|
/bb.R
|
no_license
|
amit2625/FA_5_2018
|
R
| false | false | 3,504 |
r
|
#Multiple Linear Regression
#Linear Modeling : DV vs more than 1 IVs
#sales Qty vs price & promotion
#Omni Store
#creating data using Vector
sales= c(4141,3842,3056,3519,4226, 4630,3507,3754, 5000,5120,4011, 5015,1916,675, 3636,3224,2295, 2730,2618,4421, 4113,3746, 3532, 3825,1096, 761,2088,820,2114, 1882,2159,1602,3354,2927)
price = c(59,59,59,59,59,59,59,59,59,59,59,59, 79,79,79,79,79,79,79,79,79, 79,79,79,99,99, 99,99,99,99,99,99,99,99)
promotion= c(200,200,200,200,400,400,400,400, 600,600,600,600,200,200,200,200, 400,400,400,400,600,600,600,600, 200,200,200,200,400,400,400,400,600,600)
omni1 = data.frame(sales, price, promotion)
head(omni1)
str(omni1)
#2nd Method : CSV file
omni2 = read.csv(file.choose())
#3rd Method : gsheet
library(gsheet)
url = "https://docs.google.com/spreadsheets/d/1h7HU0X_Q4T5h5D1Q36qoK40Tplz94x_HZYHOJJC_edU/edit#gid=1595306231"
omni3 = as.data.frame(gsheet::gsheet2tbl(url))
#Make one of data frames active
omni = omni2
?lm #see help of LM
#Simple Linear Model would look like this
slr1 = lm(formula = sales ~ price, data=omni) # sales depend on price of item
slr2 = lm(formula = sales ~ promotion, data=omni) # sales depend on promotion exp
summary(slr1)
summary(slr2)
#MLR Create Multiple Linear Regression
# we want to see how Sales Qty depend on Price and Promotion Values
mlrmodel1 = lm(formula = sales ~ price + promotion, data=omni)
#how to give parameter values in different sequence, use arguments names if in different order
mlrmodel1 = lm( data=omni, formula = sales ~ price + promotion)
range(omni$sales)
summary(mlrmodel1) # summary statistics IMP STEP
#understand values : R2, AdjR2, Fstats pvalue, Coeff, ***, Residuals
coef(mlrmodel1) #coefficients b1, b2
#anova(mlrmodel1) #seeing from anova model
#Predicted Values----
fitted(mlrmodel1)
names(omni)
#create a dataframe of new sample values
(ndata1 = data.frame(price=c(60,70), promotion=c(300,400)))
predict(mlrmodel1, newdata=ndata1)
cbind(ndata1, Predict=predict(mlrmodel1, newdata=ndata1, type='response'))
#R2 and Adjs R2
names(mlrmodel1)
summary(mlrmodel1)
summary(mlrmodel1)$r.squared
#Manual Calculation of Adjs R2
(r2 = summary(mlrmodel1)$r.squared)
k = 2 # no of IVs
(n = nrow(omni)) # sample size
(adjr2 = 1 - ( (1 - r2) * ((n - 1)/ (n - k - 1))))
# Fstatistics
summary(mlrmodel1)$fstatistic[1] # from output of model
(df1 = k) ; (df2 = n-k-1)
qf(.95, df1, df2) # from table wrt df1 & df2
#Model Fstats > table(Fstat)
# Pvalue of Model
fstat = summary(mlrmodel1)$fstatistic
pf(fstat[1], fstat[2], fstat[3], lower.tail=FALSE)
# this is < 0.05 : Significant
#
#Plots of the Model
plot(mlrmodel1,1) # no pattern, equal variance
plot(mlrmodel1,2) # Residues are normally distributed
plot(mlrmodel1,3)
plot(mlrmodel1,4) # tells outliers which affect model
# Confidence Intervals
#Fitted values : Predicting on IVs using model
fitted(mlrmodel1)
residuals(mlrmodel1)
mlrmodel1$residuals
cbind(omni$sales, fitted(mlrmodel1), omni$sales - fitted(mlrmodel1), residuals(mlrmodel1))
#sqrt(sum((residuals(mlrmodel1)^2)))
names(mlrmodel1)
summary(mlrmodel1)
#Diagnostics Test for Checking Assumptions
#Should be Linear relationship between Residuals Vs Ypi, X1i, X2i
cbind(fitted(mlrmodel1), residuals(mlrmodel1))
plot(cbind(fitted(mlrmodel1), residuals(mlrmodel1)))
#not quadratic
plot(cbind(omni$price, residuals(mlrmodel1)))
plot(cbind(omni$promotion, residuals(mlrmodel1)))
#May indicate quadratic term of IVs
#Train and Test Data
# RMSE
|
expand_args <- function(...){
ell <- list(...)
max_length <- max(vapply(ell, length, 0))
lapply(ell, rep, length.out = max_length)
}
ldgamanum <- function(x, loc, scale) {
(loc-1)*log(x) - x/scale
}
# mean <- function(x) {
# sum(x)/length(x)
# }
har_mean <- function(x) {
if(sum(x == 0) > 0) stop("zero value in harmonic mean")
1/mean(1/x)
}
sum_sq <- function(x) sum(x^2)
prncp_reg <- function(x) x %% (2*pi)
#makes an angle in [0, 2*pi]
prncp_reg.minuspi.pi <- function(x) {
y <- (x + pi) %% (2*pi)
y_neg <- which(y < 0)
y[y_neg] <- 2*pi + y[y_neg]
y - pi
} #makes a single angle in [-pi, pi]
atan3 <- function(x) prncp_reg(atan(x[2]/x[1]))
sph2cart <- function(x)
c(cos(x[1])*sin(x[2]), sin(x[1])*sin(x[2]), cos(x[2]))
#calculates unit vectors from pair of angles
listLen <- function(l)
vapply(1:length(l), function(i) length(l[[i]]), 0)
rm_NA_rad <- function(data, rad = TRUE) {
if(length(dim(data)) == 2) {
phi.no.na <- data[,1]
psi.no.na <- data[,2]
na.phi.id <- NULL
na.psi.id <- NULL
is.na.phi <- is.na(data[,1])
is.na.psi <- is.na(data[,2])
if(sum(is.na.phi) > 0)
na.phi.id <- which(is.na.phi)
if(sum(is.na.psi) > 0)
na.psi.id <- which(is.na.psi)
na.id <- union(na.phi.id, na.psi.id)
if(length(na.id) > 0){
phi.no.na <- data[,1][-na.id]
psi.no.na <- data[,2][-na.id]
}
if(rad) res <- prncp_reg(cbind(phi.no.na, psi.no.na))
else res <- prncp_reg(cbind(phi.no.na, psi.no.na) * pi / 180)
colnames(res) <- colnames(data)
} else {
data.no.na <- data
na.id <- NULL
is.na.data <- is.na(data)
if(sum(is.na.data) > 0){
na.id <- which(is.na.data)
data.no.na <- data[-na.id]
}
if(rad) res <- prncp_reg(data.no.na)
else res <- prncp_reg(data.no.na * pi / 180)
}
res
} #removes NA and converts into radians
# rdirichlet <- function (n, alpha) # random generation from dirichlet
# {
# len <- length(alpha)
# x <- matrix(rgamma(len * n, alpha), ncol = len, byrow = TRUE)
# tot <- x %*% rep(1, len)
# x/as.vector(tot)
# }
# rnorm2 <- function (n = 1, mu, Sigma) # random generation from biv normal
# {
# p <- 2L
# eS <- eigen(Sigma, symmetric = TRUE)
# ev <- eS$values
# X <- matrix(rnorm(p * n), n)
# X <- drop(mu) + eS$vectors %*% diag(sqrt(pmax(ev, 0)), p) %*% t(X)
# if (n == 1) drop(X)
# else t(X)
# }
list_by_row <- function(mat, row_index) # create a list with elements being rows of a matrix
{
mat.list <- lapply(1:nrow(mat), function(j) mat[j, ])
names(mat.list) <- rownames(mat)
mat.list
}
addtolist <- function(list_in, ...) # add element to a list
{
ell <- list(...)
c(list_in, ell)
}
press_enter <- function() # waits for the user to press [enter]
{
cat("Press [enter] to continue")
line <- readline()
}
kappas2sigmas_wnorm2 <- function(kappa1, kappa2, kappa3) {
den <- kappa1*kappa2 - kappa3^2
sigma1 <- kappa2/den
sigma2 <- kappa1/den
rho <- -kappa3/sqrt(kappa1*kappa2)
c(sigma11 = sigma1, sigma22 = sigma2, rho = rho)
}
sigmas2kappas_wnorm2 <- function(sigma11, sigma22, rho) {
den <- sigma11*sigma22*(1-rho^2)
kappa1 <- sigma22/den
kappa2 <- sigma11/den
kappa3 <- -rho/((1-rho^2)*sqrt(sigma11*sigma22))
c(kappa1 = kappa1, kappa2 = kappa2, kappa3 = kappa3)
}
which.max_entry1 <- function(x) {
which.max(x)[1]
}
signif_or_round <- function(x, ...) {
for(j in length(x)) {
if (abs(x[j]) > 1) return(round(x[j], ...))
else return(signif(x[j], ...))
}
}
# print est (ci_lower, ci_upper) for each element
est_ci <- function(est, lower, upper, digits = 2)
{
out_mat <- est
for(j in 1:length(est)) {
out_mat[j] <- paste0(format(signif_or_round(est[j], digits), nsmall = digits),
" (",
format(signif_or_round(lower[j], digits), nsmall = digits),
", ",
format(signif_or_round(upper[j], digits), nsmall = digits),
")")
}
as.data.frame(out_mat)
}
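# Possible usage sketch (illustrative values, not from the package):
# est_ci(est = c(0.82, 1.65), lower = c(0.51, 1.20), upper = c(1.13, 2.04), digits = 2)
# # -> one-column data.frame with entries like "0.82 (0.51, 1.13)"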
|
/issuestests/BAMBI/R/basic_fns.R
|
no_license
|
akhikolla/RcppDeepStateTest
|
R
| false | false | 4,223 |
r
|
expand_args <- function(...){
ell <- list(...)
max_length <- max(vapply(ell, length, 0))
lapply(ell, rep, length.out = max_length)
}
ldgamanum <- function(x, loc, scale) {
(loc-1)*log(x) - x/scale
}
# mean <- function(x) {
# sum(x)/length(x)
# }
har_mean <- function(x) {
if(sum(x == 0) > 0) stop("zero value in harmonic mean")
1/mean(1/x)
}
sum_sq <- function(x) sum(x^2)
prncp_reg <- function(x) x %% (2*pi)
#makes an angle in [0, 2*pi]
prncp_reg.minuspi.pi <- function(x) {
y <- (x + pi) %% (2*pi)
y_neg <- which(y < 0)
y[y_neg] <- 2*pi + y[y_neg]
y - pi
} #makes a single angle in [-pi, pi]
atan3 <- function(x) prncp_reg(atan(x[2]/x[1]))
sph2cart <- function(x)
c(cos(x[1])*sin(x[2]), sin(x[1])*sin(x[2]), cos(x[2]))
#calculates unit vectors from pair of angles
listLen <- function(l)
vapply(1:length(l), function(i) length(l[[i]]), 0)
rm_NA_rad <- function(data, rad = TRUE) {
if(length(dim(data)) == 2) {
phi.no.na <- data[,1]
psi.no.na <- data[,2]
na.phi.id <- NULL
na.psi.id <- NULL
is.na.phi <- is.na(data[,1])
is.na.psi <- is.na(data[,2])
if(sum(is.na.phi) > 0)
na.phi.id <- which(is.na.phi)
if(sum(is.na.psi) > 0)
na.psi.id <- which(is.na.psi)
na.id <- union(na.phi.id, na.psi.id)
if(length(na.id) > 0){
phi.no.na <- data[,1][-na.id]
psi.no.na <- data[,2][-na.id]
}
if(rad) res <- prncp_reg(cbind(phi.no.na, psi.no.na))
else res <- prncp_reg(cbind(phi.no.na, psi.no.na) * pi / 180)
colnames(res) <- colnames(data)
} else {
data.no.na <- data
na.id <- NULL
is.na.data <- is.na(data)
if(sum(is.na.data) > 0){
na.id <- which(is.na.data)
data.no.na <- data[-na.id]
}
if(rad) res <- prncp_reg(data.no.na)
else res <- prncp_reg(data.no.na * pi / 180)
}
res
} #removes NA and converts into radians
# rdirichlet <- function (n, alpha) # random generation from dirichlet
# {
# len <- length(alpha)
# x <- matrix(rgamma(len * n, alpha), ncol = len, byrow = TRUE)
# tot <- x %*% rep(1, len)
# x/as.vector(tot)
# }
# rnorm2 <- function (n = 1, mu, Sigma) # random generation from biv normal
# {
# p <- 2L
# eS <- eigen(Sigma, symmetric = TRUE)
# ev <- eS$values
# X <- matrix(rnorm(p * n), n)
# X <- drop(mu) + eS$vectors %*% diag(sqrt(pmax(ev, 0)), p) %*% t(X)
# if (n == 1) drop(X)
# else t(X)
# }
list_by_row <- function(mat, row_index) # create a list with elements being rows of a matrix
{
mat.list <- lapply(1:nrow(mat), function(j) mat[j, ])
names(mat.list) <- rownames(mat)
mat.list
}
addtolist <- function(list_in, ...) # add element to a list
{
ell <- list(...)
c(list_in, ell)
}
press_enter <- function() # waits for the user to press [enter]
{
cat("Press [enter] to continue")
line <- readline()
}
kappas2sigmas_wnorm2 <- function(kappa1, kappa2, kappa3) {
den <- kappa1*kappa2 - kappa3^2
sigma1 <- kappa2/den
sigma2 <- kappa1/den
rho <- -kappa3/sqrt(kappa1*kappa2)
c(sigma11 = sigma1, sigma22 = sigma2, rho = rho)
}
sigmas2kappas_wnorm2 <- function(sigma11, sigma22, rho) {
den <- sigma11*sigma22*(1-rho^2)
kappa1 <- sigma22/den
kappa2 <- sigma11/den
kappa3 <- -rho/((1-rho^2)*sqrt(sigma11*sigma22))
c(kappa1 = kappa1, kappa2 = kappa2, kappa3 = kappa3)
}
which.max_entry1 <- function(x) {
which.max(x)[1]
}
signif_or_round <- function(x, ...) {
for(j in length(x)) {
if (abs(x[j]) > 1) return(round(x[j], ...))
else return(signif(x[j], ...))
}
}
# print est (ci_lower, ci_upper) for each element
est_ci <- function(est, lower, upper, digits = 2)
{
out_mat <- est
for(j in 1:length(est)) {
out_mat[j] <- paste0(format(signif_or_round(est[j], digits), nsmall = digits),
" (",
format(signif_or_round(lower[j], digits), nsmall = digits),
", ",
format(signif_or_round(upper[j], digits), nsmall = digits),
")")
}
as.data.frame(out_mat)
}
|
library(glmnet)
mydata = read.table("./TrainingSet/Correlation/cervix.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.35,family="gaussian",standardize=FALSE)
sink('./Model/EN/Correlation/cervix/cervix_048.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
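# A possible follow-up (sketch): inspect coefficients at the CV-selected lambda
# coef(glm, s = "lambda.min")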
|
/Model/EN/Correlation/cervix/cervix_048.R
|
no_license
|
leon1003/QSMART
|
R
| false | false | 361 |
r
|
library(glmnet)
mydata = read.table("./TrainingSet/Correlation/cervix.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.35,family="gaussian",standardize=FALSE)
sink('./Model/EN/Correlation/cervix/cervix_048.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# Author: Begum Topcuoglu
# Date: 2019-01-14
######################################################################
# Description:
# This function defines:
# 1. Tuning budget as a grid for the classification methods chosen
# 2. Cross-validation method (how many repeats and folds)
# 3. Caret name for the classification method chosen
######################################################################
######################################################################
# Dependencies and Outputs:
# Filenames to put to function:
# 1. "L2_Logistic_Regression"
# 2. "L1_Linear_SVM"
# 3. "L2_Linear_SVM"
# 4. "RBF_SVM"
# 5. "Decision_Tree"
# 6. "Random_Forest"
# 7. "XGBoost"
# Usage:
# Call as source when using the function. The function is:
# tuning_grid()
# Output:
# List of:
# 1. Tuning budget as a grid for the classification methods chosen
# 2. Cross-validation method
# 3. Caret name for the classification method chosen
######################################################################
######################################################################
#------------------------- DEFINE FUNCTION -------------------#
######################################################################
tuning_grid <- function(train_data, model, outcome, hyperparameters){
# NOTE: Hyperparameters should be a csv containing a dataframe
# where the first column "param" is the hyperparameter name
# and the second column "val" are the values to be tested
# and third column "model" is the model being used
hyperparameters <- read.csv(hyperparameters, stringsAsFactors = F)
hyperparameters <- hyperparameters[hyperparameters$model == model, ]
hyperparameters <- split(hyperparameters$val, hyperparameters$param)
# set outcome as first column if null
#if(is.null(outcome)){
# outcome <- colnames(train_data)[1]
#}
# -------------------------CV method definition--------------------------------------->
# ADDED cv index to make sure
# 1. the internal 5-folds are stratified for diagnosis classes
# 2. Resample the dataset 100 times for 5-fold cv to get robust hp.
# IN trainControl function:
# 1. Train the model with final hp decision to use model to predict
# 2. Return 2class summary and save predictions to calculate cvROC
# 3. Save the predictions and class probabilities/decision values.
folds <- 5
cvIndex <- createMultiFolds(factor(train_data[,outcome]), folds, times=100)
cv <- trainControl(method="repeatedcv",
number=folds,
index = cvIndex,
returnResamp="final",
classProbs=TRUE,
summaryFunction=twoClassSummary,
indexFinal=NULL,
savePredictions = TRUE)
# # ----------------------------------------------------------------------->
# -------------------Classification Method Definition---------------------->
# ---------------------------------1--------------------------------------->
# For linear models we are using LiblineaR package
#
# LiblineaR can produce 10 types of (generalized) linear models:
# The regularization can be
# 1. L1
# 2. L2
# The losses can be:
# 1. Regular L2-loss for SVM (hinge loss),
# 2. L1-loss for SVM
# 3. Logistic loss for logistic regression.
#
# Here we will use L1 and L2 regularization and hinge loss (L2-loss) for linear SVMs
  # We will use logistic loss for L2-regularized logistic regression
  # The liblinear 'type' choices are below:
#
# for classification
# • 0 – L2-regularized logistic regression (primal)---> we use this for l2-logistic
# • 1 – L2-regularized L2-loss support vector classification (dual)
# • 2 – L2-regularized L2-loss support vector classification (primal) ---> we use this for l2-linear SVM
# • 3 – L2-regularized L1-loss support vector classification (dual)
# • 4 – support vector classification by Crammer and Singer
# • 5 – L1-regularized L2-loss support vector classification---> we use this for l1-linear SVM
# • 6 – L1-regularized logistic regression
# • 7 – L2-regularized logistic regression (dual)
#
#for regression
# • 11 – L2-regularized L2-loss support vector regression (primal)
# • 12 – L2-regularized L2-loss support vector regression (dual)
# • 13 – L2-regularized L1-loss support vector regression (dual)
# ------------------------------------------------------------------------>
# ---------------------------------2--------------------------------------->
# We use ROC metric for all the models
# To do that I had to make changes to the caret package functions.
# The files 'data/caret_models/svmLinear3.R and svmLinear5.R are my functions.
# I added 1 line to get Decision Values for linear SVMs:
#
# prob = function(modelFit, newdata, submodels = NULL){
# predict(modelFit, newdata, decisionValues = TRUE)$decisionValues
# },
#
# This line gives decision values instead of probabilities and computes ROC in:
# 1. train function with the cross-validataion
# 2. final trained model
# using decision values and saves them in the variable "prob"
# This allows us to pass the cv function for all models:
# cv <- trainControl(method="repeatedcv",
# repeats = 100,
# number=folds,
# index = cvIndex,
# returnResamp="final",
# classProbs=TRUE,
# summaryFunction=twoClassSummary,
# indexFinal=NULL,
# savePredictions = TRUE)
#
  # The parameters we pass for L1 and L2 SVM:
# classProbs=TRUE,
# summaryFunction=twoClassSummary,
# are actually computing ROC from decision values not probabilities
# ------------------------------------------------------------------------>
# Grid and caret method defined for each classification models
if(model=="L2_Logistic_Regression") {
grid <- expand.grid(cost = hyperparameters$cost,
loss = "L2_primal",
# This chooses type=0 for liblinear R package
# which is logistic loss, primal solve for L2 regularized logistic regression
epsilon = 0.01) #default epsilon recommended from liblinear
method <- "regLogistic"
}
else if (model=="L1_Linear_SVM"){ #
grid <- expand.grid(cost = hyperparameters$cost,
Loss = "L2")
method <- "svmLinear4" # I wrote this function in caret
}
else if (model=="L2_Linear_SVM"){
grid <- expand.grid(cost = hyperparameters$cost,
Loss = "L2")
method <- "svmLinear3" # I changed this function in caret
}
else if (model=="RBF_SVM"){
grid <- expand.grid(sigma = hyperparameters$sigma,
C = hyperparameters$C)
method <-"svmRadial"
}
else if (model=="Decision_Tree"){
grid <- expand.grid(maxdepth = hyperparameters$maxdepth) # maybe change these default parameters?
method <-"rpart2"
}
else if (model=="Random_Forest"){
if(is.null(hyperparameters$mtry)){
# get number of features
n_features <- ncol(train_data) - 1
if(n_features > 20000) n_features <- 20000
# if few features
if(n_features < 19){ mtry <- 1:6
} else {
# if many features
mtry <- floor(seq(1, n_features, length=6))
}
# only keep ones with less features than you have
hyperparameters$mtry <- mtry[mtry <= n_features]
}
grid <- expand.grid(mtry = hyperparameters$mtry)
method = "rf"
}
else if (model=="XGBoost"){
grid <- expand.grid(nrounds=hyperparameters$nrounds,
gamma=hyperparameters$gamma,
eta=hyperparameters$eta,
max_depth=hyperparameters$max_depth,
colsample_bytree=hyperparameters$colsample_bytree,
min_child_weight=hyperparameters$min_child_weight,
subsample=hyperparameters$subsample)
method <- "xgbTree"
}
else {
print("Model not available")
}
# Return:
# 1. the hyper-parameter grid to tune
# 2. the caret function to train with
# 3, cv method
params <- list(grid, method, cv)
return(params)
}
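# Possible usage sketch (the outcome column "dx" and the csv path are assumptions):
# params <- tuning_grid(train_data, model = "Random_Forest", outcome = "dx",
#                       hyperparameters = "data/default_hyperparameters.csv")
# trained_model <- caret::train(dx ~ ., data = train_data,
#                               method = params[[2]], trControl = params[[3]],
#                               metric = "ROC", tuneGrid = params[[1]])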
|
/code/R/tuning_grid.R
|
permissive
|
lucas-bishop/ML_pipeline_microbiome
|
R
| false | false | 8,467 |
r
|
# Author: Begum Topcuoglu
# Date: 2019-01-14
######################################################################
# Description:
# This function defines:
# 1. Tuning budget as a grid for the classification methods chosen
# 2. Cross-validation method (how many repeats and folds)
# 3. Caret name for the classification method chosen
######################################################################
######################################################################
# Dependencies and Outputs:
# Filenames to put to function:
# 1. "L2_Logistic_Regression"
# 2. "L1_Linear_SVM"
# 3. "L2_Linear_SVM"
# 4. "RBF_SVM"
# 5. "Decision_Tree"
# 6. "Random_Forest"
# 7. "XGBoost"
# Usage:
# Call as source when using the function. The function is:
# tuning_grid()
# Output:
# List of:
# 1. Tuning budget as a grid for the classification methods chosen
# 2. Cross-validation method
# 3. Caret name for the classification method chosen
######################################################################
######################################################################
#------------------------- DEFINE FUNCTION -------------------#
######################################################################
tuning_grid <- function(train_data, model, outcome, hyperparameters){
# NOTE: Hyperparameters should be a csv containing a dataframe
# where the first column "param" is the hyperparameter name
# and the second column "val" are the values to be tested
# and third column "model" is the model being used
hyperparameters <- read.csv(hyperparameters, stringsAsFactors = F)
hyperparameters <- hyperparameters[hyperparameters$model == model, ]
hyperparameters <- split(hyperparameters$val, hyperparameters$param)
# set outcome as first column if null
#if(is.null(outcome)){
# outcome <- colnames(train_data)[1]
#}
# -------------------------CV method definition--------------------------------------->
# ADDED cv index to make sure
# 1. the internal 5-folds are stratified for diagnosis classes
# 2. Resample the dataset 100 times for 5-fold cv to get robust hp.
# IN trainControl function:
# 1. Train the model with final hp decision to use model to predict
# 2. Return 2class summary and save predictions to calculate cvROC
# 3. Save the predictions and class probabilities/decision values.
folds <- 5
cvIndex <- createMultiFolds(factor(train_data[,outcome]), folds, times=100)
cv <- trainControl(method="repeatedcv",
number=folds,
index = cvIndex,
returnResamp="final",
classProbs=TRUE,
summaryFunction=twoClassSummary,
indexFinal=NULL,
savePredictions = TRUE)
# # ----------------------------------------------------------------------->
# -------------------Classification Method Definition---------------------->
# ---------------------------------1--------------------------------------->
# For linear models we are using LiblineaR package
#
# LiblineaR can produce 10 types of (generalized) linear models:
# The regularization can be
# 1. L1
# 2. L2
# The losses can be:
# 1. Regular L2-loss for SVM (hinge loss),
# 2. L1-loss for SVM
# 3. Logistic loss for logistic regression.
#
# Here we will use L1 and L2 regularization and hinge loss (L2-loss) for linear SVMs
  # We will use logistic loss for L2-regularized logistic regression
  # The liblinear 'type' choices are below:
#
# for classification
# • 0 – L2-regularized logistic regression (primal)---> we use this for l2-logistic
# • 1 – L2-regularized L2-loss support vector classification (dual)
# • 2 – L2-regularized L2-loss support vector classification (primal) ---> we use this for l2-linear SVM
# • 3 – L2-regularized L1-loss support vector classification (dual)
# • 4 – support vector classification by Crammer and Singer
# • 5 – L1-regularized L2-loss support vector classification---> we use this for l1-linear SVM
# • 6 – L1-regularized logistic regression
# • 7 – L2-regularized logistic regression (dual)
#
#for regression
# • 11 – L2-regularized L2-loss support vector regression (primal)
# • 12 – L2-regularized L2-loss support vector regression (dual)
# • 13 – L2-regularized L1-loss support vector regression (dual)
# ------------------------------------------------------------------------>
# ---------------------------------2--------------------------------------->
# We use ROC metric for all the models
# To do that I had to make changes to the caret package functions.
# The files 'data/caret_models/svmLinear3.R and svmLinear5.R are my functions.
# I added 1 line to get Decision Values for linear SVMs:
#
# prob = function(modelFit, newdata, submodels = NULL){
# predict(modelFit, newdata, decisionValues = TRUE)$decisionValues
# },
#
# This line gives decision values instead of probabilities and computes ROC in:
# 1. train function with the cross-validataion
# 2. final trained model
# using decision values and saves them in the variable "prob"
# This allows us to pass the cv function for all models:
# cv <- trainControl(method="repeatedcv",
# repeats = 100,
# number=folds,
# index = cvIndex,
# returnResamp="final",
# classProbs=TRUE,
# summaryFunction=twoClassSummary,
# indexFinal=NULL,
# savePredictions = TRUE)
#
  # The parameters we pass for L1 and L2 SVM:
# classProbs=TRUE,
# summaryFunction=twoClassSummary,
# are actually computing ROC from decision values not probabilities
# ------------------------------------------------------------------------>
# Grid and caret method defined for each classification models
if(model=="L2_Logistic_Regression") {
grid <- expand.grid(cost = hyperparameters$cost,
loss = "L2_primal",
# This chooses type=0 for liblinear R package
# which is logistic loss, primal solve for L2 regularized logistic regression
epsilon = 0.01) #default epsilon recommended from liblinear
method <- "regLogistic"
}
else if (model=="L1_Linear_SVM"){ #
grid <- expand.grid(cost = hyperparameters$cost,
Loss = "L2")
method <- "svmLinear4" # I wrote this function in caret
}
else if (model=="L2_Linear_SVM"){
grid <- expand.grid(cost = hyperparameters$cost,
Loss = "L2")
method <- "svmLinear3" # I changed this function in caret
}
else if (model=="RBF_SVM"){
grid <- expand.grid(sigma = hyperparameters$sigma,
C = hyperparameters$C)
method <-"svmRadial"
}
else if (model=="Decision_Tree"){
grid <- expand.grid(maxdepth = hyperparameters$maxdepth) # maybe change these default parameters?
method <-"rpart2"
}
else if (model=="Random_Forest"){
if(is.null(hyperparameters$mtry)){
# get number of features
n_features <- ncol(train_data) - 1
if(n_features > 20000) n_features <- 20000
# if few features
if(n_features < 19){ mtry <- 1:6
} else {
# if many features
mtry <- floor(seq(1, n_features, length=6))
}
# only keep ones with less features than you have
hyperparameters$mtry <- mtry[mtry <= n_features]
}
grid <- expand.grid(mtry = hyperparameters$mtry)
method = "rf"
}
else if (model=="XGBoost"){
grid <- expand.grid(nrounds=hyperparameters$nrounds,
gamma=hyperparameters$gamma,
eta=hyperparameters$eta,
max_depth=hyperparameters$max_depth,
colsample_bytree=hyperparameters$colsample_bytree,
min_child_weight=hyperparameters$min_child_weight,
subsample=hyperparameters$subsample)
method <- "xgbTree"
}
else {
print("Model not available")
}
# Return:
# 1. the hyper-parameter grid to tune
# 2. the caret function to train with
# 3, cv method
params <- list(grid, method, cv)
return(params)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{chinook}
\alias{chinook}
\title{SNP data from chinook reference populations}
\format{A tbl_df-ed (from dplyr) data frame with 7,301 rows and 185 variables. The first
three columns are
\describe{
\item{repunit (chr)}{the reporting unit that the individual is in}
\item{pop (chr)}{the population from which the individual was sampled}
\item{ID (chr)}{Unique identifier of the individual fish}
}
The remaining columns are two columns for each locus. These columns are named like,
"Locus.1" and "Locus.2" for the first and second gene copies at that locus. For example,
"Ots_104569-86.1" and "Ots_104569-86.2". The locus columns are ints and missing data
is denoted by NA.}
\source{
\url{http://datadryad.org/resource/doi:10.5061/dryad.574sv/1}
}
\description{
Chinook salmon baseline data similar to that which can be
downloaded from \url{http://datadryad.org/resource/doi:10.5061/dryad.574sv/1}.
This data set includes 91 SNPs and 7301 fish and is what the Dryad data became
after we converted from TaqMan to SNPtype assays (being forced to toss some loci)
and tossed out a bunch of lousy historical
samples from Trinity River.
}
|
/man/chinook.Rd
|
no_license
|
SOLV-Code/rubias
|
R
| false | true | 1,244 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{chinook}
\alias{chinook}
\title{SNP data from chinook reference populations}
\format{A tbl_df-ed (from dplyr) data frame with 7,301 rows and 185 variables. The first
three columns are
\describe{
\item{repunit (chr)}{the reporting unit that the individual is in}
\item{pop (chr)}{the population from which the individual was sampled}
\item{ID (chr)}{Unique identifier of the individual fish}
}
The remaining columns are two columns for each locus. These columns are named like,
"Locus.1" and "Locus.2" for the first and second gene copies at that locus. For example,
"Ots_104569-86.1" and "Ots_104569-86.2". The locus columns are ints and missing data
is denoted by NA.}
\source{
\url{http://datadryad.org/resource/doi:10.5061/dryad.574sv/1}
}
\description{
Chinook salmon baseline data similar to that which can be
downloaded from \url{http://datadryad.org/resource/doi:10.5061/dryad.574sv/1}.
This data set includes 91 SNPs and 7301 fish and is what the Dryad data became
after we converted from TaqMan to SNPtype assays (being forced to toss some loci)
and tossed out a bunch of lousy historical
samples from Trinity River.
}
|
setwd("/Users/zhangh24/GoogleDrive/multi_ethnic/")
library(tidyverse)
glgc_sample_size = read.csv("./data/GLGC_sample_size.csv")
glgc_sample_size_sum = glgc_sample_size %>% group_by(ethnic) %>%
summarize(max_size = max(sample_size))
sum(glgc_sample_size_sum$max_size)
glgc_sample_size_sum_noEUR = glgc_sample_size %>% filter(ethnic != "EUR") %>%
group_by(ethnic) %>% summarize(max_size = max(sample_size))
sum(glgc_sample_size_sum_noEUR$max_size)
glgc_sample_size_update = glgc_sample_size %>% rename(eth = ethnic)
aou_sample_size = read.csv("./data/AoU_sample_size.csv")
aou_sample_size_sum = aou_sample_size %>% group_by(ethnic) %>%
summarize(max_size = max(sample_size))
sum(aou_sample_size_sum$max_size)
aou_sample_size_sum_noEUR = aou_sample_size %>% filter(ethnic != "EUR") %>%
group_by(ethnic) %>% summarize(max_size = max(sample_size))
sum(aou_sample_size_sum_noEUR$max_size)
aou_sample_size_update = aou_sample_size %>% rename(eth = ethnic)
ukb_sample_size = read.csv("./data/UKBB_sample_size.csv")
ukb_sample_size = ukb_sample_size %>%
filter(ethnic!="EUR") %>%
mutate(total = tuning + validation)
ukb_sample_size_sum = ukb_sample_size %>% group_by(ethnic) %>%
summarize(max_size = max(total))
sum(ukb_sample_size_sum$max_size)
ukb_sample_size_sum_noEUR = ukb_sample_size_sum
ukb_sample_size_update = ukb_sample_size %>% select(ethnic, trait, total) %>%
rename(sample_size = total, eth = ethnic)
me23_sample_size = read.csv("/Users/zhangh24/GoogleDrive/multi_ethnic/result/23andme/23andme_sample_size.csv")
me23_sample_size = me23_sample_size %>%
mutate(total = train.control + train.case+ tun.control+ tun.case+vad.control+vad.case)
me23_sample_size_update = me23_sample_size %>%
select(eth,trait, total) %>%
mutate(eth_update = case_when(
eth == "European" ~ "EUR",
eth == "African American" ~ "AFR",
eth == "Latino" ~ "AMR",
eth == "East Asian" ~ "EAS",
eth == "South Asian" ~ "SAS"
)) %>%
select(eth_update, trait, total) %>%
rename(eth = eth_update,
sample_size = total)
me23_sample_size_sum = me23_sample_size_update %>% group_by(eth) %>%
summarize(max_size = max(sample_size))
sum(me23_sample_size_sum$max_size)
me23_sample_size_sum_noEUR = me23_sample_size %>% filter(eth != "European") %>%
group_by(eth) %>% summarize(max_size = max(total))
sum(me23_sample_size_sum_noEUR$max_size)
sum(glgc_sample_size_sum$max_size)+
sum(aou_sample_size_sum$max_size)+
sum(ukb_sample_size_sum$max_size)+
sum(me23_sample_size_sum$max_size)
sum(glgc_sample_size_sum_noEUR$max_size)+
sum(aou_sample_size_sum_noEUR$max_size)+
sum(ukb_sample_size_sum_noEUR$max_size)+
sum(me23_sample_size_sum_noEUR$max_size)
temp = data.frame(eth = glgc_sample_size_sum$ethnic,
sample_size = as.numeric(
glgc_sample_size_sum$max_size+
me23_sample_size_sum$max_size+
c(aou_sample_size_sum$max_size[1:2],0,aou_sample_size_sum$max_size[3],0)+
c(ukb_sample_size_sum$max_size[1:3],0,ukb_sample_size_sum$max_size[4])))
# com_data = rbind(glgc_sample_size_update, aou_sample_size_update, ukb_sample_size_update,me23_sample_size_update )
# com_data_sum = com_data %>% group_by(eth) %>%
# summarise(max_N = max(sample_size))
#organize table for glgc and all of us
glgc_sample_size = read.csv("./data/GLGC_sample_size.csv")
eth_vec = c("EUR", "AFR", "AMR", "EAS", "SAS")
eth_name = c("European", "African", "Hispanic", "East Asian", "South Asian")
trait_vec = c("HDL", "LDL", "logTG", "TC")
result_list = list()
temp = 1
for(l in 1:length(trait_vec)){
for(i in 1:length(eth_vec)){
ethnic = glgc_sample_size$ethnic
trait = glgc_sample_size$trait
sample_size = glgc_sample_size$sample_size
idx <- which(ethnic==eth_vec[i]& trait==trait_vec[l])
temp_result = data.frame(trait = trait_vec[l],
eth = eth_name[i],
sample_size = sample_size[idx])
result_list[[temp]] = temp_result
temp = temp + 1
}
}
glgc_table = rbindlist(result_list)
write.csv(glgc_table, file = "./result/GLGC/glgc_sample_size_clean.csv", row.names = F)
aou_sample_size = read.csv("./data/AoU_sample_size.csv")
eth_vec = c("EUR", "AFR", "AMR")
eth_name = c("European", "African", "Hispanic")
trait_name = c("Height", "BMI")
trait_vec = c("height", "bmi")
result_list = list()
temp = 1
for(l in 1:length(trait_vec)){
for(i in 1:length(eth_vec)){
ethnic = aou_sample_size$ethnic
trait = aou_sample_size$trait
sample_size = aou_sample_size$sample_size
idx <- which(ethnic==eth_vec[i]& trait==trait_vec[l])
temp_result = data.frame(trait = trait_name[l],
eth = eth_name[i],
sample_size = sample_size[idx])
result_list[[temp]] = temp_result
temp = temp + 1
}
}
aou_table = rbindlist(result_list)
write.csv(aou_table, file = "./result/AoU/aou_sample_size_clean.csv", row.names = F)
ukb_sample_size = read.csv("./data/UKBB_sample_size.csv")
ukb_sample_size = ukb_sample_size %>%
filter(ethnic!="EUR") %>%
mutate(total = tuning + validation)
eth_vec = c("EUR", "AFR", "EAS", "SAS")
eth_name = c("European", "African", "East Asian", "South Asian")
trait_name = c("HDL", "LDL", "logTG", "TC", "Height", "BMI")
trait_vec = c("HDL", "LDL", "logTG", "TC", "height", "bmi")
result_list = list()
temp = 1
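# lipid traits (l = 1:4) are tabulated for the African, East Asian and South Asian samples;
# height and BMI (l = 5:6) are tabulated only for the African sample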
for(l in 1:4){
for(i in 2:4){
ethnic = ukb_sample_size$ethnic
trait = ukb_sample_size$trait
tun_sample_size = ukb_sample_size$tuning
vad_sample_size = ukb_sample_size$validation
idx <- which(ethnic==eth_vec[i]& trait==trait_vec[l])
temp_result = data.frame(trait = trait_name[l],
eth = eth_name[i],
tun_sample = tun_sample_size[idx],
vad_sample = vad_sample_size[idx])
result_list[[temp]] = temp_result
temp = temp + 1
}
}
for(l in 5:6){
for(i in 2:2){
ethnic = ukb_sample_size$ethnic
trait = ukb_sample_size$trait
tun_sample_size = ukb_sample_size$tuning
vad_sample_size = ukb_sample_size$validation
idx <- which(ethnic==eth_vec[i]& trait==trait_vec[l])
temp_result = data.frame(trait = trait_name[l],
eth = eth_name[i],
tun_sample = tun_sample_size[idx],
vad_sample = vad_sample_size[idx])
result_list[[temp]] = temp_result
temp = temp + 1
}
}
ukb_table = rbindlist(result_list)
write.csv(ukb_table, file = "./result/GLGC/ukb_sample_size_clean.csv", row.names = F)
glgc_sample_size_sum = glgc_sample_size %>% group_by(ethnic) %>%
summarize(max_size = max(sample_size))
sum(glgc_sample_size_sum$max_size)
glgc_sample_size_sum_noEUR = glgc_sample_size %>% filter(ethnic != "EUR") %>%
group_by(ethnic) %>% summarize(max_size = max(sample_size))
sum(glgc_sample_size_sum_noEUR$max_size)
glgc_sample_size_update = glgc_sample_size %>% rename(eth = ethnic)
aou_sample_size = read.csv("./data/AoU_sample_size.csv")
aou_sample_size_sum = aou_sample_size %>% group_by(ethnic) %>%
summarize(max_size = max(sample_size))
|
/code/GLGC_analysis/17_sample_size_table.R
|
no_license
|
andrewhaoyu/multi_ethnic
|
R
| false | false | 7146
r
|
library(MPRAnalyze)
library(argparse)
library(ggplot2)
library(BiocParallel)
getOpts <- function(){
parser <- ArgumentParser(description='Analyze MPRA data')
parser$add_argument('--rna', help='RNA counts file')
parser$add_argument('--dna', help='DNA counts file')
parser$add_argument('--rna_annot', help='RNA annot file')
parser$add_argument('--dna_annot', help='DNA annot file')
parser$add_argument('--size_norm', action="store_true", help='Apply upper-quartile library size normalization')
parser$add_argument('--output', help='Output file name')
args <- parser$parse_args()
return(args)
}
args = getOpts()
dna = data.matrix(read.table(args$dna, sep='\t', header=T, row.names="refname"))
rna = data.matrix(read.table(args$rna, sep='\t', header=T, row.names="refname"))
dna_annot = read.table(args$dna_annot, sep='\t', header=T, row.names="bcnum")
rna_annot = read.table(args$rna_annot, sep='\t', header=T, row.names="bcnum_rep")
obj = MpraObject(dnaCounts = dna, rnaCounts = rna, dnaAnnot = dna_annot, rnaAnnot = rna_annot)
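# optional library-size normalization: estimate depth factors with the upper-quartile
# ("uq") estimator, and per replicate ("rep") for the RNA libraries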
if (args$size_norm == TRUE) {
obj <- estimateDepthFactors(obj, which.lib = "dna",
depth.estimator = "uq")
obj <- estimateDepthFactors(obj, lib.factor = c("rep"),
which.lib = "rna",
depth.estimator = "uq")
}
obj = analyzeQuantification(obj = obj, dnaDesign = ~ barcode, rnaDesign = ~ rep)
write.table(testEmpirical(obj), args$output, quote=FALSE, sep='\t')
|
/analysis/starr-seq/scripts/mpra_analyze.R
|
no_license
|
arushiv/islet_cage
|
R
| false | false | 1512
r
|
#' @title
#' Loading/Attaching and listing of packages with dependencies
#'
#' @description
#' \code{library_with_dep} and \code{require_with_dep} behave
#' respectively like \code{\link[base]{library}} and
#' \code{\link[base]{require}}, but also load and attach
#' dependent packages (typically packages listed in the \code{Imports}
#' field of the \code{DESCRIPTION} file).
#'
#' @param package
#' the name of a package, given as a name or literal character string,
#' or a character string, depending on whether character.only is
#' \code{FALSE} (default) or \code{TRUE}.
#'
#' @param help
#' the name of a package, given as a name or literal character string,
#' or a character string, depending on whether character.only is
#' \code{FALSE} (default) or \code{TRUE}.
#'
#' @param pos
#' the position on the search list at which to attach
#' the loaded namespace.
#' Can also be the name of a position on the current
#' search list as given by \code{\link[base]{search}}().
#'
#' @param lib.loc
#' character. A vector describing the location of R
#' library trees to search through, or \code{NULL}.
#' The default value of \code{NULL} corresponds to all libraries
#' currently known to \code{\link[base]{.libPaths}}().
#' Non-existent library trees are silently ignored.
#'
#' @param character.only
#' logical. Indicates whether \code{package} or \code{help}
#' can be assumed to be character strings.
#'
#' @param logical.return
#' logical. If it is \code{TRUE}, then \code{FALSE} or \code{TRUE}
#' is returned to indicate success.
#'
#' @param warn.conflicts
#' logical. If \code{TRUE}, warnings are printed about
#' conflicts from attaching the new package.
#' A conflict is a function masking a function,
#' or a non-function masking a non-function.
#'
#' @param quietly
#' logical. If \code{TRUE}, no message confirming package
#' attaching is printed, and most often,
#' no errors/warnings are printed if package attaching fails.
#'
#' @param verbose
#' logical. If \code{TRUE}, additional diagnostics are printed.
#'
#' @param which
#' character. A vector listing the types of dependencies,
#' a subset of \code{c("Depends", "Imports", "LinkingTo", "Suggests", "Enhances")}.
#' Character string \code{"all"} is shorthand for that vector,
#' character string \code{"most"} for the same vector without \code{"Enhances"}.
#'
#' @param recursive
#' logical. Should (reverse) dependencies of (reverse)
#' dependencies (and so on) be included?
#'
#' @param reverse
#' logical. If \code{FALSE} (default),
#' regular dependencies are calculated, otherwise reverse dependencies.
#'
#' @importFrom tools package_dependencies
#' @importFrom utils installed.packages
#' @export
#'
#' @seealso \code{\link[base]{library}} and
#' \code{\link[base]{require}} from package \pkg{base};
#' \code{\link[tools]{package_dependencies}} from \pkg{tools};
#' \code{\link[utils]{installed.packages}} from \pkg{utils}.
#'
library_with_dep <-
function(package,
help,
pos = 2,
lib.loc = NULL,
character.only = FALSE,
logical.return = FALSE,
warn.conflicts = TRUE,
quietly = FALSE,
verbose = getOption("verbose"),
which = "Imports",
recursive = FALSE,
reverse = FALSE)
{
if (!character.only) {
package <- as.character(substitute(package))
help <- as.character(substitute(help))
}
ip <- utils::installed.packages()
pd <- tools::package_dependencies(package,
ip,
which = which,
recursive = recursive,
reverse = reverse,
verbose = verbose)
pd <- pd[[package]]
for (p in c(package, pd)) {
library(p,
help = help,
pos = pos,
lib.loc = lib.loc,
character.only = TRUE,
logical.return = logical.return,
warn.conflicts = warn.conflicts,
quietly = quietly,
verbose = verbose)
}
invisible(NULL)
}
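# Example (not run): attach ggplot2 together with the packages listed in its Imports field
# library_with_dep(ggplot2, which = "Imports")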
|
/R/library_with_dep.R
|
no_license
|
cran/bazar
|
R
| false | false | 4265
r
|
data <- retention(id,
cv_w1,cv_w2,cv_w3,cv_w4,cv_w5,
dn_w1,dn_w2,dn_w3,dn_w4,dn_w5,
ch_w1,ch_w2,ch_w3,ch_w4,ch_w5,
ph_w1,ph_w2,ph_w3,ph_w4,ph_w5,
ep_w1,ep_w2,ep_w3,ep_w4,ep_w5,
health_w1,health_w2,health_w3,health_w4,health_w5,
imp_w1,imp_w2,imp_w3,imp_w4,imp_w5,
isced_w1,isced_w2,isced_w3,isced_w4,isced_w5,
cv_retention,dn_retention,ch_retention,ph_retention,ep_retention,health_retention,imp_retention,isced_retention,
index1,index2,index3,index4,index5)
# routing marital status
w <- c(1,2,4,5,6)
for(k in 2:5){
ind <- which(data[,"mstatc"]==4 & data[,"wave"]==w[k])
data[ind,"mstat"] <- data[(ind-1),"mstat"]
}
# find sample that experiences change in ADL
ADL <- data[,"ADL"]
indexADL <- (ADL>0)
indexADL2 <- matrix(FALSE,nrow(data),1)
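# keep subjects whose non-missing waves are consecutive and who start without ADL
# limitations but report at least one limitation in a later wave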
for(i in 1:(nrow(data)/5)){
obs <- which(!is.na(indexADL[(i*5-4):(i*5)]))
if(sum(diff(obs))==(length(obs)-1)){ # check whether observations lie in consecutive waves
firstobs <- (i*5-5+obs[1])
lastobs <- (i*5-5+obs[length(obs)])
if(sum(indexADL[firstobs:lastobs])>0 & sum(indexADL[firstobs:lastobs])<length(obs) & indexADL[firstobs]==FALSE){
indexADL2[(i*5-4):(i*5)] <- TRUE
}
}
}
samp <- data[indexADL2,]
i <- which(is.na(samp[,3])) # remove subjects not present in certain waves
i2 <- which(samp[,"age"]==-9) # remove deceased subjects
samp <- samp[-c(i,i2),]
# routing children: fill missing child counts from the partner (same coupleid), or failing that from the subject's other waves
nas <- which(is.na(samp[,"child"]))
ind <- unique(samp[nas,"mergeid"])
for(l in 1:length(ind)){
obs <- ind[l]
indsamp <- which(samp[,"mergeid"]%in%obs)
indobs <- which(data[,"mergeid"]%in%obs)
couple <- data[indobs[1],"coupleid"]
if(!(couple%in%"")){
indcouple <- which(data[,"coupleid"]%in%couple)
min <- match(indobs,indcouple)
min <- min[complete.cases(min)]
indpart <- indcouple[-min]
replace <- which(!is.na(data[indpart,"child"]))
if(!is.na(indpart[replace[1]])){
samp[indsamp,"child"] <- data[indpart[replace[1]],"child"]
}
}
if(is.na(sum(samp[indsamp,"child"]))){
nach <- which(is.na(samp[indsamp,"child"]))
samp[indsamp[nach],"child"] <- samp[indsamp[-nach][1],"child"]
}
}
# routing marital status within the ADL-change sample: carry forward unchanged status, then fill remaining gaps from the partner's record
for(k in 2:5){
ind <- which(samp[,"mstatc"]==4 & samp[,"wave"]==w[k])
samp[ind,"mstat"] <- samp[(ind-1),"mstat"]
}
nas <- which(is.na(samp[,"mstat"]))
ind <- unique(samp[nas,"mergeid"])
for(l in 1:length(ind)){
obs <- ind[l]
indsamp <- which(samp[,"mergeid"]%in%obs)
indobs <- which(data[,"mergeid"]%in%obs)
indnam <- which(is.na(samp[indsamp,"mstat"]))
for(k in 1:length(indnam)){
if(samp[indsamp[indnam],"mstatc"][k]==4 & !is.na(samp[indsamp[indnam],"mstatc"][k])){
samp[indsamp[indnam],"mstat"][k] <- samp[(indsamp[indnam]-1),"mstat"][k]
}
if(is.na(samp[indsamp[indnam],"mstatc"][k])){
couple <- samp[indsamp[indnam],"coupleid"][k]
if(!(couple%in%"")){
indcouple <- which(data[,"coupleid"]%in%couple)
min <- match(indobs,indcouple)
min <- min[complete.cases(min)]
indpart <- indcouple[-min]
replace <- which(!is.na(data[indpart,"child"]))
if(!is.na(indpart[replace[1]])){
samp[indsamp[indnam],"mstat"][k] <- data[indpart[replace[1]],"mstat"]
}
}
}
}
}
# remove imputed variables
samp <- samp[,-c(12,15,24,26)]
samp[,"gender"] <- as.numeric(as.factor(samp[,"gender"]))
# count missing values per column
missings <- matrix(NA,1,ncol(samp))
rownames(missings) <- c("number missing")
colnames(missings) <- colnames(samp)
for(p in 1:ncol(samp)){
missings[1,p] <- sum(is.na(samp[,p]))
}
# impute remaining missing values with Amelia (a single imputed data set, m = 1)
if (!requireNamespace("Amelia", quietly = TRUE)) install.packages("Amelia")
library(Amelia)
imputations <- amelia(samp[,c(1:2,5:8,11,13,15:17,19:20)],m=1,ts="wave",cs="id",
idvars=c("country","firstwave"),
noms=c("gender","mstat","chronic"),
ords=c("sphus"))
imp <- imputations$imputations$imp1
|
/R/code/full data set 2.R
|
no_license
|
annedh93/MasterThesis
|
R
| false | false | 4098
r
|
# set library paths:
libpathKiri = "/net/pc132059/nobackup_1/users/whan/R/x86_64-redhat-linux-gnu-library/3.3"
libpath = "/usr/people/bakker/R/x86_64-redhat-linux-gnu-library/3.4"
#libpath = "/tmp/RtmpSDMBdh/downloaded_packages"
.libPaths(c(.libPaths()))#, libpathKiri))
library(maps)
library(maptools)
library(ncdf4)
library(tidyverse)
library(rasterVis)
library(ggplot2)
library(caret)
library(leaps)
library(grid)
library(gridExtra)
library(glmnet)
library(quantreg)
library(rqPen)
library(gamlss)
library(gamlss.tr)
library(randomForest)
library(quantregForest)
library(grf)
library(gbm)
library(neuralnet)
library(qrnn)
library(e1071)
library(qrsvm)
library(rpart)
library(gptk)
library(class)
library(pryr)
library(devtools)
library(Rcpp)
sourceCpp("/usr/people/bakker/kilianbakker/R/crps_ensemble.cpp")
source("/usr/people/bakker/kilianbakker/R/functions.R")
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Vonkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
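# rescale observed radiation (hourly sums, presumably in J/cm^2) to mean W/m^2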
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
hyperparamText <- "musteps"
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
hyperparameters <- musteps
for (hyper in 1:length(hyperparameters)){
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
NumberofCV <- 3
setting <- 14
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(10,12,17)){
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
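    # normalize the five radiation forecasts by the clear-sky surface radiation,
    # turning the global radiation forecast into a clear-sky index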
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
if (ContMethod == 1){
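      # note: DiscretizeWidth is not defined in this script and must come from the sourced
      # functions.R; none of the settings looped over here set ContMethod to 1, so this branch is not exercised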
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
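        # split the shuffled station-date indices into NumberofCV (= 3) consecutive folds;
        # each fold is held out in turn and predicted from a model fit on the remaining folds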
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
#normal linear regression
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
#stepwise linear regression
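          # only the RegMethod == 1 (stepwise GAMLSS) branch is implemented in this block;
          # other RegMethod values leave tempPredictionset unfilled for these indices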
if (RegMethod == 1){
fitcontrol <- gamlss.control(c.crit = 0.01, n.cyc = 50, mu.step = 1, sigma.step = 1, nu.step = 1,
tau.step = 1, gd.tol = Inf, iter = 0, trace = F, autostep = TRUE, save = T)
fitcontrol2 <- glim.control(cc = 0.01, cyc = 50, glm.trace = F, bf.cyc = 50, bf.tol = 0.01, bf.trace = F)
if (DistMethod %in% c(-1,0,1)){
fit0 <- gamlss(formula = CSI_obs~1, sigma.formula = CSI_obs~1, data = TrainingData, family = NO(mu.link = "identity", sigma.link = "identity"),
control = fitcontrol,i.control = fitcontrol2, trace = F)
if (DistMethod == -1){
fit1 <- fit0
} else if (DistMethod == 0){
fit1 <- stepGAIC(fit0, what="mu", scope = list(upper = paste0("~Global")), direction = "both", control = fitcontrol, i.control = fitcontrol2, trace = F)
} else if (DistMethod == 1){
fit1 <- stepGAIC(fit0, what="mu", scope = list(upper = form), steps = musteps[hyper], direction = "both", control = fitcontrol, i.control = fitcontrol2, trace = F)
fit1 <- stepGAIC(fit1, what="sigma", scope = list(upper = form), steps = sigmasteps[2], direction = "both", control = fitcontrol, i.control = fitcontrol2, trace = F)
}
Predictionset_mu <- unname(predict(fit1, newdata = TestingData, what = c("mu")), force = FALSE)
Predictionset_sigma <- pmax(unname(predict(fit1, newdata = TestingData, what = c("sigma")), force = FALSE),array(0.001, c(length(testIndices))))
for (i in 1:length(testIndices)){
tempPredictionset[testIndices[i],] <- unname(qNO(quants, mu = Predictionset_mu[i], sigma = Predictionset_sigma[i]))
}
} else if (DistMethod %in% c(2,3)){
if (DistMethod == 2){
gen.trun(par=c(min(RadiationData[predictand][[1]][tempIndices], na.rm = T),
max(RadiationData[predictand][[1]][tempIndices], na.rm = T)),"NO", type="both")
} else if (DistMethod == 3){
gen.trun(par=c(0),"NO", type="left")
}
fit0 <- gamlss(formula = CSI_obs~1, sigma.formula = CSI_obs~1, data = TrainingData, family = NOtr(mu.link = "identity", sigma.link = "identity"), control = fitcontrol,i.control = fitcontrol2, trace = F)
fit1 <- stepGAIC(fit0, what="mu", scope = list(upper = form), steps = musteps[hyper], direction = "both", control = fitcontrol, i.control = fitcontrol2, trace = F)
fit1 <- stepGAIC(fit1, what="sigma", scope = list(upper = form), steps = sigmasteps[2], direction = "both", control = fitcontrol, i.control = fitcontrol2, trace = F)
Predictionset_mu <- unname(predict(fit1, newdata = TestingData, what = c("mu")), force = FALSE)
Predictionset_sigma <- pmax(unname(predict(fit1, newdata = TestingData, what = c("sigma")), force = FALSE),array(0.001, c(length(testIndices))))
for (i in 1:length(testIndices)){
tempPredictionset[testIndices[i],] <- unname(qNOtr(quants, mu = Predictionset_mu[i], sigma = Predictionset_sigma[i]))
}
}
}
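          # sort the predicted quantiles within each test case to prevent quantile crossing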
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText, hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
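# second pass of the same pipeline, now varying the number of sigma steps in the
# stepwise GAMLSS fit (hyperparamText = "sigmasteps") instead of the mu steps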
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Vonkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
hyperparamText <- "sigmasteps"
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
hyperparameters <- sigmasteps
for (hyper in 1:length(hyperparameters)){
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
NumberofCV <- 3
setting <- 14
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(10,12)){
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
if (ContMethod == 1){
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
#normal linear regression
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
#stepwise linear regression
if (RegMethod == 1){
fitcontrol <- gamlss.control(c.crit = 0.01, n.cyc = 50, mu.step = 1, sigma.step = 1, nu.step = 1,
tau.step = 1, gd.tol = Inf, iter = 0, trace = F, autostep = TRUE, save = T)
fitcontrol2 <- glim.control(cc = 0.01, cyc = 50, glm.trace = F, bf.cyc = 50, bf.tol = 0.01, bf.trace = F)
if (DistMethod %in% c(-1,0,1)){
fit0 <- gamlss(formula = CSI_obs~1, sigma.formula = CSI_obs~1, data = TrainingData, family = NO(mu.link = "identity", sigma.link = "identity"),
control = fitcontrol,i.control = fitcontrol2, trace = F)
if (DistMethod == -1){
fit1 <- fit0
} else if (DistMethod == 0){
fit1 <- stepGAIC(fit0, what="mu", scope = list(upper = paste0("~Global")), direction = "both", control = fitcontrol, i.control = fitcontrol2, trace = F)
} else if (DistMethod == 1){
fit1 <- stepGAIC(fit0, what="mu", scope = list(upper = form), steps = musteps[2], direction = "both", control = fitcontrol, i.control = fitcontrol2, trace = F)
fit1 <- stepGAIC(fit1, what="sigma", scope = list(upper = form), steps = sigmasteps[hyper], direction = "both", control = fitcontrol, i.control = fitcontrol2, trace = F)
}
Predictionset_mu <- unname(predict(fit1, newdata = TestingData, what = c("mu")), force = FALSE)
Predictionset_sigma <- pmax(unname(predict(fit1, newdata = TestingData, what = c("sigma")), force = FALSE),array(0.001, c(length(testIndices))))
for (i in 1:length(testIndices)){
tempPredictionset[testIndices[i],] <- unname(qNO(quants, mu = Predictionset_mu[i], sigma = Predictionset_sigma[i]))
}
} else if (DistMethod %in% c(2,3)){
if (DistMethod == 2){
gen.trun(par=c(min(RadiationData[predictand][[1]][tempIndices], na.rm = T),
max(RadiationData[predictand][[1]][tempIndices], na.rm = T)),"NO", type="both")
} else if (DistMethod == 3){
gen.trun(par=c(0),"NO", type="left")
}
fit0 <- gamlss(formula = CSI_obs~1, sigma.formula = CSI_obs~1, data = TrainingData, family = NOtr(mu.link = "identity", sigma.link = "identity"), control = fitcontrol,i.control = fitcontrol2, trace = F)
fit1 <- stepGAIC(fit0, what="mu", scope = list(upper = form), steps = musteps[2], direction = "both", control = fitcontrol, i.control = fitcontrol2, trace = F)
fit1 <- stepGAIC(fit1, what="sigma", scope = list(upper = form), steps = sigmasteps[hyper], direction = "both", control = fitcontrol, i.control = fitcontrol2, trace = F)
Predictionset_mu <- unname(predict(fit1, newdata = TestingData, what = c("mu")), force = FALSE)
Predictionset_sigma <- pmax(unname(predict(fit1, newdata = TestingData, what = c("sigma")), force = FALSE),array(0.001, c(length(testIndices))))
for (i in 1:length(testIndices)){
tempPredictionset[testIndices[i],] <- unname(qNOtr(quants, mu = Predictionset_mu[i], sigma = Predictionset_sigma[i]))
}
}
}
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText,hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
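# third pass, with the header repeated as well; this run varies the number of trees
# (hyperparamText = "ntrees") for a different set of model settings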
# set library paths:
libpathKiri = "/net/pc132059/nobackup_1/users/whan/R/x86_64-redhat-linux-gnu-library/3.3"
libpath = "/usr/people/bakker/R/x86_64-redhat-linux-gnu-library/3.4"
#libpath = "/tmp/RtmpSDMBdh/downloaded_packages"
.libPaths(c(.libPaths()))#, libpathKiri))
library(maps)
library(maptools)
library(ncdf4)
library(tidyverse)
library(rasterVis)
library(ggplot2)
library(caret)
library(leaps)
library(grid)
library(gridExtra)
library(glmnet)
library(quantreg)
library(rqPen)
library(gamlss)
library(gamlss.tr)
library(randomForest)
library(quantregForest)
library(grf)
library(gbm)
library(neuralnet)
library(qrnn)
library(e1071)
library(qrsvm)
library(rpart)
library(gptk)
library(class)
library(pryr)
library(devtools)
library(Rcpp)
sourceCpp("/usr/people/bakker/kilianbakker/R/crps_ensemble.cpp")
source("/usr/people/bakker/kilianbakker/R/functions.R")
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Vonkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
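# rescale the observed radiation by 10000/3600, i.e. hourly J/cm2 sums to mean W/m2 (assuming the standard KNMI units)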
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
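# hyperparameter sweep: number of trees (QRF ntree, GBM n.trees, GRF num.trees)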
hyperparamText <- "ntrees"
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
hyperparameters <- ntrees
for (hyper in 1:length(hyperparameters)){
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
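# each settings entry is c(ContMethod, ProbMethod, RegMethod, DistMethod); the settings used below (14, 15, 18) select quantile regression forest, gradient boosting and generalized random forest with quantile output (ProbMethod 2)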
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
NumberofCV <- 3
setting <- 4
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(14,15,18)){
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
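# top-of-atmosphere irradiance from the solar constant (1367 W/m2) and the solar zenith angle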
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
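# turn the five (ir)radiance predictors into clear-sky indices by dividing by the clear-sky radiation CSR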
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
if (ContMethod == 1){
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
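# keep daytime samples (clear-sky radiation > 20) from this season's months, shuffled before the CV split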
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
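# 3-fold cross-validation (NumberofCV) over the shuffled samples; each fold serves once as test set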
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
# model formulas (not used by the tree-based branches below)
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
# fit the selected method: RegMethod 4 = quantile regression forest, 5 = gradient boosting (one fit per quantile), 9 = generalized random forest
if (RegMethod == 4){
fit1 <- quantregForest(Predictors, Predictand, sampsize=ceiling(nrow(Predictors)*samps[3]), mtry = ceiling(ncol(Predictors)*mtries[2]), nodesize = nodesizes[1], ntree = ntrees[hyper])
tempPredictionset[testIndices,] <- predict(fit1, newdata = TestingData, what = quants)
#QRFimp[time,season,CVmonth,] <- importance(fit1)
} else if (RegMethod == 5){
for (q in 1:length(quants)){
fit1 <- gbm(CSI_obs~., data=TrainingData, distribution=list(name = "quantile",alpha = quants[q]), n.trees = ntrees[hyper], shrinkage=shrinkages[3],
interaction.depth=depths[1], bag.fraction = samps[3], train.fraction = 1, n.minobsinnode = nodesizes[1], verbose=FALSE)
tempPredictionset[testIndices,q] <- predict(fit1, newdata = TestingData, n.trees = ntrees[hyper])
#GBMimp[time,season,CVmonth,q,] <- varImp(fit1,gbmTrees)[[1]]
}
} else if (RegMethod == 9){
fit1 <- quantile_forest(Predictors, Predictand, quantiles = quants, regression.splitting = FALSE, sample.fraction = samps[3],
mtry = ceiling(ncol(Predictors)*mtries[2]), num.trees = ntrees[hyper], min.node.size = nodesizes[1], honesty = F, honesty.fraction = NULL)
#split_frequencies(fit1, max.depth = 5)
#GRFimp[time,season,CVmonth,] <- variable_importance(fit1)[,1]
tempPredictionset[testIndices,] <- predict(fit1, newdata = TestingData, quantiles = quants)
}
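# sort the predicted quantiles within each sample to prevent quantile crossing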
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
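# put the forecasts back on the (date, station) grid and multiply by the clear-sky radiation to return from clear-sky index to radiation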
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText,hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
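# Illustrative sketch only (not part of the sweep): how one of the saved prediction sets
# could be loaded and inspected afterwards. The file name is an assumed example following
# the saveRDS pattern above (ntrees sweep with ntrees = 500, setting 14: QRF, DistMethod 0).
if (FALSE) {
PS <- readRDS("/nobackup/users/bakker/Data2/predictionsets2/PS_ntrees500_4_0.rds")
dim(PS) # init_dates x stations x lead times x quantiles
# median forecast (quants[25] = 0.5 for stepsize 1/50) for the first date,
# station index 6 (De Bilt) and lead-time index 8 (leadTimes[8] = +12 h)
PS[1, 6, 8, 25]
}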
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan Zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Volkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
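# hyperparameter sweep: fraction of predictors tried per split (mtry) for QRF and GRF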
hyperparamText <- "mtries"
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
hyperparameters <- mtries
for (hyper in 1:length(hyperparameters)){
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
NumberofCV <- 3
setting <- 4
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(14,18)){
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
if (ContMethod == 1){
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
# model formulas (not used by the tree-based branches below)
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
# fit the selected method: RegMethod 4 = quantile regression forest, 5 = gradient boosting (one fit per quantile), 9 = generalized random forest
if (RegMethod == 4){
fit1 <- quantregForest(Predictors, Predictand, sampsize=ceiling(nrow(Predictors)*samps[3]), mtry = ceiling(ncol(Predictors)*mtries[hyper]), nodesize = nodesizes[1], ntree = ntrees[2])
tempPredictionset[testIndices,] <- predict(fit1, newdata = TestingData, what = quants)
#QRFimp[time,season,CVmonth,] <- importance(fit1)
} else if (RegMethod == 5){
for (q in 1:length(quants)){
fit1 <- gbm(CSI_obs~., data=TrainingData, distribution=list(name = "quantile",alpha = quants[q]), n.trees = ntrees[2], shrinkage=shrinkages[3],
interaction.depth=depths[1], bag.fraction = samps[3], train.fraction = 1, n.minobsinnode = nodesizes[1], verbose=FALSE)
tempPredictionset[testIndices,q] <- predict(fit1, newdata = TestingData, n.trees = ntrees[2])
#GBMimp[time,season,CVmonth,q,] <- varImp(fit1,gbmTrees)[[1]]
}
} else if (RegMethod == 9){
fit1 <- quantile_forest(Predictors, Predictand, quantiles = quants, regression.splitting = FALSE, sample.fraction = samps[3],
mtry = ceiling(ncol(Predictors)*mtries[hyper]), num.trees = ntrees[2], min.node.size = nodesizes[1], honesty = F, honesty.fraction = NULL)
#split_frequencies(fit1, max.depth = 5)
#GRFimp[time,season,CVmonth,] <- variable_importance(fit1)[,1]
tempPredictionset[testIndices,] <- predict(fit1, newdata = TestingData, quantiles = quants)
}
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText,hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan Zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Volkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
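# hyperparameter sweep: minimum node size (QRF nodesize, GBM n.minobsinnode, GRF min.node.size)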
hyperparamText <- "nodesizes"
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
hyperparameters <- nodesizes
for (hyper in 1:length(hyperparameters)){
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
NumberofCV <- 3
setting <- 4
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(14,15,18)){
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
if (ContMethod == 1){
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
# model formulas (not used by the tree-based branches below)
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
# fit the selected method: RegMethod 4 = quantile regression forest, 5 = gradient boosting (one fit per quantile), 9 = generalized random forest
if (RegMethod == 4){
fit1 <- quantregForest(Predictors, Predictand, sampsize=ceiling(nrow(Predictors)*samps[3]), mtry = ceiling(ncol(Predictors)*mtries[2]), nodesize = nodesizes[hyper], ntree = ntrees[2])
tempPredictionset[testIndices,] <- predict(fit1, newdata = TestingData, what = quants)
#QRFimp[time,season,CVmonth,] <- importance(fit1)
} else if (RegMethod == 5){
for (q in 1:length(quants)){
fit1 <- gbm(CSI_obs~., data=TrainingData, distribution=list(name = "quantile",alpha = quants[q]), n.trees = ntrees[2], shrinkage=shrinkages[3],
interaction.depth=depths[1], bag.fraction = samps[3], train.fraction = 1, n.minobsinnode = nodesizes[hyper], verbose=FALSE)
tempPredictionset[testIndices,q] <- predict(fit1, newdata = TestingData, n.trees = ntrees[2])
#GBMimp[time,season,CVmonth,q,] <- varImp(fit1,gbmTrees)[[1]]
}
} else if (RegMethod == 9){
fit1 <- quantile_forest(Predictors, Predictand, quantiles = quants, regression.splitting = FALSE, sample.fraction = samps[3],
mtry = ceiling(ncol(Predictors)*mtries[2]), num.trees = ntrees[2], min.node.size = nodesizes[hyper], honesty = F, honesty.fraction = NULL)
#split_frequencies(fit1, max.depth = 5)
#GRFimp[time,season,CVmonth,] <- variable_importance(fit1)[,1]
tempPredictionset[testIndices,] <- predict(fit1, newdata = TestingData, quantiles = quants)
}
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText,hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan Zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Volkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
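# hyperparameter sweep: per-tree sample fraction (QRF sampsize, GRF sample.fraction)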
hyperparamText <- "samps"
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
hyperparameters <- samps
for (hyper in 1:length(hyperparameters)){
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
NumberofCV <- 3
setting <- 4
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(14,18)){ # setting 15 dropped here: this sweep has no RegMethod == 5 (GBM) branch below
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
if (ContMethod == 1){
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
# model formulas (not used by the tree-based branches below)
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
# fit the selected method: RegMethod 4 = quantile regression forest, 9 = generalized random forest
if (RegMethod == 4){
fit1 <- quantregForest(Predictors, Predictand, sampsize=ceiling(nrow(Predictors)*samps[hyper]), mtry = ceiling(ncol(Predictors)*mtries[2]), nodesize = nodesizes[1], ntree = ntrees[2])
tempPredictionset[testIndices,] <- predict(fit1, newdata = TestingData, what = quants)
#QRFimp[time,season,CVmonth,] <- importance(fit1)
} else if (RegMethod == 9){
fit1 <- quantile_forest(Predictors, Predictand, quantiles = quants, regression.splitting = FALSE, sample.fraction = samps[hyper],
mtry = ceiling(ncol(Predictors)*mtries[2]), num.trees = ntrees[2], min.node.size = nodesizes[1], honesty = F, honesty.fraction = NULL)
#split_frequencies(fit1, max.depth = 5)
#GRFimp[time,season,CVmonth,] <- variable_importance(fit1)[,1]
tempPredictionset[testIndices,] <- predict(fit1, newdata = TestingData, quantiles = quants)
}
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText,hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
# set library paths:
libpathKiri = "/net/pc132059/nobackup_1/users/whan/R/x86_64-redhat-linux-gnu-library/3.3"
libpath = "/usr/people/bakker/R/x86_64-redhat-linux-gnu-library/3.4"
#libpath = "/tmp/RtmpSDMBdh/downloaded_packages"
.libPaths(c(.libPaths()))#, libpathKiri))
library(maps)
library(maptools)
library(ncdf4)
library(tidyverse)
library(rasterVis)
library(ggplot2)
library(caret)
library(leaps)
library(grid)
library(gridExtra)
library(glmnet)
library(quantreg)
library(rqPen)
library(gamlss)
library(gamlss.tr)
library(randomForest)
library(quantregForest)
library(grf)
library(gbm)
library(neuralnet)
library(qrnn)
library(e1071)
library(qrsvm)
library(rpart)
library(gptk)
library(class)
library(pryr)
library(devtools)
library(Rcpp)
sourceCpp("/usr/people/bakker/kilianbakker/R/crps_ensemble.cpp")
source("/usr/people/bakker/kilianbakker/R/functions.R")
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan Zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Volkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
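# hyperparameter sweep: GBM shrinkage (learning rate)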
hyperparamText <- "shrinkages"
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
hyperparameters <- shrinkages
for (hyper in 1:length(hyperparameters)){
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
NumberofCV <- 3
setting <- 4
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(15)){
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
if (ContMethod == 1){
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
# model formulas (not used by the tree-based branches below)
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
# fit the selected method: RegMethod 5 = gradient boosting (one fit per quantile)
if (RegMethod == 5){
for (q in 1:length(quants)){
fit1 <- gbm(CSI_obs~., data=TrainingData, distribution=list(name = "quantile",alpha = quants[q]), n.trees = ntrees[2], shrinkage=shrinkages[hyper],
interaction.depth=depths[1], bag.fraction = samps[3], train.fraction = 1, n.minobsinnode = nodesizes[1], verbose=FALSE)
tempPredictionset[testIndices,q] <- predict(fit1, newdata = TestingData, n.trees = ntrees[2])
#GBMimp[time,season,CVmonth,q,] <- varImp(fit1,gbmTrees)[[1]]
}
}
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText,hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Vonkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
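# convert the hourly observed radiation sums (KNMI stores them in J/cm2) to mean irradiance in W/m2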
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
hyperparamText <- "depths"
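  # hyperparameter sweep over the gbm interaction depth (depths[hyper]); shrinkage, number of trees, bag fraction and node size stay fixed below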
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
hyperparameters <- depths
for (hyper in 1:length(hyperparameters)){
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
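  # each settings entry is c(ContMethod, ProbMethod, RegMethod, DistMethod), unpacked at the start of the setting loop below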
NumberofCV <- 3
setting <- 4
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(15)){
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
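      # divide the five radiation predictors (Global, Direct_SURF, Direct_TOA, NCS_SURF, NCS_TOA) by the clear-sky radiation, turning them into clear-sky indices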
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
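      # DiscretizeWidth (the rounding width for ContMethod == 1) is assumed to be defined elsewhere; the settings used here have ContMethod == 2, so this branch is skipped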
if (ContMethod == 1){
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
          # optional exploratory fit/plot of a single predictor against the predictand (kept commented out)
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
          # quantile gradient boosting: one gbm fit per quantile using the pinball (quantile) loss
if (RegMethod == 5){
for (q in 1:length(quants)){
fit1 <- gbm(CSI_obs~., data=TrainingData, distribution=list(name = "quantile",alpha = quants[q]), n.trees = ntrees[2], shrinkage=shrinkages[3],
interaction.depth=depths[hyper], bag.fraction = samps[3], train.fraction = 1, n.minobsinnode = nodesizes[1], verbose=FALSE)
tempPredictionset[testIndices,q] <- predict(fit1, newdata = TestingData, n.trees = ntrees[2])
#GBMimp[time,season,CVmonth,q,] <- varImp(fit1,gbmTrees)[[1]]
}
}
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText,hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Vonkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
hyperparamText <- "iters"
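  # hyperparameter sweep over the maximum number of training iterations (iter.max) of the quantile regression neural network (mcqrnn)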
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
hyperparameters <- iters
for (hyper in 1:length(hyperparameters)){
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
NumberofCV <- 3
setting <- 4
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(16)){
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
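      # top-of-atmosphere irradiance on a horizontal plane: solar constant (1367 W/m2) times the cosine of the solar zenith angle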
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
if (ContMethod == 1){
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
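        # NumberofCV-fold cross-validation: each fold is predicted once while the remaining cases form the training set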
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
          # optional exploratory fit/plot of a single predictor against the predictand (kept commented out)
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
          # monotone composite quantile regression neural network (mcqrnn): all quantiles fitted jointly
if (RegMethod == 6){
fit1 <- mcqrnn.fit(as.matrix(Predictors), as.matrix(Predictand), n.hidden = hiddens1[1], n.hidden2 = hiddens2[1], tau = quants, iter.max=iters[hyper], n.trials=2,
trace = F, Th = sigmoid, Th.prime = sigmoid.prime)
tempPredictionset[testIndices,] <- as.matrix(mcqrnn.predict(as.matrix(TestingData), fit1, tau = quants))
}
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText,hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Vonkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
hyperparamText <- "hiddens1"
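  # hyperparameter sweep over the number of nodes in the first hidden layer (n.hidden) of the quantile regression neural network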
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
hyperparameters <- hiddens1
for (hyper in 1:length(hyperparameters)){
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
NumberofCV <- 3
setting <- 4
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(16)){
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
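      # lead times beyond 24 h verify on the day after initialisation, so shift the dates used to look up observations, clear-sky radiation and zenith angles by one day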
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
if (ContMethod == 1){
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
          # optional exploratory fit/plot of a single predictor against the predictand (kept commented out)
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
          # monotone composite quantile regression neural network (mcqrnn): all quantiles fitted jointly
if (RegMethod == 6){
fit1 <- mcqrnn.fit(as.matrix(Predictors), as.matrix(Predictand), n.hidden = hiddens1[hyper], n.hidden2 = hiddens2[1], tau = quants, iter.max=iters[2], n.trials=2,
trace = F, Th = sigmoid, Th.prime = sigmoid.prime)
tempPredictionset[testIndices,] <- as.matrix(mcqrnn.predict(as.matrix(TestingData), fit1, tau = quants))
}
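          # sort the predicted quantiles within each case so the quantile function is non-decreasing (prevents quantile crossing)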
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText,hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Vonkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
hyperparamText <- "hiddens2"
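  # hyperparameter sweep over the number of nodes in the second hidden layer (n.hidden2) of the quantile regression neural network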
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
hyperparameters <- hiddens2
for (hyper in 1:length(hyperparameters)){
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
NumberofCV <- 3
setting <- 4
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(16)){
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
if (ContMethod == 1){
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
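      # keep only cases in this season with clear-sky radiation above 20 (daytime) and within the selected dates, then shuffle before building the CV folds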
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
          # optional exploratory fit/plot of a single predictor against the predictand (kept commented out)
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
          # monotone composite quantile regression neural network (mcqrnn): all quantiles fitted jointly
if (RegMethod == 6){
fit1 <- mcqrnn.fit(as.matrix(Predictors), as.matrix(Predictand), n.hidden = hiddens1[1], n.hidden2 = hiddens2[hyper], tau = quants, iter.max=iters[2], n.trials=2,
trace = F, Th = sigmoid, Th.prime = sigmoid.prime)
tempPredictionset[testIndices,] <- as.matrix(mcqrnn.predict(as.matrix(TestingData), fit1, tau = quants))
}
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
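      # rescale the predicted clear-sky indices to radiation by multiplying with the clear-sky radiation and store them per date and station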
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText,hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Vonkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
hyperparamText <- "penalties"
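  # hyperparameter sweep over the weight-decay penalty of the quantile regression neural network (penalty argument of mcqrnn.fit); the penalties grid is defined just below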
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
penalties <- c(0,0.01,0.1,1)
hyperparameters <- penalties
for (hyper in 1:length(hyperparameters)){
print("a")
Sys.sleep(0.01)
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
NumberofCV <- 3
setting <- 4
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(16)){
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
if (ContMethod == 1){
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
          # optional exploratory fit/plot of a single predictor against the predictand (kept commented out)
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
          # monotone composite quantile regression neural network (mcqrnn): all quantiles fitted jointly
if (RegMethod == 6){
fit1 <- mcqrnn.fit(as.matrix(Predictors), as.matrix(Predictand), n.hidden = hiddens1[1], n.hidden2 = hiddens2[1], tau = quants, iter.max=iters[2], n.trials=2,
trace = F, Th = sigmoid, Th.prime = sigmoid.prime, penalty = penalties[hyper])
tempPredictionset[testIndices,] <- as.matrix(mcqrnn.predict(as.matrix(TestingData), fit1, tau = quants))
}
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText,hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
|
/temphyper.R
|
no_license
|
kilianbakker/KNMI-internship
|
R
| false | false | 164,354 |
r
|
# set library paths:
libpathKiri = "/net/pc132059/nobackup_1/users/whan/R/x86_64-redhat-linux-gnu-library/3.3"
libpath = "/usr/people/bakker/R/x86_64-redhat-linux-gnu-library/3.4"
#libpath = "/tmp/RtmpSDMBdh/downloaded_packages"
.libPaths(c(.libPaths()))#, libpathKiri))
library(maps)
library(maptools)
library(ncdf4)
library(tidyverse)
library(rasterVis)
library(ggplot2)
library(caret)
library(leaps)
library(grid)
library(gridExtra)
library(glmnet)
library(quantreg)
library(rqPen)
library(gamlss)
library(gamlss.tr)
library(randomForest)
library(quantregForest)
library(grf)
library(gbm)
library(neuralnet)
library(qrnn)
library(e1071)
library(qrsvm)
library(rpart)
library(gptk)
library(class)
library(pryr)
library(devtools)
library(Rcpp)
sourceCpp("/usr/people/bakker/kilianbakker/R/crps_ensemble.cpp")
source("/usr/people/bakker/kilianbakker/R/functions.R")
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Vonkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
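# convert the hourly radiation observations to W/m2; the 10000/3600 factor suggests the raw
# values are in J/cm2 per hour (assumption based on the conversion factor)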
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
hyperparamText <- "musteps"
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
hyperparameters <- musteps
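  # this sweep varies the maximum number of stepGAIC selection steps for the mu formula
  # (the sigma formula is kept at sigmasteps[2] below)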
for (hyper in 1:length(hyperparameters)){
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
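  # lead times 5-19 fall on day 1 and 29-43 on day 2 after initialization; for lead times > 24
  # the valid date is shifted one day ahead further down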
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
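  # each setting is c(ContMethod, ProbMethod, RegMethod, DistMethod): continuous (2) or rounded (1)
  # predictand, deterministic (1) or quantile (2) output, the regression method, and the
  # distribution variant used by the GAMLSS-based methods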
NumberofCV <- 3
setting <- 14
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(10,12,17)){
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
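        # the first five predictors are radiation fields (Global, Direct_SURF, Direct_TOA, NCS_SURF, NCS_TOA);
        # dividing by the clear-sky radiation turns them into clear-sky indices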
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
if (ContMethod == 1){
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
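        # keep only daylight cases (clear-sky radiation above 20 W/m2) within the selected season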
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
          # (the commented-out lines below plot a single predictor against the predictand via FitPlot)
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
          # RegMethod 1: stepwise GAMLSS regression (stepGAIC selects predictors for mu and sigma)
if (RegMethod == 1){
fitcontrol <- gamlss.control(c.crit = 0.01, n.cyc = 50, mu.step = 1, sigma.step = 1, nu.step = 1,
tau.step = 1, gd.tol = Inf, iter = 0, trace = F, autostep = TRUE, save = T)
fitcontrol2 <- glim.control(cc = 0.01, cyc = 50, glm.trace = F, bf.cyc = 50, bf.tol = 0.01, bf.trace = F)
if (DistMethod %in% c(-1,0,1)){
fit0 <- gamlss(formula = CSI_obs~1, sigma.formula = CSI_obs~1, data = TrainingData, family = NO(mu.link = "identity", sigma.link = "identity"),
control = fitcontrol,i.control = fitcontrol2, trace = F)
if (DistMethod == -1){
fit1 <- fit0
} else if (DistMethod == 0){
fit1 <- stepGAIC(fit0, what="mu", scope = list(upper = paste0("~Global")), direction = "both", control = fitcontrol, i.control = fitcontrol2, trace = F)
} else if (DistMethod == 1){
fit1 <- stepGAIC(fit0, what="mu", scope = list(upper = form), steps = musteps[hyper], direction = "both", control = fitcontrol, i.control = fitcontrol2, trace = F)
fit1 <- stepGAIC(fit1, what="sigma", scope = list(upper = form), steps = sigmasteps[2], direction = "both", control = fitcontrol, i.control = fitcontrol2, trace = F)
}
Predictionset_mu <- unname(predict(fit1, newdata = TestingData, what = c("mu")), force = FALSE)
Predictionset_sigma <- pmax(unname(predict(fit1, newdata = TestingData, what = c("sigma")), force = FALSE),array(0.001, c(length(testIndices))))
for (i in 1:length(testIndices)){
tempPredictionset[testIndices[i],] <- unname(qNO(quants, mu = Predictionset_mu[i], sigma = Predictionset_sigma[i]))
}
} else if (DistMethod %in% c(2,3)){
if (DistMethod == 2){
gen.trun(par=c(min(RadiationData[predictand][[1]][tempIndices], na.rm = T),
max(RadiationData[predictand][[1]][tempIndices], na.rm = T)),"NO", type="both")
} else if (DistMethod == 3){
gen.trun(par=c(0),"NO", type="left")
}
fit0 <- gamlss(formula = CSI_obs~1, sigma.formula = CSI_obs~1, data = TrainingData, family = NOtr(mu.link = "identity", sigma.link = "identity"), control = fitcontrol,i.control = fitcontrol2, trace = F)
fit1 <- stepGAIC(fit0, what="mu", scope = list(upper = form), steps = musteps[hyper], direction = "both", control = fitcontrol, i.control = fitcontrol2, trace = F)
fit1 <- stepGAIC(fit1, what="sigma", scope = list(upper = form), steps = sigmasteps[2], direction = "both", control = fitcontrol, i.control = fitcontrol2, trace = F)
Predictionset_mu <- unname(predict(fit1, newdata = TestingData, what = c("mu")), force = FALSE)
Predictionset_sigma <- pmax(unname(predict(fit1, newdata = TestingData, what = c("sigma")), force = FALSE),array(0.001, c(length(testIndices))))
for (i in 1:length(testIndices)){
tempPredictionset[testIndices[i],] <- unname(qNOtr(quants, mu = Predictionset_mu[i], sigma = Predictionset_sigma[i]))
}
}
}
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText, hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Vonkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
hyperparamText <- "sigmasteps"
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
hyperparameters <- sigmasteps
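  # this sweep varies the maximum number of stepGAIC selection steps for the sigma formula
  # (the mu formula is kept at musteps[2] below)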
for (hyper in 1:length(hyperparameters)){
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
NumberofCV <- 3
setting <- 14
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(10,12)){
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
if (ContMethod == 1){
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
          # (the commented-out lines below plot a single predictor against the predictand via FitPlot)
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
          # RegMethod 1: stepwise GAMLSS regression (stepGAIC selects predictors for mu and sigma)
if (RegMethod == 1){
fitcontrol <- gamlss.control(c.crit = 0.01, n.cyc = 50, mu.step = 1, sigma.step = 1, nu.step = 1,
tau.step = 1, gd.tol = Inf, iter = 0, trace = F, autostep = TRUE, save = T)
fitcontrol2 <- glim.control(cc = 0.01, cyc = 50, glm.trace = F, bf.cyc = 50, bf.tol = 0.01, bf.trace = F)
if (DistMethod %in% c(-1,0,1)){
fit0 <- gamlss(formula = CSI_obs~1, sigma.formula = CSI_obs~1, data = TrainingData, family = NO(mu.link = "identity", sigma.link = "identity"),
control = fitcontrol,i.control = fitcontrol2, trace = F)
if (DistMethod == -1){
fit1 <- fit0
} else if (DistMethod == 0){
fit1 <- stepGAIC(fit0, what="mu", scope = list(upper = paste0("~Global")), direction = "both", control = fitcontrol, i.control = fitcontrol2, trace = F)
} else if (DistMethod == 1){
fit1 <- stepGAIC(fit0, what="mu", scope = list(upper = form), steps = musteps[2], direction = "both", control = fitcontrol, i.control = fitcontrol2, trace = F)
fit1 <- stepGAIC(fit1, what="sigma", scope = list(upper = form), steps = sigmasteps[hyper], direction = "both", control = fitcontrol, i.control = fitcontrol2, trace = F)
}
Predictionset_mu <- unname(predict(fit1, newdata = TestingData, what = c("mu")), force = FALSE)
Predictionset_sigma <- pmax(unname(predict(fit1, newdata = TestingData, what = c("sigma")), force = FALSE),array(0.001, c(length(testIndices))))
for (i in 1:length(testIndices)){
tempPredictionset[testIndices[i],] <- unname(qNO(quants, mu = Predictionset_mu[i], sigma = Predictionset_sigma[i]))
}
} else if (DistMethod %in% c(2,3)){
if (DistMethod == 2){
gen.trun(par=c(min(RadiationData[predictand][[1]][tempIndices], na.rm = T),
max(RadiationData[predictand][[1]][tempIndices], na.rm = T)),"NO", type="both")
} else if (DistMethod == 3){
gen.trun(par=c(0),"NO", type="left")
}
fit0 <- gamlss(formula = CSI_obs~1, sigma.formula = CSI_obs~1, data = TrainingData, family = NOtr(mu.link = "identity", sigma.link = "identity"), control = fitcontrol,i.control = fitcontrol2, trace = F)
fit1 <- stepGAIC(fit0, what="mu", scope = list(upper = form), steps = musteps[2], direction = "both", control = fitcontrol, i.control = fitcontrol2, trace = F)
fit1 <- stepGAIC(fit1, what="sigma", scope = list(upper = form), steps = sigmasteps[hyper], direction = "both", control = fitcontrol, i.control = fitcontrol2, trace = F)
Predictionset_mu <- unname(predict(fit1, newdata = TestingData, what = c("mu")), force = FALSE)
Predictionset_sigma <- pmax(unname(predict(fit1, newdata = TestingData, what = c("sigma")), force = FALSE),array(0.001, c(length(testIndices))))
for (i in 1:length(testIndices)){
tempPredictionset[testIndices[i],] <- unname(qNOtr(quants, mu = Predictionset_mu[i], sigma = Predictionset_sigma[i]))
}
}
}
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText,hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
# set library paths:
libpathKiri = "/net/pc132059/nobackup_1/users/whan/R/x86_64-redhat-linux-gnu-library/3.3"
libpath = "/usr/people/bakker/R/x86_64-redhat-linux-gnu-library/3.4"
#libpath = "/tmp/RtmpSDMBdh/downloaded_packages"
.libPaths(c(.libPaths()))#, libpathKiri))
library(maps)
library(maptools)
library(ncdf4)
library(tidyverse)
library(rasterVis)
library(ggplot2)
library(caret)
library(leaps)
library(grid)
library(gridExtra)
library(glmnet)
library(quantreg)
library(rqPen)
library(gamlss)
library(gamlss.tr)
library(randomForest)
library(quantregForest)
library(grf)
library(gbm)
library(neuralnet)
library(qrnn)
library(e1071)
library(qrsvm)
library(rpart)
library(gptk)
library(class)
library(pryr)
library(devtools)
library(Rcpp)
sourceCpp("/usr/people/bakker/kilianbakker/R/crps_ensemble.cpp")
source("/usr/people/bakker/kilianbakker/R/functions.R")
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Vonkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
hyperparamText <- "ntrees"
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
hyperparameters <- ntrees
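  # this sweep varies the number of trees for the tree-based methods: quantile regression forest
  # (RegMethod 4), gradient boosting (RegMethod 5) and generalized random forest (RegMethod 9)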
for (hyper in 1:length(hyperparameters)){
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
NumberofCV <- 3
setting <- 4
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(14,15,18)){
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
if (ContMethod == 1){
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
          # (the commented-out lines below plot a single predictor against the predictand via FitPlot)
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
          # RegMethods 4/5/9: quantile regression forest, gradient boosting machine, generalized random forest
if (RegMethod == 4){
fit1 <- quantregForest(Predictors, Predictand, sampsize=ceiling(nrow(Predictors)*samps[3]), mtry = ceiling(ncol(Predictors)*mtries[2]), nodesize = nodesizes[1], ntree = ntrees[hyper])
tempPredictionset[testIndices,] <- predict(fit1, newdata = TestingData, what = quants)
#QRFimp[time,season,CVmonth,] <- importance(fit1)
} else if (RegMethod == 5){
for (q in 1:length(quants)){
fit1 <- gbm(CSI_obs~., data=TrainingData, distribution=list(name = "quantile",alpha = quants[q]), n.trees = ntrees[hyper], shrinkage=shrinkages[3],
interaction.depth=depths[1], bag.fraction = samps[3], train.fraction = 1, n.minobsinnode = nodesizes[1], verbose=FALSE)
tempPredictionset[testIndices,q] <- predict(fit1, newdata = TestingData, n.trees = ntrees[hyper])
#GBMimp[time,season,CVmonth,q,] <- varImp(fit1,gbmTrees)[[1]]
}
} else if (RegMethod == 9){
fit1 <- quantile_forest(Predictors, Predictand, quantiles = quants, regression.splitting = FALSE, sample.fraction = samps[3],
mtry = ceiling(ncol(Predictors)*mtries[2]), num.trees = ntrees[hyper], min.node.size = nodesizes[1], honesty = F, honesty.fraction = NULL)
#split_frequencies(fit1, max.depth = 5)
#GRFimp[time,season,CVmonth,] <- variable_importance(fit1)[,1]
tempPredictionset[testIndices,] <- predict(fit1, newdata = TestingData, quantiles = quants)
}
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText,hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Vonkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
hyperparamText <- "mtries"
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
hyperparameters <- mtries
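  # this sweep varies the fraction of predictors tried at each split (mtry) for the quantile
  # regression forest and the generalized random forest (settings 14 and 18)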
for (hyper in 1:length(hyperparameters)){
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
NumberofCV <- 3
setting <- 4
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(14,18)){
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
if (ContMethod == 1){
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
          # (the commented-out lines below plot a single predictor against the predictand via FitPlot)
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
          # RegMethods 4/5/9: quantile regression forest, gradient boosting machine, generalized random forest
if (RegMethod == 4){
fit1 <- quantregForest(Predictors, Predictand, sampsize=ceiling(nrow(Predictors)*samps[3]), mtry = ceiling(ncol(Predictors)*mtries[hyper]), nodesize = nodesizes[1], ntree = ntrees[2])
tempPredictionset[testIndices,] <- predict(fit1, newdata = TestingData, what = quants)
#QRFimp[time,season,CVmonth,] <- importance(fit1)
} else if (RegMethod == 5){
for (q in 1:length(quants)){
fit1 <- gbm(CSI_obs~., data=TrainingData, distribution=list(name = "quantile",alpha = quants[q]), n.trees = ntrees[2], shrinkage=shrinkages[3],
interaction.depth=depths[1], bag.fraction = samps[3], train.fraction = 1, n.minobsinnode = nodesizes[1], verbose=FALSE)
tempPredictionset[testIndices,q] <- predict(fit1, newdata = TestingData, n.trees = ntrees[2])
#GBMimp[time,season,CVmonth,q,] <- varImp(fit1,gbmTrees)[[1]]
}
} else if (RegMethod == 9){
fit1 <- quantile_forest(Predictors, Predictand, quantiles = quants, regression.splitting = FALSE, sample.fraction = samps[3],
mtry = ceiling(ncol(Predictors)*mtries[hyper]), num.trees = ntrees[2], min.node.size = nodesizes[1], honesty = F, honesty.fraction = NULL)
#split_frequencies(fit1, max.depth = 5)
#GRFimp[time,season,CVmonth,] <- variable_importance(fit1)[,1]
tempPredictionset[testIndices,] <- predict(fit1, newdata = TestingData, quantiles = quants)
}
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText,hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Vonkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
hyperparamText <- "nodesizes"
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
hyperparameters <- nodesizes
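  # this sweep varies the minimum node size of the tree-based methods (settings 14, 15 and 18:
  # QRF, GBM and GRF)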
for (hyper in 1:length(hyperparameters)){
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
NumberofCV <- 3
setting <- 4
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(14,15,18)){
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
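# top-of-atmosphere irradiance: solar constant (1367 W m-2) times the cosine of the solar zenith angle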
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
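# normalise the first five radiation predictors by the clear-sky radiation; the "Global" column then becomes the forecast clear-sky index CSI_for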
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
if (ContMethod == 1){
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
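# keep only cases with clear-sky radiation above 20 W m-2 in this season's months, then shuffle them before building the CV folds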
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
#leftovers: an optional diagnostic plot of a single predictor (commented out) and formula strings that the methods run in this block do not use
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
#tree-based quantile methods: quantregForest (RegMethod 4), quantile gbm (RegMethod 5) or grf quantile_forest (RegMethod 9)
if (RegMethod == 4){
fit1 <- quantregForest(Predictors, Predictand, sampsize=ceiling(nrow(Predictors)*samps[3]), mtry = ceiling(ncol(Predictors)*mtries[2]), nodesize = nodesizes[hyper], ntree = ntrees[2])
tempPredictionset[testIndices,] <- predict(fit1, newdata = TestingData, what = quants)
#QRFimp[time,season,CVmonth,] <- importance(fit1)
} else if (RegMethod == 5){
for (q in 1:length(quants)){
fit1 <- gbm(CSI_obs~., data=TrainingData, distribution=list(name = "quantile",alpha = quants[q]), n.trees = ntrees[2], shrinkage=shrinkages[3],
interaction.depth=depths[1], bag.fraction = samps[3], train.fraction = 1, n.minobsinnode = nodesizes[hyper], verbose=FALSE)
tempPredictionset[testIndices,q] <- predict(fit1, newdata = TestingData, n.trees = ntrees[2])
#GBMimp[time,season,CVmonth,q,] <- varImp(fit1,gbmTrees)[[1]]
}
} else if (RegMethod == 9){
fit1 <- quantile_forest(Predictors, Predictand, quantiles = quants, regression.splitting = FALSE, sample.fraction = samps[3],
mtry = ceiling(ncol(Predictors)*mtries[2]), num.trees = ntrees[2], min.node.size = nodesizes[hyper], honesty = F, honesty.fraction = NULL)
#split_frequencies(fit1, max.depth = 5)
#GRFimp[time,season,CVmonth,] <- variable_importance(fit1)[,1]
tempPredictionset[testIndices,] <- predict(fit1, newdata = TestingData, quantiles = quants)
}
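# sort each row of quantile forecasts so that the predicted quantiles cannot cross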
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
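# rescale the predicted clear-sky-index quantiles to irradiance (multiply by the clear-sky radiation) and store them per date and station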
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText,hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
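# --- Illustrative sketch (added for clarity, not part of the original experiment) ---
# One way the prediction sets saved above could be compared across hyperparameter values is the
# mean pinball (quantile) loss. The array layout [date, station, lead time, quantile] and the
# file-name pattern follow the saveRDS calls above; `pinball_loss` is a hypothetical helper and
# `obs` is assumed to be gathered by the caller for the same dates, stations and lead time.
if (FALSE) {
pinball_loss <- function(obs, pred_quants, quants) {
  # obs: numeric vector; pred_quants: matrix with one column per quantile in `quants`
  err <- obs - pred_quants
  losses <- sapply(seq_along(quants), function(q) {
    mean(ifelse(err[, q] >= 0, quants[q] * err[, q], (quants[q] - 1) * err[, q]), na.rm = TRUE)
  })
  mean(losses)
}
PS <- readRDS("/nobackup/users/bakker/Data2/predictionsets2/PS_nodesizes5_4_0.rds")
pred <- matrix(PS[, , 8, ], ncol = length(quants))  # flatten dates x stations for lead-time index 8
# pinball_loss(obs, pred, quants)
}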
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Vonkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
hyperparamText <- "samps"
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
hyperparameters <- samps
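# this block sweeps the per-tree sampling fraction (quantregForest sampsize, grf sample.fraction); the other tuning parameters keep the fixed values indexed below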
for (hyper in 1:length(hyperparameters)){
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
NumberofCV <- 3
setting <- 4
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(14,18)){ # setting 15 (quantile gbm) is skipped here: this block has no RegMethod == 5 branch, so running it would leave fit1 undefined
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
if (ContMethod == 1){
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
#leftovers: an optional diagnostic plot of a single predictor (commented out) and formula strings that the methods run in this block do not use
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
#tree-based quantile methods swept over the sampling fraction: quantregForest (RegMethod 4) or grf quantile_forest (RegMethod 9)
if (RegMethod == 4){
fit1 <- quantregForest(Predictors, Predictand, sampsize=ceiling(nrow(Predictors)*samps[hyper]), mtry = ceiling(ncol(Predictors)*mtries[2]), nodesize = nodesizes[1], ntree = ntrees[2])
tempPredictionset[testIndices,] <- predict(fit1, newdata = TestingData, what = quants)
#QRFimp[time,season,CVmonth,] <- importance(fit1)
} else if (RegMethod == 9){
fit1 <- quantile_forest(Predictors, Predictand, quantiles = quants, regression.splitting = FALSE, sample.fraction = samps[hyper],
mtry = ceiling(ncol(Predictors)*mtries[2]), num.trees = ntrees[2], min.node.size = nodesizes[1], honesty = F, honesty.fraction = NULL)
#split_frequencies(fit1, max.depth = 5)
#GRFimp[time,season,CVmonth,] <- variable_importance(fit1)[,1]
tempPredictionset[testIndices,] <- predict(fit1, newdata = TestingData, quantiles = quants)
}
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText,hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
# set library paths:
libpathKiri = "/net/pc132059/nobackup_1/users/whan/R/x86_64-redhat-linux-gnu-library/3.3"
libpath = "/usr/people/bakker/R/x86_64-redhat-linux-gnu-library/3.4"
#libpath = "/tmp/RtmpSDMBdh/downloaded_packages"
.libPaths(c(.libPaths()))#, libpathKiri))
library(maps)
library(maptools)
library(ncdf4)
library(tidyverse)
library(rasterVis)
library(ggplot2)
library(caret)
library(leaps)
library(grid)
library(gridExtra)
library(glmnet)
library(quantreg)
library(rqPen)
library(gamlss)
library(gamlss.tr)
library(randomForest)
library(quantregForest)
library(grf)
library(gbm)
library(neuralnet)
library(qrnn)
library(e1071)
library(qrsvm)
library(rpart)
library(gptk)
library(class)
library(pryr)
library(devtools)
library(Rcpp)
sourceCpp("/usr/people/bakker/kilianbakker/R/crps_ensemble.cpp")
source("/usr/people/bakker/kilianbakker/R/functions.R")
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Vonkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
hyperparamText <- "shrinkages"
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
hyperparameters <- shrinkages
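# this block sweeps the gbm shrinkage (learning rate); the other tuning parameters keep the fixed values indexed below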
for (hyper in 1:length(hyperparameters)){
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
NumberofCV <- 3
setting <- 4
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(15)){
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
if (ContMethod == 1){
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
#leftovers: an optional diagnostic plot of a single predictor (commented out) and formula strings that the methods run in this block do not use
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
#quantile gbm (RegMethod 5); the shrinkage (learning rate) is the swept hyperparameter
if (RegMethod == 5){
for (q in 1:length(quants)){
fit1 <- gbm(CSI_obs~., data=TrainingData, distribution=list(name = "quantile",alpha = quants[q]), n.trees = ntrees[2], shrinkage=shrinkages[hyper],
interaction.depth=depths[1], bag.fraction = samps[3], train.fraction = 1, n.minobsinnode = nodesizes[1], verbose=FALSE)
tempPredictionset[testIndices,q] <- predict(fit1, newdata = TestingData, n.trees = ntrees[2])
#GBMimp[time,season,CVmonth,q,] <- varImp(fit1,gbmTrees)[[1]]
}
}
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText,hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Vonkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
hyperparamText <- "depths"
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
hyperparameters <- depths
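# this block sweeps the gbm interaction depth; the other tuning parameters keep the fixed values indexed below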
for (hyper in 1:length(hyperparameters)){
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
NumberofCV <- 3
setting <- 4
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(15)){
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
if (ContMethod == 1){
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
#leftovers: an optional diagnostic plot of a single predictor (commented out) and formula strings that the methods run in this block do not use
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
#quantile gbm (RegMethod 5); the interaction depth is the swept hyperparameter
if (RegMethod == 5){
for (q in 1:length(quants)){
fit1 <- gbm(CSI_obs~., data=TrainingData, distribution=list(name = "quantile",alpha = quants[q]), n.trees = ntrees[2], shrinkage=shrinkages[3],
interaction.depth=depths[hyper], bag.fraction = samps[3], train.fraction = 1, n.minobsinnode = nodesizes[1], verbose=FALSE)
tempPredictionset[testIndices,q] <- predict(fit1, newdata = TestingData, n.trees = ntrees[2])
#GBMimp[time,season,CVmonth,q,] <- varImp(fit1,gbmTrees)[[1]]
}
}
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText,hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Vonkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
hyperparamText <- "iters"
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
hyperparameters <- iters
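# this block sweeps the maximum number of training iterations (iter.max) of the quantile regression neural network (mcqrnn)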
for (hyper in 1:length(hyperparameters)){
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
NumberofCV <- 3
setting <- 4
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(16)){
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
if (ContMethod == 1){
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
#leftovers: an optional diagnostic plot of a single predictor (commented out) and formula strings that the methods run in this block do not use
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
#monotone composite quantile regression neural network (RegMethod 6, mcqrnn); it fits all quantiles jointly, with iter.max as the swept hyperparameter
if (RegMethod == 6){
fit1 <- mcqrnn.fit(as.matrix(Predictors), as.matrix(Predictand), n.hidden = hiddens1[1], n.hidden2 = hiddens2[1], tau = quants, iter.max=iters[hyper], n.trials=2,
trace = F, Th = sigmoid, Th.prime = sigmoid.prime)
tempPredictionset[testIndices,] <- as.matrix(mcqrnn.predict(as.matrix(TestingData), fit1, tau = quants))
}
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText,hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Vonkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
hyperparamText <- "hiddens1"
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
hyperparameters <- hiddens1
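# this block sweeps hiddens1, presumably used as n.hidden (size of the first hidden layer) in the mcqrnn fit below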
for (hyper in 1:length(hyperparameters)){
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
NumberofCV <- 3
setting <- 4
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(16)){
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
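# approximate top-of-atmosphere irradiance from the solar constant (~1367 W/m^2) and the solar zenith angle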
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
if (ContMethod == 1){
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
#normal linear regression
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
# monotone composite quantile regression neural network (MCQRNN)
if (RegMethod == 6){
fit1 <- mcqrnn.fit(as.matrix(Predictors), as.matrix(Predictand), n.hidden = hiddens1[hyper], n.hidden2 = hiddens2[1], tau = quants, iter.max=iters[2], n.trials=2,
trace = F, Th = sigmoid, Th.prime = sigmoid.prime)
tempPredictionset[testIndices,] <- as.matrix(mcqrnn.predict(as.matrix(TestingData), fit1, tau = quants))
}
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText,hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Vonkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
hyperparamText <- "hiddens2"
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
hyperparameters <- hiddens2
for (hyper in 1:length(hyperparameters)){
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
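# each settings entry is c(ContMethod, ProbMethod, RegMethod, DistMethod); the four values are unpacked inside the loop below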
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
NumberofCV <- 3
setting <- 4
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(16)){
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
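# approximate top-of-atmosphere irradiance from the solar constant (~1367 W/m^2) and the solar zenith angle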
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
if (ContMethod == 1){
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
#normal linear regression
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
# monotone composite quantile regression neural network (MCQRNN)
if (RegMethod == 6){
fit1 <- mcqrnn.fit(as.matrix(Predictors), as.matrix(Predictand), n.hidden = hiddens1[1], n.hidden2 = hiddens2[hyper], tau = quants, iter.max=iters[2], n.trials=2,
trace = F, Th = sigmoid, Th.prime = sigmoid.prime)
tempPredictionset[testIndices,] <- as.matrix(mcqrnn.predict(as.matrix(TestingData), fit1, tau = quants))
}
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText,hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
# The variables/constants
fnames <- data.frame(files = list.files(path = "/nobackup/users/bakker/Data2/radiationvariables", full.names = TRUE), stringsAsFactors = FALSE)
init_dates <- as.numeric(unique(gsub(".rds", "", basename(fnames$files))))
stationycoor <- c(52.933, 52.317, 52.65, 53.383, 52.5, 52.1, 52.9, 52.45, 53.217, 52.7, 52.05, 53.417, 52.433, 52.75, 53.117,
52.067, 53.2, 52.267, 51.45, 51.217, 51.983, 51.967, 51.971, 51.567, 51.85, 51.45, 51.65, 51.2, 50.9, 51.5)
stationxcoor <- c(4.783, 4.783, 4.983, 5.35, 4.6, 5.183, 5.383, 5.517, 5.75, 5.883, 5.867, 6.2, 6.267, 6.567, 6.583, 6.65, 7.15,
6.883, 3.6, 3.867, 4.117, 4.45, 4.927, 4.933, 5.15, 5.383, 5.7, 5.767, 5.767, 6.2)
stationNumber <- c(235, 240, 249, 251, 257, 260, 267, 269, 270, 273, 275, 277, 278, 279, 280, 283, 286, 290, 310, 319, 330,
344, 348, 350, 356, 370, 375, 377, 380, 391)
stationName <- c("De Kooy", "Schiphol", "Berkhout", "Hoorn (Terschelling)", "Wijk aan zee",
"De Bilt", "Stavoren", "Lelystad", "Leeuwarden", "Marknesse", "Deelen", "Lauwersoog", "Heino",
"Hoogeveen", "Eelde", "Hupsel", "Nieuw Beerta", "Twenthe", "Vlissingen", "Westdorpe",
"Hoek van Holland", "Rotterdam", "Cabauw", "Gilze-Rijen", "Herwijnen", "Eindhoven", "Vonkel", "Ell", "Maastricht", "Arcen")
CoastDistance <- calculating_distances(stationxcoor, stationycoor, "Coast")
WaterDistance <- calculating_distances(stationxcoor, stationycoor, "Water")
InlandDistance <- calculating_distances(stationxcoor, stationycoor, "Inland")
stationData <- data.frame(Station = stationName, Number = stationNumber, xcoor = stationxcoor, ycoor = stationycoor,
DistToCoast = CoastDistance, DistToWater = WaterDistance, DistToInland = InlandDistance)
variableNames <- c("Global", "Direct_SURF", "Direct_TOA", "NCS_SURF", "NCS_TOA", "CC_Total", "CC_Low", "CC_Medium", "CC_High", "CW_Total", "CW_Low",
"CW_Medium", "CW_High", "PW_Total", "PW_Low", "PW_Medium", "PW_High", "Rain", "AOD_500", "Ang_exp", "Ozone", "T_Low", "T_Medium", "T_High",
"RH_Low", "RH_Medium", "RH_High", "CosZenith", "Lat", "Lon", "DistToCoast", "DistToWater", "DistToInland", "DoY")
clearskyData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/clearsky_data.rds")
zenithangleData <- readRDS(file = "/usr/people/bakker/kilianbakker/Data/zenithangles_data.rds")
observationData <-read.table("/usr/people/bakker/kilianbakker/Data/observation_data.txt", sep=",", col.names=c("Station","Date","Time","Radiation"), fill=TRUE)
observationData[[4]] <- observationData[[4]]*(10000/3600)
varSizes <- c("variables_1x1gridbox_1lt","variables_1x1gridbox_3lt","variables_1x1gridbox_3lt_RadNoAvg","variables_3x3gridbox_1lt", "variables_3x3gridbox_3lt",
"variables_3x3gridbox_3lt_RadNoAvg","variables_5x5gridbox_1lt", "variables_5x5gridbox_3lt", "variables_5x5gridbox_3lt_RadNoAvg",
"variables_7x7gridbox_1lt", "variables_7x7gridbox_3lt", "variables_7x7gridbox_3lt_RadNoAvg","variables_9x9gridbox_1lt", "variables_9x9gridbox_3lt",
"variables_9x9gridbox_3lt_RadNoAvg")[c(14)]
for (vars in 1:length(varSizes)){
varSize <- varSizes[vars]
AllvariableData <- readRDS(file = paste0("/usr/people/bakker/kilianbakker/Data/",varSize,".rds"))
hyperparamText <- "penalties"
samps <- c(1/6,2/6,3/6,4/6,5/6)
mtries <- c(1/6,2/6,3/6,4/6,5/6)
nodesizes <- c(5,10,20,50)
ntrees <- c(100,500,1000,2000)
shrinkages <- c(0.001,0.01,0.1,0.2,0.5,1)
depths <- c(1,5,10,20,40)
hiddens1 <- c(1,2,3)
hiddens2 <- c(1,2,3)
iters <- c(2,10,100)
musteps <- c(1,3,5,10)
sigmasteps <- c(0,1,3,5)
penalties <- c(0,0.01,0.1,1)
hyperparameters <- penalties
for (hyper in 1:length(hyperparameters)){
print("a")
Sys.sleep(0.01)
stepsize <- 1/50
quants <- seq(stepsize,1-stepsize, stepsize)
predictand <- 2 #1 = obs_SURF, 2 = obs_SURF/CSR, 3 = obs_SURF/obs_TOA
predictandNames <- c("Radiation_obs", "CSI_obs", "Ratio_obs")
heights2 <- c(0, 2, 110.88, 540.34, 761.97, 988.50, 1457.30, 1948.99, 3012.18, 4206.43, 5574.44, 7185.44, 9163.96, 11784.06)
leadTimes <- c(5:19, 29:43)
sigmaPredictors <- c(0,0,0,1,1,1,2,2,2,1,1,1,0,0,0,0,0,0,1,1,1,2,2,2,1,1,1,0,0,0)
seasons <- list(c(201612,201701,201702,201712,201801,201802), c(201604,201605,201703,201704,201705,201803),
c(201606,201607,201608,201706,201707,201708), c(201609,201610,201611,201709,201710,201711))
clearskyHours <-readRDS("/usr/people/bakker/kilianbakker/Data/clearskyhours.rds")
clearskyHours <- clearskyHours[,which(as.numeric(colnames(clearskyHours)) %in% init_dates)]
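# each settings entry is c(ContMethod, ProbMethod, RegMethod, DistMethod); the four values are unpacked inside the loop below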
settings <- list(c(2,1,0,0),c(2,1,1,0),c(2,1,3,0),c(2,1,4,0),c(2,1,5,0),c(2,1,6,0),c(2,2,0,0),c(2,2,1,-1),c(2,2,1,0),c(2,2,1,1),c(2,2,1,2),c(2,2,1,3),
c(2,2,3,0),c(2,2,4,0),c(2,2,5,0),c(2,2,6,0),c(2,2,8,0),c(2,2,9,0),c(2,2,10,0))
NumberofCV <- 3
setting <- 4
stationPlaces <- c(1:30)
time <- 8
season <- 1
CVmonth <- 1
repeated_init_dates <- rep(init_dates,each = length(stationPlaces))
repeated_places <- rep(stationData[[2]][stationPlaces], length(init_dates))
variableIndices <- c(1:length(variableNames))
QRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
GBMimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(quants),length(variableIndices)))
GRFimp <- array(NA, c(length(leadTimes),length(seasons),NumberofCV,length(variableIndices)))
for (setting in c(16)){
ContMethod <- settings[[setting]][1]
ProbMethod <- settings[[setting]][2]
RegMethod <- settings[[setting]][3]
DistMethod <- settings[[setting]][4]
if (ProbMethod == 1){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces),length(leadTimes)))
} else if (ProbMethod == 2){
Predictionset <- array(NA, c(length(init_dates), length(stationPlaces), length(leadTimes), length(quants)))
}
for (time in c(4,8,12,19,23,27)){#1:length(leadTimes)){
leadTime <- leadTimes[time]
temp_init_dates <- init_dates
if (leadTime > 24){
for (d in 1:length(temp_init_dates)){
tempDate <- paste0(substr(temp_init_dates[d],1,4), "-", substr(temp_init_dates[d],5,6), "-", substr(temp_init_dates[d],7,8))
temp_init_dates[d] <- as.numeric(gsub("-","",as.Date(tempDate) + 1))
}
}
temp_init_dates2 <- init_dates
#temp_init_dates2 <- c(20160605,20160817,20160824,20160825,20160913,20160914,20161005,20161128,20170409,20170526,20171015,20180207,20180225)
DateIndices <- which(repeated_init_dates %in% temp_init_dates2)
tempObsData <- filter(observationData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
obs_SURF <- tempObsData[order(tempObsData$Date),][[4]]
tempCSData <- filter(clearskyData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
CSR <- tempCSData[order(tempCSData$Date),][[4]]
tempZAData <- filter(zenithangleData, Station %in% stationData[[2]][stationPlaces], Time == (leadTime %% 24), Date %in% temp_init_dates)
Zenith <- tempZAData[order(tempZAData$Date),][[4]]
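# approximate top-of-atmosphere irradiance from the solar constant (~1367 W/m^2) and the solar zenith angle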
obs_TOA <- 1367*cos(Zenith*pi/180)
tempvariableData <- AllvariableData[, stationPlaces, time, variableIndices]
variableData <- array(NA, c(length(init_dates)*length(stationPlaces),length(variableIndices)))
for (date in 1:length(init_dates)){
variableData[(length(stationPlaces)*(date - 1) + 1):(length(stationPlaces)*date),] <- tempvariableData[date,,]
}
for (var in 1:5){
variableData[,var] <- variableData[,var]/CSR
}
if (ContMethod == 1){
CSI_obs <- round(obs_SURF/CSR, digits = log(1/DiscretizeWidth)/log(10))
Ratio_obs <- round(obs_SURF/obs_TOA, digits = log(1/DiscretizeWidth)/log(10))
obs_SURF <- round(obs_SURF, digits = log(1/DiscretizeWidth)/log(10))
CSI_for = round(variableData[,which(variableNames[variableIndices] == "Global")], digits = log(1/DiscretizeWidth)/log(10))
} else if (ContMethod == 2){
CSI_obs = obs_SURF/CSR
Ratio_obs = obs_SURF/obs_TOA
CSI_for = variableData[,which(variableNames[variableIndices] == "Global")]
}
RadiationData <- data.frame(obs_SURF, CSI_obs, Ratio_obs, CSI_for, Station = repeated_places, Date = repeated_init_dates)
All_Data <- data.frame(variableData, RadiationData[predictand][[1]], repeated_places, repeated_init_dates)
colnames(All_Data) <- c(variableNames[variableIndices], predictandNames[predictand], "Station", "Date")
for (season in 1:length(seasons)){
DataMonths <- seasons[[season]]
tempIndices <- intersect(intersect(which(CSR > 20), which(floor(repeated_init_dates/100) %in% DataMonths)), DateIndices)
tempIndices <- sample(tempIndices)
if (ProbMethod == 1){
tempPredictionset <- array(NA,c(length(tempIndices)))
} else if (ProbMethod == 2){
tempPredictionset <- array(NA,c(length(tempIndices),length(quants)))
}
if (length(tempIndices) >= 50){
for (CVmonth in 1:NumberofCV){
testIndices <- c(floor(length(tempIndices)*(CVmonth-1)/NumberofCV)+1):(floor(length(tempIndices)*CVmonth/NumberofCV))
TrainingData <- All_Data[tempIndices,][-testIndices, 1:(length(colnames(All_Data))-2)]
TestingData <- All_Data[tempIndices,][testIndices, 1:(length(colnames(All_Data))-3)]
#zeroVarColumns <- c(which(as.numeric(apply(TrainingData,2,var)) <= 0.0001))
#if (length(zeroVarColumns) > 0){
# TrainingData <- TrainingData[,-zeroVarColumns]
# TestingData <- TestingData[,-zeroVarColumns]
#}
Predictors <- TrainingData[,-length(colnames(TrainingData))]
Predictand <- TrainingData[,length(colnames(TrainingData))]
#normal linear regression
#variableNumber <- 1
#plotData <- TrainingData[,c(variableNumber, length(TrainingData[1,])-1)]
#FitPlot(plotData, 300, -0.5, 0.1)
form <- paste0("~", paste(colnames(TestingData), collapse = "+"))
formula1 <- paste0(predictandNames[predictand], "~.")
formula2 <- paste0(predictandNames[predictand], "~1")
formula3 <- paste0(predictandNames[predictand], form)
# monotone composite quantile regression neural network (MCQRNN)
if (RegMethod == 6){
fit1 <- mcqrnn.fit(as.matrix(Predictors), as.matrix(Predictand), n.hidden = hiddens1[1], n.hidden2 = hiddens2[1], tau = quants, iter.max=iters[2], n.trials=2,
trace = F, Th = sigmoid, Th.prime = sigmoid.prime, penalty = penalties[hyper])
tempPredictionset[testIndices,] <- as.matrix(mcqrnn.predict(as.matrix(TestingData), fit1, tau = quants))
}
tempPredictionset[testIndices,] <- t(apply(tempPredictionset[testIndices,],1,sort))
if (RegMethod > 0){
saveRDS(fit1, file = paste0("/nobackup/users/bakker/Data2/fits/fit_", CVmonth, "_", season, "_", time, "_", hyperparamText, hyperparameters[hyper], "_", RegMethod,"_", DistMethod, ".rds"))
rm(fit1, TrainingData, TestingData)
}
}
}
for (date in 1:length(init_dates)){
tempIndices2 <- which(tempIndices %in% c((length(stationPlaces)*(date - 1)+1):(length(stationPlaces)*date)))
if (ProbMethod == 1){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time] <- tempPredictionset[tempIndices2]*CSR[tempIndices[tempIndices2]]
} else if (ProbMethod == 2){
Predictionset[date,tempIndices[tempIndices2] - length(stationPlaces)*(date - 1),time,] <- tempPredictionset[tempIndices2,]*array(CSR[tempIndices[tempIndices2]], c(length(tempIndices2), length(quants)))
}
}
}
rm(variableData, All_Data, RadiationData)
}
saveRDS(Predictionset, file = paste0("/nobackup/users/bakker/Data2/predictionsets2/PS_",hyperparamText,hyperparameters[hyper], "_", RegMethod, "_", DistMethod,".rds"))
rm(Predictionset)
gc()
}
}
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/viewPoints.R
\name{plotAvgViewpoint}
\alias{plotAvgViewpoint}
\title{Plot coverage around a set of virtual 4C viewpoints}
\usage{
plotAvgViewpoint(x, left_dist = 1e+05, right_dist = 1e+05,
ylab = "Average signal", xlab = "Relative position", fix = "center",
...)
}
\arguments{
\item{x}{A GenomicInteractions object which is output from viewPoint}
\item{left_dist}{Distance 'left' of interactions to consider, in bp.}
\item{right_dist}{Distance 'right' of interactions to consider, in bp.}
\item{ylab}{Y axis label.}
\item{xlab}{X axis label.}
\item{fix}{One of "center", "start", "end". Passed to `resize`. Interaction
distances are calculated relative to this part of the bait.}
\item{...}{additional arguments to plot}
}
\value{
Coverage that is plotted (invisibly)
}
\description{
Plots summarised coverage of interactions around a set of viewpoints,
e.g. promoters. This function requires the output of `viewPoint()` as input.
}
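\examples{
# Illustrative sketch only (not package data): `gi` is assumed to be a
# GenomicInteractions object and `baits` a GRanges of promoter regions;
# argument names for viewPoint() are illustrative, see ?viewPoint.
\dontrun{
vp <- viewPoint(gi, baits)
plotAvgViewpoint(vp, left_dist = 5e4, right_dist = 5e4, fix = "center")
}
}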
|
/man/plotAvgViewpoint.Rd
|
no_license
|
ComputationalRegulatoryGenomicsICL/GenomicInteractions-old
|
R
| false | false | 1,030 |
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/viewPoints.R
\name{plotAvgViewpoint}
\alias{plotAvgViewpoint}
\title{Plot coverage around a set of virtual 4C viewpoints}
\usage{
plotAvgViewpoint(x, left_dist = 1e+05, right_dist = 1e+05,
ylab = "Average signal", xlab = "Relative position", fix = "center",
...)
}
\arguments{
\item{x}{A GenomicInteractions object which is output from viewPoint}
\item{left_dist}{Distance 'left' of interactions to consider, in bp.}
\item{right_dist}{Distance 'right' of interactions to consider, in bp.}
\item{ylab}{Y axis label.}
\item{xlab}{X axis label.}
\item{fix}{One of "center", "start", "end". Passed to `resize`. Interaction
distances are calculated relative to this part of the bait.}
\item{...}{additional arguments to plot}
}
\value{
Coverage that is plotted (invisibly)
}
\description{
Plots summarised coverage of interactions around a set of viewpoints,
e.g. promoters. This function requires the output of `viewPoint()` as input.
}
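\examples{
# Illustrative sketch only (not package data): `gi` is assumed to be a
# GenomicInteractions object and `baits` a GRanges of promoter regions;
# argument names for viewPoint() are illustrative, see ?viewPoint.
\dontrun{
vp <- viewPoint(gi, baits)
plotAvgViewpoint(vp, left_dist = 5e4, right_dist = 5e4, fix = "center")
}
}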
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Methods.R
\name{addExtractionDirectives}
\alias{addExtractionDirectives}
\title{Extraction Directives}
\usage{
addExtractionDirectives(x, coverage = NULL, prefix = NULL, append = T, ...)
}
\arguments{
\item{x}{environment, a study}
\item{coverage}{numeric, a pair of values between 0 and 1 that specify the range
of coverage to be included in the category}
\item{prefix}{character, a prefix that will be added to each label in the form
prefix_label. It must not contain any underscore}
\item{append}{logic, initialize (FALSE) or append a new definition (TRUE)}
\item{...}{not implemented}
}
\description{
Training feature directives determine how training features (pixels) are extracted
from training data. \code{\link[=addExtractionDirectives]{addExtractionDirectives()}} either initialize or add a new
row to the table of directives.
}
\examples{
\dontrun{
addExtractionDirectives(MyStudy,c(0.75,100),'core', append = F)
addExtractionDirectives(MyStudy,c(0,0.75),'border', append = T)
}
}
|
/man/addExtractionDirectives.Rd
|
no_license
|
luigidolcetti/RUNIMC
|
R
| false | true | 1,074 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Methods.R
\name{addExtractionDirectives}
\alias{addExtractionDirectives}
\title{Extraction Directives}
\usage{
addExtractionDirectives(x, coverage = NULL, prefix = NULL, append = T, ...)
}
\arguments{
\item{x}{environment, a study}
\item{coverage}{numeric, a pair of values between 0 and 1 that specify the range
of coverage to be included in the category}
\item{prefix}{character, a prefix that will be added to each label in the form
prefix_label. It must not contain any underscore}
\item{append}{logic, initialize (FALSE) or append a new definition (TRUE)}
\item{...}{not implemented}
}
\description{
Training feature directives determine how training features (pixels) are extracted
from training data. \code{\link[=addExtractionDirectives]{addExtractionDirectives()}} either initializes
the table of directives or appends a new row to it.
}
\examples{
\dontrun{
addExtractionDirectives(MyStudy,c(0.75,100),'core', append = F)
addExtractionDirectives(MyStudy,c(0,0.75),'border', append = T)
}
}
|
# Plot4
library(dplyr)
library(data.table)
filename <- "exdata_data_houshold_power_consumption.zip"
if(!file.exists(filename)){
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileURL, filename, method="curl")
}
if(!file.exists("household_power_consumption.txt")){
unzip(filename)
}
mydata4 <- fread("household_power_consumption.txt", sep=";", na.strings = "?")
mydata4 <- subset(mydata4, Date=="1/2/2007" | Date=="2/2/2007")
mydata4$datetime <- as.POSIXct(paste(mydata4$Date, mydata4$Time), format = "%d/%m/%Y %H:%M:%S")
png("plot4.png", width=480, height=480)
par(mfrow=c(2,2))
# Plot4.1
plot(y=mydata4$Global_active_power, x=mydata4$datetime, type="l",
xlab="", ylab="Global Active Power (kilowatts)")
# Plot 4.2
plot(y=mydata4$Voltage, x=mydata4$datetime, type="l",
xlab="datetime", ylab="Voltage")
# Plot 4.3
plot(mydata4$datetime, mydata4$Sub_metering_1, type="l",
xlab="", ylab="Energy sub metering")
lines(mydata4$datetime, mydata4$Sub_metering_2, col="red")
lines(mydata4$datetime, mydata4$Sub_metering_3, col="blue")
legend("topright", col=c("black", "red", "blue"),
c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty=c(1,1), lwd=c(1,1))
# Plot 4.4
plot(y=mydata4$Global_reactive_power, x=mydata4$datetime, type="l",
xlab="datetime", ylab="global_reactive_power")
dev.off()
|
/plot4.R
|
no_license
|
dzpiers/ExData_Plotting1
|
R
| false | false | 1,421 |
r
|
# Plot4
library(dplyr)
library(data.table)
filename <- "exdata_data_houshold_power_consumption.zip"
if(!file.exists(filename)){
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileURL, filename, method="curl")
}
if(!file.exists("household_power_consumption.txt")){
unzip(filename)
}
mydata4 <- fread("household_power_consumption.txt", sep=";", na.strings = "?")
mydata4 <- subset(mydata4, Date=="1/2/2007" | Date=="2/2/2007")
mydata4$datetime <- as.POSIXct(paste(mydata4$Date, mydata4$Time), format = "%d/%m/%Y %H:%M:%S")
png("plot4.png", width=480, height=480)
par(mfrow=c(2,2))
# Plot4.1
plot(y=mydata4$Global_active_power, x=mydata4$datetime, type="l",
xlab="", ylab="Global Active Power (kilowatts)")
# Plot 4.2
plot(y=mydata4$Voltage, x=mydata4$datetime, type="l",
xlab="datetime", ylab="Voltage")
# Plot 4.3
plot(mydata4$datetime, mydata4$Sub_metering_1, type="l",
xlab="", ylab="Energy sub metering")
lines(mydata4$datetime, mydata4$Sub_metering_2, col="red")
lines(mydata4$datetime, mydata4$Sub_metering_3, col="blue")
legend("topright", col=c("black", "red", "blue"),
c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty=c(1,1), lwd=c(1,1))
# Plot 4.4
plot(y=mydata4$Global_reactive_power, x=mydata4$datetime, type="l",
xlab="datetime", ylab="global_reactive_power")
dev.off()
|
#make 2d plots for biscuit data
library("ggplot2")
library("vegan")
library("cowplot")
library("gridGraphics")
setwd("/home/jacob/projects/DenoiseCompare_Out/biscuit/med/COMBINED/biom/fixed_combined/pplacer_distances//")
# Colours taken from here: http://godsnotwheregodsnot.blogspot.ca/2012/09/color-distribution-methodology.html
diff_col <- c("#000000", "#FFFF00", "#1CE6FF", "#FF34FF", "#FF4A46", "#008941", "#006FA6", "#A30059",
"#FFDBE5", "#7A4900", "#0000A6", "#63FFAC", "#B79762", "#004D43", "#8FB0FF", "#997D87",
"#5A0007", "#809693", "#FEFFE6", "#1B4400", "#4FC601", "#3B5DFF", "#4A3B53", "#FF2F80",
"#61615A", "#BA0900", "#6B7900", "#00C2A0", "#FFAA92", "#FF90C9", "#B903AA", "#D16100",
"#DDEFFF", "#000035", "#7B4F4B", "#A1C299", "#300018", "#0AA6D8", "#013349", "#00846F",
"#372101", "#FFB500", "#C2FFED", "#A079BF", "#CC0744", "#C0B9B2", "#C2FF99", "#001E09",
"#00489C", "#6F0062", "#0CBD66", "#EEC3FF", "#456D75", "#B77B68", "#7A87A1", "#788D66",
"#885578", "#FAD09F", "#FF8A9A", "#D157A0", "#BEC459", "#456648", "#0086ED", "#886F4C",
"#34362D", "#B4A8BD", "#00A6AA", "#452C2C", "#636375", "#A3C8C9", "#FF913F", "#938A81",
"#575329", "#00FECF", "#B05B6F", "#8CD0FF", "#3B9700", "#04F757", "#C8A1A1", "#1E6E00",
"#7900D7", "#A77500", "#6367A9", "#A05837", "#6B002C", "#772600", "#D790FF", "#9B9700",
"#549E79", "#FFF69F", "#201625", "#72418F", "#BC23FF", "#99ADC0", "#3A2465", "#922329",
"#5B4534", "#FDE8DC", "#404E55", "#0089A3", "#CB7E98", "#A4E804", "#324E72", "#6A3A4C")
### Plot weighted UniFrac data
#read in proportions for the PC
proportions <- read.table("weighted_PC_cords.txt", sep = "\t", nrow=1, skip=4)
#read in the number of samples
SampleNum <- read.table("weighted_PC_cords.txt", sep = "\t", nrow=1)
#Get coordinates for samples.
sample_cord <- read.table("weighted_PC_cords.txt",
sep ="\t", skip = 9,
nrow = SampleNum[[2]],
header=FALSE, row.names=1)
unifrac_combined_biscuit <- sample_cord[,c("V2", "V3", "V4")]
colnames(unifrac_combined_biscuit) <- c("PC1", "PC2", "PC3")
unifrac_combined_biscuit$sample <- as.factor(gsub("^.+_", "", rownames(unifrac_combined_biscuit)))
unifrac_combined_biscuit$Pipeline <- factor(gsub("_.+$", "", rownames(unifrac_combined_biscuit)))
unifrac_combined_biscuit$soil <- NA
unifrac_combined_biscuit$Pipeline <- gsub("Dada", "DADA2", unifrac_combined_biscuit$Pipeline)
unifrac_combined_biscuit$Pipeline <- gsub("Unoise", "UNOISE3", unifrac_combined_biscuit$Pipeline)
unifrac_combined_biscuit$Pipeline <- gsub("open-ref", "Open", unifrac_combined_biscuit$Pipeline)
unifrac_combined_biscuit$sample <- gsub("001101", "", unifrac_combined_biscuit$sample)
rownames(unifrac_combined_biscuit) <- gsub("001101", "", rownames(unifrac_combined_biscuit))
#filter out samples that were removed by rarefaction
samples_to_keep <- unifrac_combined_biscuit[unifrac_combined_biscuit$Pipeline == "DADA2", "sample"]
unifrac_filtered <- unifrac_combined_biscuit[unifrac_combined_biscuit$sample %in% samples_to_keep,]
unifrac_filtered$sample <- as.factor(unifrac_filtered$sample)
unifrac_plot <- ggplot(data=unifrac_filtered, aes(PC1, PC2)) +
geom_point(aes(fill=sample, size=1.5, shape=Pipeline)) +
theme_minimal() +
xlab(paste("PC1 (",round(proportions$V1, 2)*100,"%)", sep="")) +
ylab(paste("PC2 (",round(proportions$V2, 2)*100,"%)", sep="")) +
scale_shape_manual(values = c(21, 22, 23, 24, 25)) +
guides(size=FALSE, fill=FALSE) +
scale_fill_manual(values=diff_col)
for (sample in levels(unifrac_filtered$sample)) {
unifrac_plot <- unifrac_plot + geom_line(data = unifrac_filtered[which(unifrac_filtered$sample == sample),], aes(x = PC1, y = PC2))
}
plot(unifrac_plot)
#remove Deblur-noPos
unifrac_rm_noPos <- unifrac_filtered[-which(unifrac_filtered$Pipeline == "Deblur-noPos"),]
unifrac_rm_noPos$sample <- as.factor(unifrac_rm_noPos$sample)
noPos_unifrac_plot <- ggplot(data=unifrac_rm_noPos, aes(PC1, PC2)) +
geom_point(aes(fill=sample, size=1.5, shape=Pipeline)) +
theme_minimal() +
xlab(paste("PC1 (",round(proportions$V1, 2)*100,"%)", sep="")) +
ylab(paste("PC2 (",round(proportions$V2, 2)*100,"%)", sep="")) +
scale_shape_manual(values = c(21, 22, 23, 24, 25)) +
guides(size=FALSE, fill=FALSE) +
scale_fill_manual(values=diff_col)
for (sample in levels(unifrac_rm_noPos$sample)) {
noPos_unifrac_plot <- noPos_unifrac_plot + geom_line(data = unifrac_rm_noPos[which(unifrac_rm_noPos$sample == sample),], aes(x = PC1, y = PC2))
}
plot(noPos_unifrac_plot)
### Plot bray-curtis distances ordination
bray_curtis_dm <- read.table("../../final_combined/bray_curtis/distances/bray_curtis_rare_CombinedV2_biscuit_taxa_L6.txt",
sep="\t",
header=T,
stringsAsFactors = FALSE,
row.names=1)
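# ordinate the Bray-Curtis distance matrix with non-metric multidimensional scaling (vegan::metaMDS)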
bray_curtis_NMDS <- metaMDS(as.dist(bray_curtis_dm))
bray_curtis_NMDS_df <- data.frame(NMDS1=bray_curtis_NMDS$points[,1],
NMDS2=bray_curtis_NMDS$points[,2],
sample=as.factor(gsub("^.+_", "", rownames(bray_curtis_NMDS$points))),
Pipeline=as.factor(sub("_.*","",rownames(bray_curtis_NMDS$points))))
bray_curtis_NMDS_df <- bray_curtis_NMDS_df[complete.cases(bray_curtis_NMDS_df),]
bray_curtis_NMDS_df$Pipeline <- gsub("Unoise", "UNOISE3", bray_curtis_NMDS_df$Pipeline)
bray_curtis_NMDS_df$Pipeline <- gsub("Dada", "DADA2", bray_curtis_NMDS_df$Pipeline)
bray_curtis_NMDS_df$Pipeline <- gsub("open-ref", "Open", bray_curtis_NMDS_df$Pipeline)
#remove noPos
bray_curtis_NMDS_df_NO_noPos <- bray_curtis_NMDS_df[-which(bray_curtis_NMDS_df$Pipeline == "Deblur-noPos"),]
#remove samples that are not in all the others after rarefaction
samples_to_keep <- bray_curtis_NMDS_df_NO_noPos[which(bray_curtis_NMDS_df$Pipeline == "Deblur"), "sample"]
bray_curtis_NMDS_df_NO_noPos <- bray_curtis_NMDS_df_NO_noPos[bray_curtis_NMDS_df_NO_noPos$sample %in% as.character(samples_to_keep),]
bray_curtis_plot <- ggplot(data=bray_curtis_NMDS_df_NO_noPos, aes(NMDS1, NMDS2)) +
geom_point(aes(fill=sample, size=1.5, shape=Pipeline)) +
theme_minimal() +
xlab("NMDS1") +
ylab("NMDS2") +
scale_shape_manual(values = c(21, 22, 23, 24, 25)) +
guides(size=FALSE, fill=FALSE) +
scale_fill_manual(values=diff_col)
for (sample in levels(bray_curtis_NMDS_df_NO_noPos$sample)) {
bray_curtis_plot <- bray_curtis_plot + geom_line(data = bray_curtis_NMDS_df_NO_noPos[which(bray_curtis_NMDS_df_NO_noPos$sample == sample),], aes(x = NMDS1, y = NMDS2))
}
plot(bray_curtis_plot)
#let's do the unweighted UniFrac ordination now
unweight_porp <- read.table("unweighted_cords.txt", sep="\t", skip=4, nrow=1)
unweight_samples <- read.table("unweighted_cords.txt", sep="\t", nrow=1)
unweight_coords <- read.table("unweighted_cords.txt",
sep ="\t", skip = 9,
nrow = unweight_samples[[2]],
header=FALSE, row.names=1)
unweighted_combined_biscuit <- unweight_coords[,c("V2", "V3", "V4")]
colnames(unweighted_combined_biscuit) <- c("PC1", "PC2", "PC3")
unweighted_combined_biscuit$sample <- as.factor(gsub("^.+_", "", rownames(unweighted_combined_biscuit)))
unweighted_combined_biscuit$Pipeline <- factor(gsub("_.+$", "", rownames(unweighted_combined_biscuit)))
unweighted_combined_biscuit$Pipeline <- gsub("Dada", "DADA2", unweighted_combined_biscuit$Pipeline)
unweighted_combined_biscuit$Pipeline <- gsub("Unoise", "UNOISE3", unweighted_combined_biscuit$Pipeline)
unweighted_combined_biscuit$sample <- gsub("001101", "", unweighted_combined_biscuit$sample)
unweighted_combined_biscuit$sample <- as.factor(unweighted_combined_biscuit$sample)
samples_to_keep <- unweighted_combined_biscuit[unweighted_combined_biscuit$Pipeline == "DADA2", "sample"]
unweighted_filtered <- unweighted_combined_biscuit[unweighted_combined_biscuit$sample %in% samples_to_keep,]
unweighted_filtered$sample <- as.factor(unweighted_filtered$sample)
unweighted_filtered$Pipeline <- gsub("open-ref", "Open", unweighted_filtered$Pipeline)
unweighted_filtered <- unweighted_filtered[-which(unweighted_filtered$Pipeline=="Deblur-noPos"),]
unweighted_plot <- ggplot(data=unweighted_filtered, aes(PC1, PC2)) +
geom_point(aes(fill=sample, size=1.5, shape=Pipeline)) +
theme_minimal() +
xlab(paste("PC1 (",round(unweight_porp$V1, 2)*100,"%)", sep="")) +
ylab(paste("PC2 (",round(unweight_porp$V2, 2)*100,"%)", sep="")) +
scale_shape_manual(values = c(21, 22, 23, 24, 25)) +
guides(size=FALSE, fill=FALSE) +
scale_fill_manual(values=diff_col)
for (sample in levels(unweighted_filtered$sample)) {
unweighted_plot <- unweighted_plot + geom_line(data = unweighted_filtered[which(unweighted_filtered$sample == sample),], aes(x = PC1, y = PC2))
}
plot(unweighted_plot)
biscuit_unweighted_plot <- unweighted_plot
#plot final figure
|
/Rscripts/Old_Scripts/biscuit_ordination_pplacer.R
|
no_license
|
nearinj/Denoiser-Comparison
|
R
| false | false | 9,120 |
r
|
#make 2d plots for biscuit data
library("ggplot2")
library("vegan")
library("cowplot")
library("gridGraphics")
setwd("/home/jacob/projects/DenoiseCompare_Out/biscuit/med/COMBINED/biom/fixed_combined/pplacer_distances//")
# Colours taken from here: http://godsnotwheregodsnot.blogspot.ca/2012/09/color-distribution-methodology.html
diff_col <- c("#000000", "#FFFF00", "#1CE6FF", "#FF34FF", "#FF4A46", "#008941", "#006FA6", "#A30059",
"#FFDBE5", "#7A4900", "#0000A6", "#63FFAC", "#B79762", "#004D43", "#8FB0FF", "#997D87",
"#5A0007", "#809693", "#FEFFE6", "#1B4400", "#4FC601", "#3B5DFF", "#4A3B53", "#FF2F80",
"#61615A", "#BA0900", "#6B7900", "#00C2A0", "#FFAA92", "#FF90C9", "#B903AA", "#D16100",
"#DDEFFF", "#000035", "#7B4F4B", "#A1C299", "#300018", "#0AA6D8", "#013349", "#00846F",
"#372101", "#FFB500", "#C2FFED", "#A079BF", "#CC0744", "#C0B9B2", "#C2FF99", "#001E09",
"#00489C", "#6F0062", "#0CBD66", "#EEC3FF", "#456D75", "#B77B68", "#7A87A1", "#788D66",
"#885578", "#FAD09F", "#FF8A9A", "#D157A0", "#BEC459", "#456648", "#0086ED", "#886F4C",
"#34362D", "#B4A8BD", "#00A6AA", "#452C2C", "#636375", "#A3C8C9", "#FF913F", "#938A81",
"#575329", "#00FECF", "#B05B6F", "#8CD0FF", "#3B9700", "#04F757", "#C8A1A1", "#1E6E00",
"#7900D7", "#A77500", "#6367A9", "#A05837", "#6B002C", "#772600", "#D790FF", "#9B9700",
"#549E79", "#FFF69F", "#201625", "#72418F", "#BC23FF", "#99ADC0", "#3A2465", "#922329",
"#5B4534", "#FDE8DC", "#404E55", "#0089A3", "#CB7E98", "#A4E804", "#324E72", "#6A3A4C")
### Plot weighted UniFrac data
#read in proportions for the PC
proportions <- read.table("weighted_PC_cords.txt", sep = "\t", nrow=1, skip=4)
#read in the number of samples
SampleNum <- read.table("weighted_PC_cords.txt", sep = "\t", nrow=1)
#Get coordinates for samples.
sample_cord <- read.table("weighted_PC_cords.txt",
sep ="\t", skip = 9,
nrow = SampleNum[[2]],
header=FALSE, row.names=1)
unifrac_combined_biscuit <- sample_cord[,c("V2", "V3", "V4")]
colnames(unifrac_combined_biscuit) <- c("PC1", "PC2", "PC3")
unifrac_combined_biscuit$sample <- as.factor(gsub("^.+_", "", rownames(unifrac_combined_biscuit)))
unifrac_combined_biscuit$Pipeline <- factor(gsub("_.+$", "", rownames(unifrac_combined_biscuit)))
unifrac_combined_biscuit$soil <- NA
unifrac_combined_biscuit$Pipeline <- gsub("Dada", "DADA2", unifrac_combined_biscuit$Pipeline)
unifrac_combined_biscuit$Pipeline <- gsub("Unoise", "UNOISE3", unifrac_combined_biscuit$Pipeline)
unifrac_combined_biscuit$Pipeline <- gsub("open-ref", "Open", unifrac_combined_biscuit$Pipeline)
unifrac_combined_biscuit$sample <- gsub("001101", "", unifrac_combined_biscuit$sample)
rownames(unifrac_combined_biscuit) <- gsub("001101", "", rownames(unifrac_combined_biscuit))
#filter out samples that were removed by rarefaction
samples_to_keep <- unifrac_combined_biscuit[unifrac_combined_biscuit$Pipeline == "DADA2", "sample"]
unifrac_filtered <- unifrac_combined_biscuit[unifrac_combined_biscuit$sample %in% samples_to_keep,]
unifrac_filtered$sample <- as.factor(unifrac_filtered$sample)
unifrac_plot <- ggplot(data=unifrac_filtered, aes(PC1, PC2)) +
geom_point(aes(fill=sample, size=1.5, shape=Pipeline)) +
theme_minimal() +
xlab(paste("PC1 (",round(proportions$V1, 2)*100,"%)", sep="")) +
ylab(paste("PC2 (",round(proportions$V2, 2)*100,"%)", sep="")) +
scale_shape_manual(values = c(21, 22, 23, 24, 25)) +
guides(size=FALSE, fill=FALSE) +
scale_fill_manual(values=diff_col)
for (sample in levels(unifrac_filtered$sample)) {
unifrac_plot <- unifrac_plot + geom_line(data = unifrac_filtered[which(unifrac_filtered$sample == sample),], aes(x = PC1, y = PC2))
}
plot(unifrac_plot)
#remove Deblur-noPos
unifrac_rm_noPos <- unifrac_filtered[-which(unifrac_filtered$Pipeline == "Deblur-noPos"),]
unifrac_rm_noPos$sample <- as.factor(unifrac_rm_noPos$sample)
noPos_unifrac_plot <- ggplot(data=unifrac_rm_noPos, aes(PC1, PC2)) +
geom_point(aes(fill=sample, size=1.5, shape=Pipeline)) +
theme_minimal() +
xlab(paste("PC1 (",round(proportions$V1, 2)*100,"%)", sep="")) +
ylab(paste("PC2 (",round(proportions$V2, 2)*100,"%)", sep="")) +
scale_shape_manual(values = c(21, 22, 23, 24, 25)) +
guides(size=FALSE, fill=FALSE) +
scale_fill_manual(values=diff_col)
for (sample in levels(unifrac_rm_noPos$sample)) {
noPos_unifrac_plot <- noPos_unifrac_plot + geom_line(data = unifrac_rm_noPos[which(unifrac_rm_noPos$sample == sample),], aes(x = PC1, y = PC2))
}
plot(noPos_unifrac_plot)
### Plot bray-curtis distances ordination
bray_curtis_dm <- read.table("../../final_combined/bray_curtis/distances/bray_curtis_rare_CombinedV2_biscuit_taxa_L6.txt",
sep="\t",
header=T,
stringsAsFactors = FALSE,
row.names=1)
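# ordinate the Bray-Curtis distance matrix with non-metric multidimensional scaling (vegan::metaMDS)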
bray_curtis_NMDS <- metaMDS(as.dist(bray_curtis_dm))
bray_curtis_NMDS_df <- data.frame(NMDS1=bray_curtis_NMDS$points[,1],
NMDS2=bray_curtis_NMDS$points[,2],
sample=as.factor(gsub("^.+_", "", rownames(bray_curtis_NMDS$points))),
Pipeline=as.factor(sub("_.*","",rownames(bray_curtis_NMDS$points))))
bray_curtis_NMDS_df <- bray_curtis_NMDS_df[complete.cases(bray_curtis_NMDS_df),]
bray_curtis_NMDS_df$Pipeline <- gsub("Unoise", "UNOISE3", bray_curtis_NMDS_df$Pipeline)
bray_curtis_NMDS_df$Pipeline <- gsub("Dada", "DADA2", bray_curtis_NMDS_df$Pipeline)
bray_curtis_NMDS_df$Pipeline <- gsub("open-ref", "Open", bray_curtis_NMDS_df$Pipeline)
#remove noPos
bray_curtis_NMDS_df_NO_noPos <- bray_curtis_NMDS_df[-which(bray_curtis_NMDS_df$Pipeline == "Deblur-noPos"),]
#remove samples that are not in all the others after rarefaction
samples_to_keep <- bray_curtis_NMDS_df_NO_noPos[which(bray_curtis_NMDS_df$Pipeline == "Deblur"), "sample"]
bray_curtis_NMDS_df_NO_noPos <- bray_curtis_NMDS_df_NO_noPos[bray_curtis_NMDS_df_NO_noPos$sample %in% as.character(samples_to_keep),]
bray_curtis_plot <- ggplot(data=bray_curtis_NMDS_df_NO_noPos, aes(NMDS1, NMDS2)) +
geom_point(aes(fill=sample, size=1.5, shape=Pipeline)) +
theme_minimal() +
xlab("NMDS1") +
ylab("NMDS2") +
scale_shape_manual(values = c(21, 22, 23, 24, 25)) +
guides(size=FALSE, fill=FALSE) +
scale_fill_manual(values=diff_col)
for (sample in levels(bray_curtis_NMDS_df_NO_noPos$sample)) {
bray_curtis_plot <- bray_curtis_plot + geom_line(data = bray_curtis_NMDS_df_NO_noPos[which(bray_curtis_NMDS_df_NO_noPos$sample == sample),], aes(x = NMDS1, y = NMDS2))
}
plot(bray_curtis_plot)
#let's do the unweighted UniFrac ordination now
unweight_porp <- read.table("unweighted_cords.txt", sep="\t", skip=4, nrow=1)
unweight_samples <- read.table("unweighted_cords.txt", sep="\t", nrow=1)
unweight_coords <- read.table("unweighted_cords.txt",
sep ="\t", skip = 9,
nrow = unweight_samples[[2]],
header=FALSE, row.names=1)
unweighted_combined_biscuit <- unweight_coords[,c("V2", "V3", "V4")]
colnames(unweighted_combined_biscuit) <- c("PC1", "PC2", "PC3")
unweighted_combined_biscuit$sample <- as.factor(gsub("^.+_", "", rownames(unweighted_combined_biscuit)))
unweighted_combined_biscuit$Pipeline <- factor(gsub("_.+$", "", rownames(unweighted_combined_biscuit)))
unweighted_combined_biscuit$Pipeline <- gsub("Dada", "DADA2", unweighted_combined_biscuit$Pipeline)
unweighted_combined_biscuit$Pipeline <- gsub("Unoise", "UNOISE3", unweighted_combined_biscuit$Pipeline)
unweighted_combined_biscuit$sample <- gsub("001101", "", unweighted_combined_biscuit$sample)
unweighted_combined_biscuit$sample <- as.factor(unweighted_combined_biscuit$sample)
samples_to_keep <- unweighted_combined_biscuit[unweighted_combined_biscuit$Pipeline == "DADA2", "sample"]
unweighted_filtered <- unweighted_combined_biscuit[unweighted_combined_biscuit$sample %in% samples_to_keep,]
unweighted_filtered$sample <- as.factor(unweighted_filtered$sample)
unweighted_filtered$Pipeline <- gsub("open-ref", "Open", unweighted_filtered$Pipeline)
unweighted_filtered <- unweighted_filtered[-which(unweighted_filtered$Pipeline=="Deblur-noPos"),]
unweighted_plot <- ggplot(data=unweighted_filtered, aes(PC1, PC2)) +
geom_point(aes(fill=sample, size=1.5, shape=Pipeline)) +
theme_minimal() +
xlab(paste("PC1 (",round(unweight_porp$V1, 2)*100,"%)", sep="")) +
ylab(paste("PC2 (",round(unweight_porp$V2, 2)*100,"%)", sep="")) +
scale_shape_manual(values = c(21, 22, 23, 24, 25)) +
guides(size=FALSE, fill=FALSE) +
scale_fill_manual(values=diff_col)
for (sample in levels(unweighted_filtered$sample)) {
unweighted_plot <- unweighted_plot + geom_line(data = unweighted_filtered[which(unweighted_filtered$sample == sample),], aes(x = PC1, y = PC2))
}
plot(unweighted_plot)
biscuit_unweighted_plot <- unweighted_plot
#plot final figure
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/critical.R
\name{critical}
\alias{critical}
\title{Critical points of the regression function}
\usage{
critical(model, der = NULL)
}
\arguments{
\item{model}{Parametric or nonparametric regression out
obtained by \code{\link{frfast}} function.}
\item{der}{Number which determines the type of inference. By default
\code{der} is \code{NULL}. If this term is \code{0}, the calculation is for
the point which maximizes the estimate. If it is \code{1}, the inference is
for the first derivative, and if it is \code{2}, it returns the point at which
the second derivative equals zero.}
}
\value{
An object is returned with the following elements:
\item{Estimation}{ \code{x} value which maximizes the regression function with
their 95\% confidence intervals (for each level).}
\item{First_der}{\code{x} value which maximizes the first derivative with their
95\% confidence intervals (for each level).}
\item{Second_der}{\code{x} value at which the second derivative equals zero,
with their 95\% confidence intervals (for each level).}
}
\description{
This function draws inference about some critical point in
the support of \eqn{X} which is associated with some features of the regression
function (e.g., minimum, maximum or inflection points which indicate changes
in the sign of curvature). Returns the value of the covariate \code{x}
which maximizes the estimate of the function, the value of the covariate
\code{x} which maximizes the first derivative and the value of the covariate
\code{x} at which the second derivative equals zero, for each level of the
factor.
}
\examples{
library(npregfast)
data(barnacle)
fit <- frfast(DW ~ RC, data = barnacle) # without interactions
critical(fit)
critical(fit, der = 0)
critical(fit, der = 1)
critical(fit, der = 2)
# fit2 <- frfast(DW ~ RC : F, data = barnacle) # with interactions
# critical(fit2)
# critical(fit2, der = 0)
# critical(fit2, der = 1)
# critical(fit2, der = 2)
}
\references{
Sestelo, M. (2013). Development and computational implementation of
estimation and inference methods in flexible regression models.
Applications in Biology, Engineering and Environment. PhD Thesis, Department
of Statistics and O.R. University of Vigo.
}
\author{
Marta Sestelo, Nora M. Villanueva and Javier Roca-Pardiñas.
}
|
/man/critical.Rd
|
no_license
|
noramvillanueva/npregfast
|
R
| false | true | 2,349 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/critical.R
\name{critical}
\alias{critical}
\title{Critical points of the regression function}
\usage{
critical(model, der = NULL)
}
\arguments{
\item{model}{Parametric or nonparametric regression out
obtained by \code{\link{frfast}} function.}
\item{der}{Number which determines the type of inference. By default
\code{der} is \code{NULL}. If this term is \code{0}, the calculation is for
the point which maximizes the estimate. If it is \code{1}, the inference is
for the first derivative, and if it is \code{2}, it returns the point at which
the second derivative equals zero.}
}
\value{
An object is returned with the following elements:
\item{Estimation}{ \code{x} value which maximizes the regression function with
their 95\% confidence intervals (for each level).}
\item{First_der}{\code{x} value which maximizes the first derivative with their
95\% confidence intervals (for each level).}
\item{Second_der}{\code{x} value at which the second derivative equals zero,
with their 95\% confidence intervals (for each level).}
}
\description{
This function draws inference about some critical point in
the support of \eqn{X} which is associated with some features of the regression
function (e.g., minimum, maximum or inflection points which indicate changes
in the sign of curvature). Returns the value of the covariate \code{x}
which maximizes the estimate of the function, the value of the covariate
\code{x} which maximizes the first derivative and the value of the covariate
\code{x} at which the second derivative equals zero, for each level of the
factor.
}
\examples{
library(npregfast)
data(barnacle)
fit <- frfast(DW ~ RC, data = barnacle) # without interactions
critical(fit)
critical(fit, der = 0)
critical(fit, der = 1)
critical(fit, der = 2)
# fit2 <- frfast(DW ~ RC : F, data = barnacle) # with interactions
# critical(fit2)
# critical(fit2, der = 0)
# critical(fit2, der = 1)
# critical(fit2, der = 2)
}
\references{
Sestelo, M. (2013). Development and computational implementation of
estimation and inference methods in flexible regression models.
Applications in Biology, Engineering and Environment. PhD Thesis, Department
of Statistics and O.R. University of Vigo.
}
\author{
Marta Sestelo, Nora M. Villanueva and Javier Roca-Pardiñas.
}
|
# Libraries used throughout the analysis
pckg <- c("easypackages","tidyverse","rvest","purrr","tidytext","tm")
# install.packages(pckg) # uncomment if the packages are not already installed
library(easypackages)
libraries(pckg)
# Function definitions
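# obtieneNoticiasBusqueda(): builds the Google News search URL for a query,
# scrapes the results page and returns one row per article (headline, date,
# outlet and link).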
obtieneNoticiasBusqueda <- function(busqueda){
news_pag = "https://news.google.com/"
parametro_busqueda = "search?q="
busqueda_no_espacios = gsub(" ","%20", busqueda)
parametro_final = "&hl=es-419&gl=US&ceid=US:es-419"
html_dir = paste0(news_pag,parametro_busqueda,busqueda_no_espacios,parametro_final)
google_news = read_html(html_dir)
noticias = google_news %>%
html_nodes(css = ".xP6mwf") %>%
html_children()
noticiasDF = map(noticias,obtieneNoticiasData)
noticiasDF = bind_rows(noticiasDF)
noticiasDF = noticiasDF[!is.na(noticiasDF$Titular),]
return(noticiasDF)
}
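# obtieneNoticiasData(): for a single result node, extracts the headline,
# publication date and outlet name, and resolves the masked Google News
# redirect to the article's real URL.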
obtieneNoticiasData = function(noticia){
news_pag = "https://news.google.com/"
titular = noticia %>% html_node("h3") %>% html_text()
fecha = noticia %>% html_node("time") %>% html_attr("datetime")
diario = noticia %>% html_node("a.wEwyrc.AVN2gc.uQIVzc.Sksgp") %>% html_text()
link_enmascarado = noticia %>% html_node("h3 a") %>% html_attr("href")
link_enmascarado = paste0(news_pag,substring(link_enmascarado,3))
link_leido = read_html(link_enmascarado)
link = link_leido %>%
html_nodes(css='a') %>%
tail(1) %>%
html_attr("href")
noticiaDF = data.frame(Titular=titular, Fecha=fecha, Diario=diario, Link=link, stringsAsFactors = F)
return(noticiaDF)
}
noticiasMujer <- obtieneNoticiasBusqueda("Violencia contra la mujer Ecuador")
# Newspaper with the most articles published on the topic
noticiasMujer %>%
count(Diario) %>%
slice_max(order_by = n,n=5)
# Newspapers to keep
Diarios <- c("El Comercio (Ecuador)", "El Telégrafo (por eliminar)",
"El Universo","La Hora (Ecuador)","Primicias")
Estructura = data.frame(Diario=Diarios)
Estructura$CSS = NA
Estructura$CSS[Estructura$Diario=='El Comercio (Ecuador)'] = '.paragraphs'
Estructura$CSS[Estructura$Diario=='El Telégrafo (por eliminar)'] = '.itemFullText'
Estructura$CSS[Estructura$Diario=='El Universo'] = '.field-name-body'
Estructura$CSS[Estructura$Diario=='La Hora (Ecuador)'] = '#contenedorGeneral'
Estructura$CSS[Estructura$Diario=='Primicias'] = '#entry-content-inarticle'
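# obtenerNoticiaNacional(): downloads one article and returns its body text by
# collapsing the <p> paragraphs found under the outlet-specific CSS selector.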
obtenerNoticiaNacional = function(link_noticia, diario, diccionario_css){
noticia_leida = read_html(link_noticia)
css = diccionario_css$CSS[diccionario_css$Diario==diario]
text_nodes = noticia_leida %>%
html_nodes(css = css) %>%
html_nodes("p")
text = text_nodes %>%
html_text()
text = paste0(text, collapse = " ")
return(text)
}
noticiasMujer <- noticiasMujer %>% filter(Diario %in% Diarios)
news = map2_chr(noticiasMujer$Link, noticiasMujer$Diario, obtenerNoticiaNacional, diccionario_css=Estructura)
noticiasMujer$Noticia = news
sapply(noticiasMujer, function(x) sum(is.na(x)))
which(is.na(noticiasMujer$Fecha))
noticiasMujer[57,4]
noticiasMujer[57,2] <- "2020-12-01T00:00:00Z"
glimpse(noticiasMujer)
# it may be caused by the publication date, review tomorrow
# cut-off date: 28 November / start date: 5 March
saveRDS(noticiasMujer, "applications/Caso_estudio/noticiasMujer.RDS")
# READ DATA
rm(list=ls())
noticiasMujer <- readRDS("applications/Caso_estudio/noticiasMujer.RDS")
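# Tokenize the article bodies into bigrams (pairs of consecutive words),
# lower-casing the text.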
tidy_Mujer = noticiasMujer %>%
unnest_tokens(output = bigrama,
input = Noticia,
token = 'ngrams',
n = 2,
to_lower = T)
# Most frequent bigrams
tidy_Mujer %>%
count(bigrama) %>%
slice_max(order_by = n,n=10)
# stopwords
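# Combine a custom stop-word list with the Spanish stop words shipped with tm,
# drop duplicates, and discard bigrams where either word is a stop word.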
library(readxl)
stopwords_es_1 = read_excel("applications/Caso_estudio/CustomStopWords.xlsx")
names(stopwords_es_1) = c("Token","Fuente")
stopwords_es_2 = tibble(Token=tm::stopwords(kind = "es"), Fuente="tm")
stopwords_es = rbind(stopwords_es_1, stopwords_es_2)
stopwords_es = stopwords_es[!duplicated(stopwords_es$Token),]
remove(stopwords_es_1, stopwords_es_2)
bigramas_mujer = tidy_Mujer %>%
separate(bigrama, c("palabra1", "palabra2"), sep = " ") %>%
filter(!palabra1 %in% c(stopwords_es$Token)) %>%
filter(!palabra2 %in% c(stopwords_es$Token))
bigramas_frec_mujer = bigramas_mujer %>%
count(Titular, palabra1, palabra2, sort = TRUE) %>%
unite(bigrama, palabra1, palabra2, sep = " ")
bigramas_frec_mujer %>% select(bigrama, n) %>% head()
# tf-idf: weight each bigram by how specific it is to each headline (document)
bigramas_tfidf_mujer = bigramas_frec_mujer%>%
bind_tf_idf(bigrama, Titular, n)
bigramas_tfidf_mujer %>% arrange(desc(tf_idf))
# Bigram network
library(igraph)
# install.packages("influential")
library(influential)
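# Build a directed graph from the bigrams that appear at least five times.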
bigrama_grafo_mujer = bigramas_mujer %>%
count(palabra1, palabra2, sort = TRUE) %>%
filter(n >= 5) %>%
graph_from_data_frame()
bigrama_grafo_mujer
library(ggraph)
set.seed(1998)
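# Draw the bigram network with the Fruchterman-Reingold ("fr") layout; the
# second plot adds arrows and edge transparency proportional to bigram counts.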
ggraph(bigrama_grafo_mujer, layout = "fr") +
geom_edge_link() +
geom_node_point() +
geom_node_label(aes(label = name), vjust = 1, hjust = 0.2)
a <- grid::arrow(type = "closed", length = unit(.15, "inches"))
ggraph(bigrama_grafo_mujer, layout = "fr") +
geom_edge_link(aes(edge_alpha = n), show.legend = FALSE,
arrow = a, end_cap = circle(.07, 'inches')) +
geom_node_point(color = "lightblue", size = 5) +
geom_node_text(aes(label = name), vjust = 1, hjust = 1) +
theme_void()
ggraph(bigrama_grafo_mujer, layout = "fr") +
geom_edge_link() +
geom_node_point() +
geom_node_text(aes(label = name), vjust = 1, hjust = 1)
library(tidyverse)
|
/applications/Caso_estudio/script.R
|
no_license
|
CristopherA98/Curso_PLN
|
R
| false | false | 5,535 |
r
|
# Libraries used throughout the analysis
pckg <- c("easypackages","tidyverse","rvest","purrr","tidytext","tm")
# install.packages(pckg)  # uncomment if the packages are not installed yet
library(easypackages)
libraries(pckg)
# Function definitions
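# obtieneNoticiasBusqueda(): builds the Google News search URL for a query,
# scrapes the results page and returns one row per article (headline, date,
# outlet and link).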
obtieneNoticiasBusqueda <- function(busqueda){
news_pag = "https://news.google.com/"
parametro_busqueda = "search?q="
busqueda_no_espacios = gsub(" ","%20", busqueda)
parametro_final = "&hl=es-419&gl=US&ceid=US:es-419"
html_dir = paste0(news_pag,parametro_busqueda,busqueda_no_espacios,parametro_final)
google_news = read_html(html_dir)
noticias = google_news %>%
html_nodes(css = ".xP6mwf") %>%
html_children()
noticiasDF = map(noticias,obtieneNoticiasData)
noticiasDF = bind_rows(noticiasDF)
noticiasDF = noticiasDF[!is.na(noticiasDF$Titular),]
return(noticiasDF)
}
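# obtieneNoticiasData(): for a single result node, extracts the headline,
# publication date and outlet name, and resolves the masked Google News
# redirect to the article's real URL.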
obtieneNoticiasData = function(noticia){
news_pag = "https://news.google.com/"
titular = noticia %>% html_node("h3") %>% html_text()
fecha = noticia %>% html_node("time") %>% html_attr("datetime")
diario = noticia %>% html_node("a.wEwyrc.AVN2gc.uQIVzc.Sksgp") %>% html_text()
link_enmascarado = noticia %>% html_node("h3 a") %>% html_attr("href")
link_enmascarado = paste0(news_pag,substring(link_enmascarado,3))
link_leido = read_html(link_enmascarado)
link = link_leido %>%
html_nodes(css='a') %>%
tail(1) %>%
html_attr("href")
noticiaDF = data.frame(Titular=titular, Fecha=fecha, Diario=diario, Link=link, stringsAsFactors = F)
return(noticiaDF)
}
noticiasMujer <- obtieneNoticiasBusqueda("Violencia contra la mujer Ecuador")
# Newspaper with the most articles published on the topic
noticiasMujer %>%
count(Diario) %>%
slice_max(order_by = n,n=5)
# Newspapers to keep
Diarios <- c("El Comercio (Ecuador)", "El Telégrafo (por eliminar)",
"El Universo","La Hora (Ecuador)","Primicias")
Estructura = data.frame(Diario=Diarios)
Estructura$CSS = NA
Estructura$CSS[Estructura$Diario=='El Comercio (Ecuador)'] = '.paragraphs'
Estructura$CSS[Estructura$Diario=='El Telégrafo (por eliminar)'] = '.itemFullText'
Estructura$CSS[Estructura$Diario=='El Universo'] = '.field-name-body'
Estructura$CSS[Estructura$Diario=='La Hora (Ecuador)'] = '#contenedorGeneral'
Estructura$CSS[Estructura$Diario=='Primicias'] = '#entry-content-inarticle'
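# obtenerNoticiaNacional(): downloads one article and returns its body text by
# collapsing the <p> paragraphs found under the outlet-specific CSS selector.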
obtenerNoticiaNacional = function(link_noticia, diario, diccionario_css){
noticia_leida = read_html(link_noticia)
css = diccionario_css$CSS[diccionario_css$Diario==diario]
text_nodes = noticia_leida %>%
html_nodes(css = css) %>%
html_nodes("p")
text = text_nodes %>%
html_text()
text = paste0(text, collapse = " ")
return(text)
}
noticiasMujer <- noticiasMujer %>% filter(Diario %in% Diarios)
news = map2_chr(noticiasMujer$Link, noticiasMujer$Diario, obtenerNoticiaNacional, diccionario_css=Estructura)
noticiasMujer$Noticia = news
sapply(noticiasMujer, function(x) sum(is.na(x)))
which(is.na(noticiasMujer$Fecha))
noticiasMujer[57,4]
noticiasMujer[57,2] <- "2020-12-01T00:00:00Z"
glimpse(noticiasMujer)
# it may be caused by the publication date, review tomorrow
# cut-off date: 28 November / start date: 5 March
saveRDS(noticiasMujer, "applications/Caso_estudio/noticiasMujer.RDS")
# READ DATA
rm(list=ls())
noticiasMujer <- readRDS("applications/Caso_estudio/noticiasMujer.RDS")
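# Tokenize the article bodies into bigrams (pairs of consecutive words),
# lower-casing the text.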
tidy_Mujer = noticiasMujer %>%
unnest_tokens(output = bigrama,
input = Noticia,
token = 'ngrams',
n = 2,
to_lower = T)
# Most frequent bigrams
tidy_Mujer %>%
count(bigrama) %>%
slice_max(order_by = n,n=10)
# stopwords
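# Combine a custom stop-word list with the Spanish stop words shipped with tm,
# drop duplicates, and discard bigrams where either word is a stop word.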
library(readxl)
stopwords_es_1 = read_excel("applications/Caso_estudio/CustomStopWords.xlsx")
names(stopwords_es_1) = c("Token","Fuente")
stopwords_es_2 = tibble(Token=tm::stopwords(kind = "es"), Fuente="tm")
stopwords_es = rbind(stopwords_es_1, stopwords_es_2)
stopwords_es = stopwords_es[!duplicated(stopwords_es$Token),]
remove(stopwords_es_1, stopwords_es_2)
bigramas_mujer = tidy_Mujer %>%
separate(bigrama, c("palabra1", "palabra2"), sep = " ") %>%
filter(!palabra1 %in% c(stopwords_es$Token)) %>%
filter(!palabra2 %in% c(stopwords_es$Token))
bigramas_frec_mujer = bigramas_mujer %>%
count(Titular, palabra1, palabra2, sort = TRUE) %>%
unite(bigrama, palabra1, palabra2, sep = " ")
bigramas_frec_mujer %>% select(bigrama, n) %>% head()
# tf-idf: weight each bigram by how specific it is to each headline (document)
bigramas_tfidf_mujer = bigramas_frec_mujer%>%
bind_tf_idf(bigrama, Titular, n)
bigramas_tfidf_mujer %>% arrange(desc(tf_idf))
# Bigram network
library(igraph)
# install.packages("influential")
library(influential)
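# Build a directed graph from the bigrams that appear at least five times.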
bigrama_grafo_mujer = bigramas_mujer %>%
count(palabra1, palabra2, sort = TRUE) %>%
filter(n >= 5) %>%
graph_from_data_frame()
bigrama_grafo_mujer
library(ggraph)
set.seed(1998)
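# Draw the bigram network with the Fruchterman-Reingold ("fr") layout; the
# second plot adds arrows and edge transparency proportional to bigram counts.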
ggraph(bigrama_grafo_mujer, layout = "fr") +
geom_edge_link() +
geom_node_point() +
geom_node_label(aes(label = name), vjust = 1, hjust = 0.2)
a <- grid::arrow(type = "closed", length = unit(.15, "inches"))
ggraph(bigrama_grafo_mujer, layout = "fr") +
geom_edge_link(aes(edge_alpha = n), show.legend = FALSE,
arrow = a, end_cap = circle(.07, 'inches')) +
geom_node_point(color = "lightblue", size = 5) +
geom_node_text(aes(label = name), vjust = 1, hjust = 1) +
theme_void()
ggraph(bigrama_grafo_mujer, layout = "fr") +
geom_edge_link() +
geom_node_point() +
geom_node_text(aes(label = name), vjust = 1, hjust = 1)
library(tidyverse)
|