content (large_string, lengths 0–6.46M) | path (large_string, lengths 3–331) | license_type (large_string, 2 classes) | repo_name (large_string, lengths 5–125) | language (large_string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 4–6.46M) | extension (large_string, 75 classes) | text (string, lengths 0–6.46M) |
---|---|---|---|---|---|---|---|---|---|
library(questionr)
### Name: ltabs
### Title: Cross tabulation with labelled variables
### Aliases: ltabs
### ** Examples
data(fecondite)
ltabs(~radio, femmes)
ltabs(~radio+tv, femmes)
ltabs(~radio+tv, femmes, "l")
ltabs(~radio+tv, femmes, "v")
ltabs(~radio+tv+journal, femmes)
ltabs(~radio+tv, femmes, variable_label = FALSE)
|
/data/genthat_extracted_code/questionr/examples/ltabs.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 334 |
r
|
% $Id: eff.dens.Rd 238 2021-07-11 16:12:17Z X022278 $
\name{eff.dens}
\alias{eff.dens}
\alias{eff.dens.plot}
\title{
Estimate and plot density of efficiencies
}
\description{ A method to estimate and plot a kernel estimate of
(Farrell) efficiencies, taking into consideration that efficiencies are
bounded either above (input direction) or below (output direction). }
\usage{
eff.dens(eff, bw = "nrd0")
eff.dens.plot(obj, bw = "nrd0", ..., xlim, ylim, xlab, ylab)
}
\arguments{
\item{eff}{ Either a list of (Farrell) efficiencies or a Farrell
object returned from the method \code{\link{dea}}. }
\item{bw}{ Bandwidth; see the documentation of \code{density} for
an explanation. }
\item{obj}{Either an array of efficiencies or a list returned from
\code{eff.dens}.}
\item{\dots}{ Further arguments to the \code{plot} method like line
type and line width. }
\item{xlim}{ Range on the x-axis; usually not needed, just use the
defaults. }
\item{ylim}{ Range on the y-axis; usually not needed, just use the
defaults. }
\item{xlab}{ Label for the x-axis. }
\item{ylab}{ Label for the y-axis. }
}
\details{ The calculation is based on a reflection method (Silverman
1986, 30) using the default window kernel and default bandwidth (window
width) in the method \code{density}.
The method \code{eff.dens.plot} plots the density directly, whereas
\code{eff.dens} just estimates the numerical density; the result can
then be plotted with \code{plot}, corresponding to
\code{eff.dens.plot}, or added with \code{lines} as an overlay on an existing plot.
}
\value{ The return from \code{eff.dens} is a list \code{list(x,y)}
with efficiencies and the corresponding density values. }
\references{ B.W. Silverman (1986), \emph{Density Estimation for Statistics
and Data Analysis}, Chapman and Hall, London. }
\author{ Peter Bogetoft and Lars Otto \email{larsot23@gmail.com}}
\note{ The input efficiency is also bounded below by 0, but for normal
firms an efficiency of 0 will not occur, i.e. that boundary is not
binding, and it is therefore not taken into
consideration. }
\examples{
e <- 1 - rnorm(100)
e[e>1] <- 1
e <- e[e>0]
eff.dens.plot(e)
hist(e, breaks=15, freq=FALSE, xlab="Efficiency", main="")
den <- eff.dens(e)
lines(den, lwd = 2)
}
% \keyword{ ~kwd1 }
% \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
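The reflection idea described in \details can be sketched in a few lines of plain R. The snippet below is only an illustration (it is not from the Benchmarking package) and assumes efficiencies are bounded above at 1, as in the input direction: mirror the sample at the boundary, run an ordinary kernel density, keep the feasible side and double its height.
reflect.dens <- function(eff, bound = 1, bw = "nrd0") {
  reflected <- c(eff, 2 * bound - eff)     # mirror the data at the boundary
  d <- density(reflected, bw = bw)         # ordinary kernel density on the mirrored sample
  keep <- d$x <= bound
  list(x = d$x[keep], y = 2 * d$y[keep])   # double the height to compensate for truncation
}
e <- 1 - abs(rnorm(100, sd = 0.15)); e <- e[e > 0]
plot(reflect.dens(e), type = "l", xlab = "Efficiency", ylab = "Density")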
|
/man/eff.dens.Rd
|
no_license
|
cran/Benchmarking
|
R
| false | false | 2,418 |
rd
|
# Convert an Illumina log R ratio and B allele frequency into A/B allele intensities
illumina.getAB <-
function(logR, bfreq, R.BASELINE=2){
  R = exp(logR)                # back-transform the log R ratio
  R = R*R.BASELINE             # rescale to the baseline total intensity
  A = R/(1+tan(bfreq*pi/2))    # split the total intensity according to the B allele frequency
  B = R-A
  list(A=A,B=B)
}
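# Hypothetical usage (made-up values, not from the package): the helper
# back-transforms the log R ratio, rescales by R.BASELINE, and splits the total
# intensity between the A and B alleles, so ab$A + ab$B recovers the total R.
ab <- illumina.getAB(logR = c(-0.2, 0, 0.3), bfreq = c(0, 0.5, 1))
ab$A + ab$B   # equals 2 * exp(logR) with the default baseline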
|
/R/illumina.getAB.R
|
no_license
|
cran/PSCN
|
R
| false | false | 153 |
r
|
library(GrpString)
### Name: DupRm
### Title: Removes successive duplicates in strings
### Aliases: DupRm
### Keywords: programming
### ** Examples
# Simple example
dup1 <- "000<<<<<DDDFFF333333qqqqqKKKKK33FFF"
dup3 <- "aaBB111^^~~~555667777000000!!!###$$$$$$&&&(((((***)))))@@@@@>>>>99"
dup13 <- c(dup1, dup3)
DupRm(dup13)
|
/data/genthat_extracted_code/GrpString/examples/DupRm.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 332 |
r
|
\alias{gtkEntryCompletionGetCompletionPrefix}
\name{gtkEntryCompletionGetCompletionPrefix}
\title{gtkEntryCompletionGetCompletionPrefix}
\description{Get the original text entered by the user that triggered
the completion or \code{NULL} if there's no completion ongoing.}
\usage{gtkEntryCompletionGetCompletionPrefix(object)}
\arguments{\item{\verb{object}}{a \code{\link{GtkEntryCompletion}}}}
\details{Since 2.12}
\value{[character] the prefix for the current completion}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
/RGtk2/man/gtkEntryCompletionGetCompletionPrefix.Rd
|
no_license
|
lawremi/RGtk2
|
R
| false | false | 545 |
rd
|
### Lab1: Matrix
### Author: Taiwo Famuyiwa
### Date: August. 4, 2019
###########################
###########################
#Matrix#
#QUESTION1
#A)
M <- matrix(c(1:10),nrow=5,ncol=2, dimnames=list(c("a","b","c","d","e"),c("A","B")))
#Answer:
# A B
#a 1 6
#b 2 7
#c 3 8
#d 4 9
#e 5 10
#B) Function to get the maximum of all columns
apply(M, 2, function(x) max(x, na.rm = TRUE))
#QUESTION2
x <- c(1, 2, 3)
y <- c(4, 5, 6)
z <- c(7, 8, 9)
u<-matrix(c(x, y, z), nrow = 3, ncol=3, dimnames = list(c("a", "b", "c")))
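# (Illustration, not part of the original lab) the same apply() pattern from
# Question 1B gives the column maxima of u:
apply(u, 2, max)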
|
/LabWork2.R
|
no_license
|
tincorpai/labwork2
|
R
| false | false | 525 |
r
|
# Quadrat Detection Function
# Greg Nishihara
# 2021 July 17
# Load packages
# パッケージを読み込む
library(tidyverse)
library(magick)
source("detect_quadrat.R")
f1 = "~/Lab_Data/tanimaes/share_files/quadrat_01_210318.JPG"
f2 = "~/Lab_Data/tanimaes/share_files/quadrat_02_210623.JPG"
img1 = image_read(f1)
img2 = image_read(f2)
imgggplot1 = img1 |> image_ggplot()
imgggplot2 = img2 |> image_ggplot()
hdata1 = img1 |> detect_quadrat()
hdata2 = img2 |> detect_quadrat()
p1 = imgggplot1 +
geom_hline(aes(yintercept = yintercept), data = hdata1, color = "white", size = 2) +
geom_vline(aes(xintercept = xintercept), data = hdata1, color = "white", size = 2)
p2 = imgggplot2 +
geom_hline(aes(yintercept = yintercept), data = hdata2, color = "white", size = 2) +
geom_vline(aes(xintercept = xintercept), data = hdata2, color = "white", size = 2)
ofile1 = basename(f1) |> str_replace(".JPG", "_squared.jpg")
ofile2 = basename(f2) |> str_replace(".JPG", "_squared.jpg")
ggsave(ofile1, p1, width = 80, units = "mm", dpi = 100)
ggsave(ofile2, p2, width = 80, units = "mm", dpi = 100)
|
/example01.R
|
no_license
|
gnishihara/detect-quadrat
|
R
| false | false | 1,108 |
r
|
############################
##
## 13 June 2016
## Continuous Aspects in MCMC
## Want to consider both number and
## duration of Trophallaxis interactions
##
###########################
#####
# Outline
#####
# code (turn into function?) to get data in order
# now N_t is number of ants (or pairs of ants)
# in trophallaxis at time t
# code/function to run new mcmc model
# Simulated data to ensure model works
# apply to ant data
## Get Data in New format
# Dependencies (assumed, not declared in the original file): rmvnorm() from
# 'mvtnorm' and rdirichlet() from 'MCMCpack' (gtools also provides it);
# prep.troph.pairs(), Pctmc() and bm() are helpers defined elsewhere in this repo.
library(mvtnorm)
library(MCMCpack)
high4 <- read.csv("./Data/Colony1_trophallaxis_high_density_4hr.csv")
low4 <- read.csv("./Data/Colony1_trophallaxis_low_density_4hr.csv")
low4.1 <- low4[which(low4$Location == 1), ]
low4.4 <- low4[which(low4$Location == 4), ]
prep.high = prep.troph.pairs(high4)
prep.low = prep.troph.pairs(low4)
prep.low.1 = prep.troph.pairs(low4.1)
prep.low.4 = prep.troph.pairs(low4.4)
N.high = prep.high$pairs2
N.low = prep.low$pairs2
N.low.1 = prep.low.1$pairs2
N.low.4 = prep.low.4$pairs2
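# Hedged aside (not from this repo's code): prep.troph.pairs() is a project
# helper defined elsewhere; its $pairs2 element is presumably a per-second count
# of concurrent trophallaxis pairs. A rough stand-in, assuming vectors of
# interaction start/stop times in seconds, could look like:
count_pairs <- function(start_s, stop_s, horizon = ceiling(max(stop_s))) {
  sapply(seq_len(horizon), function(t) sum(start_s <= t & stop_s >= t))
}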
##visualize data
##
##
col = c("#120d08", "#bc5356", "#538bbc", "#53bc84")
par(mfrow = c(2,2))
plot(N.high, main = "High Density", ylab = "# Interactions", xlab = "", type = "l", col = col[2])
plot(N.low, main = "Low Density", ylab = "# Interactions", xlab = "Time (Seconds)", type = "l", col = col[3])
plot(N.low.1, main = "Low Density, Entrance Chamber", ylab = "# Interactions", xlab = "Time (Seconds)", type = "l", col = col[3])
plot(N.low.4, main = "Low Density, Queen Chamber", ylab = "# Interactions", xlab = "Time (Seconds)", type = "l", col = col[3])
par(mfrow = c(1,1))
Time = prep.low$hours * 60 * 60
###
# Simulated Data
###
#see terminal.R
###
# Inference
###
#Propose high and low gamma and lambda,
#first simplest case
# then with additional state based conditions
#calculate R* and then P* matrices
#accept/reject
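# Hedged aside: Pctmc() is a project helper not defined in this file. For a
# CTMC rate matrix Q it presumably returns the transition probability matrix
# over an interval of length t, i.e. a matrix exponential. A rough stand-in
# using the 'expm' package (an assumption, not the repo's implementation):
Pctmc_sketch <- function(Q, t = 1) {
  diag(Q) <- 0
  diag(Q) <- -rowSums(Q)      # make rows sum to zero (valid generator)
  expm::expm(Q * t)           # P(t) = exp(Qt)
}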
data = N.low.1[1:(4*60*60)] #use only the first four hours
Time = 4 * 60 * 60
#hyperparameters
a = .01
b = .8
c = .02
d = .8
r = .02
q = .5
#tuning parameter
tau = matrix( c(.01, 0, 0, 0,
0, .01, 0, 0,
0, 0, .01, 0,
0, 0, 0, .01), nrow = 4, ncol = 4)
n.mcmc = 3000
theta = matrix(c(70000, 1, 1, 70000), 2, 2)
#homes
params = matrix(NA, nrow = 4, ncol = n.mcmc)
rownames(params) <- c("gamma_high^tilde", "gamma_low", "lambda_high", "lambda_low")
colnames(params) <- 1:n.mcmc
X.param = matrix(NA, nrow = Time,
ncol = n.mcmc, byrow = T)
# probability matrix for 2 state discrete time Markov Chain
#stores all M matrices for every iteration in mcmc
M.param = matrix(data = NA, nrow = 4,
ncol = n.mcmc, byrow = T)
rownames(M.param) <- c("HH", "HL", "LH", "LL")
colnames(M.param) <- 1:n.mcmc
#is replaced every loop
M = matrix(c(0.99, 0.01, 0.01, 0.99),byrow = T, 2, 2)
rownames(M) <- c("High", "Low")
colnames(M) <- c("High", "Low")
#initialize
params[1:2, 1] = c(.01, .04 )
params[3:4, 1] = c(.03, .024)
X.param[, 1] = sample(c(1, 2), size = Time, replace = T) #1 - high state, 2 - low state
M.param[, 1] = as.vector(t(M))
log.fullcond = function(params, P_L, P_H, data, X.param){
  # note: reads the current MCMC iteration index `l` from the calling environment
sumP_H = 0
sumP_L = 0
for(t in 2:length(data)){
if(X.param[t, l-1] == 1){
sumP_H = sumP_H + log(P_H[data[t - 1] + 1, data[t] + 1])
}else{
sumP_L = sumP_L + log(P_L[data[t - 1] + 1, data[t] + 1])
}
}
loglike = sumP_L +
sumP_H +
dgamma(params[1], a, b, log = T) +
dgamma(params[2], c, d, log = T) +
dgamma(params[3], r, q, log = T) +
dgamma(params[4], r, q, log = T)
return(loglike)
}
accept = 0
sigma = NA
for(l in 2:n.mcmc){
# print a progress message every 100 iterations
if( l %% 100 == 0 ) cat(paste("iteration", l, "complete\n"))
#
#update
# #adaptive tuning parameter
# if(l < n.mcmc/2 & l %% 100 == 0){
#
# sigma = 2.38 ^ 2 / 4 * var(log(t(params[, 1:(l - 1)])))
# tau = sigma
# }
proposal = rmvnorm(n = 1, mean = log(params[, l - 1]), sigma = tau)
theta.star = exp(proposal)
gamma.high = theta.star[1] + theta.star[2] #calculate correct gamma_high
#calculate P* matrices - for high/low states
R_H.star = matrix(0, nrow = max(data) + 1, ncol = max(data) + 1)
rownames(R_H.star) <- 0:max(data)
colnames(R_H.star) <- 0:max(data)
for(i in 1:nrow(R_H.star)){
if( i %% 2 != 0 & i != nrow(R_H.star) & i != (nrow(R_H.star) - 1)){
R_H.star[i, i + 2] = gamma.high #gamma_high
}
if( i %% 2 != 0 & i != 1 & i != 2){
R_H.star[i, i - 2] = (i - 1)/2 * theta.star[3] #lambda_high
}
}
P_H.star = Pctmc(Q = R_H.star, t = 1)
rownames(P_H.star) <- 0:max(data)
colnames(P_H.star) <- 0:max(data)
R_L.star = matrix(0, nrow = max(data) + 1, ncol = max(data) + 1)
rownames(R_L.star) <- 0:max(data)
colnames(R_L.star) <- 0:max(data)
for(i in 1:nrow(R_L.star)){
if( i %% 2 != 0 & i != nrow(R_L.star) & i != (nrow(R_L.star) - 1)){
R_L.star[i, i + 2] = theta.star[2] #gamma_low
}
if( i %% 2 != 0 & i != 1 & i != 2){
R_L.star[i, i - 2] = (i - 1)/2 * theta.star[4] #lambda_low
}
}
P_L.star = Pctmc(Q = R_L.star, t = 1)
rownames(P_L.star) <- 0:max(data)
colnames(P_L.star) <- 0:max(data)
#calculate P matrices for PREVIOUS values - for high/low states
R_H = matrix(0, nrow = max(data) + 1, ncol = max(data) + 1)
rownames(R_H) <- 0:max(data)
colnames(R_H) <- 0:max(data)
for(i in 1:nrow(R_H)){
if( i %% 2 != 0 & i != nrow(R_H) & i != (nrow(R_H) - 1)){
R_H[i, i + 2] = params[1, l - 1] + params[2, l - 1] #gamma_high
}
if( i %% 2 != 0 & i != 1 & i != 2){
R_H[i, i - 2] = (i - 1)/2 * params[3, l - 1] #lambda_high
}
}
P_H = Pctmc(Q = R_H, t = 1)
rownames(P_H) <- 0:max(data)
colnames(P_H) <- 0:max(data)
R_L = matrix(0, nrow = max(data) + 1, ncol = max(data) + 1)
rownames(R_L) <- 0:max(data)
colnames(R_L) <- 0:max(data)
for(i in 1:nrow(R_L)){
if( i %% 2 != 0 & i != nrow(R_L) & i != (nrow(R_L) - 1)){
R_L[i, i + 2] = params[2, l-1] #gamma_low
}
if( i %% 2 != 0 & i != 1 & i != 2){
R_L[i, i - 2] = (i - 1)/2 * params[4, l-1] #lambda_low
}
}
P_L = Pctmc(Q = R_L, t = 1)
rownames(P_L) <- 0:max(data)
colnames(P_L) <- 0:max(data)
#calculate probability
MHprob = exp(log.fullcond(theta.star, P_L.star, P_H.star, data, X.param) -
log.fullcond(params[, l - 1], P_L, P_H, data, X.param))
if(is.finite(MHprob) == FALSE){MHprob = 0}
#accept/reject
if(runif(1) < MHprob){
accept = accept + 1
params[, l] = theta.star[]
#needed for X
P_H = P_H.star
P_L = P_L.star
}else{
params[, l] = params[, l - 1]
}
#
##X Parameters
m = matrix(data = 0, nrow = 2, ncol = 2)
rownames(m) <- c("high", "low")
colnames(m) <- c("high", "low")
# number states going from i to j, refreshes every run
prob_H = P_H[data[1] + 1, data[2] + 1] * .5 *
M[1, X.param[2, l - 1]]
prob_L = P_L[data[1] + 1, data[2] + 1] * .5 *
M[2, X.param[2, l - 1]]
X.param[1, l] = sample(x = (1:2), size = 1, prob = c(prob_H, prob_L))
m[X.param[1, l], X.param[1, l]] = m[X.param[1, l], X.param[1, l]] + 1
#
#
for(t in 2:(Time - 1)){
prob_H = P_H[data[t] + 1, data[t + 1] + 1] *
M[1, X.param[t + 1, l - 1]] *
M[X.param[t - 1, l - 1], 1]
prob_L = P_L[data[t] + 1, data[t + 1] + 1] *
M[2, X.param[t + 1, l - 1]] *
M[X.param[t - 1, l - 1], 2]
X.param[t, l] = sample(x = (1:2), 1, prob = c(prob_H, prob_L))
m[X.param[t - 1, l], X.param[t, l]] = m[X.param[t - 1, l],
X.param[t, l]] + 1
}
prob_H = P_H[data[Time - 1] + 1, data[Time] + 1] *
M[X.param[1, l - 1], X.param[Time - 1, l - 1]]
prob_L = P_L[data[Time - 1] + 1, data[Time] + 1] *  # index data by time, matching prob_H above
M[X.param[2, l - 1], X.param[Time - 1, l - 1]]
X.param[Time, l] = sample(x = (1:2), 1, prob = c(prob_H, prob_L))
m[X.param[Time - 1, l], X.param[Time, l]] = m[X.param[Time - 1, l],
X.param[Time, l]] + 1
#
#M matrix parameter
M[1, ] = (rdirichlet(n = 1 , alpha = theta[1, ] + m[1, ]))
M[2, ] = (rdirichlet(n = 1 , alpha = theta[2, ] + m[2, ]))
M.param[, l] = as.vector(t(M))
}
#compile estimates - stop code midway version below
X.est = matrix(data = rep(NA, Time), nrow = Time, ncol = 1)
# gamma.high.tilde.est = mean(params[1, ])
# gamma.low.est = mean(params[2, ])
# lambda.high.est = mean(params[3, ])
# lambda.low.est = mean(params[4, ])
#
# gamma.high.est = mean(params[1,]) + mean(params[2, ])
#
# estimate = c(gamma.high.tilde.est, gamma.low.est, lambda.high.est, lambda.low.est, gamma.high.est)
est = apply(params, 1, bm)
var = apply(params, 1, quantile, probs = c(0.025, 0.975), na.rm = TRUE)
est
var
M.est = apply(M.param[,1:(l-1)], 1, bm)
M.var = apply(M.param, 1, quantile, probs = c(0.025, 0.975), na.rm = TRUE)
for(t in 1:Time ){
X.est[t, 1] = mean(X.param[t, ])
}
###
###
### Results
###
###
#######
#accept ratio and estimated vs known
#######
accept/n.mcmc
est = round(est, digits = 3) # the `estimate` object from the commented-out block above no longer exists
est
#plot the estimation runs.
col = c("#120d08", "#bc5356", "#538bbc", "#53bc84")
pdf(file = paste("./output/", Sys.time(), ".pdf", sep = ""))
#Parameters
plot(0,0,xlab="MCMC Runs",
ylab="Rates (per minute)",
ylim=c(0,(max(params[1, ] + params[2, ]) * 60)),
xlim=c(0,n.mcmc),
type="n",
cex.lab = 1)
lines(1:n.mcmc, 60 * (params[1, ] + params[2, ]), col = col[1])
lines(1:n.mcmc, 60 * params[2, ], col = col[2])
lines(1:n.mcmc, (60 * params[3, ]), col = col[3])
lines(1:n.mcmc, (60 * params[4, ]), col = col[4])
#X params
#Single X
X = X.param[sample(1:Time, 1), ]
plot(0, 0, xlab = "MCMC Runs", ylab = "Single X", ylim = c(0,max(X)),
xlim = c(0,n.mcmc), type = "n", cex.lab = 1)
lines(1:n.mcmc, X, col = col[4])
#States over time
plot(X.est, type = "l")
plot(round(X.est), type = "l")
#M
plot(0,0,xlab="MCMC Runs", ylab = "M", ylim = c(0, max(M.param)), xlim=c(0,n.mcmc),
type="n", cex.lab = 1)
for(i in 1:(4)){
lines(1:n.mcmc, M.param[i, ], col = col[i])
}
dev.off()
###
### Code for visuals midway through running - helpful for debugging
###
col = c("#120d08", "#bc5356", "#538bbc", "#53bc84")
#Parameters
plot(0,0,xlab="MCMC Runs",
ylab="Rates (per minute)",
ylim=c(0,(max(params[1, 1:(l-1)] + params[2, 1:(l-1)]) * 60)),
xlim=c(0,l),
type="n",
cex.lab = 1)
lines(1:(l-1), 60 * (params[1, 1:(l-1)] + params[2, 1:(l-1)]), col = col[1])
lines(1:(l - 1), 60 * params[2, 1:(l-1)], col = col[2])
lines(1:(l-1), (60 * params[3, 1:(l-1)]), col = col[3])
lines(1:(l-1), (60 * params[4, 1:(l-1) ]), col = col[4])
#States over time
#
X.est = matrix(data = rep(NA, Time), nrow = Time, ncol = 1)
for(t in 1:Time ){
X.est[t, 1] = mean(X.param[t, 1:(l-1)])
}
plot(X.est, type = "l")
plot(round(X.est), type = "l")
#M
plot(0,0,xlab="MCMC Runs", ylab = "M", ylim = c(0, max(M.param[1:(l-1)])), xlim=c(0,n.mcmc),
type="n", cex.lab = 1)
for(i in 1:(4)){
lines(1:(l-1), M.param[i, 1:(l-1)], col = col[i])
}
|
/Data Exploration/Colony 1/CTMC.4hrLoEntrance.R
|
no_license
|
MLBartley/Ant-Research
|
R
| false | false | 11,277 |
r
|
## A pair of functions that cache the inverse of a matrix.
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y){
x <<- y
inv <<- NULL
}
get <- function() x
setInverse <- function(solveMatrix) inv <<- solveMatrix
getInverse <- function() inv
list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getInverse()
if(!is.null(inv)){
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setInverse(inv)
inv
}
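## Illustrative usage (not part of the original assignment file):
m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
cacheSolve(m)   # computes the inverse and caches it
cacheSolve(m)   # prints "getting cached data" and returns the cached inverse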
|
/cachematrix.R
|
no_license
|
AdityaNasam/ProgrammingAssignment2
|
R
| false | false | 984 |
r
|
#**Import the data set**
setwd("C:/Pradeep/Project/Learning/ML A-Z/P1 Data Preprocessing/Machine-Learning-A-Z/Part 2 - Regression/Section 4 - Simple Linear Regression/Simple_Linear_Regression_sample/")
real_dataset=read.csv("Salary_Data.csv")
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
library(caTools)
set.seed(123)
real_split = sample.split(real_dataset$Salary, SplitRatio = 2/3)
real_training_set = subset(real_dataset, real_split == TRUE)
real_test_set = subset(real_dataset, real_split == FALSE)
# Fitting Simple Linear Regression to the Training Set
real_regressor=lm(formula = Salary ~ YearsExperience,data = real_training_set)
summary(real_regressor)
# Predicting the test results
real_y_pred=predict(real_regressor,newdata=real_test_set)
#Visualizing training test results
#install.packages('ggplot2')
library(ggplot2)
ggplot() +
geom_point(aes(x=real_training_set$YearsExperience,y = real_training_set$Salary),
colour="red"
)+
geom_line(aes(x=real_training_set$YearsExperience,predict(real_regressor,newdata = real_training_set)),
colour ="blue"
)+
ggtitle("Salary vs Exp (Training set)") +
ylab("Years") +
xlab("Exp")
#Visualizing training test results
#install.packages('ggplot2')
library(ggplot2)
ggplot() +
geom_point(aes(x=real_test_set$YearsExperience,y = real_test_set$Salary),
colour="red"
)+
geom_line(aes(x=real_training_set$YearsExperience,predict(real_regressor,newdata = real_training_set)),
colour ="blue"
)+
ggtitle("Salary vs Exp (Test set)") +
ylab("Years") +
xlab("Exp")
|
/Part 2 - Simple_Linear_Regression_sample/simple_linear_regression.r
|
no_license
|
pradeeppadmarajaiah/machine_learning
|
R
| false | false | 1,634 |
r
|
library(plotly)
library(dplyr)
library(stringr)
df_2018 <- read.csv("./data/df_2018.csv", stringsAsFactors = F)
# create summary information of certain column from given dataframe
get_df <- function(df, colname) {
result <- df %>%
filter( (!!as.symbol(colname)) != "") %>%
group_by( (!!as.symbol(colname))) %>%
summarise(Count = n())
# change the order
result[[colname]] <- factor(
result[[colname]],
levels = unique(result[[colname]])[order(result[["Count"]],
decreasing = T)]
)
return(result)
}
# create a barplot of given dataframe
get_g <- function(df, colname, title, color) {
result <- plot_ly(df, x = as.formula(paste0("~", colname)),
y = ~Count, type = "bar", color = I(color)) %>%
layout(xaxis = list(title = ""), yaxis = list(title = "Total Cases"),
title = title)
return(result)
}
weather_df <- get_df(df_2018, "WEATHER")
weather_g <- get_g(weather_df, "WEATHER", "Weather", "#ffa500")
road_condition_df <- get_df(df_2018, "ROADCOND")
road_condition_g <-
get_g(road_condition_df, "ROADCOND", "Road Conditions", "#ffe8b0")
light_df <- get_df(df_2018, "LIGHTCOND")
light_g <-
get_g(light_df, "LIGHTCOND", "Light Conditions", "#ffacd9")
collision_df <- get_df(df_2018, "COLLISIONTYPE")
collision_g <-
get_g(collision_df, "COLLISIONTYPE", "Collision Types", "#b5d8f6")
junction_df <- get_df(df_2018, "JUNCTIONTYPE")
junction_g <-
get_g(junction_df, "JUNCTIONTYPE", "Junction Types", "#c0c5ce")
address_df <- get_df(df_2018, "ADDRTYPE")
address_g <-
get_g(address_df, "ADDRTYPE", "Address Types", "#7647a2")
intro_panel <- tabPanel(
"Introduction",
includeCSS("style.css"),
div(
class = "main",
h2("Introduction", class = "tab-title"),
p("This project is a research of collisions happened in Seattle in 2018.
According to the news from The Seattle Times, Seattle traffic deaths
and injuries down only slightly in 2018 compared to previous year.
Most of the fatalities were pedestrians. However, Seattle Department
of Transportation’s Vision Zero goal is to end traffic deaths and serious
injuries on city streets in Seattle by 2030. Vision Zero is an international
traffic-safety program started in Sweden and adopted by Seattle in 2015.
The result of traffic fatalities and injuries in 2018 leaves the
city far from meeting its goal."),
p("Based on our daily experience, traffic conditions
Seattle is not satisfying, especially in peak period of a day.
For example, collisions and congestions
often happen in Interstate 5. In addition, various weather in Seattle might
increase the probability of collisions."),
tags$figure(
img(
src =
"http://seattletimes.wpengine.netdna-cdn.com/today/files/2014/10/carcrash2.jpg",
class = "image"
),
tags$figcaption("Part of the scene of a multi-car crash
Friday afternoon on Rainier Avenue South in Seattle.")
),
p("In order to know more detailed information about
collisions such as locations, time and weather conditions,
we find the data of collisions in 2018 collected
from Seattle GSI. The data contains 40 variables in each specific collision
such as weather, if bicycles were present, fatalities, and location."),
a("Data Source", href = "https://data.seattle.gov/widgets/vac5-r8kk")
)
)
summary_plots_panel <- tabPanel(
"Summary Plots",
div(
class = "main",
h2("Summary Information", class = "tab-title"),
div(
class = "plots",
plotlyOutput("weather_sum"),
plotlyOutput("road_condition_sum"),
plotlyOutput("light_sum"),
plotlyOutput("collision_sum"),
plotlyOutput("junction_sum"),
plotlyOutput("address_sum")
)
)
)
summary_info_panel <- tabPanel(
"Summary Information",
div(
class = "main",
h2("Conclusion", class = "tab-title"),
tags$ol(
class = "con-list",
tags$li("Throught the analysis of time there was only one surpising
pattern, that both afternoon and evening had the much higher
numbers of involvment over all 6 factors. Other than that there
was no other patterns across either weekdays or months. However
this does make sense, traffic always becomes worse than usual
during peak period.Peak period appears after adults getting off
work and children leaving school. That explains the largest amount
of collisions happened in the afternoon. Moreover,
driving at night is more difficult because of light
conditions. Dark environment will affect drivers’ visions.
So, collisions are more likely to happen.
"),
tags$li("Collisions happened a lot when the road is dry.
We see several places with high density of car accidents.
Such as Rainier Ave Street, Aurora Ave N, 15th Ave W,
Lake City Way NE, Martin Luther King Jr Way S.
These are main roads cross the city and connecting
to nearby cities. Accidents are easy to happen when
traffic flows are getting intense. Furthermore, downtown
Seattle had constructions in many areas which made
driving in downtown more difficult. High population
density will also increase the probability of collisions."),
tags$li("The result for Casualty & Weather part is unexpected
because we originally think bad weathers will lead to more
casualties. Turns out, the reality is that most casualties
happened in clear weather. So, one possible reason could
be the direct harsh sunlight that could influence drivers’
vision, which will increase the probability of accidents.
Another reason could be that clear and partly cloudy days
take the largest proportion all over the year."),
tags$li("Collisions happened most in blocks. If the city wants
to reach the goal of Vision Zero, the government should
figure out why accidents always happened in the mid of
blocks and try to find a way to prevent the accidents.
Luckily, the government has plan for improving the traffic
conditions. Seattle Department of transportation states
that: traffic collisions aren't accidents - they're
preventable through smarter street design, targeted
enforcement, and thoughtful public engagement. Together,
we can make Seattle's streets safer for everyone.")
),
p("Source: https://www.seattle.gov/visionzero")
)
)
|
/rshinyapp/intro_and_summary.R
|
permissive
|
bobbydyr/datascience
|
R
| false | false | 6,808 |
r
|
# Scatter plot of area vs intensity after filtering down to specific planes within an image. Also counts cells in those planes.
library(xlsx)
library(ggplot2)
library(ggridges)
library(forcats)
library(Cairo)
CairoWin()
# Set the working directory
setwd("D:/TB model microscopy/5dg/")
inputfolder = "D:/TB model microscopy/5dg/"
filelist = list.files(path=inputfolder, pattern = ".xlsx", all.files=FALSE)
filelist = list.files(path=inputfolder, pattern = ".csv", all.files=FALSE)
resultbox=data.frame()
for (experimentid in filelist){
#fullsheet = read.xlsx(experimentid, sheetIndex=1)
fullsheet = read.csv(experimentid)
colnames(fullsheet)[1] <- "imagename"
colnames(fullsheet)[2] <- "numberplanes"
colnames(fullsheet)[6] <- "nucarea"
imagelist = unique(fullsheet$imagename)
for (i in imagelist){
smalltable = subset(fullsheet, fullsheet$imagename ==i)
numplanes = max(smalltable$numberplanes, na.rm = TRUE)
filteredwell = subset(smalltable, smalltable$numberplanes %in% c(13:16, 23:26, 31:34))
fullplanelist = unique(smalltable$numberplanes)
planelist = unique(filteredwell$numberplanes)
totalplanes = length(fullplanelist)
lenplanelist = length(planelist)
numcells = length(smalltable$imagename)
numfilteredcells = length(filteredwell$imagename)
avgunfiltered = median(smalltable$nucarea)
avgfiltered = median(filteredwell$nucarea)
cellsperimg = numcells/totalplanes
filteredcellsperimg = numfilteredcells / lenplanelist
resulthandler = data.frame("Experiment" = experimentid, "Image" = i, "Total Planes" = totalplanes, "Filtered Planes" = lenplanelist, "Total Cells" = numcells, "Filtered Cells" = numfilteredcells, "Total Cells/Img" = cellsperimg, "Filtered Cells/Img" = filteredcellsperimg, "Median Size" = avgunfiltered, "Filtered Median Size" = avgfiltered)
resultbox = rbind(resultbox,resulthandler)
}
}
# Save summary stats
#resultsFile = paste(saveDir, exptName, ".xlsx", sep="")
write.xlsx(resultbox, "advancedplanestats.xlsx")
fullsheet = read.csv(filelist[1])
fullsheet[fullsheet==""] <- NA
fullsheet <- na.omit(fullsheet)
# Give the column with the name labels a shorter name for calling it later
colnames(fullsheet)[3] <- "wellid"
colnames(fullsheet)[6] = "nucarea"
colnames(fullsheet)[7] = "nucintint"
colnames(fullsheet)[8] = "nucavint"
welllist = unique(fullsheet$wellid)
welllist
well1 = subset(fullsheet, fullsheet$wellid == "5dg7_4h_ B2_")
well2 = subset(fullsheet, fullsheet$wellid == "5dg7_24h B2_")
well2filter = subset(fullsheet, fullsheet$wellid == "5dg5_4h_2 B2_")
filteredwell = subset(well2filter, well2filter$Image.Plane %in% c(13:16, 23:26, 31:34))
fullplanelist = unique(well2filter$Image.Plane)
planelist = unique(filteredwell$Image.Plane)
totalplanes = length(fullplanelist)
lenplanelist = length(planelist)
CairoWin()
plota = ggplot(well1, aes(x = nucarea, y = nucintint)) +
geom_point(alpha=0.1, aes(colour="red")) +
geom_point(data=well2, alpha=0.2,aes(colour="blue")) +
ggtitle("Single Cell") +
theme_bw() +
scale_x_log10(expand=c(0,0)) +
guides(fill=FALSE) +
scale_y_log10(expand=c(0.05,0)) +
labs(title="Meera",
fill=NULL) +
#scale_fill_manual(values = manualfill) +
annotation_logticks(sides="b")
plota
wells = rbind(well1, well2)
d = ggplot(well1, aes(x = nucarea, y = nucavint)) +
scale_x_log10(limits = c(10, 10000)) +
#scale_y_log10()+
scale_y_continuous(limits=c(0, 66000)) +
labs(title="4h",
fill=NULL) +
geom_point(alpha = 0.2, shape = 19, size = 0.7) +
stat_density_2d(aes(fill= ..level.., alpha = ..level..), geom = "polygon", show.legend = FALSE) +
scale_fill_gradient(low = "yellow", high = "red") + scale_alpha(range = c(0.00, 0.5), guide = FALSE) +
theme_bw() + annotation_logticks()
d
e = ggplot(well2, aes(x = nucarea, y = nucavint)) +
scale_x_log10(limits = c(10, 10000)) +
#scale_y_log10()+
scale_y_continuous(limits=c(0, 66000)) +
labs(title="24h",
fill=NULL) +
geom_point(alpha = 0.2, shape = 19, size = 0.7) +
stat_density_2d(aes(fill= ..level.., alpha = ..level..), geom = "polygon", show.legend = FALSE) +
scale_fill_gradient(low = "yellow", high = "red") + scale_alpha(range = c(0.00, 0.5), guide = FALSE) +
theme_bw() + annotation_logticks()
e
e = ggplot(well2, aes(x = nucarea, y = nucavint)) +
scale_x_log10(limits = c(10, 10000)) +
scale_y_log10(limits = c(100000, 100000000))+
labs(title="24h",
fill=NULL) +
geom_point(alpha = 0.2, shape = 19, size = 0.7) +
stat_density_2d(aes(fill= ..level.., alpha = ..level..), geom = "polygon", show.legend = FALSE) +
scale_fill_gradient(low = "yellow", high = "red") + scale_alpha(range = c(0.00, 0.5), guide = FALSE) +
theme_bw() + annotation_logticks()
e
f = ggplot(well2filter, aes(x = nucarea, y = nucavint)) +
scale_x_log10(limits = c(10, 10000)) +
scale_y_log10(limits = c(10000, 100000))+
labs(title="24h",
fill=NULL) +
geom_point(alpha = 0.2, shape = 19, size = 0.7) +
stat_density_2d(aes(fill= ..level.., alpha = ..level..), geom = "polygon", show.legend = FALSE) +
scale_fill_gradient(low = "yellow", high = "red") + scale_alpha(range = c(0.00, 0.5), guide = FALSE) +
theme_bw() + annotation_logticks()
f
g = ggplot(filteredwell, aes(x = nucarea, y = nucavint)) +
scale_x_log10(limits = c(10, 10000)) +
scale_y_log10(limits = c(10000, 100000))+
labs(title="24h",
fill=NULL) +
geom_point(alpha = 0.2, shape = 19, size = 0.7) +
stat_density_2d(aes(fill= ..level.., alpha = ..level..), geom = "polygon", show.legend = FALSE) +
scale_fill_gradient(low = "yellow", high = "red") + scale_alpha(range = c(0.00, 0.5), guide = FALSE) +
theme_bw() + annotation_logticks()
g
ggsave(d, filename = paste("5dg7", "4h Scatter.png"), type = "cairo",
width = 5, height = 5, units = "in")
ggsave(e, filename = paste("5dg7", "24h Scatter.png"), type = "cairo",
width = 5, height = 5, units = "in")
|
/Cell Count - Area vs Intensity with Plane Filter.R
|
no_license
|
DavidStirling/Misc-R-Scripts
|
R
| false | false | 6,165 |
r
|
|
DSC_220D5D6DAE <- readRDS("dsc_result/datamaker.R_1.rds")
input <- DSC_220D5D6DAE$data
mixcompdist <- 'normal'
library(ashr)
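# ash.wrapper(): thin convenience wrapper around ashr::ash(); when no args are given
# it defaults to a half-uniform mixture prior with the "fdr" method, then fits
# adaptive shrinkage to the supplied effect estimates (betahat) and standard errors.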
ash.wrapper=function(input,args=NULL){
if(is.null(args)){
args=list(mixcompdist="halfuniform",method="fdr")
}
res = do.call(ash, args=c(list(betahat=input$betahat,sebetahat=input$sebetahat),args))
return(res)
}
ash_data = ash.wrapper(input$input, list(mixcompdist = mixcompdist, optmethod = "mixEM"))
beta_est <- ash_data$PosteriorMean
pi0_est <- ashr::get_pi0(ash_data)
saveRDS(list(ash_data=ash_data, beta_est=beta_est, pi0_est=pi0_est), 'dsc_result/datamaker.R_1_runash.R_1.rds')
|
/inst/app/data/.sos/shrink_1_0.R
|
no_license
|
stephenslab/shinydsc
|
R
| false | false | 622 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/levels_and_differences.R
\name{get_level}
\alias{get_level}
\title{Gets the level of some data from changes}
\usage{
get_level(delta, dates, ref)
}
\arguments{
\item{delta}{Vector of changes in a variable of interest x (that is x(t)-x(t-1)).}
\item{dates}{Vector of dates associated to each value of x (it must be in ascending order).}
\item{ref}{Date at which we normalize the level to zero.}
}
\value{
Vector with normalized level.
}
\description{
Gets the level of some data from changes
}
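% Hypothetical usage sketch (not part of the original file); the data below are
% made up, and it assumes get_level() cumulates the changes and normalizes the
% resulting series to zero at the reference date:
\examples{
\dontrun{
delta <- c(0.5, -0.2, 0.3, 0.1)
dates <- as.Date(c("2020-01-31", "2020-02-29", "2020-03-31", "2020-04-30"))
get_level(delta, dates, ref = as.Date("2020-02-29"))
}
}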
|
/man/get_level.Rd
|
no_license
|
caiohm/lptools
|
R
| false | true | 573 |
rd
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all-generics.r
\name{p_group}
\alias{p_group}
\title{Access P group designations}
\usage{
p_group(x, ...)
}
\arguments{
\item{x}{A \code{\link{HLAGene}} or \code{\linkS4class{HLAAllele}} object.}
\item{...}{Further arguments passed to methods.}
}
\value{
A character vector of allele names.
}
\description{
Access P group designations
}
\examples{
\dontrun{
x <- HLAGene("DPB1")
p_group(x[is_complete(x)])
}
}
|
/man/p_group.Rd
|
no_license
|
jn7163/hlatools
|
R
| false | true | 490 |
rd
|
|
source('./scripts/clean.R')
library(survival)
df_full_tmp = rbind(df_readmit[, colnames(df_noreadmit)], df_noreadmit)
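# df_full_tmp pools the readmitted and non-readmitted patients on the columns they
# share; it is used below for the whole-cohort counts and descriptive statistics.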
## section 2.2: study population
nrow(df_full_tmp)
nrow(df_readmit)
nrow(df_noreadmit)
sum(df_full$GLUCOSE_1_TESTED == 0 | df_full$A1C_1_TESTED == 0)
## section 2.3: data pre-processing
# glucose tested
M <- as.table(cbind(summary(df_readmit$GLUCOSE_1_TESTED),
summary(df_noreadmit$GLUCOSE_1_TESTED)))
dimnames(M) <- list(a_type = levels(df_readmit$GLUCOSE_1_TESTED),
status = c('readmitted','non-readmitted'))
chisq.test(M)
# a1c tested
M <- as.table(cbind(summary(df_readmit$A1C_1_TESTED),
summary(df_noreadmit$A1C_1_TESTED)))
dimnames(M) <- list(a_type = levels(df_readmit$A1C_1_TESTED),
status = c('readmitted','non-readmitted'))
chisq.test(M)
## section 2.5: exploratory analysis
sum(df_full_tmp$GENDER == 'M')
sum(df_full_tmp$GENDER == 'F')
# chi-squared: gender
M <- as.table(cbind(summary(df_readmit$GENDER), summary(df_noreadmit$GENDER)))
dimnames(M) <- list(gender = c('F', 'M'),
status = c('readmitted','non-readmitted'))
chisq.test(M) # 0.0007818
# median age
median(df_readmit$AGE)
median(df_noreadmit$AGE)
# Medicaid/Medicare percentage
n_medicaid = sum(df_full_tmp$INSURANCE == 'Medicaid')
n_medicare = sum(df_full_tmp$INSURANCE == 'Medicare')
n_medicaid / 13085
n_medicare / 13085
nrow(subset(df_full_tmp, INSURANCE == 'Medicare' & AGE < 65)) /
n_medicare
median(subset(df_full_tmp, INSURANCE == 'Medicaid')$AGE)
# t-test: age
t.test(subset(df_full_tmp, ETHNICITY == 'BLACK/AFRICAN AMERICAN')$AGE,
subset(df_full_tmp, ETHNICITY == 'WHITE')$AGE) # < 2.2e-16
# t-test: length of stay
t.test(df_readmit$LENGTH_OF_STAY, df_noreadmit$LENGTH_OF_STAY) # 0.00713
# t-test: A1C
t.test(df_readmit$A1C_1, df_noreadmit$A1C_1) # < 2.2e-16
# t-test: glucose
t.test(df_readmit$GLUCOSE_1, df_noreadmit$GLUCOSE_1) # 0.125
# Wilcoxon signed-rank: glucose and A1C
wilcox.test(df_readmit$GLUCOSE_1, df_readmit$GLUCOSE_2) # 1.994e-06
wilcox.test(df_readmit$A1C_1, df_readmit$A1C_2) # < 2.2e-16
# median time-to-readmission
median(subset(df_readmit, GENDER == 'F')$TIME_TO_READMIT)
median(subset(df_readmit, GENDER == 'M')$TIME_TO_READMIT)
|
/scripts/eda.R
|
no_license
|
tq21/honors-thesis
|
R
| false | false | 2,276 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mutPredict.snv.prepare.R
\name{mutPredict.snv.prepare}
\alias{mutPredict.snv.prepare}
\title{Run mutation recurrence for SNV.}
\usage{
mutPredict.snv.prepare(mask.regions.file = system.file("extdata",
"mask_regions.RDS", package = "MutSpot"), nucleotide.selected.file,
continuous.features.selected.snv.url.file,
discrete.features.selected.snv.url.file,
sample.specific.features.url.file = NULL, snv.mutations.file,
snv.mutations.file2, region.of.interest, cores = 1, snv.model.file,
min.count = 2, hotspot.size = 21, genome.size = 2533374732,
hotspots = TRUE, output.dir)
}
\arguments{
\item{mask.regions.file}{Regions to mask in the genome, for example, non-mappable regions/immunoglobulin loci/CDS regions RDS file, default file = mask_regions.RDS.}
\item{nucleotide.selected.file}{Nucleotide context selected for model RDS file.}
\item{continuous.features.selected.snv.url.file}{Text file containing URLs of SNV continuous features selected for model.}
\item{discrete.features.selected.snv.url.file}{Text file containing URLs of SNV discrete features selected for model.}
\item{sample.specific.features.url.file}{Text file containing URLs of sample specific features, default = NULL.}
\item{snv.mutations.file}{SNV mutations found in region of interest MAF file.}
\item{snv.mutations.file2}{SNV mutations MAF file.}
\item{region.of.interest}{Region of interest bed file, default = NULL.}
\item{cores}{Number of cores, default = 1.}
\item{snv.model.file}{SNV model.}
\item{min.count}{Minimum number of mutated samples in each hotspot, default = 2.}
\item{hotspot.size}{Size of each hotspot, default = 21.}
\item{genome.size}{Total number of hotspots to run analysis on, default = 2533374732.}
\item{hotspots}{To run hotspot analysis or region-based analysis, default = TRUE.}
\item{output.dir}{Save temporary files in given output directory.}
}
\value{
Prepare intermediate files for hotspot prediction.
}
\description{
Run mutation recurrence for SNV.
}
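% Hypothetical call sketch (not part of the original file); all file paths below are
% placeholders, shown only to illustrate how the documented arguments fit together:
\examples{
\dontrun{
mutPredict.snv.prepare(
  nucleotide.selected.file = "nucleotide_selected.RDS",
  continuous.features.selected.snv.url.file = "continuous_features_snv_urls.txt",
  discrete.features.selected.snv.url.file = "discrete_features_snv_urls.txt",
  snv.mutations.file = "snv_mutations_regions.maf",
  snv.mutations.file2 = "snv_mutations_all.maf",
  region.of.interest = "regions_of_interest.bed",
  snv.model.file = "snv_model.RDS",
  cores = 2,
  output.dir = "./output")
}
}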
|
/MutSpot_Rpackage/man/mutPredict.snv.prepare.Rd
|
no_license
|
danchubb/hg38MutSpot
|
R
| false | true | 2,057 |
rd
|
|
## -----------------------------------------------------------------------------
## Replication code accompanying the paper "The biglasso Package: A Memory-
## and Computation-Efficient Solver for Lasso Model Fitting with Big Data in R"
## Authors: Yaohui Zeng and Patrick Breheny
##
## benchmarking platform:
## MacBook Pro with Intel Core i7 @ 2.3 GHz and 16 GB RAM.
##
## -----------------------------------------------------------------------------
## -----------------------------------------------------------------------------
## Section 4.3: Computational efficiency: Logistic regression - Simulated data
## Replicating Figure 3
# Utility functions for simulation
require(ncvreg)
require(glmnet)
require(picasso)
require(plyr)
require(ggplot2)
require(mvtnorm)
require(biglasso)
# picasso loss and objective
picasso_obj <- function(fit, x, y) {
n <- length(y)
beta <- as.matrix(fit$beta)
int <- fit$intercept
lambda <- fit$lambda
obj.val <- NULL
loss <- NULL
if (fit$family == "binomial") {
for (i in 1:length(lambda)) {
eta <- int[i] + x %*% beta[, i]
tmp <- -sum(y * eta - log(1 + exp(eta))) # negative log-likelihood
loss <- c(loss, tmp)
obj.val <- c(obj.val, tmp / n + lambda[i] * sum(abs(beta[, i])))
}
} else if (fit$family == "gaussian") {
for (i in 1:length(lambda)) {
tmp <- crossprod(y - x %*% beta[, i] - int[i]) # residual sum of squares
loss <- c(loss, tmp)
obj.val <- c(obj.val, tmp / (2*n) + lambda[i] * sum(abs(beta[, i])))
}
}
list(loss = loss, obj.val = obj.val)
}
# picasso vs glmnet, relative objective difference at each lambda
rel_obj_diff_pic <- function(fit.glm, fit, n, x, y) {
dev.glm <- (1 - fit.glm$dev.ratio) * fit.glm$nulldev / 2
obj.glm <- as.numeric(dev.glm / n + fit.glm$lambda * colSums(abs(fit.glm$beta)))
obj.fit <- picasso_obj(fit, x, y)
rel.diff.obj <- (obj.fit$obj.val - obj.glm) / obj.glm
rel.diff.obj
}
# relative objective difference at each lambda
rel_obj_diff <- function(fit.glm, fit, n) {
dev.glm <- (1 - fit.glm$dev.ratio) * fit.glm$nulldev / 2
obj.glm <- as.numeric(dev.glm / n + fit.glm$lambda * colSums(abs(fit.glm$beta)))
if ("glmnet" %in% class(fit)) {
dev.glm.fit <- (1 - fit$dev.ratio) * fit$nulldev
obj.fit <- as.numeric(dev.glm.fit / n + fit$lambda * colSums(abs(fit$beta)))
} else {
obj.fit <- as.numeric(fit$loss / n + fit$lambda * colSums(abs(fit$beta[-1, ])))
}
rel.diff.obj <- (obj.fit - obj.glm) / obj.glm
rel.diff.obj
}
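# which_case(): detects which of n, p, q, eff.nonzero, corr was passed as a vector
# (i.e. the parameter being varied) and returns the case label, the varying values,
# and the name of the varying parameter.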
which_case <- function(n, p, q, eff.nonzero, corr) {
if (length(n) > 1) {
sim.case <- "vary_n"
which.vary <- n
which.vary.c <- 'n'
} else if (length(p) > 1) {
sim.case <- "vary_p"
which.vary <- p
which.vary.c <- 'p'
} else if (length(q) > 1) {
sim.case <- 'vary_q'
which.vary <- q
which.vary.c <- 'q'
} else if (length(eff.nonzero) > 1) {
sim.case <- 'vary_beta'
which.vary <- eff.nonzero
which.vary.c <- 'eff.nonzero'
} else if (length(corr) > 1) {
sim.case <- 'vary_corr'
which.vary <- corr
which.vary.c <- 'corr'
} else {
sim.case <- 'vary_NA'
which.vary <- n
which.vary.c <- 'NA'
}
list(sim.case = sim.case, which.vary = which.vary, which.vary.c = which.vary.c)
}
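# sim(): for each value of the varying parameter and `rep` replications, simulates
# logistic-regression data, fixes a common lambda path via an initial biglasso fit,
# then times picasso, ncvreg, glmnet and biglasso (1/2/4/8 cores) on that same path,
# returning the elapsed times as a rep x method x setting array plus the settings.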
sim <- function(n, p, q, eff.nonzero, corr, rep, methods, eps, lam.min, lam.log,
backingfile, descrpfile, backingpath) {
case <- which_case(n = n, p = p, q = q, eff.nonzero = eff.nonzero, corr = corr)
sim.case <- case$sim.case
which.vary <- case$which.vary
which.vary.c <- case$which.vary.c
parms <- list(case = case, n = n, p = p, q = q, eff.nonzero = eff.nonzero,
corr = corr, rep = rep, methods = methods, eps = eps,
lam.min = lam.min, lam.log = lam.log)
cat("\nStart simulation: ", format(Sys.time()))
cat("\n============================================================\n")
cat("\nR sessionInfo: \n\n")
print(sessionInfo())
## print out simulation setting
cat("\n Simulation case: ", sim.case)
cat("\n============================================================\n")
cat("\t Simulation settings: \n")
cat("\t ---------------------------------------------\n")
cat("\t n = ", n, '\n')
cat("\t p = ", p, '\n')
cat("\t q = ", q, '\n')
cat("\t eff.nonzero = ", eff.nonzero, '\n')
cat("\t corr = ", corr, '\n')
cat("\t rep = ", rep, '\n')
cat("\t methods = ", methods, '\n')
cat("\t eps = ", eps, '\n')
cat("\t lam.min = ", lam.min, '\n')
cat("\t lam.log = ", lam.log, '\n')
cat("\n============================================================\n\n")
time.all <- array(NA, dim = c(rep, length(methods), length(which.vary)))
for (i in 1:length(which.vary)) {
cat("\t", which.vary.c, " = ", which.vary[i], "; start time: ", format(Sys.time()), '\n')
cat("\t---------------------------------------------\n")
for (j in 1:rep) {
time <- NULL
if (sim.case == 'vary_n') {
beta.nonzero <- runif(q, -eff.nonzero, eff.nonzero)
beta <- rep(0, p)
nonzero.id <- sample(p, q)
beta[nonzero.id] <- beta.nonzero
if (corr < 1 && corr > 0) {
Sigma <- matrix(corr, ncol = p, nrow = p)
diag(Sigma) <- 1
x <- rmvnorm(n[i], sigma = Sigma, method = 'chol')
} else {
x <- matrix(rnorm(p * n[i]), ncol = p)
}
eta <- x %*% beta
prob <- exp(eta) / (1 + exp(eta))
y <- rbinom(n[i], 1, prob)
} else if (sim.case == 'vary_p') {
beta.nonzero <- runif(q, -eff.nonzero, eff.nonzero)
beta <- rep(0, p[i])
nonzero.id <- sample(p[i], q)
beta[nonzero.id] <- beta.nonzero
if (corr < 1 && corr > 0) {
Sigma <- matrix(corr, ncol = p[i], nrow = p[i])
diag(Sigma) <- 1
x <- rmvnorm(n, sigma = Sigma, method = 'chol')
} else {
x <- matrix(rnorm(p[i] * n), ncol = p[i])
}
eta <- x %*% beta
prob <- exp(eta) / (1 + exp(eta))
y <- rbinom(n, 1, prob)
} else if (sim.case == 'vary_q') {
beta.nonzero <- runif(q[i], -eff.nonzero, eff.nonzero)
beta <- rep(0, p)
nonzero.id <- sample(p, q[i])
beta[nonzero.id] <- beta.nonzero
if (corr < 1 && corr > 0) {
Sigma <- matrix(corr, ncol = p, nrow = p)
diag(Sigma) <- 1
x <- rmvnorm(n, sigma = Sigma, method = 'chol')
} else {
x <- matrix(rnorm(p * n), ncol = p)
}
eta <- x %*% beta
prob <- exp(eta) / (1 + exp(eta))
y <- rbinom(n, 1, prob)
} else if (sim.case == 'vary_beta') {
beta.nonzero <- runif(q, -eff.nonzero[i], eff.nonzero[i])
beta <- rep(0, p)
nonzero.id <- sample(p, q)
beta[nonzero.id] <- beta.nonzero
if (corr < 1 && corr > 0) {
Sigma <- matrix(corr, ncol = p, nrow = p)
diag(Sigma) <- 1
x <- rmvnorm(n, sigma = Sigma, method = 'chol')
} else {
x <- matrix(rnorm(p * n), ncol = p)
}
eta <- x %*% beta
prob <- exp(eta) / (1 + exp(eta))
y <- rbinom(n, 1, prob)
} else if (sim.case == "vary_corr") {
beta.nonzero <- runif(q, -eff.nonzero, eff.nonzero)
beta <- rep(0, p)
nonzero.id <- sample(p, q)
beta[nonzero.id] <- beta.nonzero
## correlation matrix
if (corr[i] < 1 && corr[i] > 0) {
Sigma <- matrix(corr[i], ncol = p, nrow = p)
diag(Sigma) <- 1
x <- rmvnorm(n, sigma = Sigma, method = 'chol')
} else {
x <- matrix(rnorm(p * n), ncol = p)
}
eta <- x %*% beta
prob <- exp(eta) / (1 + exp(eta))
y <- rbinom(n, 1, prob)
} else if (sim.case == 'vary_NA') {
beta.nonzero <- runif(q, -eff.nonzero, eff.nonzero)
beta <- rep(0, p)
nonzero.id <- sample(p, q)
beta[nonzero.id] <- beta.nonzero
if (corr < 1 && corr > 0) {
Sigma <- matrix(corr, ncol = p, nrow = p)
diag(Sigma) <- 1
x <- rmvnorm(n, sigma = Sigma, method = 'chol')
} else {
x <- matrix(rnorm(p * n), ncol = p)
}
eta <- x %*% beta
prob <- exp(eta) / (1 + exp(eta))
y <- rbinom(n, 1, prob)
}
x.bm <- as.big.matrix(x, backingfile = backingfile, descriptorfile = descrpfile,
backingpath = backingpath, type = 'double')
fit.hsr0 <- biglasso(x.bm, y, family = 'binomial', screen = 'SSR',
lambda.log.scale = lam.log, lambda.min = lam.min,
ncores = 4, eps = eps)
lambda <- fit.hsr0$lambda
# PICASSO
st <- system.time(fit.pic <- picasso(x, y, family = 'binomial',
prec = eps, lambda = lambda))
time <- c(time, st['elapsed'])
rm(fit.pic)
gc()
# ncvreg
st <- system.time(fit.ncv <- ncvreg(x, y, penalty = 'lasso', family = 'binomial',
lambda = lambda, eps = eps))
time <- c(time, st['elapsed'])
rm(fit.ncv)
gc()
# glmnet
glmnet.control(fdev = 0, devmax = 1)
st <- system.time(fit.glm.true <- glmnet(x, y, family = 'binomial',
lambda = lambda, thresh = eps))
time <- c(time, st['elapsed'])
rm(fit.glm.true)
gc()
rm(x)
gc()
# SSR-Slores, 1 core
st <- system.time(fit.slores1 <- biglasso(x.bm, y, family = 'binomial',
screen = "SSR-Slores",
safe.thresh = 0, ncores = 1,
lambda = lambda, eps = eps))
time <- c(time, st['elapsed'])
rm(fit.slores1)
gc()
# SSR-Slores, 2 core
st <- system.time(fit.slores2 <- biglasso(x.bm, y, family = 'binomial',
screen = "SSR-Slores",
safe.thresh = 0, ncores = 2,
lambda = lambda, eps = eps))
time <- c(time, st['elapsed'])
rm(fit.slores2)
gc()
# SSR-Slores, 4 core
st <- system.time(fit.slores4 <- biglasso(x.bm, y, family = 'binomial',
screen = "SSR-Slores",
safe.thresh = 0, ncores = 4,
lambda = lambda, eps = eps))
time <- c(time, st['elapsed'])
rm(fit.slores4)
gc()
# SSR-Slores, 8 core
st <- system.time(fit.slores8 <- biglasso(x.bm, y, family = 'binomial',
screen = "SSR-Slores",
safe.thresh = 0, ncores = 8,
lambda = lambda, eps = eps))
time <- c(time, st['elapsed'])
rm(fit.slores8)
gc()
file.remove(paste0(backingpath, '/', backingfile))
file.remove(paste0(backingpath, '/', descrpfile))
time <- as.numeric(time)
time.all[j, , i] <- time
cat("\t\trep", j, '; time = ', time, "\n")
}
cat("\t", which.vary.c, " = ", which.vary[i], "; end time: ", format(Sys.time()), '\n')
cat("\n============================================================\n")
}
cat("\nEnd simulation: ", format(Sys.time()), "\n")
cat("\n============================================================\n")
list(time.all = time.all,
parms = parms
)
}
# ============================================================
# Case 1: vary n
# ============================================================
rm(list = ls())
gc()
set.seed(1234)
date <- Sys.Date()
n <- c(100, 200, 500, 1000, 2000, 5000, 10000, 20000)
# n <- c(2000, 20000)
p <- 10000
q <- 20
eff.nonzero <- 1
corr <- 0
rep <- 20
methods <- c('picasso', 'ncvreg', 'glmnet', 'SSR-Slores', 'biglasso (2 cores)',
'biglasso (4 cores)', 'biglasso (8 cores)')
eps <- 1e-6
lam.min <- 0.1
lam.log <- FALSE
backingfile <- 'back.bin'
descrpfile <- 'descrb.desc'
backingpath <- getwd()
res <- sim(n = n, p = p, q = q, eff.nonzero = eff.nonzero, corr = corr,
rep = rep, methods = methods, eps = eps, lam.min = lam.min,
lam.log = lam.log, backingfile = backingfile,
descrpfile = descrpfile, backingpath = backingpath)
## Post analysis
methods <- res$parms$methods
which.vary <- res$parms$case$which.vary
sim.case <- res$parms$case$sim.case
if (sim.case == "vary_n") {
xlab <- 'Number of observations'
} else if (sim.case == "vary_p") {
xlab <- 'Number of features'
} else if (sim.case == 'vary_q') {
xlab <- 'Number of active features'
} else if (sim.case == 'vary_beta') {
xlab <- 'Magnitude of beta'
} else if (sim.case == 'vary_corr') {
xlab <- 'Magnitude of correlation'
} else {
xlab <- 'NA'
}
cat("\n============================================================\n")
cat("\nMean: \n\n")
time.mean <- apply(res$time.all, c(2, 3), mean, na.rm = TRUE)
rownames(time.mean) <- methods
colnames(time.mean) <- which.vary
print(time.mean)
cat("\nSE: \n\n")
time.se <- apply(res$time.all, c(2, 3), function(x) {
x <- x[!is.na(x)]
if (length(x) <= 1) {
return(NA)
} else {
return(sd(x) / sqrt(length(x)))
}
})
rownames(time.se) <- methods
colnames(time.se) <- which.vary
print(time.se)
## plot
# -----------------------------------------------------------------------------
rule.name <- methods
time.df <- data.frame(time = matrix(t(time.mean), ncol = 1, byrow = T),
Method = rep(rule.name, each = length(which.vary)),
Which.vary = rep(which.vary, length(methods)))
time.df$Method <- factor(time.df$Method, methods)
## package comparison
time.df.pkgs <- subset(time.df, Method %in% c('picasso', "ncvreg", "glmnet", "SSR-Slores",
'biglasso (2 cores)', 'biglasso (4 cores)', 'biglasso (8 cores)'))
time.df.pkgs$Method <- revalue(time.df.pkgs$Method, c("SSR-Slores"="biglasso (1 core)"))
time.df.pkgs$Package <- time.df.pkgs$Method
# -----------------------------------------------------------------------------
# Figure 3 (b)
# -----------------------------------------------------------------------------
library(RColorBrewer)
gp.pkgs <- ggplot(time.df.pkgs, aes(x = Which.vary, y = time, color = Package)) +
geom_line(size = 1) +
scale_x_continuous(breaks = pretty(range(time.df$Which.vary)),
limits = range(time.df$Which.vary)) +
scale_y_continuous(breaks = pretty(range(time.df$time))) +
xlab(xlab) +
ylab("Computing time (s)") +
# theme(legend.position = 'top') +
theme_bw() +
theme(legend.position = c(.2, .7))
gg.colors <- gg.colors.default <- unique(ggplot_build(gp.pkgs)$data[[1]]$colour)
gg.colors[3] <- "#6495ED" # comflowerblue
gp.pkgs <-
gp.pkgs + scale_colour_manual(
values = gg.colors
)
date <- Sys.Date()
pdf(file = paste0(date, '_', sim.case, '_pkgs_logistic.pdf'), width = 5, height = 4)
print(gp.pkgs)
dev.off()
# ============================================================
# Case 2: vary p
# ============================================================
rm(list = ls())
gc()
set.seed(1234)
date <- Sys.Date()
n <- 1000
p <- c(1000, 2000, 5000, 10000, 20000, 50000, 100000, 200000)
q <- 20
eff.nonzero <- 1
corr <- 0
rep <- 20
methods <- c('picasso', 'ncvreg', 'glmnet', 'SSR-Slores',
'biglasso (2 cores)', 'biglasso (4 cores)', 'biglasso (8 cores)')
eps <- 1e-6
lam.min <- 0.1
lam.log <- FALSE
backingfile <- 'back.bin'
descrpfile <- 'descrb.desc'
backingpath <- getwd()
res <- sim(n = n, p = p, q = q, eff.nonzero = eff.nonzero, corr = corr,
rep = rep, methods = methods, eps = eps, lam.min = lam.min,
lam.log = lam.log, backingfile = backingfile,
descrpfile = descrpfile, backingpath = backingpath)
# post analysis
methods <- res$parms$methods
which.vary <- res$parms$case$which.vary
sim.case <- res$parms$case$sim.case
if (sim.case == "vary_n") {
xlab <- 'Number of observations'
} else if (sim.case == "vary_p") {
xlab <- 'Number of features'
} else if (sim.case == 'vary_q') {
xlab <- 'Number of active features'
} else if (sim.case == 'vary_beta') {
xlab <- 'Magnitude of beta'
} else if (sim.case == 'vary_corr') {
xlab <- 'Magnitude of correlation'
} else {
xlab <- 'NA'
}
cat("\n============================================================\n")
cat("\nMean: \n\n")
time.mean <- apply(res$time.all, c(2, 3), mean, na.rm = TRUE)
rownames(time.mean) <- methods
colnames(time.mean) <- which.vary
print(time.mean)
cat("\nSE: \n\n")
time.se <- apply(res$time.all, c(2, 3), function(x) {
x <- x[!is.na(x)]
if (length(x) <= 1) {
return(NA)
} else {
return(sd(x) / sqrt(length(x)))
}
})
rownames(time.se) <- methods
colnames(time.se) <- which.vary
print(time.se)
## plot
# -----------------------------------------------------------------------------
rule.name <- methods
time.df <- data.frame(time = matrix(t(time.mean), ncol = 1, byrow = T),
Method = rep(rule.name, each = length(which.vary)),
Which.vary = rep(which.vary, length(methods)))
time.df$Method <- factor(time.df$Method, methods)
## package comparison
time.df.pkgs <- subset(time.df, Method %in% c('picasso', "ncvreg", "glmnet", "SSR-Slores",
'biglasso (2 cores)', 'biglasso (4 cores)', 'biglasso (8 cores)'))
time.df.pkgs$Method <- revalue(time.df.pkgs$Method, c("SSR-Slores"="biglasso (1 core)"))
time.df.pkgs$Package <- time.df.pkgs$Method
# -----------------------------------------------------------------------------
# Figure 3 (a)
# -----------------------------------------------------------------------------
library(RColorBrewer)
gp.pkgs <- ggplot(time.df.pkgs, aes(x = Which.vary, y = time, color = Package)) +
geom_line(size = 1) +
scale_x_continuous(breaks = pretty(range(time.df$Which.vary)),
limits = range(time.df$Which.vary)) +
scale_y_continuous(breaks = pretty(range(time.df$time))) +
xlab(xlab) +
ylab("Computing time (s)") +
# theme(legend.position = 'top') +
theme_bw() +
theme(legend.position = c(.2, .7))
gg.colors <- gg.colors.default <- unique(ggplot_build(gp.pkgs)$data[[1]]$colour)
gg.colors[3] <- "#6495ED" # comflowerblue
gp.pkgs <-
gp.pkgs + scale_colour_manual(
values = gg.colors
)
date <- Sys.Date()
pdf(file = paste0(date, '_', sim.case, '_pkgs_logistic.pdf'), width = 5, height = 4)
print(gp.pkgs)
dev.off()
|
/reproduce_code_revision/Section_4-3_Logistic_simulated_data_README.R
|
no_license
|
YaohuiZeng/biglasso_reproduce
|
R
| false | false | 19,129 |
r
|
|
/COVID_testing.R
|
no_license
|
HongYuBinBin/Covid19-diffusion-prediction-model
|
R
| false | false | 18,297 |
r
| ||
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read_sas('C:/MEPS/.FYC..sas7bdat');
year <- .year.
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# How often doctor explained things (adults)
FYC <- FYC %>%
mutate(adult_explain = recode_factor(
ADEXPL42, .default = "Missing", .missing = "Missing",
"4" = "Always",
"3" = "Usually",
"2" = "Sometimes/Never",
"1" = "Sometimes/Never",
"-7" = "Don't know/Non-response",
"-8" = "Don't know/Non-response",
"-9" = "Don't know/Non-response",
"-1" = "Inapplicable"))
SAQdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~SAQWT.yy.F,
data = FYC,
nest = TRUE)
results <- svyby(~adult_explain, FUN=svymean, by = ~ind, design = subset(SAQdsgn, ADAPPT42 >= 1 & AGELAST >= 18))
print(results)
|
/mepstrends/hc_care/json/code/r/pctPOP__ind__adult_explain__.r
|
permissive
|
HHS-AHRQ/MEPS-summary-tables
|
R
| false | false | 1,240 |
r
|
|
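# The snippet below appears to be an auto-generated fuzzing harness (an AFL test case
# replayed under valgrind, per its file path): it feeds boundary-case inputs to
# CNull:::communities_individual_based_sampling_beta and prints the result structure.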
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22821294526489e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_beta/AFL_communities_individual_based_sampling_beta/communities_individual_based_sampling_beta_valgrind_files/1615835245-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false | false | 270 |
r
|
|
# Topic analysis
# Load the Korean dictionary (KoNLP) and text-mining packages
library('KoNLP')
library('RSQLite')
library('tm')
library('wordcloud')
# Load the data
facebook <- file('/Users/yuhayung/Desktop/coding/학원/texttest/facebook_bigdata.txt', encoding = "UTF-8")
facebook_data <- readLines(facebook)
head(facebook_data)
user_dic <- data.frame(term = c('R프로그래밍', '페이스북', '김진성', '소셜네트워크'), tag ='ncn')
# term : words to add / tag : ncn (noun part-of-speech code)
buildDictionary(ext_dic = 'sejong', user_dic = user_dic)
# buildDictionary (a KoNLP function) adds new words to the dictionary.
# ext_dic = 'sejong' : specifies the base dictionary to extend
# user_dic : the collection of words to be added to the dictionary
# tag = 'ncn' : marks the added words as nouns
###################################################################################################
# Define a user function for noun extraction
paste(extractNoun('유하영은 많은 사람과 소통을 위해서 소셜네트워크에 가입하였습니다.'), collapse =" ")
# collapse : joins the extracted nouns with spaces
exNouns <- function(x){paste(extractNoun(as.character(x)), collapse = " ")}
# The user-defined function runs [convert to character] > [extract nouns] > [join with spaces], as in the example above
facebook_nouns <- sapply(facebook_data, exNouns)
facebook_nouns[1]
###################################################################################################
# Build a corpus from the extracted words
myCorpus <- Corpus(VectorSource(facebook_nouns))
myCorpus
# Data preprocessing
# Step 2-1 : remove punctuation
myCorpusPrepro <- tm_map(myCorpus,removePunctuation)
# Step 2-2 : remove numbers
myCorpusPrepro <- tm_map(myCorpusPrepro, removeNumbers)
# Step 2-3 : convert to lower case
myCorpusPrepro <- tm_map(myCorpusPrepro, tolower)
# Step 2-4 : remove stopwords
myCorpusPrepro <- tm_map(myCorpusPrepro, removeWords, stopwords('english'))
# Step 2-5 : check the preprocessing result
inspect(myCorpusPrepro[1:5])
###################################################################################################
# Select words / one Korean syllable is stored in 2 bytes (2 syllables = 4 bytes)
myCorpusPrepro_term <- TermDocumentMatrix(myCorpusPrepro, control = list(wordLengths = c(4, 16)))
myCorpusPrepro_term
# Convert the matrix structure to a data.frame
myTerm_df <- as.data.frame(as.matrix(myCorpusPrepro_term))
dim(myTerm_df)
###################################################################################################
# Compute word frequencies
wordResult <- sort(rowSums(myTerm_df), decreasing = T)
wordResult[1:10] # show the 10 most frequent words
# Remove stopwords
myCorpusPrepro <- tm_map(myCorpus, removePunctuation) # remove punctuation
myCorpusPrepro <- tm_map(myCorpusPrepro, tolower) # convert to lower case
myCorpusPrepro <- tm_map(myCorpusPrepro, removeNumbers) # remove numbers
myStopwords = c(stopwords('english'), '사용','하기'); # specify the words to remove
myCorpusPrepro <- tm_map(myCorpusPrepro, removeWords, myStopwords)
# Select words and convert to a plain data structure
myCorpusPrepro_term <- TermDocumentMatrix(myCorpusPrepro, control = list(wordLengths = c(4,16)))
# Convert the corpus object to a data.frame
myTerm_df <- as.data.frame(as.matrix(myCorpusPrepro_term))
# Compute word frequencies
wordResult <- sort(rowSums(myTerm_df), decreasing = T)
wordResult[1:10]
###################################################################################################
# Word cloud visualization
# Apply the word cloud design (frequency, color, position, rotation, etc.)
# Create a data.frame of word names and frequencies
myName <- names(wordResult) # word names
word.df <- data.frame(word = myName, freq = wordResult) # build the data frame
str(word.df)
# Word colors and font
pal <- brewer.pal(12, "Paired") # use a 12-color palette
# Draw the word cloud
wordcloud(word.df$word, word.df$freq, scale = c(5, 1), min.freq = 3,
          random.color = F, rot.per = .1, colors = pal, family = 'NanumGothic')
|
/비정형데이터/토픽 분석.R
|
no_license
|
freegray/R_training
|
R
| false | false | 6,189 |
r
|
|
context("ph_comtrait")
sfile <- system.file("examples/sample_comstruct", package = "phylocomr")
tfile <- system.file("examples/traits_aot", package = "phylocomr")
sample_str <- paste0(readLines(sfile), collapse = "\n")
sfile2 <- tempfile()
cat(sample_str, file = sfile2, sep = '\n')
traits_str <- paste0(readLines(tfile), collapse = "\n")
tfile2 <- tempfile()
cat(traits_str, file = tfile2, sep = '\n')
sampledf <- read.table(sfile, header = FALSE,
stringsAsFactors = FALSE)
traitsdf_file <- system.file("examples/traits_aot_df",
package = "phylocomr")
traitsdf <- read.table(text = readLines(traitsdf_file), header = TRUE,
stringsAsFactors = FALSE)
test_that("ph_comtrait works with data.frame input", {
skip_on_appveyor()
skip_on_cran()
aa <- ph_comtrait(sample = sampledf, traits = traitsdf,
binary = c(FALSE, FALSE, FALSE, TRUE))
expect_is(aa, "data.frame")
expect_named(aa, c('trait', 'sample', 'ntaxa', 'mean', 'metric',
'meanrndmetric', 'sdrndmetric', 'sesmetric',
'ranklow', 'rankhigh', 'runs'))
expect_is(aa$trait, "character")
expect_type(aa$ntaxa, "integer")
expect_type(aa$sdrndmetric, "double")
expect_equal(unique(aa$runs), 999)
})
test_that("ph_comtrait works with file input", {
skip_on_appveyor()
skip_on_cran()
aa <- ph_comtrait(sample = sfile2, traits = tfile2)
expect_is(aa, "data.frame")
expect_named(aa, c('trait', 'sample', 'ntaxa', 'mean', 'metric',
'meanrndmetric', 'sdrndmetric', 'sesmetric',
'ranklow', 'rankhigh', 'runs'))
expect_is(aa$trait, "character")
expect_type(aa$ntaxa, "integer")
expect_type(aa$sdrndmetric, "double")
expect_equal(unique(aa$runs), 999)
})
test_that("ph_comtrait - different models give expected output", {
skip_on_appveyor()
skip_on_cran()
n0 <- ph_comtrait(sample = sfile2, traits = tfile2, null_model = 0)
n1 <- ph_comtrait(sample = sfile2, traits = tfile2, null_model = 1)
# identical
expect_identical(n0$metric, n1$metric)
# not identical
expect_false(identical(n0$sesmetric, n1$sesmetric))
})
test_that("ph_comtrait fails well", {
# required inputs
expect_error(ph_comtrait(),
"argument \"sample\" is missing, with no default")
expect_error(ph_comtrait("Adsf"),
"argument \"traits\" is missing, with no default")
# types are correct
expect_error(ph_comtrait(5, "asdfad"),
"sample must be of class character, data.frame")
expect_error(ph_comtrait("adf", 5),
"traits must be of class character, data.frame")
expect_error(ph_comtrait(sfile, tfile, null_model = mtcars),
"null_model must be of class numeric, integer")
expect_error(ph_comtrait(sfile, tfile, randomizations = mtcars),
"randomizations must be of class numeric, integer")
expect_error(ph_comtrait(sfile, tfile, metric = 5),
"metric must be of class character")
expect_error(ph_comtrait(sfile, tfile, binary = 5),
"binary must be of class logical")
expect_error(ph_comtrait(sfile, tfile, abundance = 5),
"abundance must be of class logical")
# correct set of values
expect_error(ph_comtrait(sfile, tfile, null_model = 15),
"null_model %in% 0:3 is not TRUE")
})
|
/data/genthat_extracted_code/phylocomr/tests/test-comtrait.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 3,328 |
r
|
context("ph_comtrait")
sfile <- system.file("examples/sample_comstruct", package = "phylocomr")
tfile <- system.file("examples/traits_aot", package = "phylocomr")
sample_str <- paste0(readLines(sfile), collapse = "\n")
sfile2 <- tempfile()
cat(sample_str, file = sfile2, sep = '\n')
traits_str <- paste0(readLines(tfile), collapse = "\n")
tfile2 <- tempfile()
cat(traits_str, file = tfile2, sep = '\n')
sampledf <- read.table(sfile, header = FALSE,
stringsAsFactors = FALSE)
traitsdf_file <- system.file("examples/traits_aot_df",
package = "phylocomr")
traitsdf <- read.table(text = readLines(traitsdf_file), header = TRUE,
stringsAsFactors = FALSE)
test_that("ph_comtrait works with data.frame input", {
skip_on_appveyor()
skip_on_cran()
aa <- ph_comtrait(sample = sampledf, traits = traitsdf,
binary = c(FALSE, FALSE, FALSE, TRUE))
expect_is(aa, "data.frame")
expect_named(aa, c('trait', 'sample', 'ntaxa', 'mean', 'metric',
'meanrndmetric', 'sdrndmetric', 'sesmetric',
'ranklow', 'rankhigh', 'runs'))
expect_is(aa$trait, "character")
expect_type(aa$ntaxa, "integer")
expect_type(aa$sdrndmetric, "double")
expect_equal(unique(aa$runs), 999)
})
test_that("ph_comtrait works with file input", {
skip_on_appveyor()
skip_on_cran()
aa <- ph_comtrait(sample = sfile2, traits = tfile2)
expect_is(aa, "data.frame")
expect_named(aa, c('trait', 'sample', 'ntaxa', 'mean', 'metric',
'meanrndmetric', 'sdrndmetric', 'sesmetric',
'ranklow', 'rankhigh', 'runs'))
expect_is(aa$trait, "character")
expect_type(aa$ntaxa, "integer")
expect_type(aa$sdrndmetric, "double")
expect_equal(unique(aa$runs), 999)
})
test_that("ph_comtrait - different models give expected output", {
skip_on_appveyor()
skip_on_cran()
n0 <- ph_comtrait(sample = sfile2, traits = tfile2, null_model = 0)
n1 <- ph_comtrait(sample = sfile2, traits = tfile2, null_model = 1)
# identical
expect_identical(n0$metric, n1$metric)
# not identical
expect_false(identical(n0$sesmetric, n1$sesmetric))
})
test_that("ph_comtrait fails well", {
# required inputs
expect_error(ph_comtrait(),
"argument \"sample\" is missing, with no default")
expect_error(ph_comtrait("Adsf"),
"argument \"traits\" is missing, with no default")
# types are correct
expect_error(ph_comtrait(5, "asdfad"),
"sample must be of class character, data.frame")
expect_error(ph_comtrait("adf", 5),
"traits must be of class character, data.frame")
expect_error(ph_comtrait(sfile, tfile, null_model = mtcars),
"null_model must be of class numeric, integer")
expect_error(ph_comtrait(sfile, tfile, randomizations = mtcars),
"randomizations must be of class numeric, integer")
expect_error(ph_comtrait(sfile, tfile, metric = 5),
"metric must be of class character")
expect_error(ph_comtrait(sfile, tfile, binary = 5),
"binary must be of class logical")
expect_error(ph_comtrait(sfile, tfile, abundance = 5),
"abundance must be of class logical")
# correct set of values
expect_error(ph_comtrait(sfile, tfile, null_model = 15),
"null_model %in% 0:3 is not TRUE")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.R
\name{print.sherlock_effects}
\alias{print.sherlock_effects}
\title{Print Method for Effects Summary Output}
\usage{
\method{print}{sherlock_effects}(x, ..., digits = 4L)
}
\arguments{
\item{x}{An object with class \code{sherlock_effects}, which should be a
modified \code{\link[data.table]{data.table}}.}
\item{...}{Other options (not currently used).}
\item{digits}{A \code{numeric} integer giving the number of digits to print
in the entries of the summary table.}
}
\value{
None. Called for the side effect of printing an informative summary
table from objects of class \code{sherlock_effects}.
}
\description{
Print Method for Effects Summary Output
}
|
/man/print.sherlock_effects.Rd
|
permissive
|
test-mass-forker-org-1/sherlock
|
R
| false | true | 745 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.R
\name{print.sherlock_effects}
\alias{print.sherlock_effects}
\title{Print Method for Effects Summary Output}
\usage{
\method{print}{sherlock_effects}(x, ..., digits = 4L)
}
\arguments{
\item{x}{An object with class \code{sherlock_effects}, which should be a
modified \code{\link[data.table]{data.table}}.}
\item{...}{Other options (not currently used).}
\item{digits}{A \code{numeric} integer giving the number of digits to print
in the entries of the summary table.}
}
\value{
None. Called for the side effect of printing an informative summary
table from objects of class \code{sherlock_effects}.
}
\description{
Print Method for Effects Summary Output
}
|
install.packages("scatterplot3d")
library("scatterplot3d")
house<-read.csv("./house.csv")
par(family="HiraKakuProN-W3")
scatterplot3d(house)
|
/house/house_scatterplot3d.r
|
no_license
|
KoretsuNobuyasu/R_data_analysis_school
|
R
| false | false | 141 |
r
|
install.packages("scatterplot3d")
library("scatterplot3d")
house<-read.csv("./house.csv")
par(family="HiraKakuProN-W3")
scatterplot3d(house)
|
## These functions return the inverse of a matrix with a cache solution.
# Library MASS contains the function ginv.
# ginv return the inverse matrix for squared and non-squared matrixes
library(MASS)
## This function creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
inverse <- NULL
set <- function(y) {
x <<- y
inverse <<- NULL
}
get <- function() x
setinverse <- function(minv) inverse <<- minv
getinverse <- function() inverse
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already been
## calculated (and the matrix has not changed), then cacheSolve should
## retrieve the inverse from the cache
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
minv <- ginv(data, ...)
x$setinverse(minv)
minv
}
## Test
A = matrix(
c(2, 4, 3, 1, 5, 7, 5, 6, 7),
nrow=3,
ncol=3,
byrow = TRUE
)
# Makes CacheMatrix from matrix A
matrix <- makeCacheMatrix(A)
# Show Matrix
print("Matrix A:")
print(matrix$get())
# Calculates the inverse of A without cache
print("Inverse of A:")
inverse <- cacheSolve(matrix)
print(inverse)
# Calculates the inverse of A with cache
print("Inverse of A:")
inverse <- cacheSolve(matrix)
print(inverse)
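# Note: the second cacheSolve() call above should print "getting cached data"
# and return the stored inverse instead of recomputing it.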
|
/cachematrix.R
|
no_license
|
pschiume/ProgrammingAssignment2
|
R
| false | false | 1,721 |
r
|
## These functions return the inverse of a matrix with a cache solution.
# Library MASS contains the function ginv.
# ginv return the inverse matrix for squared and non-squared matrixes
library(MASS)
## This function creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
inverse <- NULL
set <- function(y) {
x <<- y
inverse <<- NULL
}
get <- function() x
setinverse <- function(minv) inverse <<- minv
getinverse <- function() inverse
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already been
## calculated (and the matrix has not changed), then cacheSolve should
## retrieve the inverse from the cache
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
minv <- ginv(data, ...)
x$setinverse(minv)
minv
}
## Test
A = matrix(
c(2, 4, 3, 1, 5, 7, 5, 6, 7),
nrow=3,
ncol=3,
byrow = TRUE
)
# Makes CacheMatrix from matrix A
matrix <- makeCacheMatrix(A)
# Show Matrix
print("Matrix A:")
print(matrix$get())
# Calculates the inverse of A without cache
print("Inverse of A:")
inverse <- cacheSolve(matrix)
print(inverse)
# Calculates the inverse of A with cache
print("Inverse of A:")
inverse <- cacheSolve(matrix)
print(inverse)
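# Note: the second cacheSolve() call above should print "getting cached data"
# and return the stored inverse instead of recomputing it.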
|
#' @rdname datapackage
#' @param data a data frame to be added to the package
#' @param name what to name this dataset
#' @param path directory of the data package (defaults to the working directory)
#' @export
datapkg_write <- function(data, name, path = getwd()){
if(missing(name))
name <- deparse(substitute(data))
stopifnot(is.data.frame(data))
root <- sub("datapackage.json$", "", path)
root <- sub("/$", "", root)
dir.create(file.path(root, "data"), showWarnings = FALSE, recursive = TRUE)
json_path <- file.path(root, "datapackage.json")
csv_name <- file.path("data", paste0(name, ".csv"))
csv_path <- file.path(root, csv_name)
if(file.exists(csv_path))
stop("File already exists: ", csv_path, call. = FALSE)
pkg_info <- if(file.exists(json_path)){
message("Opening existing ", json_path)
    jsonlite::fromJSON(json_path, simplifyVector = FALSE)
} else {
message("Creating new ", json_path)
list(name = basename(path))
}
readr::write_csv(data, csv_path)
pkg_info$resources <- c(pkg_info$resources,
list(list(
path = csv_name,
name = name,
save_date = Sys.time(),
schema = make_schema(data)
))
)
json <- jsonlite::toJSON(pkg_info, pretty = TRUE, auto_unbox = TRUE)
writeLines(json, json_path)
}
make_schema <- function(data){
out <- as.list(rep(NA, length(data)))
for(i in seq_along(data)){
out[[i]] <- list(
name = names(data)[i],
type = get_type(data[[i]])
)
}
list(fields = out)
}
get_type <- function(x){
if(inherits(x, "Date")) return("date")
if(inherits(x, "POSIXt")) return("datetime")
if(is.character(x)) return("string")
if(is.integer(x)) return("integer")
if(is.numeric(x)) return("number")
if(is.logical(x)) return("boolean")
return("string")
}
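## Hedged usage sketch (illustration only, not part of the original file): assumes
## the readr and jsonlite dependencies are installed; "cars" and the temp directory
## are arbitrary example names, everything else is the code defined above.
# pkg_dir <- file.path(tempdir(), "datapkg_demo")
# dir.create(pkg_dir)
# datapkg_write(mtcars, name = "cars", path = pkg_dir)  # writes data/cars.csv
# readLines(file.path(pkg_dir, "datapackage.json"))     # resource entry with schema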
|
/R/datapkg_write.R
|
no_license
|
mrecos/datapkg
|
R
| false | false | 1,713 |
r
|
#' @rdname datapackage
#' @param data a data frame to be added to the package
#' @param name what to name this dataset
#' @param path directory of the data package (defaults to the working directory)
#' @export
datapkg_write <- function(data, name, path = getwd()){
if(missing(name))
name <- deparse(substitute(data))
stopifnot(is.data.frame(data))
root <- sub("datapackage.json$", "", path)
root <- sub("/$", "", root)
dir.create(file.path(root, "data"), showWarnings = FALSE, recursive = TRUE)
json_path <- file.path(root, "datapackage.json")
csv_name <- file.path("data", paste0(name, ".csv"))
csv_path <- file.path(root, csv_name)
if(file.exists(csv_path))
stop("File already exists: ", csv_path, call. = FALSE)
pkg_info <- if(file.exists(json_path)){
message("Opening existing ", json_path)
    jsonlite::fromJSON(json_path, simplifyVector = FALSE)
} else {
message("Creating new ", json_path)
list(name = basename(path))
}
readr::write_csv(data, csv_path)
pkg_info$resources <- c(pkg_info$resources,
list(list(
path = csv_name,
name = name,
save_date = Sys.time(),
schema = make_schema(data)
))
)
json <- jsonlite::toJSON(pkg_info, pretty = TRUE, auto_unbox = TRUE)
writeLines(json, json_path)
}
make_schema <- function(data){
out <- as.list(rep(NA, length(data)))
for(i in seq_along(data)){
out[[i]] <- list(
name = names(data)[i],
type = get_type(data[[i]])
)
}
list(fields = out)
}
get_type <- function(x){
if(inherits(x, "Date")) return("date")
if(inherits(x, "POSIXt")) return("datetime")
if(is.character(x)) return("string")
if(is.integer(x)) return("integer")
if(is.numeric(x)) return("number")
if(is.logical(x)) return("boolean")
return("string")
}
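## Hedged usage sketch (illustration only, not part of the original file): assumes
## the readr and jsonlite dependencies are installed; "cars" and the temp directory
## are arbitrary example names, everything else is the code defined above.
# pkg_dir <- file.path(tempdir(), "datapkg_demo")
# dir.create(pkg_dir)
# datapkg_write(mtcars, name = "cars", path = pkg_dir)  # writes data/cars.csv
# readLines(file.path(pkg_dir, "datapackage.json"))     # resource entry with schema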
|
pacman::p_load(mgcv)
source("/project/6002088/zhaokq/Simulation/FunctionsToLoad/Functions-V12.R")
load("/project/6002088/zhaokq/Simulation/FunctionsToLoad/BANK1betas.RData")
# Load the population parameters
load("/project/6002088/zhaokq/Simulation/FunctionsToLoad/Settings_Final_MD_one_direction.RData")
sss <- commandArgs(TRUE)
sss <- as.numeric(sss)
beta.0 <- BETA.0[,sss]; beta.1 <- BETA.1[,sss]
my.samp <- 100
#----------------------------------------------#
n.knots = 5; my.p1 = 0.9
#----------------------------------------------#
M = 100 # MC size
pos.index <-beta.0.est <- beta.1.est <- matrix(NA, nrow=length(pos), ncol = M)
GamObj <- vector("list", M)
sum.est <- array(NA, c( 2,M ,2), dimnames = list(c("s(Posit)","s(Posit):Z[, 1]"), NULL, c("chi.sq", "pvalue")))
sum.est.em <- array(NA, c( 2,M ,3), dimnames = list(c("s(Posit)","s(Posit):Z[, 1]"), NULL, c("chi.sq", "pvalue", "logPvalue")))
beta.0.se <- beta.1.se <- matrix(NA, nrow=length(pos), ncol = M)
time.0 <- Sys.time()
for(mm in 1: M){
set.seed(3432421+mm)
#------------------- -------------------------------------------#
Z <- data.frame(matrix(NA, nrow= my.samp, ncol = 1)); colnames(Z) <- c( "cell_type")
Z$cell_type <- sample(c(0,1), size = my.samp, replace = T ) # simulate Z from binomial distribution
Z <-as.matrix(Z);rownames(Z)<- NULL
samp.Z <- Z
# Use bootstrap to build the read-depth matrix
my.X <- matrix(sample(as.vector(dat.use.total), size = nrow(Z)*length(pos), replace = T) ,
nrow = nrow(Z), ncol = length(pos))
#--- Simulate the data ---#
sim.dat<-BSMethGammSim(n= nrow(Z), posit = pos, theta.0 =as.matrix(beta.0, ncol=1), beta= cbind(beta.1),
X = my.X, Z =Z,p0 = 0, p1 = 1)
#--- Organize the data before EM-smooth ---#
X <-my.X; Y <- sim.dat$Y
samp.size <- nrow(Y); my.p <- ncol(Y)
my.span.dat <- data.frame(Y=as.vector(t(Y)), X=as.vector(t(X)), Posit = rep(pos, samp.size),
ID = rep(1:samp.size, each=my.p),
sapply(1:ncol(Z), function(i){rep(Z[,i], each = my.p)}))
colnames(my.span.dat)[-(1:4)] <- colnames(Z)
my.span.dat <- my.span.dat[my.span.dat$X>0,]
# my.span.dat<- data.frame(my.span.dat, null = sample(c(0,1), size = nrow(my.span.dat), replace = T))
if(ncol(Z)==1){
Z <- matrix(my.span.dat[,-(1:4)], nrow = nrow(my.span.dat))
}else{
Z <- my.span.dat[,-(1:4)]
}
  # -- Fit the model using the proposed method ---#
out <- BSMethEM(data=my.span.dat, n.k = rep(n.knots,ncol(Z)+1 ),epsilon = 10^(-6)/300,p0 = 0,
p1 =1,maxStep = 500, method="REML", detail=F)
# -- Extract the functional parameter estimates ---#
beta.0.est[,mm] <- out$Beta.out[,"Intercept"]
beta.1.est[,mm] <- out$Beta.out[,2]
pos.index[,mm] <- out$SE.pos
beta.0.se [,mm] <- out$SE.out[,1]
beta.1.se [,mm] <- out$SE.out[,2]
GamObj[[mm]]<-out$FinalGamObj
sum.est[,mm,"chi.sq"] <- summary(out$FinalGamObj)$s.table[,"Chi.sq"]
sum.est[,mm,"pvalue"] <- summary(out$FinalGamObj)$s.table[,"p-value"]
sum.est.em [ ,mm, "chi.sq"] <- out$chi.sq
sum.est.em [ ,mm, "pvalue"] <- out$pvalue
sum.est.em [ ,mm, "logPvalue"] <- out$logpvalue
}
print(Sys.time()-time.0)
save.image(paste0 ("S", sss, "MD_", round(MD[sss]*100,2), "Exp-REML-knots", n.knots, "p1", my.p1, "Samp", my.samp, "Simu", M, "EDF.RData"))
#plot(pos, beta.0)
#par(mfrow=c(1,2), mar=c(4,4,1,1)) # 10 width X 3 height
#plot(pos[order(pos)], beta.0[order(pos)], type="l", xlab="Position",
# ylab=expression(beta[0]), xaxt ="n")
#axis(side = 1, at = pos[order(pos)], labels=F, lwd=0.5, lwd.ticks = 0.5, tck=0.03)
#axis(side=1, at = seq(round(min(pos)), round(max(pos)),length.out = 10 ) , tck= -0.02)
#summary(out$FinalGamObj)$s.table
|
/Scenario 2/Exp_No_Error/Smoothed-EM/Simu_100_Samp_100.R
|
no_license
|
kaiqiong/SOMNiBUS_Simu
|
R
| false | false | 3,818 |
r
|
pacman::p_load(mgcv)
source("/project/6002088/zhaokq/Simulation/FunctionsToLoad/Functions-V12.R")
load("/project/6002088/zhaokq/Simulation/FunctionsToLoad/BANK1betas.RData")
# Load the population parameters
load("/project/6002088/zhaokq/Simulation/FunctionsToLoad/Settings_Final_MD_one_direction.RData")
sss <- commandArgs(TRUE)
sss <- as.numeric(sss)
beta.0 <- BETA.0[,sss]; beta.1 <- BETA.1[,sss]
my.samp <- 100
#----------------------------------------------#
n.knots = 5; my.p1 = 0.9
#----------------------------------------------#
M = 100 # MC size
pos.index <-beta.0.est <- beta.1.est <- matrix(NA, nrow=length(pos), ncol = M)
GamObj <- vector("list", M)
sum.est <- array(NA, c( 2,M ,2), dimnames = list(c("s(Posit)","s(Posit):Z[, 1]"), NULL, c("chi.sq", "pvalue")))
sum.est.em <- array(NA, c( 2,M ,3), dimnames = list(c("s(Posit)","s(Posit):Z[, 1]"), NULL, c("chi.sq", "pvalue", "logPvalue")))
beta.0.se <- beta.1.se <- matrix(NA, nrow=length(pos), ncol = M)
time.0 <- Sys.time()
for(mm in 1: M){
set.seed(3432421+mm)
#------------------- -------------------------------------------#
Z <- data.frame(matrix(NA, nrow= my.samp, ncol = 1)); colnames(Z) <- c( "cell_type")
Z$cell_type <- sample(c(0,1), size = my.samp, replace = T ) # simulate Z from binomial distribution
Z <-as.matrix(Z);rownames(Z)<- NULL
samp.Z <- Z
# Use bootstrap to build the read-depth matrix
my.X <- matrix(sample(as.vector(dat.use.total), size = nrow(Z)*length(pos), replace = T) ,
nrow = nrow(Z), ncol = length(pos))
#--- Simulate the data ---#
sim.dat<-BSMethGammSim(n= nrow(Z), posit = pos, theta.0 =as.matrix(beta.0, ncol=1), beta= cbind(beta.1),
X = my.X, Z =Z,p0 = 0, p1 = 1)
#--- Organize the data before EM-smooth ---#
X <-my.X; Y <- sim.dat$Y
samp.size <- nrow(Y); my.p <- ncol(Y)
my.span.dat <- data.frame(Y=as.vector(t(Y)), X=as.vector(t(X)), Posit = rep(pos, samp.size),
ID = rep(1:samp.size, each=my.p),
sapply(1:ncol(Z), function(i){rep(Z[,i], each = my.p)}))
colnames(my.span.dat)[-(1:4)] <- colnames(Z)
my.span.dat <- my.span.dat[my.span.dat$X>0,]
# my.span.dat<- data.frame(my.span.dat, null = sample(c(0,1), size = nrow(my.span.dat), replace = T))
if(ncol(Z)==1){
Z <- matrix(my.span.dat[,-(1:4)], nrow = nrow(my.span.dat))
}else{
Z <- my.span.dat[,-(1:4)]
}
  # -- Fit the model using the proposed method ---#
out <- BSMethEM(data=my.span.dat, n.k = rep(n.knots,ncol(Z)+1 ),epsilon = 10^(-6)/300,p0 = 0,
p1 =1,maxStep = 500, method="REML", detail=F)
# -- Extract the functional parameter estimates ---#
beta.0.est[,mm] <- out$Beta.out[,"Intercept"]
beta.1.est[,mm] <- out$Beta.out[,2]
pos.index[,mm] <- out$SE.pos
beta.0.se [,mm] <- out$SE.out[,1]
beta.1.se [,mm] <- out$SE.out[,2]
GamObj[[mm]]<-out$FinalGamObj
sum.est[,mm,"chi.sq"] <- summary(out$FinalGamObj)$s.table[,"Chi.sq"]
sum.est[,mm,"pvalue"] <- summary(out$FinalGamObj)$s.table[,"p-value"]
sum.est.em [ ,mm, "chi.sq"] <- out$chi.sq
sum.est.em [ ,mm, "pvalue"] <- out$pvalue
sum.est.em [ ,mm, "logPvalue"] <- out$logpvalue
}
print(Sys.time()-time.0)
save.image(paste0 ("S", sss, "MD_", round(MD[sss]*100,2), "Exp-REML-knots", n.knots, "p1", my.p1, "Samp", my.samp, "Simu", M, "EDF.RData"))
#plot(pos, beta.0)
#par(mfrow=c(1,2), mar=c(4,4,1,1)) # 10 width X 3 height
#plot(pos[order(pos)], beta.0[order(pos)], type="l", xlab="Position",
# ylab=expression(beta[0]), xaxt ="n")
#axis(side = 1, at = pos[order(pos)], labels=F, lwd=0.5, lwd.ticks = 0.5, tck=0.03)
#axis(side=1, at = seq(round(min(pos)), round(max(pos)),length.out = 10 ) , tck= -0.02)
#summary(out$FinalGamObj)$s.table
|
testlist <- list(x = c(-3.17678814184056e-277, -3.17678814184056e-277, -3.17678814184056e-277, -5.02231727848791e-166, 1.32071904775755e+214, 1.13430513712795e+223, 2.85050289963452e-109, 1.3906627569276e-309, 1.39066270674129e-309, 1.26480805335359e-321, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y = numeric(0))
result <- do.call(netrankr:::checkPairs,testlist)
str(result)
|
/netrankr/inst/testfiles/checkPairs/libFuzzer_checkPairs/checkPairs_valgrind_files/1612882908-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 468 |
r
|
testlist <- list(x = c(-3.17678814184056e-277, -3.17678814184056e-277, -3.17678814184056e-277, -5.02231727848791e-166, 1.32071904775755e+214, 1.13430513712795e+223, 2.85050289963452e-109, 1.3906627569276e-309, 1.39066270674129e-309, 1.26480805335359e-321, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y = numeric(0))
result <- do.call(netrankr:::checkPairs,testlist)
str(result)
|
##### plot hat curve: Nt+1 = Nt + rNt(1-Nt/K)
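# The "hat" curve is the time-1 return map N_{t+1} = f(N_t); where it crosses the
# 1:1 line (f(N) = N) are the equilibria, here N* = 0 and N* = K = 500. The blue
# segments added further down cobweb the trajectory between the two curves.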
hat <- data.frame(seq(from = 0, to = 1000, by = 0.1))
hat[, 2] = sapply(hat[, 1], FUN = function(x){x + 1.8*x*(1 - x/500)})
hat1 = hat[1:sum(hat[, 2]>0), ] #drop negative data
plot(hat1[, 1], hat1[, 2], xlim=c(0, 1000), ylim=c(0, 1000), type="l", lwd=2,
col="green", xlab="N_t", ylab="N_t+1", xaxs="i", yaxs="i",
main="time 1 return map with growth rate 1.8")
abline(a=0, b=1, col="red", lwd=2)
##### plot trajectory of Nt
out <- data.frame(row.names=c("N_t", "N_t+1", "t"))
out[, 1] = c(100, 0, 0)
for (i in 1:1000){
out[2, i] = out[1, i] + 1.8*out[1, i]*(1 - out[1, i]/500)
out[, (i+1)] = c(out[2, i], 0, i)
}
out = t(out[, 1:1000])
out1 = round(out, digit=1)
#plot
for (i in 1:dim(out1)[1]){
segments(out1[i, 1], out1[i, 1], out1[i, 1], out1[i, 2], col="blue", lwd=2)
}
for (i in 1:dim(out1)[1]){
segments(out1[i, 1], out1[i, 2], out1[i, 2], out1[i, 2], col="blue", lwd=2)
}
##### plot time series of Nt
plot(out[1:50, 3], out[1:50, 1], main="discrete logistic population with growth
rate 1.8", xlab="time", ylab="N", col="blue", lwd=2, xaxs="i", yaxs="i",
type="l", ylim=c(min(out[1:50, 1]), max(out[1:50, 1])+100))
##### results for cases in r=2.3, 2.45, 2.56, 2.8
# write a function doing the same thing as above
allout <- function(K=500, Nini=100, r=1.8, TimeS="F", Dynamic="F", report="F"){
out <- data.frame(row.names=c("N_t", "N_t+1", "t"))
out[, 1] = c(Nini, 0, 0)
for (i in 1:1000){
out[2, i] = out[1, i] + r*out[1, i]*(1 - out[1, i]/K)
out[, (i+1)] = c(out[2, i], 0, i)
}
out = t(out[, 1:1000])
if (report=="T"){
return(out)
}
if (TimeS=="T"){
plot(out[1:50, 3], out[1:50, 1], main=paste("discrete logistic population with
growth rate", r, sep=" "), xlab="time", ylab="N", col="blue", lwd=1,
xaxs="i", yaxs="i", type="l",
ylim=c(min(out[1:50, 1]), max(out[1:50, 1])+100))
}
if (Dynamic=="T"){
hat <- data.frame(seq(from = 0, to = 1000, by = 0.1))
hat[, 2] = sapply(hat[, 1], FUN = function(x){x + r*x*(1 - x/K)})
hat1 = hat[1:sum(hat[, 2]>0), ]
out1 = round(out, digit=1)
plot(hat1[, 1], hat1[, 2], xlim=c(0, 1000), ylim=c(0, 1000), type="l", lwd=1,
col="green", xlab="N_t", ylab="N_t+1", xaxs="i", yaxs="i",
main=paste("time 1 return map with growth rate", r, sep=" "))
abline(a=0, b=1, col="red", lwd=1)
for (i in 1:dim(out1)[1]){
segments(out1[i, 1], out1[i, 1], out1[i, 1], out1[i, 2], col="blue", lwd=1)
}
for (i in 1:dim(out1)[1]){
segments(out1[i, 1], out1[i, 2], out1[i, 2], out1[i, 2], col="blue", lwd=1)
}
}
}
allout(r=1.8, TimeS="T", Dynamic="T")
allout(r=2.3, TimeS="T", Dynamic="T")
allout(r=2.45, TimeS="T", Dynamic="T")
allout(r=2.56, TimeS="T", Dynamic="T")
allout(r=2.8, TimeS="T", Dynamic="T")
allout(Nini= 101, r=1.8, TimeS="T")
allout(Nini= 101, r=2.3, TimeS="T")
allout(Nini= 101, r=2.45, TimeS="T")
allout(Nini= 101, r=2.56, TimeS="T")
allout(Nini= 101, r=2.8, TimeS="T")
##### plot bifurcation diagram
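# For each r the map is iterated 1000 steps from N0 = 100 via allout(report = "T");
# plotting only iterates 500-1000 discards the transient, so the points show the
# attractor: a stable fixed point, then period-doubling, then chaos as r grows.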
r <- seq(from=1.9, to=3, by=0.01)
bi <- data.frame(rep(0, 1000))
for (i in 1:length(r)){
bi[, i] = allout(r = r[i], report="T")[, 1]
}
plot(r, bi[1, ], xaxs="i", yaxs="i", xlim=c(1.8, 3), ylim=c(0, max(bi)+50),
type="n", xlab="Intrinsic growth rate r", ylab="Population size")
for (i in 500:1000){
points(r, bi[i, ], pch=20, col="blue", cex=0.4)
}
|
/Discrete_logistic.R
|
no_license
|
snakepowerpoint/Mathematics_for_life_scientists
|
R
| false | false | 3,614 |
r
|
##### plot hat curve: Nt+1 = Nt + rNt(1-Nt/K)
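# The "hat" curve is the time-1 return map N_{t+1} = f(N_t); where it crosses the
# 1:1 line (f(N) = N) are the equilibria, here N* = 0 and N* = K = 500. The blue
# segments added further down cobweb the trajectory between the two curves.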
hat <- data.frame(seq(from = 0, to = 1000, by = 0.1))
hat[, 2] = sapply(hat[, 1], FUN = function(x){x + 1.8*x*(1 - x/500)})
hat1 = hat[1:sum(hat[, 2]>0), ] #drop negative data
plot(hat1[, 1], hat1[, 2], xlim=c(0, 1000), ylim=c(0, 1000), type="l", lwd=2,
col="green", xlab="N_t", ylab="N_t+1", xaxs="i", yaxs="i",
main="time 1 return map with growth rate 1.8")
abline(a=0, b=1, col="red", lwd=2)
##### plot trajectory of Nt
out <- data.frame(row.names=c("N_t", "N_t+1", "t"))
out[, 1] = c(100, 0, 0)
for (i in 1:1000){
out[2, i] = out[1, i] + 1.8*out[1, i]*(1 - out[1, i]/500)
out[, (i+1)] = c(out[2, i], 0, i)
}
out = t(out[, 1:1000])
out1 = round(out, digit=1)
#plot
for (i in 1:dim(out1)[1]){
segments(out1[i, 1], out1[i, 1], out1[i, 1], out1[i, 2], col="blue", lwd=2)
}
for (i in 1:dim(out1)[1]){
segments(out1[i, 1], out1[i, 2], out1[i, 2], out1[i, 2], col="blue", lwd=2)
}
##### plot time series of Nt
plot(out[1:50, 3], out[1:50, 1], main="discrete logistic population with growth
rate 1.8", xlab="time", ylab="N", col="blue", lwd=2, xaxs="i", yaxs="i",
type="l", ylim=c(min(out[1:50, 1]), max(out[1:50, 1])+100))
##### results for cases in r=2.3, 2.45, 2.56, 2.8
# write a function doing the same thing as above
allout <- function(K=500, Nini=100, r=1.8, TimeS="F", Dynamic="F", report="F"){
out <- data.frame(row.names=c("N_t", "N_t+1", "t"))
out[, 1] = c(Nini, 0, 0)
for (i in 1:1000){
out[2, i] = out[1, i] + r*out[1, i]*(1 - out[1, i]/K)
out[, (i+1)] = c(out[2, i], 0, i)
}
out = t(out[, 1:1000])
if (report=="T"){
return(out)
}
if (TimeS=="T"){
plot(out[1:50, 3], out[1:50, 1], main=paste("discrete logistic population with
growth rate", r, sep=" "), xlab="time", ylab="N", col="blue", lwd=1,
xaxs="i", yaxs="i", type="l",
ylim=c(min(out[1:50, 1]), max(out[1:50, 1])+100))
}
if (Dynamic=="T"){
hat <- data.frame(seq(from = 0, to = 1000, by = 0.1))
hat[, 2] = sapply(hat[, 1], FUN = function(x){x + r*x*(1 - x/K)})
hat1 = hat[1:sum(hat[, 2]>0), ]
out1 = round(out, digit=1)
plot(hat1[, 1], hat1[, 2], xlim=c(0, 1000), ylim=c(0, 1000), type="l", lwd=1,
col="green", xlab="N_t", ylab="N_t+1", xaxs="i", yaxs="i",
main=paste("time 1 return map with growth rate", r, sep=" "))
abline(a=0, b=1, col="red", lwd=1)
for (i in 1:dim(out1)[1]){
segments(out1[i, 1], out1[i, 1], out1[i, 1], out1[i, 2], col="blue", lwd=1)
}
for (i in 1:dim(out1)[1]){
segments(out1[i, 1], out1[i, 2], out1[i, 2], out1[i, 2], col="blue", lwd=1)
}
}
}
allout(r=1.8, TimeS="T", Dynamic="T")
allout(r=2.3, TimeS="T", Dynamic="T")
allout(r=2.45, TimeS="T", Dynamic="T")
allout(r=2.56, TimeS="T", Dynamic="T")
allout(r=2.8, TimeS="T", Dynamic="T")
allout(Nini= 101, r=1.8, TimeS="T")
allout(Nini= 101, r=2.3, TimeS="T")
allout(Nini= 101, r=2.45, TimeS="T")
allout(Nini= 101, r=2.56, TimeS="T")
allout(Nini= 101, r=2.8, TimeS="T")
##### plot bifurcation diagram
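# For each r the map is iterated 1000 steps from N0 = 100 via allout(report = "T");
# plotting only iterates 500-1000 discards the transient, so the points show the
# attractor: a stable fixed point, then period-doubling, then chaos as r grows.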
r <- seq(from=1.9, to=3, by=0.01)
bi <- data.frame(rep(0, 1000))
for (i in 1:length(r)){
bi[, i] = allout(r = r[i], report="T")[, 1]
}
plot(r, bi[1, ], xaxs="i", yaxs="i", xlim=c(1.8, 3), ylim=c(0, max(bi)+50),
type="n", xlab="Intrinsic growth rate r", ylab="Population size")
for (i in 500:1000){
points(r, bi[i, ], pch=20, col="blue", cex=0.4)
}
|
\name{dfToGrid}
\alias{dfToGrid}
\docType{package}
\title{Grid function (Fonction de carroyage)}
\description{
Function to compute a grid (regular or not) from a data.frame.
(Fonction permettant de générer une grille (régulière ou non) à partir d'un data.frame.)
}
\usage{dfToGrid(df, sEPSG, iCellSize = NULL)}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{df}{A \code{data.frame} with the centroid coordinates of the squares to draw. To generate an irregular grid, a third column with each cell size must be provided. (x, y, iCellSize)
(Un \code{data.frame} comportant les coordonnées des carrés à dessiner. Pour obtenir une grille irrégulière, il faut fournir une troisième colonne indiquant la taille de chaque carreau. (x, y, iCellSize).)}
\item{sEPSG}{EPSG code of projection (\code{character}). For example, the RGF93 / Lambert-93 projection has "2154" code.
(code EPSG de la projection (\code{character}). Par exemple, la projection RGF93 / Lambert-93 a pour code "2154".)}
\item{iCellSize}{Cell size of the grid. If this argument is provided, the grid is regular.
(Taille des carreaux de la grille. Si cet argument est fourni, la grille est régulière.)}
}
\value{
Returns an object of class \code{sf} and \code{data.frame}.
(Retourne un objet de classe \code{sf} et \code{data.frame}.)
}
\author{Psar Analyse Urbaine Insee - Thierry Cornely, Laure Genebes, Arlindo Dos Santos, Cynthia Faivre, Auriane Renaud and Francois Semecurbe
}
\examples{
\dontrun{
# example 1 - regular grid
df <- data.frame(x = c(100, 100, 300, 300, 500), y = c(100, 300, 100, 300, 100))
dfResult <- dfToGrid(df = df, sEPSG = "2154", iCellSize = 200)
write_sf(obj = dfResult, dsn = "regularGrid.shp", delete_layer = TRUE)
# example 2 - irregular grid
df <- data.frame(x = c(50, 50, 150, 150, 300)
, y = c(50, 150, 50, 150, 100)
, iCellSize = c(50, 50, 50, 50, 100))
dfResult <- dfToGrid(df = df, sEPSG = "2154")
write_sf(obj = dfResult, dsn = "irregularGrid.shp", delete_layer = TRUE)
}}
|
/man/dfToGrid.Rd
|
no_license
|
PsarAU/btb
|
R
| false | false | 2,138 |
rd
|
\name{dfToGrid}
\alias{dfToGrid}
\docType{package}
\title{Grid function (Fonction de carroyage)}
\description{
Function to compute a grid (regular or not) from a data.frame.
(Fonction permettant de générer une grille (régulière ou non) à partir d'un data.frame.)
}
\usage{dfToGrid(df, sEPSG, iCellSize = NULL)}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{df}{A \code{data.frame} with the centroid coordinates of the squares to draw. To generate an irregular grid, a third column with each cell size must be provided. (x, y, iCellSize)
(Un \code{data.frame} comportant les coordonnées des carrés à dessiner. Pour obtenir une grille irrégulière, il faut fournir une troisième colonne indiquant la taille de chaque carreau. (x, y, iCellSize).)}
\item{sEPSG}{EPSG code of projection (\code{character}). For example, the RGF93 / Lambert-93 projection has "2154" code.
(code EPSG de la projection (\code{character}). Par exemple, la projection RGF93 / Lambert-93 a pour code "2154".)}
\item{iCellSize}{Cell size of the grid. If this argument is provided, the grid is regular.
(Taille des carreaux de la grille. Si cet argument est fourni, la grille est régulière.)}
}
\value{
Returns an object of class \code{sf} and \code{data.frame}.
(Retourne un objet de classe \code{sf} et \code{data.frame}.)
}
\author{Psar Analyse Urbaine Insee - Thierry Cornely, Laure Genebes, Arlindo Dos Santos, Cynthia Faivre, Auriane Renaud and Francois Semecurbe
}
\examples{
\dontrun{
# example 1 - regular grid
df <- data.frame(x = c(100, 100, 300, 300, 500), y = c(100, 300, 100, 300, 100))
dfResult <- dfToGrid(df = df, sEPSG = "2154", iCellSize = 200)
write_sf(obj = dfResult, dsn = "regularGrid.shp", delete_layer = TRUE)
# example 2 - irregular grid
df <- data.frame(x = c(50, 50, 150, 150, 300)
, y = c(50, 150, 50, 150, 100)
, iCellSize = c(50, 50, 50, 50, 100))
dfResult <- dfToGrid(df = df, sEPSG = "2154")
write_sf(obj = dfResult, dsn = "irregularGrid.shp", delete_layer = TRUE)
}}
|
rm(list=ls())
library("install.load")
install_load("data.table", "gdata", "ggplot2", "e1071", "grid")
install_load("plyr", "tidyverse", "ROCR", "caret", "doMC", "scales")
install_load("gridExtra", "pheatmap", "printr", "ggthemes", "stargazer")
#Data Summary
source("loadData.R")
ls()
tmp_phq9 <- phq9 %>% select(brightenid, week, sum_phq9) %>% spread(week, sum_phq9)
tmp_phq9 <- metaData %>% select(brightenid, baseline_phq9) %>% inner_join(tmp_phq9)
rownames(tmp_phq9) <- tmp_phq9$brightenid
tmp_phq9$brightenid <- NULL
phq9_compliance <- tmp_phq9
phq9_compliance[is.na(phq9_compliance)] = 0
phq9_compliance[phq9_compliance != 0] = 1
pheatmap::pheatmap(phq9_compliance, cluster_cols = F)
tmp_phq9 <- metaData %>% select(brightenid, baseline_phq9) %>% inner_join(phq9) %>% filter( !is.na(baseline_phq9 ))
sesFeatures <- c("Age", "Gender", "education", "employed", "marital", "race",
"hispanic", "minority")
flt_metadata <- metaData %>% select(c('brightenid', sesFeatures))
phq9_stats <- ddply(tmp_phq9, .variables = c('brightenid', 'study_arm'),
.fun = function(df){
weeks_phq9_completed = n_distinct(df$week)
basePHQ9 = unique(df$baseline_phq9)
maxPHQ9 = max(c(basePHQ9, df$sum_phq9 ))
lastPHQ9 = df %>% dplyr::arrange(week) %>% .$sum_phq9
lastPHQ9 = lastPHQ9[nrow(df)]
changePHQ9 = basePHQ9 - lastPHQ9
data.frame(weeks_phq9_completed=weeks_phq9_completed, maxPHQ9=maxPHQ9,
basePHQ9 = basePHQ9,
lastPHQ9 = lastPHQ9, changePHQ9=changePHQ9)
})
phq9_stats <- phq9_stats %>% inner_join(flt_metadata)
phq2_stats <- phq2 %>% group_by(brightenid) %>% summarise(days_phq2_completed = n_distinct(day))
passive_stats <- passive_data %>% group_by(brightenid) %>% summarise(days_passive_provided = n_distinct(day))
stats <- merge(phq9_stats, phq2_stats, all=T)
stats <- merge(stats, passive_stats, all.x=T)
stats <- stats %>% dplyr::mutate(weeks_phq9_completed = ifelse(is.na(weeks_phq9_completed), 0, weeks_phq9_completed),
days_phq2_completed = ifelse(is.na(days_phq2_completed), 0, days_phq2_completed),
days_passive_provided = ifelse(is.na(days_passive_provided), 0, days_passive_provided))
stats <- stats %>% dplyr::mutate(study_arm = factor(study_arm))
mod <- lm(weeks_phq9_completed ~ Gender + Age + hispanic + changePHQ9 + basePHQ9 + employed + minority + study_arm, data=stats)
summary(mod)
mod <- lm(days_phq2_completed ~ Gender + Age + hispanic + changePHQ9 + employed + minority, data=stats)
summary(mod)
library(sjPlot)
sjp.lm(mod)
mod <- lm(days_passive_provided ~ Gender + Age + hispanic + changePHQ9 + employed + minority, data=stats)
summary(mod)
sjp.lm(mod)
ggplot(data=stats, aes(x=Age, y=days_phq2_completed)) + geom_jitter()
|
/compliance_PHQ9pattern_association.R
|
no_license
|
apratap/BRIGHTEN-Study
|
R
| false | false | 2,876 |
r
|
rm(list=ls())
library("install.load")
install_load("data.table", "gdata", "ggplot2", "e1071", "grid")
install_load("plyr", "tidyverse", "ROCR", "caret", "doMC", "scales")
install_load("gridExtra", "pheatmap", "printr", "ggthemes", "stargazer")
#Data Summary
source("loadData.R")
ls()
tmp_phq9 <- phq9 %>% select(brightenid, week, sum_phq9) %>% spread(week, sum_phq9)
tmp_phq9 <- metaData %>% select(brightenid, baseline_phq9) %>% inner_join(tmp_phq9)
rownames(tmp_phq9) <- tmp_phq9$brightenid
tmp_phq9$brightenid <- NULL
phq9_compliance <- tmp_phq9
phq9_compliance[is.na(phq9_compliance)] = 0
phq9_compliance[phq9_compliance != 0] = 1
pheatmap::pheatmap(phq9_compliance, cluster_cols = F)
tmp_phq9 <- metaData %>% select(brightenid, baseline_phq9) %>% inner_join(phq9) %>% filter( !is.na(baseline_phq9 ))
sesFeatures <- c("Age", "Gender", "education", "employed", "marital", "race",
"hispanic", "minority")
flt_metadata <- metaData %>% select(c('brightenid', sesFeatures))
phq9_stats <- ddply(tmp_phq9, .variables = c('brightenid', 'study_arm'),
.fun = function(df){
weeks_phq9_completed = n_distinct(df$week)
basePHQ9 = unique(df$baseline_phq9)
maxPHQ9 = max(c(basePHQ9, df$sum_phq9 ))
lastPHQ9 = df %>% dplyr::arrange(week) %>% .$sum_phq9
lastPHQ9 = lastPHQ9[nrow(df)]
changePHQ9 = basePHQ9 - lastPHQ9
data.frame(weeks_phq9_completed=weeks_phq9_completed, maxPHQ9=maxPHQ9,
basePHQ9 = basePHQ9,
lastPHQ9 = lastPHQ9, changePHQ9=changePHQ9)
})
phq9_stats <- phq9_stats %>% inner_join(flt_metadata)
phq2_stats <- phq2 %>% group_by(brightenid) %>% summarise(days_phq2_completed = n_distinct(day))
passive_stats <- passive_data %>% group_by(brightenid) %>% summarise(days_passive_provided = n_distinct(day))
stats <- merge(phq9_stats, phq2_stats, all=T)
stats <- merge(stats, passive_stats, all.x=T)
stats <- stats %>% dplyr::mutate(weeks_phq9_completed = ifelse(is.na(weeks_phq9_completed), 0, weeks_phq9_completed),
days_phq2_completed = ifelse(is.na(days_phq2_completed), 0, days_phq2_completed),
days_passive_provided = ifelse(is.na(days_passive_provided), 0, days_passive_provided))
stats <- stats %>% dplyr::mutate(study_arm = factor(study_arm))
mod <- lm(weeks_phq9_completed ~ Gender + Age + hispanic + changePHQ9 + basePHQ9 + employed + minority + study_arm, data=stats)
summary(mod)
mod <- lm(days_phq2_completed ~ Gender + Age + hispanic + changePHQ9 + employed + minority, data=stats)
summary(mod)
library(sjPlot)
sjp.lm(mod)
mod <- lm(days_passive_provided ~ Gender + Age + hispanic + changePHQ9 + employed + minority, data=stats)
summary(mod)
sjp.lm(mod)
ggplot(data=stats, aes(x=Age, y=days_phq2_completed)) + geom_jitter()
|
##worth perceptions for Alberta
#load libraries
library(data.table)
library(xlsx)
library(ggplot2)
library(Hmisc)
library(dplyr)  # %>%, group_by/summarise, left_join used below
library(ppcor)  # pcor.test for the partial correlations below
#load data
qsr <- fread("O:/CoOp/CoOp194_PROReportng&OM/Julie/worthperceptions_QSRproximinity.csv")
setnames(qsr,c("SBUX_STORENUM","QSR_Count","miles_closest"),c("store_num","qsrn","miles"))
qsr[, qsrn := as.numeric(qsrn)]
qcc <- fread("O:/CoOp/CoOp194_PROReportng&OM/Julie/worthperceptions_QSRproximinity_FY17Q4.csv")
names(qcc) <- tolower(names(qcc))
qcc <- qcc[, lapply(.SD, function(x) as.numeric(x))]
#keep only stores in both data.tables
qcc <- qcc[store_num %in% unique(qsr[,store_num])]
qsr <- qsr[store_num %in% unique(qcc[,store_num])]
#keep only variables we need for worth perceptions data
#qcc <- qcc[, c("store_num","wp_tb_score"), with=F]
#merge together
qsr <- merge(qsr,qcc,by="store_num",all=T)
qsr <- na.omit(qsr, cols=c("qsrn","miles"))
#correlations
rcorr(qsr[,miles],qsr[,wp_tb_score],type="pearson")
rcorr(qsr[,qsrn],qsr[,wp_tb_score],type="pearson")
pcor.test(qsr[,qsrn],qsr[,wp_tb_score],qsr[,miles],method="pearson")
# #split by qsr number
# prob = c(1/4,2/4,3/4,1)
# temp <- qsr %>% group_by(urbanity) %>% summarise(
# qsrn_25 = quantile(qsrn, probs = prob[1], na.rm = T),
# qsrn_50 = quantile(qsrn, probs = prob[2], na.rm = T),
# qsrn_75 = quantile(qsrn, probs = prob[3], na.rm = T),
# qsrn_100 = quantile(qsrn, probs = prob[4], na.rm = T)
# )
# qsr <- left_join(qsr, temp,by="urbanity")
# setDT(qsr)
# #recode based on quartiles
# qsr[qsrn < qsrn_25, qsrn_qtile := 1]
# qsr[qsrn >= qsrn_25 & qsrn < qsrn_50, qsrn_qtile := 2]
# qsr[qsrn >= qsrn_50 & qsrn < qsrn_75, qsrn_qtile := 3]
# qsr[qsrn >= qsrn_75, qsrn_qtile := 4]
#
# #split by qsr miles
# prob = c(1/4,2/4,3/4,1)
# temp <- qsr %>% group_by(urbanity) %>% summarise(
# mi_25 = quantile(miles, probs = prob[1], na.rm = T),
# mi_50 = quantile(miles, probs = prob[2], na.rm = T),
# mi_75 = quantile(miles, probs = prob[3], na.rm = T),
# mi_100 = quantile(miles, probs = prob[4], na.rm = T)
# )
# qsr <- left_join(qsr, temp,by="urbanity")
# setDT(qsr)
# #recode based on quartiles
# qsr[miles < mi_25, mi_qtile := 1]
# qsr[miles >= mi_25 & miles < mi_50, mi_qtile := 2]
# qsr[miles >= mi_50 & miles < mi_75, mi_qtile := 3]
# qsr[miles >= mi_75, mi_qtile := 4]
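# NB: the qsr2a/qsr2b summaries below group by qsrn_qtile / mi_qtile, which are only
# created in the quartile-assignment block commented out above; uncomment that block
# (or otherwise recreate those columns) before running the two summaries.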
#summarize
qsr2a <- qsr[, list(wp_resp_count = sum(wp_resp_count,na.rm=T),
wp_tb_count = sum(wp_tb_count,na.rm=T),
wp_tb_score = sum(wp_tb_count,na.rm=T)/sum(wp_resp_count,na.rm=T),
miles = mean(miles,na.rm=T)),
by=c("qsrn_qtile","urbanity")]
qsr2a <- setorder(qsr2a,urbanity,qsrn_qtile)
#summarize
qsr2b <- qsr[, list(wp_resp_count = sum(wp_resp_count,na.rm=T),
wp_tb_count = sum(wp_tb_count,na.rm=T),
wp_tb_score = sum(wp_tb_count,na.rm=T)/sum(wp_resp_count,na.rm=T),
miles = mean(miles,na.rm=T)),
by=c("mi_qtile","urbanity")]
qsr2b <- setorder(qsr2b,urbanity,mi_qtile)
#qsr subset
qsr3 <- qsr[urbanity=="U2"|urbanity=="U4"|urbanity=="U5"]
#rcorr(qsr3[,miles],qsr3[,wp_tb_score],type="pearson")
rcorr(qsr3[,qsrn],qsr3[,wp_tb_score],type="pearson")
pcor.test(qsr3[,qsrn],qsr3[,wp_tb_score],qsr3[,miles],method="pearson")
#split by qsr number
#qsr3 <- qsr[miles<0.1]
qsr3 <- qsr[miles<0.2]
prob = c(1/4,2/4,3/4,1)
temp <- qsr3 %>% group_by(urbanity) %>% summarise(
qsrn_25 = quantile(qsrn, probs = prob[1], na.rm = T),
qsrn_50 = quantile(qsrn, probs = prob[2], na.rm = T),
qsrn_75 = quantile(qsrn, probs = prob[3], na.rm = T),
qsrn_100 = quantile(qsrn, probs = prob[4], na.rm = T)
)
qsr3 <- left_join(qsr3, temp,by="urbanity")
setDT(qsr3)
#recode based on quartiles
qsr3[qsrn < qsrn_25, qsrn_qtile := 1]
qsr3[qsrn >= qsrn_25 & qsrn < qsrn_50, qsrn_qtile := 2]
qsr3[qsrn >= qsrn_50 & qsrn < qsrn_75, qsrn_qtile := 3]
qsr3[qsrn >= qsrn_75, qsrn_qtile := 4]
#summarize
qsr3 <- qsr3[, list(wp_resp_count = sum(wp_resp_count,na.rm=T),
wp_tb_count = sum(wp_tb_count,na.rm=T),
wp_tb_score = sum(wp_tb_count,na.rm=T)/sum(wp_resp_count,na.rm=T),
miles = mean(miles,na.rm=T)),
by=c("qsrn_qtile","urbanity")]
qsr3 <- setorder(qsr3,urbanity,qsrn_qtile)
#split by qsr number - tertile
#qsr3 <- qsr[miles<0.1]
qsr3 <- qsr[miles<0.2]
prob = c(1/3,2/3,1)
temp <- qsr3 %>% group_by(urbanity) %>% summarise(
qsrn_33 = quantile(qsrn, probs = prob[1], na.rm = T),
qsrn_67 = quantile(qsrn, probs = prob[2], na.rm = T),
qsrn_100 = quantile(qsrn, probs = prob[3], na.rm = T)
)
qsr3 <- left_join(qsr3, temp,by="urbanity")
setDT(qsr3)
#recode based on quartiles
qsr3[qsrn < qsrn_33, qsrn_ttile := 1]
qsr3[qsrn >= qsrn_33 & qsrn < qsrn_67, qsrn_ttile := 2]
qsr3[qsrn >= qsrn_67, qsrn_ttile := 3]
#summarize
qsr3 <- qsr3[, list(wp_resp_count = sum(wp_resp_count,na.rm=T),
wp_tb_count = sum(wp_tb_count,na.rm=T),
wp_tb_score = sum(wp_tb_count,na.rm=T)/sum(wp_resp_count,na.rm=T),
miles = mean(miles,na.rm=T)),
by=c("qsrn_ttile","urbanity")]
qsr3 <- setorder(qsr3,urbanity,qsrn_ttile)
##make QSR-count tertile a factor for grouping
qsr3[, qsrn_ttile := as.factor(qsrn_ttile)]
#set labels
lname <- "QSR Count Tertile"
llabels <- c("33rd", "67th", "100th")
#plot of comps quartiles with average CC top box score for each
#set up unique elements
DT <- copy(qsr3[urbanity=="U2"])
maintitle <- "Worth Perceptions by QSR Count - Urban Core"
ylabel <- "WP Top Box Score"
xlabel <- "QSR Count Tertile"
xvar <- DT[,qsrn_ttile]
yvar <- DT[,wp_tb_score]
pdata <- DT
#plot
ggplot(data = pdata, aes(x = xvar, y = yvar*100)) +
geom_bar(stat="identity", width = 0.7, fill="lightgray", colour="black") + theme_bw() +
ggtitle(maintitle) + guides(fill=FALSE) +
scale_y_continuous(limits=c(0,40)) +
geom_text(size = 5, aes(label=paste0("WP = ",round(yvar,3)*100,"%"),y=0), stat= "identity", vjust = -1.75) +
geom_text(size = 5, aes(label=c("QSRs <3","QSRs 4-6","QSRs 7+"),y=0), stat= "identity", vjust = -.5) +
theme(axis.text=element_text(size=8), axis.title=element_text(size=8),
plot.title = element_text(size = 10, face = "bold")) +
labs(x = xlabel, y = ylabel)
##12/15/17 updates for Mike
#load data
qsr <- fread("O:/CoOp/CoOp194_PROReportng&OM/Julie/worthperceptions_QSRproximinity.csv")
setnames(qsr,c("SBUX_STORENUM","QSR_Count","miles_closest"),c("store_num","qsrn","miles"))
qsr[, qsrn := as.numeric(qsrn)]
regi <- fread("O:/CoOp/CoOp194_PROReportng&OM/Julie/worthperceptions_QSRproximinity_regioncodes.csv")
setnames(regi,c("STORE_NUM","RGN_ORG_LVL_ID","RGN_ORG_LVL_DESCR"),c("store_num","regcd","regid"))
#keep only stores in both data.tables
regi <- regi[store_num %in% unique(qsr[,store_num])]
temp5 <- copy(qsr)
temp5 <- left_join(temp5,regi,by="store_num")
setDT(temp5)
temp5 <- temp5[, c("store_num","regid","urbanity")]
temp5[, store_num := 1]
temp5 <- temp5[, list(Store_Count = sum(store_num)), by=c("regid","urbanity")]
temp <- temp5[, list(Store_Count = sum(Store_Count)), by=c("urbanity")]
setnames(temp,"Store_Count","Urbanity_Store_Count")
temp5 <- left_join(temp5,temp,by="urbanity")
setDT(temp5)
temp5[, totalN := 8309]
temp5[, Percent_of_Urbanity := round(Store_Count/Urbanity_Store_Count,3)*100]
temp5[, Percent_of_TotalStores := round(Store_Count/totalN,3)*100]
temp5[, Index_UrbanToTotal_Pct := round((Percent_of_Urbanity/Percent_of_TotalStores)/100,3)]
#change infinite values to 0
temp5[mapply(is.infinite, temp5)] <- 0
temp5[, Index_Flag := 0]
temp5[Index_UrbanToTotal_Pct>=.8&Index_UrbanToTotal_Pct<=1.2, Index_Flag := 1]
temp5[,totalN := NULL]
temp5 <- setorder(temp5,urbanity,-Index_UrbanToTotal_Pct)
write.xlsx(temp5,file="C:/Users/jumorris/UrbanCore_StoreCount-by-Region.xlsx")
|
/WorthPerceptions_QSRproximinity.R
|
no_license
|
jumorris2017/R-code-flatfiles
|
R
| false | false | 7,808 |
r
|
##worth perceptions for Alberta
#load libraries
library(data.table)
library(xlsx)
library(ggplot2)
library(Hmisc)
library(dplyr)  # %>%, group_by/summarise, left_join used below
library(ppcor)  # pcor.test for the partial correlations below
#load data
qsr <- fread("O:/CoOp/CoOp194_PROReportng&OM/Julie/worthperceptions_QSRproximinity.csv")
setnames(qsr,c("SBUX_STORENUM","QSR_Count","miles_closest"),c("store_num","qsrn","miles"))
qsr[, qsrn := as.numeric(qsrn)]
qcc <- fread("O:/CoOp/CoOp194_PROReportng&OM/Julie/worthperceptions_QSRproximinity_FY17Q4.csv")
names(qcc) <- tolower(names(qcc))
qcc <- qcc[, lapply(.SD, function(x) as.numeric(x))]
#keep only stores in both data.tables
qcc <- qcc[store_num %in% unique(qsr[,store_num])]
qsr <- qsr[store_num %in% unique(qcc[,store_num])]
#keep only variables we need for worth perceptions data
#qcc <- qcc[, c("store_num","wp_tb_score"), with=F]
#merge together
qsr <- merge(qsr,qcc,by="store_num",all=T)
qsr <- na.omit(qsr, cols=c("qsrn","miles"))
#correlations
rcorr(qsr[,miles],qsr[,wp_tb_score],type="pearson")
rcorr(qsr[,qsrn],qsr[,wp_tb_score],type="pearson")
pcor.test(qsr[,qsrn],qsr[,wp_tb_score],qsr[,miles],method="pearson")
# #split by qsr number
# prob = c(1/4,2/4,3/4,1)
# temp <- qsr %>% group_by(urbanity) %>% summarise(
# qsrn_25 = quantile(qsrn, probs = prob[1], na.rm = T),
# qsrn_50 = quantile(qsrn, probs = prob[2], na.rm = T),
# qsrn_75 = quantile(qsrn, probs = prob[3], na.rm = T),
# qsrn_100 = quantile(qsrn, probs = prob[4], na.rm = T)
# )
# qsr <- left_join(qsr, temp,by="urbanity")
# setDT(qsr)
# #recode based on quartiles
# qsr[qsrn < qsrn_25, qsrn_qtile := 1]
# qsr[qsrn >= qsrn_25 & qsrn < qsrn_50, qsrn_qtile := 2]
# qsr[qsrn >= qsrn_50 & qsrn < qsrn_75, qsrn_qtile := 3]
# qsr[qsrn >= qsrn_75, qsrn_qtile := 4]
#
# #split by qsr miles
# prob = c(1/4,2/4,3/4,1)
# temp <- qsr %>% group_by(urbanity) %>% summarise(
# mi_25 = quantile(miles, probs = prob[1], na.rm = T),
# mi_50 = quantile(miles, probs = prob[2], na.rm = T),
# mi_75 = quantile(miles, probs = prob[3], na.rm = T),
# mi_100 = quantile(miles, probs = prob[4], na.rm = T)
# )
# qsr <- left_join(qsr, temp,by="urbanity")
# setDT(qsr)
# #recode based on quartiles
# qsr[miles < mi_25, mi_qtile := 1]
# qsr[miles >= mi_25 & miles < mi_50, mi_qtile := 2]
# qsr[miles >= mi_50 & miles < mi_75, mi_qtile := 3]
# qsr[miles >= mi_75, mi_qtile := 4]
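# NB: the qsr2a/qsr2b summaries below group by qsrn_qtile / mi_qtile, which are only
# created in the quartile-assignment block commented out above; uncomment that block
# (or otherwise recreate those columns) before running the two summaries.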
#summarize
qsr2a <- qsr[, list(wp_resp_count = sum(wp_resp_count,na.rm=T),
wp_tb_count = sum(wp_tb_count,na.rm=T),
wp_tb_score = sum(wp_tb_count,na.rm=T)/sum(wp_resp_count,na.rm=T),
miles = mean(miles,na.rm=T)),
by=c("qsrn_qtile","urbanity")]
qsr2a <- setorder(qsr2a,urbanity,qsrn_qtile)
#summarize
qsr2b <- qsr[, list(wp_resp_count = sum(wp_resp_count,na.rm=T),
wp_tb_count = sum(wp_tb_count,na.rm=T),
wp_tb_score = sum(wp_tb_count,na.rm=T)/sum(wp_resp_count,na.rm=T),
miles = mean(miles,na.rm=T)),
by=c("mi_qtile","urbanity")]
qsr2b <- setorder(qsr2b,urbanity,mi_qtile)
#qsr subset
qsr3 <- qsr[urbanity=="U2"|urbanity=="U4"|urbanity=="U5"]
#rcorr(qsr3[,miles],qsr3[,wp_tb_score],type="pearson")
rcorr(qsr3[,qsrn],qsr3[,wp_tb_score],type="pearson")
pcor.test(qsr3[,qsrn],qsr3[,wp_tb_score],qsr3[,miles],method="pearson")
#split by qsr number
#qsr3 <- qsr[miles<0.1]
qsr3 <- qsr[miles<0.2]
prob = c(1/4,2/4,3/4,1)
temp <- qsr3 %>% group_by(urbanity) %>% summarise(
qsrn_25 = quantile(qsrn, probs = prob[1], na.rm = T),
qsrn_50 = quantile(qsrn, probs = prob[2], na.rm = T),
qsrn_75 = quantile(qsrn, probs = prob[3], na.rm = T),
qsrn_100 = quantile(qsrn, probs = prob[4], na.rm = T)
)
qsr3 <- left_join(qsr3, temp,by="urbanity")
setDT(qsr3)
#recode based on quartiles
qsr3[qsrn < qsrn_25, qsrn_qtile := 1]
qsr3[qsrn >= qsrn_25 & qsrn < qsrn_50, qsrn_qtile := 2]
qsr3[qsrn >= qsrn_50 & qsrn < qsrn_75, qsrn_qtile := 3]
qsr3[qsrn >= qsrn_75, qsrn_qtile := 4]
#summarize
qsr3 <- qsr3[, list(wp_resp_count = sum(wp_resp_count,na.rm=T),
wp_tb_count = sum(wp_tb_count,na.rm=T),
wp_tb_score = sum(wp_tb_count,na.rm=T)/sum(wp_resp_count,na.rm=T),
miles = mean(miles,na.rm=T)),
by=c("qsrn_qtile","urbanity")]
qsr3 <- setorder(qsr3,urbanity,qsrn_qtile)
#split by qsr number - tertile
#qsr3 <- qsr[miles<0.1]
qsr3 <- qsr[miles<0.2]
prob = c(1/3,2/3,1)
temp <- qsr3 %>% group_by(urbanity) %>% summarise(
qsrn_33 = quantile(qsrn, probs = prob[1], na.rm = T),
qsrn_67 = quantile(qsrn, probs = prob[2], na.rm = T),
qsrn_100 = quantile(qsrn, probs = prob[3], na.rm = T)
)
qsr3 <- left_join(qsr3, temp,by="urbanity")
setDT(qsr3)
#recode based on quartiles
qsr3[qsrn < qsrn_33, qsrn_ttile := 1]
qsr3[qsrn >= qsrn_33 & qsrn < qsrn_67, qsrn_ttile := 2]
qsr3[qsrn >= qsrn_67, qsrn_ttile := 3]
#summarize
qsr3 <- qsr3[, list(wp_resp_count = sum(wp_resp_count,na.rm=T),
wp_tb_count = sum(wp_tb_count,na.rm=T),
wp_tb_score = sum(wp_tb_count,na.rm=T)/sum(wp_resp_count,na.rm=T),
miles = mean(miles,na.rm=T)),
by=c("qsrn_ttile","urbanity")]
qsr3 <- setorder(qsr3,urbanity,qsrn_ttile)
##make QSR-count tertile a factor for grouping
qsr3[, qsrn_ttile := as.factor(qsrn_ttile)]
#set labels
lname <- "QSR Count Tertile"
llabels <- c("33rd", "67th", "100th")
#plot of comps quartiles with average CC top box score for each
#set up unique elements
DT <- copy(qsr3[urbanity=="U2"])
maintitle <- "Worth Perceptions by QSR Count - Urban Core"
ylabel <- "WP Top Box Score"
xlabel <- "QSR Count Tertile"
xvar <- DT[,qsrn_ttile]
yvar <- DT[,wp_tb_score]
pdata <- DT
#plot
ggplot(data = pdata, aes(x = xvar, y = yvar*100)) +
geom_bar(stat="identity", width = 0.7, fill="lightgray", colour="black") + theme_bw() +
ggtitle(maintitle) + guides(fill=FALSE) +
scale_y_continuous(limits=c(0,40)) +
geom_text(size = 5, aes(label=paste0("WP = ",round(yvar,3)*100,"%"),y=0), stat= "identity", vjust = -1.75) +
geom_text(size = 5, aes(label=c("QSRs <3","QSRs 4-6","QSRs 7+"),y=0), stat= "identity", vjust = -.5) +
theme(axis.text=element_text(size=8), axis.title=element_text(size=8),
plot.title = element_text(size = 10, face = "bold")) +
labs(x = xlabel, y = ylabel)
##12/15/17 updates for Mike
#load data
qsr <- fread("O:/CoOp/CoOp194_PROReportng&OM/Julie/worthperceptions_QSRproximinity.csv")
setnames(qsr,c("SBUX_STORENUM","QSR_Count","miles_closest"),c("store_num","qsrn","miles"))
qsr[, qsrn := as.numeric(qsrn)]
regi <- fread("O:/CoOp/CoOp194_PROReportng&OM/Julie/worthperceptions_QSRproximinity_regioncodes.csv")
setnames(regi,c("STORE_NUM","RGN_ORG_LVL_ID","RGN_ORG_LVL_DESCR"),c("store_num","regcd","regid"))
#keep only stores in both data.tables
regi <- regi[store_num %in% unique(qsr[,store_num])]
temp5 <- copy(qsr)
temp5 <- left_join(temp5,regi,by="store_num")
setDT(temp5)
temp5 <- temp5[, c("store_num","regid","urbanity")]
temp5[, store_num := 1]
temp5 <- temp5[, list(Store_Count = sum(store_num)), by=c("regid","urbanity")]
temp <- temp5[, list(Store_Count = sum(Store_Count)), by=c("urbanity")]
setnames(temp,"Store_Count","Urbanity_Store_Count")
temp5 <- left_join(temp5,temp,by="urbanity")
setDT(temp5)
temp5[, totalN := 8309]
temp5[, Percent_of_Urbanity := round(Store_Count/Urbanity_Store_Count,3)*100]
temp5[, Percent_of_TotalStores := round(Store_Count/totalN,3)*100]
temp5[, Index_UrbanToTotal_Pct := round((Percent_of_Urbanity/Percent_of_TotalStores)/100,3)]
#change infinite values to 0
temp5[mapply(is.infinite, temp5)] <- 0
temp5[, Index_Flag := 0]
temp5[Index_UrbanToTotal_Pct>=.8&Index_UrbanToTotal_Pct<=1.2, Index_Flag := 1]
temp5[,totalN := NULL]
temp5 <- setorder(temp5,urbanity,-Index_UrbanToTotal_Pct)
write.xlsx(temp5,file="C:/Users/jumorris/UrbanCore_StoreCount-by-Region.xlsx")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all_wnorm_fns.R
\name{rwnormmix}
\alias{rwnormmix}
\alias{dwnormmix}
\title{The univariate Wrapped Normal mixtures}
\usage{
rwnormmix(n = 1, kappa, mu, pmix)
dwnormmix(x, kappa, mu, pmix, int.displ = 3, log = FALSE)
}
\arguments{
\item{n}{number of observations. Ignored if at least one of the other parameters has length k > 1, in which
case, all the parameters are recycled to length k to produce k random variates.}
\item{kappa}{vector of component concentration (inverse-variance) parameters, \code{kappa > 0}.}
\item{mu}{vector of component means.}
\item{pmix}{vector of mixing proportions.}
\item{x}{vector of angles (in radians) where the densities are to be evaluated.}
\item{int.displ}{integer displacement. If \code{int.displ =} M, then the infinite sum in the
density is approximated by a sum over 2*M + 1 elements. (See Details.) The allowed values are 1, 2, 3, 4 and 5. Default is 3.}
\item{log}{logical. Should the log density be returned instead?}
}
\value{
\code{dwnormmix} computes the density and \code{rwnormmix} generates random deviates from the mixture density.
}
\description{
The univariate Wrapped Normal mixtures
}
\details{
\code{pmix}, \code{mu} and \code{kappa} must be of the same length, with \eqn{j}-th element corresponding to the \eqn{j}-th component of the mixture distribution.
The univariate wrapped normal mixture distribution with component size \code{K = \link{length}(pmix)} has density
\deqn{g(x) = p[1] * f(x; \kappa[1], \mu[1]) + ... + p[K] * f(x; \kappa[K], \mu[K])}
where \eqn{p[j], \kappa[j], \mu[j]} respectively denote the mixing proportion, concentration parameter and the mean parameter for the \eqn{j}-th component
and \eqn{f(. ; \kappa, \mu)} denotes the density function of the (univariate) wrapped normal distribution with mean parameter \eqn{\mu} and concentration parameter \eqn{\kappa}.
}
\examples{
kappa <- 1:3
mu <- 0:2
pmix <- c(0.3, 0.3, 0.4)
x <- 1:10
n <- 10
# mixture densities calculated at each point in x
dwnormmix(x, kappa, mu, pmix)
# number of observations generated from the mixture distribution is n
rwnormmix(n, kappa, mu, pmix)
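# Rough base-R sketch added for illustration: the finite-sum approximation from
# Details, assuming the stated inverse-variance relationship sigma^2 = 1/kappa.
# It is only meant to show the formula, not to reproduce dwnormmix's internals.
dwnormmix_sketch <- function(x, kappa, mu, pmix, M = 3) {
  sapply(x, function(xx) {
    comp <- sapply(seq_along(pmix), function(j) {
      sum(sqrt(kappa[j] / (2 * pi)) * exp(-kappa[j] * (xx - mu[j] + 2 * pi * (-M:M))^2 / 2))
    })
    sum(pmix * comp)
  })
}
dwnormmix_sketch(x, kappa, mu, pmix)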
}
|
/issuestests/BAMBI/man/rwnormmix.Rd
|
no_license
|
akhikolla/RcppDeepStateTest
|
R
| false | true | 2,197 |
rd
|
## Reads in VCFs, filter results and annotations and outputs a single file
## Load packages
library(VariantAnnotation)
## RUN IT LIKE THIS: /c/Program\ Files/R/R-3.3.2/bin/Rscript.exe example.R vcfpath filterpath annopath
## /c/Program\ Files/R/R-3.3.2/bin/Rscript.exe /g/Genomics\ Lab/MGP/vcffiltermerger/vcf_filter_merger.R "Z://chris_working_dir/project_erasmusmc/erasmusmc_strelka/_localcopy/T05208Tumor.all.somatic.snvs.vcf" "Z://chris_working_dir/project_erasmusmc/erasmusmc_strelka/filteredsnvs/T05208Tumor" "Z://cody_working_dir/erasmus_annotations/T05208Tumor.all.somatic.snvs.vcf.tsv"
## Command line options; specify paths to VCF, filters and annotations
args = commandArgs(trailingOnly=TRUE)
vcfpath=args[1]
filterpath=args[2]
annopath=args[3]
#vcfpath="Z://chris_working_dir/project_erasmusmc/erasmusmc_strelka/_localcopy/EMN_4095_diagnosis.all.somatic.indels.vcf"
#filterpath="Z://chris_working_dir/project_erasmusmc/erasmusmc_strelka/filteredindels/EMN_4095_diagnosis"
#annopath="Z://cody_working_dir/erasmus_annotations/EMN_4095_diagnosis.all.somatic.indels.vcf.tsv"
#vcfpath="Z://chris_working_dir/project_erasmusmc/erasmusmc_strelka/_localcopy/T05208Tumor.all.somatic.snvs.vcf"
#filterpath="Z://chris_working_dir/project_erasmusmc/erasmusmc_strelka/filteredsnvs/T05208Tumor"
#annopath="Z://cody_working_dir/erasmus_annotations/T05208Tumor.all.somatic.snvs.vcf.tsv"
## Indel files with duplicate rownames
#vcfpath="Z://cody_working_dir/mgp_oncotator/UK_data/Strelka_1470762457/_EGAR00001321238_EGAS00001001147_C273MACXX_2_378.seqvar/all.somatic.indels.vcf"
#filterpath="Z://chris_working_dir/project_mgp/SNEF/strelkaindels/retry/C273MACXX_2_378"
#annopath="Z://cody_working_dir/mgp_oncotator/UK_data/Strelka_1470762457/_EGAR00001321238_EGAS00001001147_C273MACXX_2_378.seqvar/all.somatic.indels.tsv"
#################
## Read in VCF ##
#################
vcf=readVcf(vcfpath,genome="GRCh37")
########################
## End of read in VCF ##
########################
#########################################################
## Read in filters and merge them into a single object ##
#########################################################
filterfiles=dir(filterpath,pattern="txt")
boundfilters=as.data.frame(matrix(nrow=0,ncol=0))
## Discover tumor name and normal name from filenames
## Tumor name will occur more often
samplenames=sort(table(sapply(strsplit(filterfiles,"\\."),"[",1)))
normalname=names(samplenames)[1]
tumorname=names(samplenames)[2]
## Reorder filterfiles so that the tumor sample always comes first
filterfiles=c(sort(grep(tumorname,filterfiles,value=TRUE)),sort(grep(normalname,filterfiles,value=TRUE)))
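## Illustrative aside (added; the file names below are hypothetical): the sample
## discovery above relies on the tumor name appearing in more per-filter files than
## the normal name, so sorting the name counts puts the normal first and the tumor last.
toyfiles=c("TUMOR01.vaf.txt","TUMOR01.depth.txt","TUMOR01.mapq.txt","NORMAL01.vaf.txt","NORMAL01.depth.txt")
toycounts=sort(table(sapply(strsplit(toyfiles,"\\."),"[",1)))
names(toycounts)[1] ## "NORMAL01", the inferred normal
names(toycounts)[2] ## "TUMOR01", the inferred tumor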
for(i in 1:length(filterfiles)){
  tumornormal=ifelse(grepl(tumorname,filterfiles[i]),"tumor.","normal.")
  #thisfilter=read.delim(paste(filterpath,filterfiles[i],sep="/"),header=FALSE,stringsAsFactors=FALSE,row.names=1)
  thisfilter=read.delim(paste(filterpath,filterfiles[i],sep="/"),header=FALSE,stringsAsFactors=FALSE)#,row.names=1)
  #thisfilter=thisfilter[,2:ncol(thisfilter)]
  thisfilter=as.data.frame(thisfilter[,2:ncol(thisfilter)])
  #rownames(thisfilter)=1:nrow(thisfilter)
  filtername=sapply(strsplit(filterfiles[i],"\\."),"[",2)
  ## Prefix the filter name with "tumor." or "normal." as determined above
  filtername=paste0(tumornormal,filtername)
  colnames(thisfilter)=ifelse(c(FALSE,rep(TRUE,ncol(thisfilter)-1)),paste(filtername,1:ncol(thisfilter),sep="."),filtername)
  ## Different behaviour for 1st filter
  if(i==1){
    boundfilters=rbind(boundfilters,thisfilter)
  }else{
    boundfilters=cbind(boundfilters,thisfilter)
  }
}
################################################################
## End of read in filters and merge them into a single object ##
################################################################
#########################################
## Calculate pass/fail for all filters ##
#########################################
## Calculate filter results and append these (i.e. TRUE/FALSE TABLE)
## 1st step; source() filter functions from a separate file
setwd("G:/Genomics Lab/MGP/vcffiltermerger")
source("filter_functions.R")
filterresults=filterset(boundfilters)
################################################
## End of calculate pass/fail for all filters ##
################################################
###################################
## Read in Oncotator annotations ##
###################################
anno=read.delim(annopath,comment.char = "#",stringsAsFactors=FALSE)
anno=anno[anno$"Matched_Norm_Sample_Barcode"%in%"TUMOR",]
## Remove duplicate records: CAUTION, WE MAY BE REMOVING THE INCORRECT RECORDS
coords=paste(anno$Chromosome,anno$Start_position,sep=":")
anno=anno[!(duplicated(coords)),]
##########################################
## End of read in Oncotator annotations ##
##########################################
##############################
## Merge all data and write ##
##############################
## Files to merge:
#vcf
#boundfilters
#filterresults
#anno
#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT TUMOR NORMAL
## We dump any variant where the ALT allele wasn't recorded. This appears to be a weird
## behaviour by Strelka; very low VAF alleles are not recorded... I think
nones=boundfilters$normal.vaf.2%in%"None"
vcf=vcf[!nones,]
boundfilters=boundfilters[!nones,]
filterresults=filterresults[!nones,]
anno=anno[!nones,]
## Make a fake vcf... this is easier than querying the original object
CHROM=sapply(strsplit(rownames(boundfilters),":"),"[",1)
POS=sapply(strsplit(rownames(boundfilters),":"),"[",2)
REF=boundfilters$normal.vaf
ALT=boundfilters$normal.vaf.2
final=cbind(CHROM,POS,REF,ALT,tumorname)
final=cbind(final,boundfilters,filterresults,anno)
write.table(final,file=paste0(tumorname,".txt"),quote=FALSE,sep="\t",row.names=FALSE,col.names=TRUE)
|
/vcf_filter_merger.R
|
no_license
|
cpwardell/vcffiltermerger
|
R
| false | false | 5,840 |
r
|
# Load the Data
# Note: the adabag boosting() function implements AdaBoost for classification
# trees only; it does not handle regression trees.
# movies_classification.csv
movies = read.csv(file.choose())
##Exploring and preparing the data ----
str(movies)
library(caTools)
set.seed(0)
split <- sample.split(movies$Start_Tech_Oscar, SplitRatio = 0.8)
movies_train <- subset(movies, split == TRUE)
movies_test <- subset(movies, split == FALSE)
summary(movies_train)
# install.packages("adabag")
library(adabag)
movies_train$Start_Tech_Oscar <- as.factor(movies_train$Start_Tech_Oscar)
adaboost <- boosting(Start_Tech_Oscar ~ ., data = movies_train, boos = TRUE)
# Test data
adaboost_test = predict(adaboost, movies_test)
table(adaboost_test$class, movies_test$Start_Tech_Oscar)
mean(adaboost_test$class == movies_test$Start_Tech_Oscar)
# Train data
adaboost_train = predict(adaboost, movies_train)
table(adaboost_train$class, movies_train$Start_Tech_Oscar)
mean(adaboost_train$class == movies_train$Start_Tech_Oscar)
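# Optional follow-up sketch (added): adabag's boosting object normally carries
# per-variable importance scores; the $importance field name is an assumption here,
# so check names(adaboost) if your adabag version differs.
sort(adaboost$importance, decreasing = TRUE)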
|
/Boosting/AdaBoosting.R
|
no_license
|
kirankumar022/Ensembled-techniques
|
R
| false | false | 1,037 |
r
|
# Read in the necessary libraries
if(!require(ChannelAttribution)){
install.packages("ChannelAttribution")
library(ChannelAttribution)
}
# Set Working Directory
setwd("C:/Users/Tim/OneDrive/Documents/GitHub/businessdatascience/other_data")
# Read in our CSV file output by the python script
df <- read.csv('Paths.csv')
# Select only the necessary columns
df <- df[c(1,2)]
# Run the Markov Model function
M <- markov_model(df, 'Path', var_value = 'Conversion', var_conv = 'Conversion', sep = '>', order=1, out_more = TRUE)
# Output the model output as a csv file, to be read back into Python
write.csv(M$result, file = "Markov - Output - Conversion values.csv", row.names=FALSE)
# Output the transition matrix as well, for visualization purposes
write.csv(M$transition_matrix, file = "Markov - Output - Transition matrix.csv", row.names=FALSE)
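# Illustrative aside (added; made-up paths, not the real Paths.csv): the input
# markov_model() expects here is one row per path plus a conversion count, with
# touchpoints separated by '>'.
toy_paths <- data.frame(Path = c("Search > Display", "Display > Email", "Search > Email > Display"),
                        Conversion = c(5, 2, 3))
toy_model <- markov_model(toy_paths, 'Path', var_value = 'Conversion', var_conv = 'Conversion', sep = '>', order = 1)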
|
/other_data/Markov.r
|
no_license
|
timraiswell/businessdatascience
|
R
| false | false | 855 |
r
|
eFastC_delta.online <- function(Z_, K_, delta_, alpha_, verbose_, oldlength_, newlength_, cSum_, DLL_, DRR_, DLR_, Left_, Right_){
.Call("eFastC_delta_online", Z_, K_, delta_, alpha_, verbose_, oldlength_, newlength_, cSum_, DLL_, DRR_, DLR_, Left_, Right_, PACKAGE = "changepoint.online")
}
|
/R/eFastC.R
|
no_license
|
rkillick/changepoint.online
|
R
| false | false | 292 |
r
|
#load library
library(dplyr)
#Load all the data
features <- read.table("features.txt", col.names = c("Number", "feature_type"))
activity_label <- read.table("activity_labels.txt", col.names = c("code", "activities"))
subject_train <- read.table("./train/subject_train.txt", col.names = "subjects")
subject_test <- read.table("./test/subject_test.txt", col.names = "subjects")
x_train <- read.table("./train/X_train.txt", col.names = features$feature_type)
y_train <- read.table("./train/y_train.txt", col.names = "code")
x_test <- read.table("./test/X_test.txt", col.names = features$feature_type)
y_test <- read.table("./test/y_test.txt", col.names = "code")
#Combine the loaded data into a single data frame
test <- cbind(subject_test, y_test, x_test)
train <- cbind(subject_train, y_train, x_train)
train_test <- rbind(train, test)
#Select the columns that contains "mean" or "std"
mean_std <- c(TRUE,TRUE,grepl("mean|std",features$feature_type))
selected_file <-train_test[,mean_std]
#Change the activity code into activity name
activity_name=vector()
for (i in 1:nrow(selected_file)){
  aaa<-selected_file$code[i]==activity_label$code
  activity_name<-c(activity_name, activity_label$activities[aaa])
}
selected_file$code <- activity_name
#Change the column names to descriptive names
names(selected_file)[1] <- "People_object_ID"
names(selected_file)[2] <- "Activity"
names(selected_file)<-gsub("std","StandardDeviation", names(selected_file))
names(selected_file)<-gsub("Acc","_Accelerometer", names(selected_file))
names(selected_file)<-gsub("Gyro","_Gyroscope", names(selected_file))
names(selected_file)<-gsub("Mag","_Magnitude", names(selected_file))
names(selected_file)<-gsub("Freq","Frequency", names(selected_file))
names(selected_file)<-gsub("^f","Frequency_", names(selected_file))
names(selected_file)<-gsub("^t","Time_", names(selected_file))
names(selected_file)<-gsub("X$","X_axis", names(selected_file))
names(selected_file)<-gsub("Y$","Y_axis", names(selected_file))
names(selected_file)<-gsub("Z$","Z_axis", names(selected_file))
names(selected_file)<-gsub(".mean","_mean", names(selected_file))
names(selected_file)<-gsub(".StandardDeviation","_StandardDeviation", names(selected_file))
names(selected_file)<-gsub("BodyBody","Body", names(selected_file))
names(selected_file)<-gsub("Jerk","_Jerk", names(selected_file))
#Save the data as a data frame called "tidyTable"
tidyTable<-selected_file
write.csv(tidyTable, "tidyTable.csv")
#Creates a second, independent tidy data set (named "tidyTable2") with the average of
#each variable for each activity and each subject.
mean_activity_object<-tidyTable %>%
group_by(Activity, People_object_ID) %>%
summarize_all(mean)
tidyTable2 <- mean_activity_object
write.csv(tidyTable2, "tidyTable2.csv")
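#Aside (added, illustrative only): the activity-name lookup done with the for loop
#above can equivalently be written with match(); toy vectors are used here so the
#real data are left untouched.
toy_codes <- c(2, 1, 2)
toy_labels <- data.frame(code = 1:2, activities = c("WALKING", "SITTING"))
toy_labels$activities[match(toy_codes, toy_labels$code)]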
|
/script.R
|
no_license
|
ccm111222/course3_assignment
|
R
| false | false | 2,779 |
r
|
# Inner product. v1 and v2 should either be matrices of the same size or one of them (v1 in the hyperbolic case) should be a matrix and the other a vector of length equal to the number of rows in the matrix.
ip <- function(manifold, v1, v2) {
if ((manifold == 'euclidean') | (manifold == 'sphere')) {
result <- colSums(v1 * v2)
} else if (manifold == 'hyperbolic') {
v1[1, ] <- -v1[1, ]
result <- colSums(v1 * v2)
} else if (manifold == 'kendall') {
result <- colSums(v1 * Conj(v2))
}
return(result)
}
# Magnitude of a vector. v should be a matrix.
mag <- function(manifold, v) {
return(Re(sqrt(ip(manifold, v, v) + 0i)))
}
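# Illustrative sketch (added for exposition, not part of the original package): a tiny
# sanity check of the Minkowski inner product used for the hyperbolic model. A point on
# the hyperboloid, such as (sqrt(2), 1, 0), should satisfy <p, p> = -1.
demo_check_hyperbolic_ip <- function() {
  p <- matrix(c(sqrt(2), 1, 0), ncol = 1)
  c(self_ip = ip('hyperbolic', p, p), magnitude = sqrt(-ip('hyperbolic', p, p)))
}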
# Function for internal use in the geo_reg function. Similar to exp_map, but vectorized and without the errors, for speed and so as to not unexpectedly stop the algorithm because of one small error that can be safely ignored without changing the result. Difference from expo2: p and v should be matrices of the same dimensions.
expo <- function(manifold, p, v) {
if (manifold == 'euclidean') {
result <- p + v
} else if (manifold == 'sphere') {
theta <- mag(manifold, v)
e1 <- t(t(p) / mag(manifold, p)) # reprojects p onto the manifold, for precision
e2 <- t(t(v) / theta)
result <- t(t(e1) * cos(theta)) + t(t(e2) * sin(theta))
index <- which(theta == 0) # theta == 0 case must be dealt with separately due to division by theta
result[, index] <- p[, index]
result <- t(t(result) / mag(manifold, result)) # reprojects result onto the manifold, for precision
} else if (manifold == 'hyperbolic') {
theta <- mag(manifold, v)
e1 <- t(t(p) / sqrt(-ip(manifold, p, p))) # reprojects p onto the manifold, for precision
e2 <- t(t(v) / theta)
result <- t(t(e1) * cosh(theta)) + t(t(e2) * sinh(theta))
index <- which(theta == 0) # theta == 0 case must be dealt with separately due to division by theta
result[, index] <- p[, index] # if theta == 0, the result is p
result <- t(t(result) / sqrt(-ip(manifold, result, result))) # reprojects result onto the manifold, for precision
} else if (manifold == 'kendall') {
meanp <- colMeans(p)
theta <- mag(manifold, v)
e1 <- t((t(p) - meanp) / mag(manifold, t(t(p) - meanp))) # reprojects p onto the manifold, for precision
e2 <- t(t(v) / theta)
result <- t(t(e1) * cos(theta) + t(e2) * sin(theta))
index <- which(theta == 0) # theta == 0 case must be dealt with separately due to division by theta
result[, index] <- p[, index]
result <- t((t(result) - colMeans(result)) / mag(manifold, t(t(result) - colMeans(result)))) # reprojects result onto the manifold, for precision
}
return(result)
}
# Function for internal use in the geo_reg function. Similar to exp_map, but vectorized and without the errors, for speed and so as to not unexpectedly stop the algorithm because of one small error that can be safely ignored without changing the result. Difference from expo: p should be a column matrix, v should be a matrix with the same number of rows as p.
expo2 <- function(manifold, p, v) {
if (manifold == 'euclidean') {
result <- as.vector(p) + v
} else if (manifold == 'sphere') {
theta <- mag(manifold, v)
e1 <- t(t(p) / mag(manifold, p))
e2 <- t(t(v) / theta)
result <- e1 %*% cos(theta) + t(t(e2) * sin(theta))
index <- which(theta == 0)
result[, index] <- p
result <- t(t(result) / mag(manifold, result))
} else if (manifold == 'hyperbolic') {
theta <- mag(manifold, v)
e1 <- t(t(p) / sqrt(-ip(manifold, p, p)))
e2 <- t(t(v) / theta)
result <- e1 %*% cosh(theta) + t(t(e2) * sinh(theta))
index <- which(theta == 0)
result[, index] <- p
result <- t(t(result) / sqrt(-ip(manifold, result, result)))
} else if (manifold == 'kendall') {
meanp <- colMeans(p)
theta <- mag(manifold, v)
e1 <- t((t(p) - meanp) / mag(manifold, t(t(p) - meanp)))
e2 <- t(t(v) / theta)
result <- e1 %*% t(cos(theta)) + t(t(e2) * sin(theta))
index <- which(theta == 0)
result[, index] <- p
result <- t((t(result) - colMeans(result)) / mag(manifold, t(t(result) - colMeans(result))))
}
return(result)
}
# Function for internal use in the geo_reg function. Similar to log_map, but vectorized and without the errors, for speed and so as to not unexpectedly stop the algorithm because of one small error that can be safely ignored without changing the result. Difference from loga2 and loga3: p1 and p2 should be matrices of the same dimensions.
loga <- function(manifold, p1, p2) {
if (manifold == 'euclidean') {
result <- p2 - p1
} else if (manifold == 'sphere') {
p1 <- t(t(p1) / mag(manifold, p1)) # reprojects p1 onto the manifold, for precision
p2 <- t(t(p2) / mag(manifold, p2)) # reprojects p2 onto the manifold, for precision
a <- pmax(pmin(ip(manifold, p1, p2), 1), -1) # ensures a is in [-1, 1]
theta <- acos(a)
tang <- p2 - t(t(p1) * a)
t <- mag(manifold, tang)
if (any(t == 0)) { # any(t == 0) case must be dealt with separately due to division by t
if (any(mag(manifold, p1 - p2) < 1e-6)) { # determining whether any(t == 0) because of p1 = p2 or p1 = -p2
result <- numeric(dim(p1)[1])
} else if (any(mag(manifold, p1 + p2) < 1e-6)) { # determining whether any(t == 0) because of p1 = p2 or p1 = -p2
stop('p2 is the antipode of p1 and is therefore not in the domain of the log map at p1') ## change to continue
}
}
result <- t(t(tang) * (theta / t))
result[, which(t == 0)] <- 0
} else if (manifold == 'hyperbolic') {
p1 <- t(t(p1) / sqrt(-ip(manifold, p1, p1))) # reprojects p1 onto the manifold, for precision
p2 <- t(t(p2) / sqrt(-ip(manifold, p2, p2))) # reprojects p2 onto the manifold, for precision
a <- pmin(ip(manifold, p1, p2), -1) # ensures -a is at least 1
theta <- acosh(-a)
tang <- p2 + t(t(p1) * a)
t <- mag(manifold, tang)
if (any(t == 0)) { # any(t == 0) case must be dealt with separately due to division by t
result <- numeric(dim(p1)[1])
}
result <- t(t(tang) * (theta / t))
result[, which(t == 0)] <- 0
} else if (manifold == 'kendall') {
meanp1 <- colMeans(p1)
meanp2 <- colMeans(p2)
p1 <- t((t(p1) - meanp1) / mag(manifold, t(t(p1) - meanp1))) # reprojects p1 onto the manifold, for precision
p2 <- t((t(p2) - meanp2) / mag(manifold, t(t(p2) - meanp2))) # reprojects p2 onto the manifold, for precision
a <- ip(manifold, p1, p2)
theta <- acos(pmax(pmin(abs(a), 1), -1)) # ensures argument is in [-1, 1]
tang <- t(t(p2) * (a / abs(a))) - t(t(p1) * abs(a))
result <- t(t(tang) * (theta / mag(manifold, tang)))
result[, which(mag(manifold, tang) == 0)] <- 0 # mag(manifold, tang) == 0 case must be dealt with separately due to division by mag(manifold, tang)
}
return(result)
}
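# Illustrative sketch (added for exposition, not part of the original package): the log
# and exponential maps should invert each other, e.g. on the sphere expo(p, loga(p, q))
# recovers q up to numerical error.
demo_check_log_exp <- function() {
  p <- matrix(c(1, 0, 0), ncol = 1)
  q <- matrix(c(0, 1, 0), ncol = 1)
  max(abs(expo('sphere', p, loga('sphere', p, q)) - q))
}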
# Function for internal use in the geo_reg function. Similar to log_map, but vectorized and without the errors, for speed and so as to not unexpectedly stop the algorithm because of one small error that can be safely ignored without changing the result. Difference from loga1 and loga3: p1 should be a column matrix, p2 should be a matrix with the same number of rows as p1. Needed for the sphere, hyperbolic space and Kendall's shape space.
loga2 <- function(manifold, p1, p2) {
if (manifold == 'sphere') {
p1 <- t(t(p1) / mag(manifold, p1)) # reprojects p1 onto the manifold, for precision
p2 <- t(t(p2) / mag(manifold, p2)) # reprojects p2 onto the manifold, for precision
a <- pmax(pmin(ip(manifold, p2, as.vector(p1)), 1), -1) # ensures a is in [-1, 1]
theta <- acos(a)
tang <- p2 - p1 %*% a
t <- mag(manifold, tang)
result <- t(t(tang) * (theta / t))
result[, which(t == 0)] <- 0
} else if (manifold == 'hyperbolic') {
p1 <- t(t(p1) / sqrt(-ip(manifold, p1, p1))) # reprojects p1 onto the manifold, for precision
p2 <- t(t(p2) / sqrt(-ip(manifold, p2, p2))) # reprojects p2 onto the manifold, for precision
a <- pmin(ip(manifold, p2, as.vector(p1)), -1)
theta <- acosh(-a)
tang <- p2 + p1 %*% a
t <- mag(manifold, tang)
result <- t(t(tang) * (theta / t))
result[, which(t == 0)] <- 0
} else if (manifold == 'kendall') {
meanp1 <- colMeans(p1)
meanp2 <- colMeans(p2)
p1 <- t((t(p1) - meanp1) / mag(manifold, t(t(p1) - meanp1))) # reprojects p1 onto the manifold, for precision
p2 <- t((t(p2) - meanp2) / mag(manifold, t(t(p2) - meanp2))) # reprojects p1 onto the manifold, for precision
a <- ip(manifold, as.vector(p1), p2)
theta <- acos(pmax(pmin(abs(a), 1), -1))
tang <- t(t(p2) * (a / abs(a))) - p1 %*% abs(a)
result <- t(t(tang) * (theta / mag(manifold, tang)))
result[, which(mag(manifold, tang) == 0)] <- 0
}
return(result)
}
# Function for internal use in the geo_reg function. Similar to log_map, but vectorized and without the errors, for speed and so as to not unexpectedly stop the algorithm because of one small error that can be safely ignored without changing the result. Difference from loga2 and loga3: p1 should be a matrix, p2 should be a column matrix with the same number of rows as p1. Needed for the sphere and hyperbolic space.
loga3 <- function(manifold, p1, p2) {
if (manifold == 'sphere') {
p1 <- t(t(p1) / mag(manifold, p1)) # reprojects p1 onto the manifold, for precision
p2 <- t(t(p2) / mag(manifold, p2)) # reprojects p2 onto the manifold, for precision
a <- pmax(pmin(ip(manifold, p1, as.vector(p2)), 1), -1)
theta <- acos(a)
tang <- as.vector(p2) - t(t(p1) * a)
t <- mag(manifold, tang)
result <- t(t(tang) * (theta / t))
result[, which(t == 0)] <- 0
} else if (manifold == 'hyperbolic') {
p1 <- t(t(p1) / sqrt(-ip(manifold, p1, p1))) # reprojects p1 onto the manifold, for precision
p2 <- t(t(p2) / sqrt(-ip(manifold, p2, p2))) # reprojects p2 onto the manifold, for precision
a <- pmin(ip(manifold, p1, as.vector(p2)), -1)
theta <- acosh(-a)
tang <- as.vector(p2) + t(t(p1) * a)
t <- mag(manifold, tang)
result <- t(t(tang) * (theta / t))
result[, which(t == 0)] <- 0
}
return(result)
}
# Function for internal use in the geo_reg function. Similar to geo_dis, but vectorized and without the errors, for speed and so as to not unexpectedly stop the algorithm because of one small error that can be safely ignored without changing the result. p1 and p2 should be matrices of the same dimension.
dist <- function(manifold, p1, p2) {
return(mag(manifold, loga(manifold, p1, p2)))
}
# Function for internal use in the geo_reg function. Similar to par_trans, but vectorized and without the errors, for speed and so as to not unexpectedly stop the algorithm because of one small error that can be safely ignored without changing the result. Difference from pt2 and pt3: p1, p2, and v should be matrices of the same dimensions.
pt <- function(manifold, p1, p2, v) {
if (manifold == 'euclidean') {
result <- v
} else if (manifold == 'sphere') {
p1 <- t(t(p1) / mag(manifold, p1)) # reprojects p1 onto the manifold, for precision
w <- loga(manifold, p1, p2)
t <- mag(manifold, w)
e1 <- p1
e2 <- t(t(w) / t)
a <- ip(manifold, v, e2)
invar <- v - t(t(e2) * a)
result <- t(t(e2) * (a * cos(t))) - t(t(e1) * (a * sin(t))) + invar
index <- which(t == 0) # t == 0 case must be dealt with separately due to division by t
result[, index] <- v[, index]
# p1 <- t(t(p1) / mag(manifold, p1)) # reprojects p1 onto the manifold, for precision
# w <- loga(manifold, p1, p2)
# t <- mag(manifold, w)
# result <- v - t(t(w + loga(manifold, p2, p1)) * (ip(manifold, w, v) / (t ^ 2)))
# index <- which(t == 0) # t == 0 case must be dealt with separately due to division by t
# result[, index] <- v[, index]
} else if (manifold == 'hyperbolic') {
p1 <- t(t(p1) / sqrt(-ip(manifold, p1, p1))) # reprojects p1 onto the manifold, for precision
w <- loga(manifold, p1, p2)
t <- mag(manifold, w)
e1 <- p1
e2 <- t(t(w) / t)
a <- ip(manifold, v, e2)
invar <- v - t(t(e2) * a)
result <- t(t(e2) * (a * cosh(t))) + t(t(e1) * (a * sinh(t))) + invar
index <- which(t == 0) # t == 0 case must be dealt with separately due to division by t
result[, index] <- v[, index]
#p1 <- t(t(p1) / sqrt(-ip(manifold, p1, p1))) # reprojects p1 onto the manifold, for precision
#w <- loga(manifold, p1, p2)
#t <- mag(manifold, w)
#result <- v - t(t(w + loga(manifold, p2, p1)) * (ip(manifold, w, v) / (t ^ 2)))
#index <- which(t == 0) # t == 0 case must be dealt with separately due to division by t
#result[, index] <- v[, index]
} else if (manifold == 'kendall') {
meanp1 <- colMeans(p1)
meanp2 <- colMeans(p2)
p1 <- t((t(p1) - meanp1) / mag(manifold, t(t(p1) - meanp1))) # reprojects p1 onto the manifold, for precision
p2 <- t((t(p2) - meanp2) / mag(manifold, t(t(p2) - meanp2))) # reprojects p2 onto the manifold, for precision
yi <- expo(manifold, p1, v)
a <- ip(manifold, p1, p2)
p2 <- t(t(p2) * (a / abs(a))) # optimal alignment of p2 with p1
b <- (1 - (abs(a)) ^ 2) ^ 0.5
p2tilde <- t(t(p2 - t(t(p1) * abs(a))) / b)
result <- v - t(t(p1) * (ip(manifold, v, p1))) - t(t(p2tilde) * (ip(manifold, v, p2tilde))) + t(t(p1) * ((abs(a)) * (ip(manifold, v, p1)) - b * (ip(manifold, v, p2tilde)))) + t(t(p2tilde) * (b * (ip(manifold, v, p1)) + (abs(a)) * (ip(manifold, v, p2tilde))))
result <- t(t(result) * (Conj(a / abs(a))))
index <- which(abs(a) >= 1)
result[, index] <- loga(manifold, p2, yi)[, index]
}
return(result)
}
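# Illustrative sketch (added for exposition, not part of the original package): parallel
# transport is an isometry, so the magnitude of a transported tangent vector should be
# unchanged.
demo_check_pt_isometry <- function() {
  p1 <- matrix(c(1, 0, 0), ncol = 1)
  p2 <- matrix(c(0, 1, 0), ncol = 1)
  v <- matrix(c(0, 0, 2), ncol = 1) # tangent to the sphere at p1
  c(before = mag('sphere', v), after = mag('sphere', pt('sphere', p1, p2, v)))
}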
# Function for internal use in the geo_reg function. Similar to par_trans, but vectorized and without the errors, for speed and so as to not unexpectedly stop the algorithm because of one small error that can be safely ignored without changing the result. Difference from pt1 and pt3: p1 and p2 should be column matrices, v should be a matrix with the same number of rows as p1 and p2.
pt2 <- function(manifold, p1, p2, v) {
if (manifold == 'euclidean') {
result <- v
} else if (manifold == 'sphere') {
p1 <- t(t(p1) / mag(manifold, p1))
w <- loga(manifold, p1, p2)
t <- mag(manifold, w)
e1 <- p1
e2 <- w / t
if (t == 0) {
result <- v
} else {
a <- ip(manifold, v, as.vector(e2))
invar <- v - e2 %*% a
result <- e2 %*% (a * cos(t)) - e1 %*% (a * sin(t)) + invar
}
# p1 <- t(t(p1) / mag(manifold, p1))
# w <- loga(manifold, p1, p2)
# t <- mag(manifold, w)
# if (t == 0) {
# result <- v
# } else {
# result <- v - (w + loga(manifold, p2, p1)) %*% (ip(manifold, v, as.vector(w)) / (t ^ 2))
# }
} else if (manifold == 'hyperbolic') {
p1 <- t(t(p1) / sqrt(-ip(manifold, p1, p1)))
w <- loga(manifold, p1, p2)
t <- mag(manifold, w)
e1 <- p1
e2 <- w / t
if (t == 0) {
result <- v
} else {
a <- ip(manifold, v, as.vector(e2))
invar <- v - e2 %*% a
result <- e2 %*% (a * cosh(t)) + e1 %*% (a * sinh(t)) + invar
}
# p1 <- t(t(p1) / sqrt(-ip(manifold, p1, p1)))
# w <- loga(manifold, p1, p2)
# t <- mag(manifold, w)
# if (t == 0) {
# result <- v
# } else {
# result <- v - (w + loga(manifold, p2, p1)) %*% (ip(manifold, v, as.vector(w)) / (t ^ 2))
# }
} else if (manifold == 'kendall') {
meanp1 <- colMeans(p1)
meanp2 <- colMeans(p2)
p1 <- t((t(p1) - meanp1) / mag(manifold, t(t(p1) - meanp1)))
p2 <- t((t(p2) - meanp2) / mag(manifold, t(t(p2) - meanp2)))
yi <- expo2(manifold, p1, v)
a <- ip(manifold, p1, p2)
if (abs(a) >= 1) {
result <- loga2(manifold, p2, yi)
} else {
p2 <- p2 * (a / abs(a)) # optimal alignment of p2 with p1
b <- (1 - (abs(a)) ^ 2) ^ 0.5
p2tilde <- (p2 - p1 * abs(a)) / b
p1 <- as.vector(p1)
p2tilde <- as.vector(p2tilde)
result <- v - p1 %*% t(ip(manifold, v, p1)) - p2tilde %*% t(ip(manifold, v, p2tilde)) + p1 %*% t((abs(a)) * (ip(manifold, v, p1)) - b * (ip(manifold, v, p2tilde))) + p2tilde %*% t(b * (ip(manifold, v, p1)) + (abs(a)) * (ip(manifold, v, p2tilde)))
result <- result * Conj(a / abs(a))
}
}
return(result)
}
# Function for internal use in the geo_reg function. Similar to par_trans, but vectorized and without the errors, for speed and so as to not unexpectedly stop the algorithm because of one small error that can be safely ignored without changing the result. Difference from pt1 and pt2: p2 should be a column matrix, p1 and v should be matrices of the same dimensions with the same number of rows as p2.
pt3 <- function(manifold, p1, p2, v) {
if (manifold == 'euclidean') {
result <- v
} else if (manifold == 'sphere') {
p1 <- t(t(p1) / mag(manifold, p1))
w <- loga3(manifold, p1, p2)
e1 <- p1
e2 <- t(t(w) / mag(manifold, w))
a <- ip(manifold, v, e2)
invar <- v - t(t(e2) * a)
t <- mag(manifold, w)
result <- t(t(e2) * (a * cos(t))) - t(t(e1) * (a * sin(t))) + invar
index <- which(mag(manifold, w) == 0)
result[, index] <- v[, index]
# p1 <- t(t(p1) / mag(manifold, p1))
# w <- loga3(manifold, p1, p2)
# t <- mag(manifold, w)
# result <- v - t(t(w + loga2(manifold, p2, p1)) * (ip(manifold, w, v) / (t ^ 2)))
# index <- which(t == 0)
# result[, index] <- v[, index]
} else if (manifold == 'hyperbolic') {
p1 <- t(t(p1) / sqrt(-ip(manifold, p1, p1)))
w <- loga3(manifold, p1, p2)
e1 <- p1
e2 <- t(t(w) / mag(manifold, w))
a <- ip(manifold, v, e2)
invar <- v - t(t(e2) * a)
t <- mag(manifold, w)
result <- t(t(e2) * (a * cosh(t))) + t(t(e1) * (a * sinh(t))) + invar
index <- which(mag(manifold, w) == 0)
result[, index] <- v[, index]
# p1 <- t(t(p1) / sqrt(-ip(manifold, p1, p1)))
# w <- loga3(manifold, p1, p2)
# t <- mag(manifold, w)
# result <- v - t(t(w + loga2(manifold, p2, p1)) * (ip(manifold, w, v) / (t ^ 2)))
# index <- which(t == 0)
# result[, index] <- v[, index]
} else if (manifold == 'kendall') {
meanp1 <- colMeans(p1)
meanp2 <- colMeans(p2)
p1 <- t((t(p1) - meanp1) / mag(manifold, t(t(p1) - meanp1)))
p2 <- t((t(p2) - meanp2) / mag(manifold, t(t(p2) - meanp2)))
yi <- expo(manifold, p1, v)
a <- ip(manifold, p1, as.vector(p2))
p2 <- p2 %*% (a / abs(a)) # optimal alignment of p2 with p1
b <- (1 - (abs(a)) ^ 2) ^ 0.5
p2tilde <- t(t(p2 - t(t(p1) * abs(a))) / b)
result <- v - t(t(p1) * (ip(manifold, v, p1))) - t(t(p2tilde) * (ip(manifold, v, p2tilde))) + t(t(p1) * ((abs(a)) * (ip(manifold, v, p1)) - b * (ip(manifold, v, p2tilde)))) + t(t(p2tilde) * (b * (ip(manifold, v, p1)) + (abs(a)) * (ip(manifold, v, p2tilde))))
result <- t(t(result) * (Conj(a / abs(a))))
index <- which(abs(a) >= 1)
result[, index] <- loga(manifold, p2, yi)[, index]
}
return(result)
}
# Loss function for M-type estimators. t should be a vector of real numbers.
rho <- function(t, estimator, cutoff = NULL) {
if (estimator == 'l2') {
result <- 0.5 * t ^ 2
} else if (estimator == 'l1') {
result <- abs(t)
} else if (estimator == 'huber') {
result <- 0.5 * t ^ 2
index <- which(abs(t) >= cutoff)
result[index] <- cutoff * abs(t[index]) - 0.5 * cutoff ^ 2
} else if (estimator == 'tukey') {
result <- ((cutoff ^ 2) / 6) * (1 - (1 - (t / cutoff) ^ 2) ^ 3)
result[which(abs(t) >= cutoff)] <- (cutoff ^ 2) / 6
}
return(result)
}
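# Illustrative sketch (added for exposition, not part of the original package): how the
# four losses treat residuals of different sizes. Tukey's loss plateaus at cutoff^2 / 6
# for |t| >= cutoff, which is what makes it insensitive to gross outliers.
demo_compare_losses <- function(t = c(0.1, 1, 5), cutoff = 2) {
  rbind(l2 = rho(t, 'l2'),
        l1 = rho(t, 'l1'),
        huber = rho(t, 'huber', cutoff),
        tukey = rho(t, 'tukey', cutoff))
}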
# Derivative of the loss function for M-type estimators. t should be a vector of real numbers.
rho_prime <- function(t, estimator, cutoff = NULL) {
if (estimator == 'l2') {
result <- t
} else if (estimator == 'l1') {
result <- sign(t)
} else if (estimator == 'huber') {
result <- t
index <- which(abs(t) >= cutoff)
result[index] <- cutoff * sign(t[index])
} else if (estimator == 'tukey') {
result <- t * ((1 - (t / cutoff) ^ 2) ^ 2) * sign(t)
result[which(abs(t) >= cutoff)] <- 0
}
return(result)
}
# Calculates the residual vector for each data point. p, V, x, and y should all be matrices or appropriate dimensions.
eps <- function(manifold, p, V, x, y) {
shifts <- V %*% t(x)
predictions <- expo2(manifold, p, shifts)
result <- loga(manifold, predictions, y)
if (manifold == 'sphere') {
result[, which((mag(manifold, y - t(t(predictions) * max(min(abs(ip(manifold, predictions, y)), 1), -1))) == 0) & (mag(manifold, predictions + y) < 0.000001))] <- 0
} # we are ignoring cases where p2 is approximately -p1 in order to avoid an error
return(result)
}
# Move tangent vectors v2 at expo(p, v1) to stangent vectors at p1 using Jacobi fields and adjoint operators; used in gradient calculations. p should be a column matrix, v1 and v2 should be matrices of the same dimension. The columns of v1 should be tangent to p, and the columns of v2 should be tangent to exp_map(p, 'corresponsing column in v1').
jacobi <- function(manifold, p, v1, v2) {
result <- vector('list')
L <- mag(manifold, v1)
if (manifold == 'euclidean') {
result$p <- v2
result$V <- v2
} else if (manifold == 'sphere') {
v2_0 <- pt3(manifold, expo2(manifold, p, v1), p, v2)
v2_tan <- t((t(v1) / L) * (ip(manifold, v2_0, t(t(v1) / L))))
v2_orth <- v2_0 - v2_tan
result$p <- t(t(v2_orth) * cos(L)) + v2_tan
result$V <- t(t(v2_orth) * ((sin(L)) / L)) + v2_tan
index <- which(L == 0)
result$p[, index] <- v2[, index]
result$V[, index] <- v2[, index]
} else if (manifold == 'hyperbolic') {
v2_0 <- pt3(manifold, expo2(manifold, p, v1), p, v2)
v2_tan <- t((t(v1) / L) * (ip(manifold, v2_0, t(t(v1) / L))))
v2_orth <- v2_0 - v2_tan
result$p <- t(t(v2_orth) * cosh(L)) + v2_tan
result$V <- t(t(v2_orth) * ((sinh(L)) / L)) + v2_tan
index <- which(L == 0)
result$p[, index] <- v2[, index]
result$V[, index] <- v2[, index]
} else if (manifold == 'kendall') {
j <- (0 + 1i) * v1
v2_0 <- pt3(manifold, expo2(manifold, p, v1), p, v2)
w_0 <- t((t(j) / L) * (Re(ip(manifold, v2_0, t(t(j) / L)))))
u_0 <- v2_0 - w_0
w_tan <- t((t(v1) / L) * (Re(ip(manifold, w_0, t(t(v1) / L)))))
w_orth <- w_0 - w_tan
u_tan <- t((t(v1) / L) * (Re(ip(manifold, u_0, t(t(v1) / L)))))
u_orth <- u_0 - u_tan
result$p <- t(t(u_orth) * cos(L)) + t(t(w_orth) * cos(2 * L)) + u_tan + w_tan
result$V <- t(t(u_orth) * ((sin(L)) / L)) + t(t(w_orth) * ((sin(2 * L)) / (2 * L))) + u_tan + w_tan
index <- which(L == 0)
result$p[, index] <- v2[, index]
result$V[, index] <- v2[, index]
}
return(result)
}
# Calculates the gradient of the loss function at a given p, V. p, V, x, and y should all be matrices or appropriate dimensions. resids is redundant: it is simply eps(manifold, p, V, x, y). However, including it as an argument to the function quickens the calculation.
grad <- function(manifold, p, V, x, y, resids, estimator, cutoff = NULL) {
k <- dim(x)[2]
result <- vector("list")
mags <- mag(manifold, resids)
shifts <- V %*% t(x)
multiplier <- rho_prime(mags, estimator, cutoff)
unit_resids<- t(t(resids) / mags)
unit_resids[, which(mags == 0)] <- 0
jf <- jacobi(manifold, p, shifts, unit_resids)
result$p <- t(t(jf$p) * multiplier)
result$V <- aperm(replicate(k, jf$V), c(1, 3, 2)) * aperm(replicate(dim(p)[1], x * multiplier), c(3, 2, 1))
index <- which(mags <= 0.000001) # to avoid division by a small number
result$p[, index] <- 0
result$V[, , index] <- 0
result$p <- as.matrix(-rowSums(result$p))
result$V <- -rowSums(result$V, dims = 2)
return(result)
}
#' Manifold check and projection
#'
#' Checks whether each data point in \eqn{y} is on the given manifold, and if
#' not, provides a modified version of \eqn{y} where each column has been
#' projected onto the manifold.
#'
#' @param manifold Type of manifold (\code{'euclidean'}, \code{'sphere'},
#' \code{'hyperbolic'}, or \code{'kendall'}).
#' @param y A vector, matrix, or data frame whose columns should represent
#' points on the manifold.
#' @return A named list containing \item{on}{a logical vector describing whether
#'   or not each column of \code{y} is on the manifold.} \item{data}{a matrix or
#' data frame of the same dimensions as \code{y}; each column of \code{y} has
#' been projected onto the manifold.}
#' @author Ha-Young Shin
#' @examples
#' y1 <- matrix(rnorm(10), ncol = 2)
#' y1 <- y1[, 1] + (1i) * y1[, 2]
#' y2 <- matrix(rnorm(10), ncol = 2)
#' y2 <- y2[, 1] + (1i) * y2[, 2]
#' y3 <- matrix(rnorm(10), ncol = 2)
#' y3 <- y3[, 1] + (1i) * y3[, 2]
#' y3 <- (y3 - mean(y3)) / norm(y3 - mean(y3), type = '2') # project onto preshape space
#' y <- matrix(c(y1, y2, y3), ncol = 3)
#' onmanifold('kendall', y)
#'
#' @export
onmanifold <- function(manifold, y) {
y <- as.matrix(y)
if (any(is.nan(y))) {
stop('y should not contain NaN values')
}
sample_size <- dim(y)[2]
result <- vector("list")
if (manifold == 'euclidean') {
result$on <- !logical(sample_size)
result$data <- y
} else if (manifold == 'sphere') {
ons <- !logical(sample_size)
mags <- mag(manifold, y)
ons[which(abs(mags - 1) > 1e-6)] <- FALSE
y <- t(t(y) / mags)
result$on <- ons
result$data <- y
} else if (manifold == 'hyperbolic') {
ons <- !logical(sample_size)
mags <- sqrt(-ip(manifold, y, y))
ons[which((abs(mags - 1) > 1e-6) | (y[1, ] < 0))] <- FALSE
y <- t(t(y) / mags)
result$on <- ons
result$data <- y
} else if (manifold == 'kendall') {
ons <- !logical(sample_size)
mags <- mag(manifold, y)
means <- colMeans(y)
ons[which((abs(mags - 1) > 1e-6) | (abs(means * Conj(means)) > 1e-6))] <- FALSE
y <- t((t(y) - means) / mag(manifold, t(t(y) - means)))
result$on <- ons
result$data <- y
} else {
stop('the manifold must be one of euclidean, sphere, hyperbolic, or kendall')
}
return(result)
}
#' Gradient descent for (robust) geodesic regression
#'
#' Finds \eqn{\mathrm{argmin}_{(p,V)\in M\times (T_pM) ^ n}\sum_{i=1} ^ {N}
#' \rho(d(\mathrm{Exp}(p,Vx_i),y_i))} through a gradient descent algorithm.
#'
#' Each column of \code{x} should be centered to have an average of 0 for the
#' quickest and most accurate results. If all of the elements of a column of
#' \code{x} are equal, the resulting vector will consist of \code{NA}s. In the
#' case of the \code{'sphere'}, an error will be raised if all points are on a
#' pair of antipodes.
#'
#' @param manifold Type of manifold (\code{'euclidean'}, \code{'sphere'},
#' \code{'hyperbolic'}, or \code{'kendall'}).
#' @param x A vector, matrix, or data frame of independent variables; for
#' matrices and data frames, the rows and columns represent the subjects and
#' independent variables, respectively.
#' @param y A matrix or data frame whose columns represent points on the
#' manifold.
#' @param estimator M-type estimator (\code{'l2'}, \code{'l1'}, \code{'huber'},
#' or \code{'tukey'}).
#' @param c Multiplier of \eqn{\sigma}, the square root of the variance, used in
#' the cutoff parameter for the \code{'huber'} and \code{'tukey'} estimators;
#' should be \code{NULL} for the \code{'l2'} or \code{'l1'} estimators.
#' @param p_tol Termination condition for the distance between consecutive
#' updates of \code{p}.
#' @param V_tol Termination condition for the distance between columns of
#' consecutive updates of \code{V}, parallel transported to be in the same
#' tangent space. Can be a vector of positive real numbers for each
#' independent variable or a single positive number.
#' @param max_iter Maximum number of gradient descent steps before ending the
#' algorithm.
#' @return A named list containing \item{p}{a vector representing the estimate
#' of the initial point on the manifold} \item{V}{a matrix representing the
#' estimate of the initial velocities for each independent variable; the
#' columns represent the independent variables.} \item{iteration}{number of
#' gradient descent steps taken.}
#' @references Fletcher, P. T. (2013). Geodesic regression and the theory of
#' least squares on Riemannian manifolds. International Journal of Computer
#' Vision, 105, 171-185.
#'
#'   Kim, H. J., Adluru, N., Collins, M. D., Chung, M. K., Bendlin, B. B.,
#' Johnson, S. C., Davidson, R. J. and Singh, V. (2014). Multivariate general
#' linear models (MGLM) on Riemannian manifolds with applications to
#' statistical analysis of diffusion weighted images. 2014 IEEE Conference on
#' Computer Vision and Pattern Recognition, 2705-2712.
#'
#' Shin, H.-Y. and Oh H.-S. (2020). Robust Geodesic Regression.
#' <arXiv:2007.04518>
#' @author Ha-Young Shin
#' @seealso \code{\link{intrinsic_location}}.
#' @examples
#' # an example of multiple regression with two independent variables, with 64
#' # data points
#'
#' x <- matrix(runif(2 * 64), ncol = 2)
#' x <- t(t(x) - colMeans(x))
#' y <- matrix(0L, nrow = 4, ncol = 64)
#' for (i in 1:64) {
#' y[, i] <- exp_map('sphere', c(1, 0, 0, 0), c(0, runif(1), runif(1),
#' runif(1)))
#' }
#' geo_reg('sphere', x, y, 'tukey', c = are_nr('tukey', 2, 6))
#'
#' @export
#' @importFrom stats median
geo_reg <- function(manifold, x, y, estimator, c = NULL, p_tol = 1e-5, V_tol = 1e-5, max_iter = 100000) {
if (((estimator == 'huber') | (estimator == 'tukey')) & is.null(c)) {
stop('a c value must be provided if the chosen m-estimator is huber or tukey')
}
if (!is.null(c)) {
if ((estimator == 'l2') | (estimator == 'l1')) {
warning('l2 and l1 do not use a c value')
}
if (c <= 0) {
stop('c must be positive')
}
}
if (any(is.nan(x))) {
stop('x should not contain NaN values')
}
ondata <- onmanifold(manifold, y)
if (any(!(ondata$on))) {
warning('all data points in y must lie on the manifold')
}
y <- ondata$data
embedded <- dim(y)[1]
sample_size <- dim(y)[2]
x <- as.matrix(x)
k <- dim(x)[2]
allequal <- c()
for (var in 1:k) { # Deals with case where all of the data for one of the independent variables are equal
if (length(unique(x[, var])) == 1) {
x[, var] <- numeric(sample_size)
allequal <- c(allequal, var)
}
}
if (manifold == 'sphere') {
if (sample_size > 1) {
antipode <- sum(mag(manifold, y[, 1] + y) == 0)
same <- sum(mag(manifold, y[, 1] - y) == 0)
if ((antipode + same == sample_size) & (antipode > 0)) {
stop('there is no unique solution as all the data points are on antipodes of the sphere, with at least one data point on either antipode')
}
}
}
if (any(abs(colMeans(x)) > 0.000001)) {
warning('the mean of the data for at least one of your independent variables is not zero; the x data should be centered for best/quickest results')
}
if (length(allequal) == k) {
current_p <- as.matrix(y[, 1])
} else {
current_p <- geo_reg(manifold, t(t(numeric(sample_size))), y, estimator, c, p_tol, V_tol, max_iter)$p
}
current_V <- matrix(0L, nrow = embedded, ncol = k)
old_p <- current_p
old_V <- current_V
count <- 0
alt_count <- 0
cutoff <- NULL
if ((estimator == 'huber') | (estimator == 'tukey')) {
if (manifold == 'euclidean') {
dimension <- embedded
} else if ((manifold == 'sphere') | (manifold == 'hyperbolic')) {
dimension <- embedded - 1
} else if (manifold == 'kendall') {
dimension <- 2 * embedded - 4
}
xi <- (2 * Pinv(dimension / 2, 0.5)) ^ 0.5
current_shifts <- current_V %*% t(x)
deviations <- dist(manifold, expo2(manifold, current_p, current_shifts), y)
mad <- median(deviations)
sigma <- mad / xi
cutoff <- c * sigma
}
current_resids <- eps(manifold, current_p, current_V, x, y)
current_loss <- sum(rho(mag(manifold, current_resids), estimator, cutoff))
step <- grad(manifold, current_p, current_V, x, y, current_resids, estimator, cutoff)
V_diffs <- mag(manifold, (pt2(manifold, old_p, current_p, old_V) - current_V))
lambda <- 0.1
while (((count == 0) | ((count < max_iter) & ((dist(manifold, old_p, current_p) > p_tol) | (any(V_diffs > V_tol))))) & (alt_count < 100000)) {
new_p <- tryCatch(expo(manifold, current_p, -lambda * step$p), warning = function(w) 'warning')
while ((new_p[1] == 'warning') | (mag(manifold, -lambda * step$p) > 10)) {
lambda <- lambda / 2
new_p <- tryCatch(expo(manifold, current_p, -lambda * step$p), warning = function(w) 'warning')
}
new_V <- pt2(manifold, current_p, new_p, current_V - lambda * step$V)
new_resids <- tryCatch(eps(manifold, new_p, new_V, x, y), warning = function(w) 'warning')
while ((new_resids[1] == 'warning') | any(Re(ip(manifold, new_V, as.vector(new_p))) > 0.000001)) {
lambda <- lambda / 2
new_p <- expo(manifold, current_p, -lambda * step$p)
new_V <- pt2(manifold, current_p, new_p, current_V - lambda * step$V)
new_resids <- tryCatch(eps(manifold, new_p, new_V, x, y), warning = function(w) 'warning')
}
new_loss <- sum(rho(mag(manifold, new_resids), estimator, cutoff))
if (current_loss >= new_loss) {
alt_count <- 0
old_p <- current_p
old_V <- current_V
current_p <- new_p
current_V <- new_V
if ((estimator == 'huber') | (estimator == 'tukey')) {
current_shifts <- current_V %*% t(x)
deviations <- dist(manifold, expo2(manifold, current_p, current_shifts), y)
mad <- median(deviations)
sigma <- mad / xi
cutoff <- c * sigma
}
current_resids <- new_resids
current_loss <- sum(rho(mag(manifold, current_resids), estimator, cutoff))
step <- grad(manifold, current_p, current_V, x, y, current_resids, estimator, cutoff)
V_diffs <- mag(manifold, (pt2(manifold, old_p, current_p, old_V) - current_V))
if ((manifold == 'euclidean') | (manifold == 'sphere') | (manifold == 'kendall')) {
lambda <- 8 * lambda
#} else if (manifold == 'hyperbolic') {
# lambda <- 1.1 * lambda
}
count <- count + 1
} else {
lambda <- lambda / 2
alt_count <- alt_count + 1
}
}
if (count == max_iter) {
warning('issues with convergence; make sure your independent variables are centered, and try adjusting max_iter, p_tol, or V_tol')
}
result <- vector("list")
result$p <- current_p
current_V[, allequal] <- NA
result$V <- current_V
result$iteration <- count
return(result)
}
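# Illustrative follow-up sketch (added for exposition, not part of the original package):
# recovering fitted points and residual geodesic distances from a geo_reg() fit using the
# internal helpers above; any NA columns of V (all-equal covariates) are assumed to have
# been dropped from both V and x beforehand.
demo_fitted_values <- function(fit, manifold, x, y) {
  x <- as.matrix(x)
  shifts <- fit$V %*% t(x) # tangent vectors V x_i at the base point p
  fitted <- expo2(manifold, fit$p, shifts) # Exp(p, V x_i) for each subject
  list(fitted = fitted, residual_dist = dist(manifold, fitted, y))
}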
#' Gradient descent for location based on M-type estimators
#'
#' Finds \eqn{\mathrm{argmin}_{p\in M}\sum_{i=1} ^ {N} \rho(d(p,y_i))} through a
#' gradient descent algorithm.
#'
#' In the case of the \code{'sphere'}, an error will be raised if all points are
#' on a pair of antipodes.
#'
#' @param manifold Type of manifold (\code{'euclidean'}, \code{'sphere'},
#' \code{'hyperbolic'}, or \code{'kendall'}).
#' @param y A matrix or data frame whose columns represent points on the
#' manifold.
#' @param estimator M-type estimator (\code{'l2'}, \code{'l1'}, \code{'huber'},
#' or \code{'tukey'}).
#' @param c Multiplier of \eqn{\sigma}, the square root of the variance, used in
#' the cutoff parameter for the \code{'huber'} and \code{'tukey'} estimators;
#' should be \code{NULL} for the \code{'l2'} or \code{'l1'} estimators.
#' @param p_tol Termination condition for the distance between consecutive
#' updates of \code{p}.
#' @param V_tol Termination condition for the distance between columns of
#' consecutive updates of \code{V}, parallel transported to be in the same
#' tangent space. Can be a vector of positive real numbers for each
#' independent variable or a single positive number.
#' @param max_iter Maximum number of gradient descent steps before ending the
#' algorithm.
#' @return A vector representing the location estimate
#' @references Fletcher, P. T. (2013). Geodesic regression and the theory of
#' least squares on Riemannian manifolds. International Journal of Computer
#' Vision, 105, 171-185.
#'
#' Kim, H. J., Adluru, N., Collins, M. D., Chung, M. K., Bendin, B. B.,
#' Johnson, S. C., Davidson, R. J. and Singh, V. (2014). Multivariate general
#' linear models (MGLM) on Riemannian manifolds with applications to
#' statistical analysis of diffusion weighted images. 2014 IEEE Conference on
#' Computer Vision and Pattern Recognition, 2705-2712.
#'
#' Shin, H.-Y. and Oh H.-S. (2020). Robust Geodesic Regression.
#' <arXiv:2007.04518>
#' @author Ha-Young Shin
#' @seealso \code{\link{geo_reg}}, \code{\link[RiemBase]{rbase.mean}},
#' \code{\link[RiemBase]{rbase.median}}.
#' @examples
#' y <- matrix(runif(100, 1000, 2000), nrow = 10)
#' intrinsic_location('euclidean', y, 'l2')
#'
#' @export
intrinsic_location <- function(manifold, y, estimator, c = NULL, p_tol = 1e-5, V_tol = 1e-5, max_iter = 100000) {
sample_size <- dim(y)[2]
return(geo_reg(manifold, numeric(sample_size), y, estimator, c, p_tol, V_tol, max_iter)$p)
}
|
/R/GeodRegr.R
|
no_license
|
cran/GeodRegr
|
R
| false | false | 37,732 |
r
|
# Inner product. v1 and v2 should either be matrices of the same size or one of them (v1 in the hyperbolic case) should be a matrix and the other a vector of length equal to the number of rows in the matrix.
ip <- function(manifold, v1, v2) {
if ((manifold == 'euclidean') | (manifold == 'sphere')) {
result <- colSums(v1 * v2)
} else if (manifold == 'hyperbolic') {
v1[1, ] <- -v1[1, ]
result <- colSums(v1 * v2)
} else if (manifold == 'kendall') {
result <- colSums(v1 * Conj(v2))
}
return(result)
}
# Magnitude of a vector. v should be a matrix.
mag <- function(manifold, v) {
return(Re(sqrt(ip(manifold, v, v) + 0i)))
}
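# Illustrative sketch (kept commented out so sourcing this file has no side effects; p_demo is an arbitrary demo matrix, not used elsewhere): for unit-norm columns on the sphere, ip() is the column-wise Euclidean dot product and mag() the corresponding norm.
# p_demo <- matrix(c(1, 0, 0, 0, 1, 0), nrow = 3) # two points on S^2 stored as columns
# ip('sphere', p_demo, p_demo) # c(1, 1)
# mag('sphere', p_demo) # c(1, 1)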
# Function for internal use in the geo_reg function. Similar to exp_map, but vectorized and without the errors, for speed and so as to not unexpectedly stop the algorithm because of one small error that can be safely ignored without changing the result. Difference from expo2: p and v should be matrices of the same dimensions.
expo <- function(manifold, p, v) {
if (manifold == 'euclidean') {
result <- p + v
} else if (manifold == 'sphere') {
theta <- mag(manifold, v)
e1 <- t(t(p) / mag(manifold, p)) # reprojects p onto the manifold, for precision
e2 <- t(t(v) / theta)
result <- t(t(e1) * cos(theta)) + t(t(e2) * sin(theta))
index <- which(theta == 0) # theta == 0 case must be dealt with separately due to division by theta
result[, index] <- p[, index]
result <- t(t(result) / mag(manifold, result)) # reprojects result onto the manifold, for precision
} else if (manifold == 'hyperbolic') {
theta <- mag(manifold, v)
e1 <- t(t(p) / sqrt(-ip(manifold, p, p))) # reprojects p onto the manifold, for precision
e2 <- t(t(v) / theta)
result <- t(t(e1) * cosh(theta)) + t(t(e2) * sinh(theta))
index <- which(theta == 0) # theta == 0 case must be dealt with separately due to division by theta
result[, index] <- p[, index] # if theta == 0, the result is p
result <- t(t(result) / sqrt(-ip(manifold, result, result))) # reprojects result onto the manifold, for precision
} else if (manifold == 'kendall') {
meanp <- colMeans(p)
theta <- mag(manifold, v)
e1 <- t((t(p) - meanp) / mag(manifold, t(t(p) - meanp))) # reprojects p onto the manifold, for precision
e2 <- t(t(v) / theta)
result <- t(t(e1) * cos(theta) + t(e2) * sin(theta))
index <- which(theta == 0) # theta == 0 case must be dealt with separately due to division by theta
result[, index] <- p[, index]
result <- t((t(result) - colMeans(result)) / mag(manifold, t(t(result) - colMeans(result)))) # reprojects result onto the manifold, for precision
}
return(result)
}
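# Quick illustration (commented out; the matrices below are arbitrary demo inputs): on the euclidean manifold the exponential map reduces to column-wise vector addition.
# expo('euclidean', matrix(1:4, 2), matrix(1, 2, 2)) # same as matrix(1:4, 2) + 1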
# Function for internal use in the geo_reg function. Similar to exp_map, but vectorized and without the errors, for speed and so as to not unexpectedly stop the algorithm because of one small error that can be safely ignored without changing the result. Difference from expo: p should be a column matrix, v should be a matrix with the same number of rows as p.
expo2 <- function(manifold, p, v) {
if (manifold == 'euclidean') {
result <- as.vector(p) + v
} else if (manifold == 'sphere') {
theta <- mag(manifold, v)
e1 <- t(t(p) / mag(manifold, p))
e2 <- t(t(v) / theta)
result <- e1 %*% cos(theta) + t(t(e2) * sin(theta))
index <- which(theta == 0)
result[, index] <- p
result <- t(t(result) / mag(manifold, result))
} else if (manifold == 'hyperbolic') {
theta <- mag(manifold, v)
e1 <- t(t(p) / sqrt(-ip(manifold, p, p)))
e2 <- t(t(v) / theta)
result <- e1 %*% cosh(theta) + t(t(e2) * sinh(theta))
index <- which(theta == 0)
result[, index] <- p
result <- t(t(result) / sqrt(-ip(manifold, result, result)))
} else if (manifold == 'kendall') {
meanp <- colMeans(p)
theta <- mag(manifold, v)
e1 <- t((t(p) - meanp) / mag(manifold, t(t(p) - meanp)))
e2 <- t(t(v) / theta)
result <- e1 %*% t(cos(theta)) + t(t(e2) * sin(theta))
index <- which(theta == 0)
result[, index] <- p
result <- t((t(result) - colMeans(result)) / mag(manifold, t(t(result) - colMeans(result))))
}
return(result)
}
# Function for internal use in the geo_reg function. Similar to log_map, but vectorized and without the errors, for speed and so as to not unexpectedly stop the algorithm because of one small error that can be safely ignored without changing the result. Difference from loga2 and loga3: p1 and p2 should be matrices of the same dimensions.
loga <- function(manifold, p1, p2) {
if (manifold == 'euclidean') {
result <- p2 - p1
} else if (manifold == 'sphere') {
p1 <- t(t(p1) / mag(manifold, p1)) # reprojects p1 onto the manifold, for precision
p2 <- t(t(p2) / mag(manifold, p2)) # reprojects p2 onto the manifold, for precision
a <- pmax(pmin(ip(manifold, p1, p2), 1), -1) # ensures a is in [-1, 1]
theta <- acos(a)
tang <- p2 - t(t(p1) * a)
t <- mag(manifold, tang)
if (any(t == 0)) { # any(t == 0) case must be dealt with separately due to division by t
if (any(mag(manifold, p1 - p2) < 1e-6)) { # determining whether any(t == 0) because of p1 = p2 or p1 = -p2
result <- numeric(dim(p1)[1])
} else if (any(mag(manifold, p1 + p2) < 1e-6)) { # determining whether any(t == 0) because of p1 = p2 or p1 = -p2
stop('p2 is the antipode of p1 and is therefore not in the domain of the log map at p1') ## change to continue
}
}
result <- t(t(tang) * (theta / t))
result[, which(t == 0)] <- 0
} else if (manifold == 'hyperbolic') {
p1 <- t(t(p1) / sqrt(-ip(manifold, p1, p1))) # reprojects p1 onto the manifold, for precision
p2 <- t(t(p2) / sqrt(-ip(manifold, p2, p2))) # reprojects p2 onto the manifold, for precision
a <- pmin(ip(manifold, p1, p2), -1) # ensures -a is at least 1
theta <- acosh(-a)
tang <- p2 + t(t(p1) * a)
t <- mag(manifold, tang)
if (any(t == 0)) { # any(t == 0) case must be dealt with separately due to division by t
result <- numeric(dim(p1)[1])
}
result <- t(t(tang) * (theta / t))
result[, which(t == 0)] <- 0
} else if (manifold == 'kendall') {
meanp1 <- colMeans(p1)
meanp2 <- colMeans(p2)
p1 <- t((t(p1) - meanp1) / mag(manifold, t(t(p1) - meanp1))) # reprojects p1 onto the manifold, for precision
p2 <- t((t(p2) - meanp2) / mag(manifold, t(t(p2) - meanp2))) # reprojects p2 onto the manifold, for precision
a <- ip(manifold, p1, p2)
theta <- acos(pmax(pmin(abs(a), 1), -1)) # ensures argument is in [-1, 1]
tang <- t(t(p2) * (a / abs(a))) - t(t(p1) * abs(a))
result <- t(t(tang) * (theta / mag(manifold, tang)))
result[, which(mag(manifold, tang) == 0)] <- 0 # mag(manifold, tang) == 0 case must be dealt with separately due to division by mag(manifold, tang)
}
return(result)
}
# Function for internal use in the geo_reg function. Similar to log_map, but vectorized and without the errors, for speed and so as to not unexpectedly stop the algorithm because of one small error that can be safely ignored without changing the result. Difference from loga1 and loga3: p1 should be a column matrix, p2 should be a matrix with the same number of rows as p1. Needed for the sphere, hyperbolic space and Kendall's shape space.
loga2 <- function(manifold, p1, p2) {
if (manifold == 'sphere') {
p1 <- t(t(p1) / mag(manifold, p1)) # reprojects p1 onto the manifold, for precision
p2 <- t(t(p2) / mag(manifold, p2)) # reprojects p2 onto the manifold, for precision
a <- pmax(pmin(ip(manifold, p2, as.vector(p1)), 1), -1) # ensures a is in [-1, 1]
theta <- acos(a)
tang <- p2 - p1 %*% a
t <- mag(manifold, tang)
result <- t(t(tang) * (theta / t))
result[, which(t == 0)] <- 0
} else if (manifold == 'hyperbolic') {
p1 <- t(t(p1) / sqrt(-ip(manifold, p1, p1))) # reprojects p1 onto the manifold, for precision
p2 <- t(t(p2) / sqrt(-ip(manifold, p2, p2))) # reprojects p2 onto the manifold, for precision
a <- pmin(ip(manifold, p2, as.vector(p1)), -1)
theta <- acosh(-a)
tang <- p2 + p1 %*% a
t <- mag(manifold, tang)
result <- t(t(tang) * (theta / t))
result[, which(t == 0)] <- 0
} else if (manifold == 'kendall') {
meanp1 <- colMeans(p1)
meanp2 <- colMeans(p2)
p1 <- t((t(p1) - meanp1) / mag(manifold, t(t(p1) - meanp1))) # reprojects p1 onto the manifold, for precision
p2 <- t((t(p2) - meanp2) / mag(manifold, t(t(p2) - meanp2))) # reprojects p2 onto the manifold, for precision
a <- ip(manifold, as.vector(p1), p2)
theta <- acos(pmax(pmin(abs(a), 1), -1))
tang <- t(t(p2) * (a / abs(a))) - p1 %*% abs(a)
result <- t(t(tang) * (theta / mag(manifold, tang)))
result[, which(mag(manifold, tang) == 0)] <- 0
}
return(result)
}
# Function for internal use in the geo_reg function. Similar to log_map, but vectorized and without the errors, for speed and so as to not unexpectedly stop the algorithm because of one small error that can be safely ignored without changing the result. Difference from loga2 and loga3: p1 should be a matrix, p2 should be a column matrix with the same number of rows as p1. Needed for the sphere and hyperbolic space.
loga3 <- function(manifold, p1, p2) {
if (manifold == 'sphere') {
p1 <- t(t(p1) / mag(manifold, p1)) # reprojects p1 onto the manifold, for precision
p2 <- t(t(p2) / mag(manifold, p2)) # reprojects p2 onto the manifold, for precision
a <- pmax(pmin(ip(manifold, p1, as.vector(p2)), 1), -1)
theta <- acos(a)
tang <- as.vector(p2) - t(t(p1) * a)
t <- mag(manifold, tang)
result <- t(t(tang) * (theta / t))
result[, which(t == 0)] <- 0
} else if (manifold == 'hyperbolic') {
p1 <- t(t(p1) / sqrt(-ip(manifold, p1, p1))) # reprojects p1 onto the manifold, for precision
p2 <- t(t(p2) / sqrt(-ip(manifold, p2, p2))) # reprojects p2 onto the manifold, for precision
a <- pmin(ip(manifold, p1, as.vector(p2)), -1)
theta <- acosh(-a)
tang <- as.vector(p2) + t(t(p1) * a)
t <- mag(manifold, tang)
result <- t(t(tang) * (theta / t))
result[, which(t == 0)] <- 0
}
return(result)
}
# Function for internal use in the geo_reg function. Similar to geo_dis, but vectorized and without the errors, for speed and so as to not unexpectedly stop the algorithm because of one small error that can be safely ignored without changing the result. p1 and p2 should be matrices of the same dimension.
dist <- function(manifold, p1, p2) {
return(mag(manifold, loga(manifold, p1, p2)))
}
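# Sanity-check sketch (commented out; inputs are arbitrary demo matrices): in the euclidean case dist() is the ordinary column-wise Euclidean distance.
# dist('euclidean', matrix(0, 2, 3), matrix(1, 2, 3)) # rep(sqrt(2), 3)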
# Function for internal use in the geo_reg function. Similar to par_trans, but vectorized and without the errors, for speed and so as to not unexpectedly stop the algorithm because of one small error that can be safely ignored without changing the result. Difference from pt2 and pt3: p1, p2, and v should be matrices of the same dimensions.
pt <- function(manifold, p1, p2, v) {
if (manifold == 'euclidean') {
result <- v
} else if (manifold == 'sphere') {
p1 <- t(t(p1) / mag(manifold, p1)) # reprojects p1 onto the manifold, for precision
w <- loga(manifold, p1, p2)
t <- mag(manifold, w)
e1 <- p1
e2 <- t(t(w) / t)
a <- ip(manifold, v, e2)
invar <- v - t(t(e2) * a)
result <- t(t(e2) * (a * cos(t))) - t(t(e1) * (a * sin(t))) + invar
index <- which(t == 0) # t == 0 case must be dealt with separately due to division by t
result[, index] <- v[, index]
# p1 <- t(t(p1) / mag(manifold, p1)) # reprojects p1 onto the manifold, for precision
# w <- loga(manifold, p1, p2)
# t <- mag(manifold, w)
# result <- v - t(t(w + loga(manifold, p2, p1)) * (ip(manifold, w, v) / (t ^ 2)))
# index <- which(t == 0) # t == 0 case must be dealt with separately due to division by t
# result[, index] <- v[, index]
} else if (manifold == 'hyperbolic') {
p1 <- t(t(p1) / sqrt(-ip(manifold, p1, p1))) # reprojects p1 onto the manifold, for precision
w <- loga(manifold, p1, p2)
t <- mag(manifold, w)
e1 <- p1
e2 <- t(t(w) / t)
a <- ip(manifold, v, e2)
invar <- v - t(t(e2) * a)
result <- t(t(e2) * (a * cosh(t))) + t(t(e1) * (a * sinh(t))) + invar
index <- which(t == 0) # t == 0 case must be dealt with separately due to division by t
result[, index] <- v[, index]
#p1 <- t(t(p1) / sqrt(-ip(manifold, p1, p1))) # reprojects p1 onto the manifold, for precision
#w <- loga(manifold, p1, p2)
#t <- mag(manifold, w)
#result <- v - t(t(w + loga(manifold, p2, p1)) * (ip(manifold, w, v) / (t ^ 2)))
#index <- which(t == 0) # t == 0 case must be dealt with separately due to division by t
#result[, index] <- v[, index]
} else if (manifold == 'kendall') {
meanp1 <- colMeans(p1)
meanp2 <- colMeans(p2)
p1 <- t((t(p1) - meanp1) / mag(manifold, t(t(p1) - meanp1))) # reprojects p1 onto the manifold, for precision
p2 <- t((t(p2) - meanp2) / mag(manifold, t(t(p2) - meanp2))) # reprojects p2 onto the manifold, for precision
yi <- expo(manifold, p1, v)
a <- ip(manifold, p1, p2)
p2 <- t(t(p2) * (a / abs(a))) # optimal alignment of p2 with p1
b <- (1 - (abs(a)) ^ 2) ^ 0.5
p2tilde <- t(t(p2 - t(t(p1) * abs(a))) / b)
result <- v - t(t(p1) * (ip(manifold, v, p1))) - t(t(p2tilde) * (ip(manifold, v, p2tilde))) + t(t(p1) * ((abs(a)) * (ip(manifold, v, p1)) - b * (ip(manifold, v, p2tilde)))) + t(t(p2tilde) * (b * (ip(manifold, v, p1)) + (abs(a)) * (ip(manifold, v, p2tilde))))
result <- t(t(result) * (Conj(a / abs(a))))
index <- which(abs(a) >= 1)
result[, index] <- loga(manifold, p2, yi)[, index]
}
return(result)
}
# Function for internal use in the geo_reg function. Similar to par_trans, but vectorized and without the errors, for speed and so as to not unexpectedly stop the algorithm because of one small error that can be safely ignored without changing the result. Difference from pt1 and pt3: p1 and p2 should be column matrices, v should be a matrix with the same number of rows as p1 and p2.
pt2 <- function(manifold, p1, p2, v) {
if (manifold == 'euclidean') {
result <- v
} else if (manifold == 'sphere') {
p1 <- t(t(p1) / mag(manifold, p1))
w <- loga(manifold, p1, p2)
t <- mag(manifold, w)
e1 <- p1
e2 <- w / t
if (t == 0) {
result <- v
} else {
a <- ip(manifold, v, as.vector(e2))
invar <- v - e2 %*% a
result <- e2 %*% (a * cos(t)) - e1 %*% (a * sin(t)) + invar
}
# p1 <- t(t(p1) / mag(manifold, p1))
# w <- loga(manifold, p1, p2)
# t <- mag(manifold, w)
# if (t == 0) {
# result <- v
# } else {
# result <- v - (w + loga(manifold, p2, p1)) %*% (ip(manifold, v, as.vector(w)) / (t ^ 2))
# }
} else if (manifold == 'hyperbolic') {
p1 <- t(t(p1) / sqrt(-ip(manifold, p1, p1)))
w <- loga(manifold, p1, p2)
t <- mag(manifold, w)
e1 <- p1
e2 <- w / t
if (t == 0) {
result <- v
} else {
a <- ip(manifold, v, as.vector(e2))
invar <- v - e2 %*% a
result <- e2 %*% (a * cosh(t)) + e1 %*% (a * sinh(t)) + invar
}
# p1 <- t(t(p1) / sqrt(-ip(manifold, p1, p1)))
# w <- loga(manifold, p1, p2)
# t <- mag(manifold, w)
# if (t == 0) {
# result <- v
# } else {
# result <- v - (w + loga(manifold, p2, p1)) %*% (ip(manifold, v, as.vector(w)) / (t ^ 2))
# }
} else if (manifold == 'kendall') {
meanp1 <- colMeans(p1)
meanp2 <- colMeans(p2)
p1 <- t((t(p1) - meanp1) / mag(manifold, t(t(p1) - meanp1)))
p2 <- t((t(p2) - meanp2) / mag(manifold, t(t(p2) - meanp2)))
yi <- expo2(manifold, p1, v)
a <- ip(manifold, p1, p2)
if (abs(a) >= 1) {
result <- loga2(manifold, p2, yi)
} else {
p2 <- p2 * (a / abs(a)) # optimal alignment of p2 with p1
b <- (1 - (abs(a)) ^ 2) ^ 0.5
p2tilde <- (p2 - p1 * abs(a)) / b
p1 <- as.vector(p1)
p2tilde <- as.vector(p2tilde)
result <- v - p1 %*% t(ip(manifold, v, p1)) - p2tilde %*% t(ip(manifold, v, p2tilde)) + p1 %*% t((abs(a)) * (ip(manifold, v, p1)) - b * (ip(manifold, v, p2tilde))) + p2tilde %*% t(b * (ip(manifold, v, p1)) + (abs(a)) * (ip(manifold, v, p2tilde)))
result <- result * Conj(a / abs(a))
}
}
return(result)
}
# Function for internal use in the geo_reg function. Similar to par_trans, but vectorized and without the errors, for speed and so as to not unexpectedly stop the algorithm because of one small error that can be safely ignored without changing the result. Difference from pt1 and pt2: p2 should be a column matrix, p1 and v should be matrices of the same dimensions with the same number of rows as p2.
pt3 <- function(manifold, p1, p2, v) {
if (manifold == 'euclidean') {
result <- v
} else if (manifold == 'sphere') {
p1 <- t(t(p1) / mag(manifold, p1))
w <- loga3(manifold, p1, p2)
e1 <- p1
e2 <- t(t(w) / mag(manifold, w))
a <- ip(manifold, v, e2)
invar <- v - t(t(e2) * a)
t <- mag(manifold, w)
result <- t(t(e2) * (a * cos(t))) - t(t(e1) * (a * sin(t))) + invar
index <- which(mag(manifold, w) == 0)
result[, index] <- v[, index]
# p1 <- t(t(p1) / mag(manifold, p1))
# w <- loga3(manifold, p1, p2)
# t <- mag(manifold, w)
# result <- v - t(t(w + loga2(manifold, p2, p1)) * (ip(manifold, w, v) / (t ^ 2)))
# index <- which(t == 0)
# result[, index] <- v[, index]
} else if (manifold == 'hyperbolic') {
p1 <- t(t(p1) / sqrt(-ip(manifold, p1, p1)))
w <- loga3(manifold, p1, p2)
e1 <- p1
e2 <- t(t(w) / mag(manifold, w))
a <- ip(manifold, v, e2)
invar <- v - t(t(e2) * a)
t <- mag(manifold, w)
result <- t(t(e2) * (a * cosh(t))) + t(t(e1) * (a * sinh(t))) + invar
index <- which(mag(manifold, w) == 0)
result[, index] <- v[, index]
# p1 <- t(t(p1) / sqrt(-ip(manifold, p1, p1)))
# w <- loga3(manifold, p1, p2)
# t <- mag(manifold, w)
# result <- v - t(t(w + loga2(manifold, p2, p1)) * (ip(manifold, w, v) / (t ^ 2)))
# index <- which(t == 0)
# result[, index] <- v[, index]
} else if (manifold == 'kendall') {
meanp1 <- colMeans(p1)
meanp2 <- colMeans(p2)
p1 <- t((t(p1) - meanp1) / mag(manifold, t(t(p1) - meanp1)))
p2 <- t((t(p2) - meanp2) / mag(manifold, t(t(p2) - meanp2)))
yi <- expo(manifold, p1, v)
a <- ip(manifold, p1, as.vector(p2))
p2 <- p2 %*% (a / abs(a)) # optimal alignment of p2 with p1
b <- (1 - (abs(a)) ^ 2) ^ 0.5
p2tilde <- t(t(p2 - t(t(p1) * abs(a))) / b)
result <- v - t(t(p1) * (ip(manifold, v, p1))) - t(t(p2tilde) * (ip(manifold, v, p2tilde))) + t(t(p1) * ((abs(a)) * (ip(manifold, v, p1)) - b * (ip(manifold, v, p2tilde)))) + t(t(p2tilde) * (b * (ip(manifold, v, p1)) + (abs(a)) * (ip(manifold, v, p2tilde))))
result <- t(t(result) * (Conj(a / abs(a))))
index <- which(abs(a) >= 1)
result[, index] <- loga(manifold, p2, yi)[, index]
}
return(result)
}
# Loss function for M-type estimators. t should be a vector of real numbers.
rho <- function(t, estimator, cutoff = NULL) {
if (estimator == 'l2') {
result <- 0.5 * t ^ 2
} else if (estimator == 'l1') {
result <- abs(t)
} else if (estimator == 'huber') {
result <- 0.5 * t ^ 2
index <- which(abs(t) >= cutoff)
result[index] <- cutoff * abs(t[index]) - 0.5 * cutoff ^ 2
} else if (estimator == 'tukey') {
result <- ((cutoff ^ 2) / 6) * (1 - (1 - (t / cutoff) ^ 2) ^ 3)
result[which(abs(t) >= cutoff)] <- (cutoff ^ 2) / 6
}
return(result)
}
# Derivative of the loss function for M-type estimators. t should be a vector of real numbers.
rho_prime <- function(t, estimator, cutoff = NULL) {
if (estimator == 'l2') {
result <- t
} else if (estimator == 'l1') {
result <- sign(t)
} else if (estimator == 'huber') {
result <- t
index <- which(abs(t) >= cutoff)
result[index] <- cutoff * sign(t[index])
} else if (estimator == 'tukey') {
result <- t * ((1 - (t / cutoff) ^ 2) ^ 2) * sign(t)
result[which(abs(t) >= cutoff)] <- 0
}
return(result)
}
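# Illustrative comparison of the loss functions (commented out; t_demo and the cutoff of 1 are arbitrary choices for demonstration only): the huber loss is quadratic below the cutoff and linear above it, and rho_prime is its derivative.
# t_demo <- c(0.1, 0.5, 2, 5)
# rho(t_demo, 'l2') # 0.005 0.125 2.000 12.500
# rho(t_demo, 'huber', cutoff = 1) # 0.005 0.125 1.500 4.500
# rho_prime(t_demo, 'huber', cutoff = 1) # 0.1 0.5 1.0 1.0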
# Calculates the residual vector for each data point. p, V, x, and y should all be matrices of appropriate dimensions.
eps <- function(manifold, p, V, x, y) {
shifts <- V %*% t(x)
predictions <- expo2(manifold, p, shifts)
result <- loga(manifold, predictions, y)
if (manifold == 'sphere') {
result[, which((mag(manifold, y - t(t(predictions) * max(min(abs(ip(manifold, predictions, y)), 1), -1))) == 0) & (mag(manifold, predictions + y) < 0.000001))] <- 0
} # we are ignoring cases where p2 is approximately -p1 in order to avoid an error
return(result)
}
# Move tangent vectors v2 at expo(p, v1) to tangent vectors at p using Jacobi fields and adjoint operators; used in gradient calculations. p should be a column matrix, v1 and v2 should be matrices of the same dimension. The columns of v1 should be tangent to p, and the columns of v2 should be tangent to exp_map(p, 'corresponding column in v1').
jacobi <- function(manifold, p, v1, v2) {
result <- vector('list')
L <- mag(manifold, v1)
if (manifold == 'euclidean') {
result$p <- v2
result$V <- v2
} else if (manifold == 'sphere') {
v2_0 <- pt3(manifold, expo2(manifold, p, v1), p, v2)
v2_tan <- t((t(v1) / L) * (ip(manifold, v2_0, t(t(v1) / L))))
v2_orth <- v2_0 - v2_tan
result$p <- t(t(v2_orth) * cos(L)) + v2_tan
result$V <- t(t(v2_orth) * ((sin(L)) / L)) + v2_tan
index <- which(L == 0)
result$p[, index] <- v2[, index]
result$V[, index] <- v2[, index]
} else if (manifold == 'hyperbolic') {
v2_0 <- pt3(manifold, expo2(manifold, p, v1), p, v2)
v2_tan <- t((t(v1) / L) * (ip(manifold, v2_0, t(t(v1) / L))))
v2_orth <- v2_0 - v2_tan
result$p <- t(t(v2_orth) * cosh(L)) + v2_tan
result$V <- t(t(v2_orth) * ((sinh(L)) / L)) + v2_tan
index <- which(L == 0)
result$p[, index] <- v2[, index]
result$V[, index] <- v2[, index]
} else if (manifold == 'kendall') {
j <- (0 + 1i) * v1
v2_0 <- pt3(manifold, expo2(manifold, p, v1), p, v2)
w_0 <- t((t(j) / L) * (Re(ip(manifold, v2_0, t(t(j) / L)))))
u_0 <- v2_0 - w_0
w_tan <- t((t(v1) / L) * (Re(ip(manifold, w_0, t(t(v1) / L)))))
w_orth <- w_0 - w_tan
u_tan <- t((t(v1) / L) * (Re(ip(manifold, u_0, t(t(v1) / L)))))
u_orth <- u_0 - u_tan
result$p <- t(t(u_orth) * cos(L)) + t(t(w_orth) * cos(2 * L)) + u_tan + w_tan
result$V <- t(t(u_orth) * ((sin(L)) / L)) + t(t(w_orth) * ((sin(2 * L)) / (2 * L))) + u_tan + w_tan
index <- which(L == 0)
result$p[, index] <- v2[, index]
result$V[, index] <- v2[, index]
}
return(result)
}
# Calculates the gradient of the loss function at a given p, V. p, V, x, and y should all be matrices of appropriate dimensions. resids is redundant: it is simply eps(manifold, p, V, x, y). However, passing it as an argument speeds up the calculation.
grad <- function(manifold, p, V, x, y, resids, estimator, cutoff = NULL) {
k <- dim(x)[2]
result <- vector("list")
mags <- mag(manifold, resids)
shifts <- V %*% t(x)
multiplier <- rho_prime(mags, estimator, cutoff)
unit_resids <- t(t(resids) / mags)
unit_resids[, which(mags == 0)] <- 0
jf <- jacobi(manifold, p, shifts, unit_resids)
result$p <- t(t(jf$p) * multiplier)
result$V <- aperm(replicate(k, jf$V), c(1, 3, 2)) * aperm(replicate(dim(p)[1], x * multiplier), c(3, 2, 1))
index <- which(mags <= 0.000001) # to avoid division by a small number
result$p[, index] <- 0
result$V[, , index] <- 0
result$p <- as.matrix(-rowSums(result$p))
result$V <- -rowSums(result$V, dims = 2)
return(result)
}
#' Manifold check and projection
#'
#' Checks whether each data point in \eqn{y} is on the given manifold, and if
#' not, provides a modified version of \eqn{y} where each column has been
#' projected onto the manifold.
#'
#' @param manifold Type of manifold (\code{'euclidean'}, \code{'sphere'},
#' \code{'hyperbolic'}, or \code{'kendall'}).
#' @param y A vector, matrix, or data frame whose columns should represent
#' points on the manifold.
#' @return A named list containing \item{on}{a logical vector describing whether
#' or not each column of \code{y} is on the manifold.} \item{data}{a matrix of
#' data frame of the same dimensions as \code{y}; each column of \code{y} has
#' been projected onto the manifold.}
#' @author Ha-Young Shin
#' @examples
#' y1 <- matrix(rnorm(10), ncol = 2)
#' y1 <- y1[, 1] + (1i) * y1[, 2]
#' y2 <- matrix(rnorm(10), ncol = 2)
#' y2 <- y2[, 1] + (1i) * y2[, 2]
#' y3 <- matrix(rnorm(10), ncol = 2)
#' y3 <- y3[, 1] + (1i) * y3[, 2]
#' y3 <- (y3 - mean(y3)) / norm(y3 - mean(y3), type = '2') # project onto preshape space
#' y <- matrix(c(y1, y2, y3), ncol = 3)
#' onmanifold('kendall', y)
#'
#' @export
onmanifold <- function(manifold, y) {
y <- as.matrix(y)
if (any(is.nan(y))) {
stop('y should not contain NaN values')
}
sample_size <- dim(y)[2]
result <- vector("list")
if (manifold == 'euclidean') {
result$on <- !logical(sample_size)
result$data <- y
} else if (manifold == 'sphere') {
ons <- !logical(sample_size)
mags <- mag(manifold, y)
ons[which(abs(mags - 1) > 1e-6)] <- FALSE
y <- t(t(y) / mags)
result$on <- ons
result$data <- y
} else if (manifold == 'hyperbolic') {
ons <- !logical(sample_size)
mags <- sqrt(-ip(manifold, y, y))
ons[which((abs(mags - 1) > 1e-6) | (y[1, ] < 0))] <- FALSE
y <- t(t(y) / mags)
result$on <- ons
result$data <- y
} else if (manifold == 'kendall') {
ons <- !logical(sample_size)
mags <- mag(manifold, y)
means <- colMeans(y)
ons[which((abs(mags - 1) > 1e-6) | (abs(means * Conj(means)) > 1e-6))] <- FALSE
y <- t((t(y) - means) / mag(manifold, t(t(y) - means)))
result$on <- ons
result$data <- y
} else {
stop('the manifold must be one of euclidean, sphere, hyperbolic, or kendall')
}
return(result)
}
#' Gradient descent for (robust) geodesic regression
#'
#' Finds \eqn{\mathrm{argmin}_{(p,V)\in M\times (T_pM) ^ n}\sum_{i=1} ^ {N}
#' \rho(d(\mathrm{Exp}(p,Vx_i),y_i))} through a gradient descent algorithm.
#'
#' Each column of \code{x} should be centered to have an average of 0 for the
#' quickest and most accurate results. If all of the elements of a column of
#' \code{x} are equal, the resulting vector will consist of \code{NA}s. In the
#' case of the \code{'sphere'}, an error will be raised if all points are on a
#' pair of antipodes.
#'
#' @param manifold Type of manifold (\code{'euclidean'}, \code{'sphere'},
#' \code{'hyperbolic'}, or \code{'kendall'}).
#' @param x A vector, matrix, or data frame of independent variables; for
#' matrices and data frames, the rows and columns represent the subjects and
#' independent variables, respectively.
#' @param y A matrix or data frame whose columns represent points on the
#' manifold.
#' @param estimator M-type estimator (\code{'l2'}, \code{'l1'}, \code{'huber'},
#' or \code{'tukey'}).
#' @param c Multiplier of \eqn{\sigma}, the square root of the variance, used in
#' the cutoff parameter for the \code{'huber'} and \code{'tukey'} estimators;
#' should be \code{NULL} for the \code{'l2'} or \code{'l1'} estimators.
#' @param p_tol Termination condition for the distance between consecutive
#' updates of \code{p}.
#' @param V_tol Termination condition for the distance between columns of
#' consecutive updates of \code{V}, parallel transported to be in the same
#' tangent space. Can be a vector of positive real numbers for each
#' independent variable or a single positive number.
#' @param max_iter Maximum number of gradient descent steps before ending the
#' algorithm.
#' @return A named list containing \item{p}{a vector representing the estimate
#' of the initial point on the manifold} \item{V}{a matrix representing the
#' estimate of the initial velocities for each independent variable; the
#' columns represent the independent variables.} \item{iteration}{number of
#' gradient descent steps taken.}
#' @references Fletcher, P. T. (2013). Geodesic regression and the theory of
#' least squares on Riemannian manifolds. International Journal of Computer
#' Vision, 105, 171-185.
#'
#' Kim, H. J., Adluru, N., Collins, M. D., Chung, M. K., Bendin, B. B.,
#' Johnson, S. C., Davidson, R. J. and Singh, V. (2014). Multivariate general
#' linear models (MGLM) on Riemannian manifolds with applications to
#' statistical analysis of diffusion weighted images. 2014 IEEE Conference on
#' Computer Vision and Pattern Recognition, 2705-2712.
#'
#' Shin, H.-Y. and Oh H.-S. (2020). Robust Geodesic Regression.
#' <arXiv:2007.04518>
#' @author Ha-Young Shin
#' @seealso \code{\link{intrinsic_location}}.
#' @examples
#' # an example of multiple regression with two independent variables, with 64
#' # data points
#'
#' x <- matrix(runif(2 * 64), ncol = 2)
#' x <- t(t(x) - colMeans(x))
#' y <- matrix(0L, nrow = 4, ncol = 64)
#' for (i in 1:64) {
#' y[, i] <- exp_map('sphere', c(1, 0, 0, 0), c(0, runif(1), runif(1),
#' runif(1)))
#' }
#' geo_reg('sphere', x, y, 'tukey', c = are_nr('tukey', 2, 6))
#'
#' @export
#' @importFrom stats median
geo_reg <- function(manifold, x, y, estimator, c = NULL, p_tol = 1e-5, V_tol = 1e-5, max_iter = 100000) {
if (((estimator == 'huber') | (estimator == 'tukey')) & is.null(c)) {
stop('a c value must be provided if the chosen m-estimator is huber or tukey')
}
if (!is.null(c)) {
if ((estimator == 'l2') | (estimator == 'l1')) {
warning('l2 and l1 do not use a c value')
}
if (c <= 0) {
stop('c must be positive')
}
}
if (any(is.nan(x))) {
stop('x should not contain NaN values')
}
ondata <- onmanifold(manifold, y)
if (any(!(ondata$on))) {
warning('all data points in y must lie on the manifold')
}
y <- ondata$data
embedded <- dim(y)[1]
sample_size <- dim(y)[2]
x <- as.matrix(x)
k <- dim(x)[2]
allequal <- c()
for (var in 1:k) { # Deals with case where all of the data for one of the independent variables are equal
if (length(unique(x[, var])) == 1) {
x[, var] <- numeric(sample_size)
allequal <- c(allequal, var)
}
}
if (manifold == 'sphere') {
if (sample_size > 1) {
antipode <- sum(mag(manifold, y[, 1] + y) == 0)
same <- sum(mag(manifold, y[, 1] - y) == 0)
if ((antipode + same == sample_size) & (antipode > 0)) {
stop('there is no unique solution as all the data points are on antipodes of the sphere, with at least one data point on either antipode')
}
}
}
if (any(abs(colMeans(x)) > 0.000001)) {
warning('the mean of the data for at least one of your independent variables is not zero; the x data should be centered for best/quickest results')
}
if (length(allequal) == k) {
current_p <- as.matrix(y[, 1])
} else {
current_p <- geo_reg(manifold, t(t(numeric(sample_size))), y, estimator, c, p_tol, V_tol, max_iter)$p
}
current_V <- matrix(0L, nrow = embedded, ncol = k)
old_p <- current_p
old_V <- current_V
count <- 0
alt_count <- 0
cutoff <- NULL
if ((estimator == 'huber') | (estimator == 'tukey')) {
if (manifold == 'euclidean') {
dimension <- embedded
} else if ((manifold == 'sphere') | (manifold == 'hyperbolic')) {
dimension <- embedded - 1
} else if (manifold == 'kendall') {
dimension <- 2 * embedded - 4
}
xi <- (2 * Pinv(dimension / 2, 0.5)) ^ 0.5
current_shifts <- current_V %*% t(x)
deviations <- dist(manifold, expo2(manifold, current_p, current_shifts), y)
mad <- median(deviations)
sigma <- mad / xi
cutoff <- c * sigma
}
current_resids <- eps(manifold, current_p, current_V, x, y)
current_loss <- sum(rho(mag(manifold, current_resids), estimator, cutoff))
step <- grad(manifold, current_p, current_V, x, y, current_resids, estimator, cutoff)
V_diffs <- mag(manifold, (pt2(manifold, old_p, current_p, old_V) - current_V))
lambda <- 0.1
while (((count == 0) | ((count < max_iter) & ((dist(manifold, old_p, current_p) > p_tol) | (any(V_diffs > V_tol))))) & (alt_count < 100000)) {
new_p <- tryCatch(expo(manifold, current_p, -lambda * step$p), warning = function(w) 'warning')
while ((new_p[1] == 'warning') | (mag(manifold, -lambda * step$p) > 10)) {
lambda <- lambda / 2
new_p <- tryCatch(expo(manifold, current_p, -lambda * step$p), warning = function(w) 'warning')
}
new_V <- pt2(manifold, current_p, new_p, current_V - lambda * step$V)
new_resids <- tryCatch(eps(manifold, new_p, new_V, x, y), warning = function(w) 'warning')
while ((new_resids[1] == 'warning') | any(Re(ip(manifold, new_V, as.vector(new_p))) > 0.000001)) {
lambda <- lambda / 2
new_p <- expo(manifold, current_p, -lambda * step$p)
new_V <- pt2(manifold, current_p, new_p, current_V - lambda * step$V)
new_resids <- tryCatch(eps(manifold, new_p, new_V, x, y), warning = function(w) 'warning')
}
new_loss <- sum(rho(mag(manifold, new_resids), estimator, cutoff))
if (current_loss >= new_loss) {
alt_count <- 0
old_p <- current_p
old_V <- current_V
current_p <- new_p
current_V <- new_V
if ((estimator == 'huber') | (estimator == 'tukey')) {
current_shifts <- current_V %*% t(x)
deviations <- dist(manifold, expo2(manifold, current_p, current_shifts), y)
mad <- median(deviations)
sigma <- mad / xi
cutoff <- c * sigma
}
current_resids <- new_resids
current_loss <- sum(rho(mag(manifold, current_resids), estimator, cutoff))
step <- grad(manifold, current_p, current_V, x, y, current_resids, estimator, cutoff)
V_diffs <- mag(manifold, (pt2(manifold, old_p, current_p, old_V) - current_V))
if ((manifold == 'euclidean') | (manifold == 'sphere') | (manifold == 'kendall')) {
lambda <- 8 * lambda
#} else if (manifold == 'hyperbolic') {
# lambda <- 1.1 * lambda
}
count <- count + 1
} else {
lambda <- lambda / 2
alt_count <- alt_count + 1
}
}
if (count == max_iter) {
warning('issues with convergence; make sure your independent variables are centered, and try adjusting max_iter, p_tol, or V_tol')
}
result <- vector("list")
result$p <- current_p
current_V[, allequal] <- NA
result$V <- current_V
result$iteration <- count
return(result)
}
#' Gradient descent for location based on M-type estimators
#'
#' Finds \eqn{\mathrm{argmin}_{p\in M}\sum_{i=1} ^ {N} \rho(d(p,y_i))} through a
#' gradient descent algorithm.
#'
#' In the case of the \code{'sphere'}, an error will be raised if all points are
#' on a pair of antipodes.
#'
#' @param manifold Type of manifold (\code{'euclidean'}, \code{'sphere'},
#' \code{'hyperbolic'}, or \code{'kendall'}).
#' @param y A matrix or data frame whose columns represent points on the
#' manifold.
#' @param estimator M-type estimator (\code{'l2'}, \code{'l1'}, \code{'huber'},
#' or \code{'tukey'}).
#' @param c Multiplier of \eqn{\sigma}, the square root of the variance, used in
#' the cutoff parameter for the \code{'huber'} and \code{'tukey'} estimators;
#' should be \code{NULL} for the \code{'l2'} or \code{'l1'} estimators.
#' @param p_tol Termination condition for the distance between consecutive
#' updates of \code{p}.
#' @param V_tol Termination condition for the distance between columns of
#' consecutive updates of \code{V}, parallel transported to be in the same
#' tangent space. Can be a vector of positive real numbers for each
#' independent variable or a single positive number.
#' @param max_iter Maximum number of gradient descent steps before ending the
#' algorithm.
#' @return A vector representing the location estimate
#' @references Fletcher, P. T. (2013). Geodesic regression and the theory of
#' least squares on Riemannian manifolds. International Journal of Computer
#' Vision, 105, 171-185.
#'
#' Kim, H. J., Adluru, N., Collins, M. D., Chung, M. K., Bendin, B. B.,
#' Johnson, S. C., Davidson, R. J. and Singh, V. (2014). Multivariate general
#' linear models (MGLM) on Riemannian manifolds with applications to
#' statistical analysis of diffusion weighted images. 2014 IEEE Conference on
#' Computer Vision and Pattern Recognition, 2705-2712.
#'
#' Shin, H.-Y. and Oh H.-S. (2020). Robust Geodesic Regression.
#' <arXiv:2007.04518>
#' @author Ha-Young Shin
#' @seealso \code{\link{geo_reg}}, \code{\link[RiemBase]{rbase.mean}},
#' \code{\link[RiemBase]{rbase.median}}.
#' @examples
#' y <- matrix(runif(100, 1000, 2000), nrow = 10)
#' intrinsic_location('euclidean', y, 'l2')
#'
#' @export
intrinsic_location <- function(manifold, y, estimator, c = NULL, p_tol = 1e-5, V_tol = 1e-5, max_iter = 100000) {
sample_size <- dim(y)[2]
return(geo_reg(manifold, numeric(sample_size), y, estimator, c, p_tol, V_tol, max_iter)$p)
}
|
\name{getClassifiers}
\alias{getClassifiers}
\title{List of available Weka classifiers}
\usage{
getClassifiers(regex = NULL)
}
\arguments{
\item{regex}{A list of regular expressions. Default value
is NULL, which makes the function return the full list of
available classifiers.}
}
\value{
\code{character} An array of Weka classes matching
\emph{regex}.
}
\description{
Returns a list of Weka classes that match the given
regular expression or the full list if regex is not
present.
}
\examples{
# Full list
getClassifiers()
# All trees and bayes classifiers
getClassifiers(c("trees","bayes"))
# Getting a specific classifier
getClassifiers("J48$")
}
|
/data/cibm.utils/man/getClassifiers.Rd
|
no_license
|
vfpimenta/corruption-profiler
|
R
| false | false | 662 |
rd
|
\name{getClassifiers}
\alias{getClassifiers}
\title{List of available Weka classifiers}
\usage{
getClassifiers(regex = NULL)
}
\arguments{
\item{regex}{A list of regular expressions. Default value
is NULL, which makes the function return the full list of
available classifiers.}
}
\value{
\code{character} An array of Weka classes matching
\emph{regex}.
}
\description{
Returns a list of Weka classes that match the given
regular expression or the full list if regex is not
present.
}
\examples{
# Full list
getClassifiers()
# All trees and bayes classifiers
getClassifiers(c("trees","bayes"))
# Getting a specific classifier
getClassifiers("J48$")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slingshot_lineages.R
\name{.get_lineages}
\alias{.get_lineages}
\title{Estimate the lineages (via Slingshot)}
\usage{
.get_lineages(dat, cluster_labels, starting_cluster,
cluster_group_list = NA, use_initialization = F)
}
\arguments{
\item{dat}{a \code{n} by \code{d} matrix}
\item{cluster_labels}{vector of cluster labels, where
the cluster labels are consecutive positive integers from 1 to
\code{max(cluster_labels)}. Cannot include \code{NA}}
\item{starting_cluster}{the "origin" cluster that all the lineages will start
from}
\item{cluster_group_list}{list denoting the hierarchy and order of the clusters}
\item{use_initialization}{use principal curves on each cluster to prune the
list of possible neighboring clusters}
}
\value{
A list of cluster indices, with \code{starting_cluster} starting as
its first element
}
\description{
Note: I removed the functionality to explicitly label an ending cluster
(might put back in later?).
}
\details{
Note: I removed the Omega parameter, which, to my understanding,
controls if a lineage (i.e. tree) is split into two separate lineages.
Code adapted from https://github.com/kstreet13/slingshot.
}
|
/eSVD/man/dot-get_lineages.Rd
|
permissive
|
zji90/esvd
|
R
| false | true | 1,231 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slingshot_lineages.R
\name{.get_lineages}
\alias{.get_lineages}
\title{Estimate the lineages (via Slingshot)}
\usage{
.get_lineages(dat, cluster_labels, starting_cluster,
cluster_group_list = NA, use_initialization = F)
}
\arguments{
\item{dat}{a \code{n} by \code{d} matrix}
\item{cluster_labels}{vector of cluster labels, where
the cluster labels are consecutive positive integers from 1 to
\code{max(cluster_labels)}. Cannot include \code{NA}}
\item{starting_cluster}{the "origin" cluster that all the lineages will start
from}
\item{cluster_group_list}{list denoting the hierarchy and order of the clusters}
\item{use_initialization}{use principal curves on each cluster to prune the
list of possible neighboring clusters}
}
\value{
A list of cluster indices, with \code{starting_cluster} starting as
its first element
}
\description{
Note: I removed the functionality to explicitly label an ending cluster
(might put back in later?).
}
\details{
Note: I removed the Omega parameter, which, to my understanding,
controls if a lineage (i.e. tree) is split into two separate lineages.
Code adapted from https://github.com/kstreet13/slingshot.
}
|
library(tidyverse)
library(Hmisc)
library(corrplot)
library(car)
library(caret)
library(klaR)
library(MASS)
library(psd)
rm(list = ls())
##getting labels
##########################################################################################
act_labels = read_delim("activity_labels.txt", " ", col_names=F, trim_ws = T)
act_labels = act_labels %>% dplyr::select(X1,X2)
act_labels
labels <- read_delim("RawData/Train/labels_train.txt", " ", col_names = F)
colnames(labels) <- c('trial', 'userid', 'activity', 'start', 'end')
labels
train_labels = labels
##########################################################################################
##function to calculate entropy
##########################################################################################
entropy <- function(x, nbreaks = nclass.Sturges(x)) {
r = range(x)
x_binned = findInterval(x, seq(r[1], r[2], len= nbreaks))
h = tabulate(x_binned, nbins = nbreaks) # fast histogram
p = h/sum(h)
-sum(p[p>0] * log(p[p>0]))
}
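##quick illustration of the entropy helper (commented out; the samples are arbitrary and the exact values depend on the random draw):
##a spread-out sample fills many histogram bins and gives a higher value than a sample concentrated in one bin
# entropy(runif(10000)) # relatively high
# entropy(c(rnorm(9990, sd = 0.01), rnorm(10, mean = 5))) # much lower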
##########################################################################################
##DEFINING THE FUNCTION to extract features
##########################################################################################
extractTimeDomainFeatures <-
function(filename, labels = train_labels) {
# extract user and experimental run ID's from file name
username = gsub(".+user(\\d+).+", "\\1", filename)
expname = gsub(".+exp(\\d+).+", "\\1", filename)
# import the data from the file
user01 <- read_delim(filename, " ", col_names = F, progress = TRUE, col_types = "ddd")
# select this users activity labels from the `labels` frame for this experimental run
user_labels <-
labels %>%
dplyr::filter(userid==as.numeric(username) & trial == as.numeric(expname)) %>% # select rows pertaining to current signals
mutate(segment = row_number()) %>% # segment identifies different WALKING_UPSTAIRS etc
gather(start_end, vec, -trial, -userid, -activity, -segment) %>% # stack start and end on top of each other
arrange(vec) %>% # arrange rows in natural order
mutate(activity = ifelse(start_end == 'end', NA, activity), activity_id = row_number()) # remove activity label `end` time rows
# add activity labels to each sample
user <-
user01 %>%
mutate(sample = row_number()-1) %>%
mutate(activity_id = findInterval(sample, user_labels$vec)) %>%
left_join(user_labels, by = "activity_id")
# split in epochs of 128 samples and compute features per epoch
usertimedom <-
user %>%
mutate(epoch = sample %/% 128) %>% # epoch = 2.56 sec
group_by(epoch) %>%
summarise(
user_id = username, # added to identify user in data frame rows
exp_id = expname, # added to identify experimental run in data frame rows
activity = names(which.max(table(c("-", activity)))),
sample = sample[1],
m1 = mean(X1), ###Means
m2 = mean(X2),
m3 = mean(X3),
sd1 = sd(X1), ##Standard deviations
sd2 = sd(X2),
sd3 = sd(X3),
q1_25 = quantile(X1, .25), ##Quartiles
skew1 = e1071::skewness(X1), ##Skewness
skew2 = e1071::skewness(X2),
skew3 = e1071::skewness(X3),
AR1.2 = cor(X1, lag(X1, n = 3), use = "pairwise"), ##Autocorrelations
AR2.2 = cor(X2, lag(X2, n = 3), use = "pairwise"),
AR3.2 = cor(X3, lag(X3, n = 3), use = "pairwise"),
AR12.1 = cor(X1, lag(X2, n= 2),use = "pairwise"),
AR23.1 = cor(X2, lag(X3, n=2),use = "pairwise"),
AR13.1 = cor(X1, lag(X3,n=2), use = "pairwise"),
max.1 = max(abs(X1)), ##Max and min values
max.2 = max(abs(X2)),
max.3 = max(abs(X3)),
min.1 = min(abs(X1)),
min.2 = min(abs(X2)),
min.3 = min(abs(X3)),
bim.1 = bimodality_coefficient(X1), ##Bimodality coefficients
bim.2 = bimodality_coefficient(X2),
bim.3 = bimodality_coefficient(X3),
entropy.1 = entropy(X1), #Entropy
entropy.2 = entropy(X2),
entropy.3 = entropy(X3),
reg1 = ar(X1, aic=FALSE, order.max=1)$ar, ##Auto regression
reg2 = ar(X2, aic=FALSE, order.max=1)$ar,
reg3 = ar(X3, aic=FALSE, order.max=1)$ar,
fft.1 = mean(abs(fft(X1))), ##Means of the fast Fourier transform (FFT), Fourier analysis converts a signal
fft.2 = mean(abs(fft(X2))), #from its original domain (often time or space) to a representation in the
fft.3 = mean(abs(fft(X3))), #frequency domain.
psd.1 = mean(spectrum(X1, plot = F)$spec), #Mean of the power spectral density (A Power Spectral Density
psd.2 = mean(spectrum(X2, plot = F)$spec), #(PSD) is the measure of a signal's power content versus frequency)
psd.3 = mean(spectrum(X3, plot = F)$spec),
n=n()
)
usertimedom
}
##########################################################################################
###running the function on acceleration and gyro and combining them
##########################################################################################
filenames_acc <- dir("RawData/Train/", "^acc", full.names = TRUE) # for demo only first 5 files
myData_Acc = filenames_acc %>%
map_dfr(extractTimeDomainFeatures) # map_dfr runs `extractTimeDomainFeatures` on all elements in filenames and binds results row wise
filenames_gyro <- dir("RawData/Train/", "^gyr", full.names = TRUE)
myData_Gyro = filenames_gyro %>%
map_dfr(extractTimeDomainFeatures) # map_dfr runs `extractTimeDomainFeatures` on all elements in filenames and binds results row wise
myData_Full <- left_join(myData_Acc, myData_Gyro, by = c("epoch", "user_id", "exp_id")) %>%
dplyr::select(-activity.y, -sample.y, -n.y)
myData_Full$activity.x <- as.integer(myData_Full$activity.x)
myData_Full$activity.x <- plyr::mapvalues(myData_Full$activity.x, 1:12, act_labels$X2)
data_to_work_with <- myData_Full %>% dplyr::filter(n.x >30) %>% dplyr::select( -c(1,3, sample.x, n.x)) %>% drop_na() %>% mutate(activity.x = factor(activity.x))
##########################################################################################
##make a function that splits the data into train and test sets based on user_id and
#returns the cross-validated classification accuracy of the given model
##########################################################################################
cv_per_user <- function(dataset, model){
ids <- group_indices(dataset, user_id)
data_to_work_with_cv <- cbind(ids, dataset)
data_to_work_with_cv <- as_tibble(data_to_work_with_cv)
means <- c()
for (i in 1:10) {
train_index <- sample(x = ids, size = 12, replace = F)
train_set <- data_to_work_with_cv %>% dplyr::filter(ids %in% train_index)
'%ni%' <- Negate('%in%')
test_set <- data_to_work_with_cv %>% dplyr::filter(ids %ni% train_index)
lda.pred=predict (model, test_set[,-c(1,2)])
lda.class=lda.pred$class
means[i] <- mean(lda.class == test_set$activity.x)
}
mean(means)
}
##########################################################################################
##fit a 10/15/20 variable stepwise qda,
##first starting variable
qda.fit.15.1 <- train(activity.x ~ ., data = dplyr::select(data_to_work_with, -user_id),
method = "stepQDA",
trControl = trainControl(method = "cv"),
tuneGrid = expand.grid(maxvar = 20, direction = "forward"))
qda.fit.15.1 <-qda(activity.x ~ max.1.x + m2.x + AR2.2.x + AR12.1.x + q1_25.y + q1_25.x + m3.y + fft.3.x +
fft.2.x + AR12.1.y + reg1.x + reg1.y + entropy.2.x + reg3.x + min.2.x,
data_to_work_with)
##model 1
cv_per_user(data_to_work_with,qda.fit.15.1)
##Our predictions weren't very good so we tried a few things to improve it
#The first was removing values high in VIF (this was done in two ways)
#The second was removing highly correlated variables
#The third was removing extreme outliers
#The fourth was centering and scaling the data
##########################################################################################
##REMOVING VALUES HIGH IN VIF
##There are two ways you can do it:
##1 - Simply remove variables with highest VIF
##2 - Remove variables with highest VIF but also lowest effect on the outcome variables, in hopes that
##the total collinearity decreases but you still keep the best predicting variables.
##Way 1
vif_model <- lm(as.numeric(activity.x) ~., data_to_work_with[,-1])
vif(vif_model)
vif_model <- lm(as.numeric(activity.x) ~., dplyr::select(data_to_work_with,-user_id, -m1.x,
-sd1.x, -max.1.x, -sd1.y, -fft.2.y,
-fft.1.y, -q1_25.x, -max.3.y,
-fft.2.x, -fft.1.x, -fft.3.x,
-q1_25.y, -min.2.x, -fft.3.y,
-psd.3.x, -reg3.x, -psd.2.x, -sd3.y, -max.2.y,
-sd3.x, -psd.2.y))
vif(vif_model)
data_no_cor_1 <- dplyr::select(data_to_work_with, -m1.x,
-sd1.x, -max.1.x, -sd1.y, -fft.2.y,
-fft.1.y, -q1_25.x, -max.3.y,
-fft.2.x, -fft.1.x, -fft.3.x,
-q1_25.y, -min.2.x, -fft.3.y,
-psd.3.x, -reg3.x, -psd.2.x, -sd3.y, -max.2.y,
-sd3.x, -psd.2.y)
qda.fit.15_no_cor_1 <- train(activity.x ~ ., data = data_no_cor_1[,-1],
method = "stepQDA",
trControl = trainControl(method = "cv"),
tuneGrid = expand.grid(maxvar = 15, direction = "forward"))
qda.fit.15_no_cor_1 <- qda(activity.x ~ min.1.x + m2.x + AR12.1.x + AR2.2.x + psd.1.x + m3.y + entropy.1.x +
AR1.2.y + AR1.2.x +AR2.2.y + AR12.1.y + entropy.3.y + m3.x + skew2.x + AR13.1.y,
data = data_no_cor_1)
cv_per_user(data_no_cor_1, qda.fit.15_no_cor_1)
#Way 2 ##LEFT TO FIX THIS VIF MODEL, MAYBE MAKE A FUNCTION FOR IT BASED ON THE DISTANCE
##Now try removing correlation by taking the effect of the variables into account
##first we can examine effects of individual predictors
#make a loop which fits single variables one by one
single_effects <- c()
for (i in 3:length(colnames(data_to_work_with))) {
data = data_to_work_with[,c(2,i)]
qda.fit <- qda(activity.x ~., data = data)
acc <- cv_per_user(data_to_work_with, qda.fit)
single_effects[i] <- paste(colnames(data_to_work_with[,i]), round(acc,3))
}
single_effects
# NA NA "m1.x 0.39"
# "m2.x 0.436" "m3.x 0.382" "sd1.x 0.411"
# "sd2.x 0.335" "sd3.x 0.328" "q1_25.x 0.55"
# "skew1.x 0.28" "skew2.x 0.271" "skew3.x 0.203"
# "AR1.2.x 0.362" "AR2.2.x 0.293" "AR3.2.x 0.286"
# "AR12.1.x 0.36" "AR23.1.x 0.247" "AR13.1.x 0.274"
# "max.1.x 0.593" "max.2.x 0.357" "max.3.x 0.328"
# "min.1.x 0.51" "min.2.x 0.421" "min.3.x 0.36"
# "bim.1.x 0.315" "bim.2.x 0.219" "bim.3.x 0.214"
# "entropy.1.x 0.224" "entropy.2.x 0.218" "entropy.3.x 0.237"
# "reg1.x 0.371" "reg2.x 0.338" "reg3.x 0.358"
# "fft.1.x 0.562" "fft.2.x 0.411" "fft.3.x 0.374"
# "psd.1.x 0.406" "psd.2.x 0.339" "psd.3.x 0.311"
# "m1.y 0.293" "m2.y 0.294" "m3.y 0.241"
# "sd1.y 0.338" "sd2.y 0.31" "sd3.y 0.324"
# "q1_25.y 0.363" "skew1.y 0.23" "skew2.y 0.219"
# "skew3.y 0.191" "AR1.2.y 0.233" "AR2.2.y 0.238"
# "AR3.2.y 0.245" "AR12.1.y 0.245" "AR23.1.y 0.278"
# "AR13.1.y 0.209" "max.1.y 0.35" "max.2.y 0.314"
# "max.3.y 0.318" "min.1.y 0.241" "min.2.y 0.249"
# "min.3.y 0.249" "bim.1.y 0.219" "bim.2.y 0.197"
# "bim.3.y 0.203" "entropy.1.y 0.217" "entropy.2.y 0.205"
# "entropy.3.y 0.215" "reg1.y 0.264" "reg2.y 0.249"
# "reg3.y 0.313" "fft.1.y 0.361" "fft.2.y 0.333"
# "fft.3.y 0.346" "psd.1.y 0.278" "psd.2.y 0.255"
# "psd.3.y 0.303"
##examing vif once again
vif_model <- lm(as.numeric(activity.x) ~., data_to_work_with[,-1])
vif(vif_model)
vif_model <- lm(as.numeric(activity.x) ~., dplyr::select(data_to_work_with,-user_id, -m1.x, -fft.2.y,
-sd1.y, -fft.3.y,-max.1.y, -fft.3.x, -psd.2.y,
-max.3.y, -m1.y, -psd.2.x, -sd1.x, -max.1.x,
-fft.2.x, -fft.1.y, -AR3.2.x, -min.3.x,
-fft.1.x, -min.1.x, -sd3.y, -sd2.y, -max.2.x))
vif(vif_model)
data_no_cor_2 <- dplyr::select(data_to_work_with, -m1.x, -fft.2.y,
-sd1.y, -fft.3.y,-max.1.y, -fft.3.x, -psd.2.y,
-max.3.y, -m1.y, -psd.2.x, -sd1.x, -max.1.x,
-fft.2.x, -fft.1.y, -AR3.2.x, -min.3.x,
-fft.1.x, -min.1.x, -sd3.y, -sd2.y, -max.2.x)
qda.fit.15_no_cor_2 <- train(activity.x ~ ., data = data_no_cor_2[,-1],
method = "stepQDA",
trControl = trainControl(method = "cv"),
tuneGrid = expand.grid(maxvar = 15, direction = "forward"))
qda.fit.15_no_cor_2 <- qda(activity.x ~ q1_25.x + m2.x + AR12.1.x + q1_25.y + reg2.x + sd2.x + m3.y + bim.3.y +
entropy.1.x + m3.x + AR13.1.y + skew1.x + AR13.1.x + AR23.1.y + reg1.y,
data = data_no_cor_2)
cv_per_user(data_no_cor_2, qda.fit.15_no_cor_2)
##########################################################################################
##REMOVING HIGHLY CORRELATED VALUES
##########################################################################################
###Can also just remove variables with correlations over .80
CUTOFF <- 0.80
cor_matrix <- cor(data_to_work_with[,-c(1,2)])
cor_high <- findCorrelation(cor_matrix, CUTOFF)
high_cor_remove <- row.names(cor_matrix)[cor_high]
rawTrain <- data_to_work_with[,-c(1,2)][, -cor_high]
descrCor <- cor(rawTrain)
summary(descrCor[upper.tri(descrCor)])
data_no_correlation <- as_tibble(cbind(data_to_work_with[,c(1,2)], rawTrain))
##modelling
qda.fit.15_no_cor <- train(activity.x ~ ., data = data_no_correlation[,-1],
method = "stepQDA",
trControl = trainControl(method = "cv"),
tuneGrid = expand.grid(maxvar = 15, direction = "forward"))
qda.fit.15_no_cor_3 <- qda(activity.x ~ q1_25.x + AR12.1.x + psd.1.x + AR2.2.x + reg1.y + m3.y + reg2.y + m1.y +
m3.x + skew1.x + AR1.2.x + entropy.2.y + AR13.1.y + max.2.x + entropy.3.x,
data = data_no_correlation)
cv_per_user(data_no_correlation, qda.fit.15_no_cor_3)
##########################################################################################
##REMOVING OUTLIERS
##########################################################################################
##Now trying to remove the outliers and fit a qda, only removing the most extreme outliers
##this function we found online
remove_outliers <- function(x, na.rm = TRUE, ...) {
qnt <- quantile(x, probs=c(.25, .75), na.rm = na.rm, ...)
H <- 18 * IQR(x, na.rm = na.rm)
y <- x
y[x < (qnt[1] - H)] <- NA
y[x > (qnt[2] + H)] <- NA
y
}
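##quick check of the outlier rule (commented out, illustrative only; the input vector is made up): with H = 18 * IQR only very extreme values get turned into NA
# remove_outliers(c(rnorm(100), 1e6)) # the 1e6 entry comes back as NA, the rest are kept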
data_no_outliers <- data_to_work_with %>% mutate_at(vars(m1.x:psd.3.y), remove_outliers) %>% drop_na()
qda.fit.10_no_outliers <- train(activity.x ~ ., data = data_no_outliers[,-1],
method = "stepQDA",
trControl = trainControl(method = "cv"),
tuneGrid = expand.grid(maxvar = 10, direction = "forward"))
qda.fit.10_no_outliers <- qda(activity.x ~ max.1.x + m2.x + q1_25.y + AR12.1.x + AR2.2.x + sd1.x + AR12.1.y +
fft.3.x + m2.y + entropy.1.x, data = data_no_outliers)
cv_per_user(data_no_outliers, qda.fit.10_no_outliers)
##########################################################################################
##SCALING DATA
##########################################################################################
preProcValues <- preProcess(data_to_work_with[,-c(1,2)], method = c("center", "scale"))
trainTransformed <- predict(preProcValues, data_to_work_with)
qda.fit.15_scaled <- train(activity.x ~ ., data = trainTransformed[,-1],
method = "stepQDA",
trControl = trainControl(method = "cv"),
tuneGrid = expand.grid(maxvar = 30, direction = "forward"))
##THIS WAS THE BEST MODEL IN THE END
qda.fit.15_scaled <- qda(activity.x ~ max.1.x + m2.x + AR2.2.x +AR12.1.x + m1.y + q1_25.x + m3.y + reg3.x +
reg1.y + reg2.y + psd.3.y + reg1.x + entropy.3.y + skew2.x +AR13.1.y, data = trainTransformed)
cv_per_user(trainTransformed, qda.fit.15_scaled)
##The QDA model using centered and scaled data gave us the best improvement in our predictions
##########################################################################################
##Making the test dataset
##########################################################################################
filenames_acc_test <- dir("RawData/Test/", "^acc", full.names = TRUE)
filenames_gyro_test <- dir("RawData/Test/", "^gyr", full.names = TRUE)
myData_Acc_test = filenames_acc_test %>%
map_dfr(extractTimeDomainFeatures)
myData_Gryo_test <- filenames_gyro_test %>%
map_dfr(extractTimeDomainFeatures)
myData_Full_test <- left_join(myData_Acc_test, myData_Gryo_test, by = c("epoch", "user_id", "exp_id")) %>%
dplyr::select(-sample.y, -n.y, -n.x, -activity.x, -activity.y)
##########################################################################################
##Predicting on scaled and centered data
preProcValues <- preProcess(myData_Full_test[,-c(1,2)], method = c("center", "scale"))
testTransformed <- predict(preProcValues, myData_Full_test)
lda.pred=predict(qda.fit.15_scaled, testTransformed)
preds <- lda.pred$class
myData_Full_test$activity <- preds
myData_Full_test %>%
mutate(user_id = paste("user", user_id, sep=""), exp_id = paste("exp", exp_id, sep="")) %>%
unite(Id, user_id, exp_id, sample.x) %>%
dplyr::select(Id, Predicted = activity) %>%
write_csv("test_set_predictions10.csv")
file.show("test_set_predictions10.csv")
##########################################################################################
|
/Physical_Recognition_Kaggle.R
|
no_license
|
AndriiG13/Classifying_Physical_Activity_Kaggle
|
R
| false | false | 19,087 |
r
|
library(tidyverse)
library(Hmisc)
library(corrplot)
library(car)
library(caret)
library(klaR)
library(MASS)
library(psd)
rm(list = ls())
##getting labels
##########################################################################################
act_labels = read_delim("activity_labels.txt", " ", col_names=F, trim_ws = T)
act_labels = act_labels %>% dplyr::select(X1,X2)
act_labels
labels <- read_delim("RawData/Train/labels_train.txt", " ", col_names = F)
colnames(labels) <- c('trial', 'userid', 'activity', 'start', 'end')
labels
train_labels = labels
##########################################################################################
##function to calculate entropy
##########################################################################################
entropy <- function(x, nbreaks = nclass.Sturges(x)) {
r = range(x)
x_binned = findInterval(x, seq(r[1], r[2], len= nbreaks))
h = tabulate(x_binned, nbins = nbreaks) # fast histogram
p = h/sum(h)
-sum(p[p>0] * log(p[p>0]))
}
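## Hedged example (illustrative, not part of the original script): because the histogram is
## built over each sample's own range, a uniform sample yields higher entropy than a
## bell-shaped one with the same number of bins.
if (FALSE) {
  set.seed(1)
  entropy(runif(1000))   # near log(nbreaks): mass spread evenly over the bins
  entropy(rnorm(1000))   # lower: mass concentrated in the central bins
}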
##########################################################################################
##DEFNING THE FUNCTION to extract features
##########################################################################################
extractTimeDomainFeatures <-
function(filename, labels = train_labels) {
# extract user and experimental run ID's from file name
username = gsub(".+user(\\d+).+", "\\1", filename)
expname = gsub(".+exp(\\d+).+", "\\1", filename)
# import the data from the file
user01 <- read_delim(filename, " ", col_names = F, progress = TRUE, col_types = "ddd")
# select this users activity labels from the `labels` frame for this experimental run
user_labels <-
labels %>%
dplyr::filter(userid==as.numeric(username) & trial == as.numeric(expname)) %>% # select rows pertaining to current signals
mutate(segment = row_number()) %>% # segment identifies different WALKING_UPSTAIRS etc
gather(start_end, vec, -trial, -userid, -activity, -segment) %>% # stack start and end on top of each other
arrange(vec) %>% # arrange rows in natural order
mutate(activity = ifelse(start_end == 'end', NA, activity), activity_id = row_number()) # remove activity label `end` time rows
# add activity labels to each sample
user <-
user01 %>%
mutate(sample = row_number()-1) %>%
mutate(activity_id = findInterval(sample, user_labels$vec)) %>%
left_join(user_labels, by = "activity_id")
# split in epochs of 128 samples and compute features per epoch
usertimedom <-
user %>%
mutate(epoch = sample %/% 128) %>% # epoch = 2.56 sec
group_by(epoch) %>%
summarise(
user_id = username, # added to identify user in data frame rows
exp_id = expname, # added to identify experimental run in data frame rows
activity = names(which.max(table(c("-", activity)))),
sample = sample[1],
m1 = mean(X1), ###Means
m2 = mean(X2),
m3 = mean(X3),
sd1 = sd(X1), ##Standard deviations
sd2 = sd(X2),
sd3 = sd(X3),
q1_25 = quantile(X1, .25), ##Quartiles
skew1 = e1071::skewness(X1), ##Skewness
skew2 = e1071::skewness(X2),
skew3 = e1071::skewness(X3),
AR1.2 = cor(X1, lag(X1, n = 3), use = "pairwise"), ##Autocorrelations
AR2.2 = cor(X2, lag(X2, n = 3), use = "pairwise"),
AR3.2 = cor(X3, lag(X3, n = 3), use = "pairwise"),
AR12.1 = cor(X1, lag(X2, n= 2),use = "pairwise"),
AR23.1 = cor(X2, lag(X3, n=2),use = "pairwise"),
AR13.1 = cor(X1, lag(X3,n=2), use = "pairwise"),
max.1 = max(abs(X1)), ##Max and min values
max.2 = max(abs(X2)),
max.3 = max(abs(X3)),
min.1 = min(abs(X1)),
min.2 = min(abs(X2)),
min.3 = min(abs(X3)),
bim.1 = bimodality_coefficient(X1), ##Bimodality coefficients
bim.2 = bimodality_coefficient(X2),
bim.3 = bimodality_coefficient(X3),
entropy.1 = entropy(X1), #Entropy
entropy.2 = entropy(X2),
entropy.3 = entropy(X3),
reg1 = ar(X1, aic=FALSE, order.max=1)$ar, ##Auto regression
reg2 = ar(X2, aic=FALSE, order.max=1)$ar,
reg3 = ar(X3, aic=FALSE, order.max=1)$ar,
fft.1 = mean(abs(fft(X1))), ##Means of the fast Fourier transform (FFT), Fourier analysis converts a signal
fft.2 = mean(abs(fft(X2))), #from its original domain (often time or space) to a representation in the
fft.3 = mean(abs(fft(X3))), #frequency domain.
psd.1 = mean(spectrum(X1, plot = F)$spec), #Mean of the power spectral density (A Power Spectral Density
psd.2 = mean(spectrum(X2, plot = F)$spec), #(PSD) is the measure of signal's power content versus frequency
psd.3 = mean(spectrum(X3, plot = F)$spec),
n=n()
)
usertimedom
}
##########################################################################################
###running the function on acceleration and gyro and combining them
##########################################################################################
filenames_acc <- dir("RawData/Train/", "^acc", full.names = TRUE) # all acceleration files in the training folder
myData_Acc = filenames_acc %>%
map_dfr(extractTimeDomainFeatures) # map_dfr runs `extractTimeDomainFeatures` on all elements in filenames and binds results row wise
filenames_gyro <- dir("RawData/Train/", "^gyr", full.names = TRUE)
myData_Gyro = filenames_gyro %>%
map_dfr(extractTimeDomainFeatures) # map_dfr runs `extractTimeDomainFeatures` on all elements in filenames and binds results row wise
myData_Full <- left_join(myData_Acc, myData_Gyro, by = c("epoch", "user_id", "exp_id")) %>%
dplyr::select(-activity.y, -sample.y, -n.y)
myData_Full$activity.x <- as.integer(myData_Full$activity.x)
myData_Full$activity.x <- plyr::mapvalues(myData_Full$activity.x, 1:12, act_labels$X2)
data_to_work_with <- myData_Full %>% dplyr::filter(n.x >30) %>% dplyr::select( -c(1,3, sample.x, n.x)) %>% drop_na() %>% mutate(activity.x = factor(activity.x))
##########################################################################################
##make a function that splits the data into train and test, but based on the user_id and
#returns the cross validated classification accuracy using the model
##########################################################################################
cv_per_user <- function(dataset, model){
ids <- group_indices(dataset, user_id)
data_to_work_with_cv <- cbind(ids, dataset)
data_to_work_with_cv <- as_tibble(data_to_work_with_cv)
means <- c()
for (i in 1:10) {
train_index <- sample(x = ids, size = 12, replace = F)
train_set <- data_to_work_with_cv %>% dplyr::filter(ids %in% train_index)
'%ni%' <- Negate('%in%')
test_set <- data_to_work_with_cv %>% dplyr::filter(ids %ni% train_index)
lda.pred=predict (model, test_set[,-c(1,2)])
lda.class=lda.pred$class
means[i] <- mean(lda.class == test_set$activity.x)
}
mean(means)
}
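## Hedged usage sketch (illustrative, not part of the original analysis): the model passed in is
## already fitted on the full data; cv_per_user() only scores it on 10 random splits that hold
## out whole users, and returns the mean held-out accuracy.
if (FALSE) {
  fit_demo <- qda(activity.x ~ q1_25.x + max.1.x, data = data_to_work_with)
  cv_per_user(data_to_work_with, fit_demo)
}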
##########################################################################################
##fit a 10/15/20 variable stepwise qda,
##first starting variable
qda.fit.15.1 <- train(activity.x ~ ., data = dplyr::select(data_to_work_with, -user_id),
method = "stepQDA",
trControl = trainControl(method = "cv"),
tuneGrid = expand.grid(maxvar = 20, direction = "forward"))
qda.fit.15.1 <-qda(activity.x ~ max.1.x + m2.x + AR2.2.x + AR12.1.x + q1_25.y + q1_25.x + m3.y + fft.3.x +
fft.2.x + AR12.1.y + reg1.x + reg1.y + entropy.2.x + reg3.x + min.2.x,
data_to_work_with)
##model 1
cv_per_user(data_to_work_with,qda.fit.15.1)
##Our predictions weren't very good so we tried a few things to improve it
#The first was removing values high in VIf (this was done in two ways)
#The second was removing highly correlated variables
#The third was removing extreme outliers
#The fourth was centering and scaling the data
##########################################################################################
##REMOVING VALUES HIGH IN VIF
##There are two ways you can do it:
##1 - Simply remove variables with highest VIF
##2 - Remove variables with highest VIF but also lowest effect on the outcome variables, in hopes that
##the total collinearity decreases but you still keep the best predicting variables.
##Way 1
vif_model <- lm(as.numeric(activity.x) ~., data_to_work_with[,-1])
vif(vif_model)
vif_model <- lm(as.numeric(activity.x) ~., dplyr::select(data_to_work_with,-user_id, -m1.x,
-sd1.x, -max.1.x, -sd1.y, -fft.2.y,
-fft.1.y, -q1_25.x, -max.3.y,
-fft.2.x, -fft.1.x, -fft.3.x,
-q1_25.y, -min.2.x, -fft.3.y,
-psd.3.x, -reg3.x, -psd.2.x, -sd3.y, -max.2.y,
-sd3.x, -psd.2.y))
vif(vif_model)
data_no_cor_1 <- dplyr::select(data_to_work_with, -m1.x,
-sd1.x, -max.1.x, -sd1.y, -fft.2.y,
-fft.1.y, -q1_25.x, -max.3.y,
-fft.2.x, -fft.1.x, -fft.3.x,
-q1_25.y, -min.2.x, -fft.3.y,
-psd.3.x, -reg3.x, -psd.2.x, -sd3.y, -max.2.y,
-sd3.x, -psd.2.y)
qda.fit.15_no_cor_1 <- train(activity.x ~ ., data = data_no_cor_1[,-1],
method = "stepQDA",
trControl = trainControl(method = "cv"),
tuneGrid = expand.grid(maxvar = 15, direction = "forward"))
qda.fit.15_no_cor_1 <- qda(activity.x ~ min.1.x + m2.x + AR12.1.x + AR2.2.x + psd.1.x + m3.y + entropy.1.x +
AR1.2.y + AR1.2.x +AR2.2.y + AR12.1.y + entropy.3.y + m3.x + skew2.x + AR13.1.y,
data = data_no_cor_1)
cv_per_user(data_no_cor_1, qda.fit.15_no_cor_1)
#Way 2 ##LEFT TO FIX THIS VIF MODEL, MAYBE MAKE A FUNCTION FOR IT BASED ON THE DISTANCE
##Now try removing correlation by taking the effect of the variables into account
##first we can examine effects of individual predictors
#make a loop which fits single variables one by one
single_effects <- c()
for (i in 3:length(colnames(data_to_work_with))) {
data = data_to_work_with[,c(2,i)]
qda.fit <- qda(activity.x ~., data = data)
acc <- cv_per_user(data_to_work_with, qda.fit)
single_effects[i] <- paste(colnames(data_to_work_with[,i]), round(acc,3))
}
single_effects
# NA NA "m1.x 0.39"
# "m2.x 0.436" "m3.x 0.382" "sd1.x 0.411"
# "sd2.x 0.335" "sd3.x 0.328" "q1_25.x 0.55"
# "skew1.x 0.28" "skew2.x 0.271" "skew3.x 0.203"
# "AR1.2.x 0.362" "AR2.2.x 0.293" "AR3.2.x 0.286"
# "AR12.1.x 0.36" "AR23.1.x 0.247" "AR13.1.x 0.274"
# "max.1.x 0.593" "max.2.x 0.357" "max.3.x 0.328"
# "min.1.x 0.51" "min.2.x 0.421" "min.3.x 0.36"
# "bim.1.x 0.315" "bim.2.x 0.219" "bim.3.x 0.214"
# "entropy.1.x 0.224" "entropy.2.x 0.218" "entropy.3.x 0.237"
# "reg1.x 0.371" "reg2.x 0.338" "reg3.x 0.358"
# "fft.1.x 0.562" "fft.2.x 0.411" "fft.3.x 0.374"
# "psd.1.x 0.406" "psd.2.x 0.339" "psd.3.x 0.311"
# "m1.y 0.293" "m2.y 0.294" "m3.y 0.241"
# "sd1.y 0.338" "sd2.y 0.31" "sd3.y 0.324"
# "q1_25.y 0.363" "skew1.y 0.23" "skew2.y 0.219"
# "skew3.y 0.191" "AR1.2.y 0.233" "AR2.2.y 0.238"
# "AR3.2.y 0.245" "AR12.1.y 0.245" "AR23.1.y 0.278"
# "AR13.1.y 0.209" "max.1.y 0.35" "max.2.y 0.314"
# "max.3.y 0.318" "min.1.y 0.241" "min.2.y 0.249"
# "min.3.y 0.249" "bim.1.y 0.219" "bim.2.y 0.197"
# "bim.3.y 0.203" "entropy.1.y 0.217" "entropy.2.y 0.205"
# "entropy.3.y 0.215" "reg1.y 0.264" "reg2.y 0.249"
# "reg3.y 0.313" "fft.1.y 0.361" "fft.2.y 0.333"
# "fft.3.y 0.346" "psd.1.y 0.278" "psd.2.y 0.255"
# "psd.3.y 0.303"
##examining VIF once again
vif_model <- lm(as.numeric(activity.x) ~., data_to_work_with[,-1])
vif(vif_model)
vif_model <- lm(as.numeric(activity.x) ~., dplyr::select(data_to_work_with,-user_id, -m1.x, -fft.2.y,
-sd1.y, -fft.3.y,-max.1.y, -fft.3.x, -psd.2.y,
-max.3.y, -m1.y, -psd.2.x, -sd1.x, -max.1.x,
-fft.2.x, -fft.1.y, -AR3.2.x, -min.3.x,
-fft.1.x, -min.1.x, -sd3.y, -sd2.y, -max.2.x))
vif(vif_model)
data_no_cor_2 <- dplyr::select(data_to_work_with, -m1.x, -fft.2.y,
-sd1.y, -fft.3.y,-max.1.y, -fft.3.x, -psd.2.y,
-max.3.y, -m1.y, -psd.2.x, -sd1.x, -max.1.x,
-fft.2.x, -fft.1.y, -AR3.2.x, -min.3.x,
-fft.1.x, -min.1.x, -sd3.y, -sd2.y, -max.2.x)
qda.fit.15_no_cor_2 <- train(activity.x ~ ., data = data_no_cor_2[,-1],
method = "stepQDA",
trControl = trainControl(method = "cv"),
tuneGrid = expand.grid(maxvar = 15, direction = "forward"))
qda.fit.15_no_cor_2 <- qda(activity.x ~ q1_25.x + m2.x + AR12.1.x + q1_25.y + reg2.x + sd2.x + m3.y + bim.3.y +
entropy.1.x + m3.x + AR13.1.y + skew1.x + AR13.1.x + AR23.1.y + reg1.y,
data = data_no_cor_2)
cv_per_user(data_no_cor_2, qda.fit.15_no_cor_2)
##########################################################################################
##REMOVING HIGHLY CORRELATED VALUES
##########################################################################################
###Can also just remove variables with correlations over .80
CUTOFF <- 0.80
cor_matrix <- cor(data_to_work_with[,-c(1,2)])
cor_high <- findCorrelation(cor_matrix, CUTOFF)
high_cor_remove <- row.names(cor_matrix)[cor_high]
rawTrain <- data_to_work_with[,-c(1,2)][, -cor_high]
descrCor <- cor(rawTrain)
summary(descrCor[upper.tri(descrCor)])
data_no_correlation <- as_tibble(cbind(data_to_work_with[,c(1,2)], rawTrain))
##modelling
qda.fit.15_no_cor <- train(activity.x ~ ., data = data_no_correlation[,-1],
method = "stepQDA",
trControl = trainControl(method = "cv"),
tuneGrid = expand.grid(maxvar = 15, direction = "forward"))
qda.fit.15_no_cor_3 <- qda(activity.x ~ q1_25.x + AR12.1.x + psd.1.x + AR2.2.x + reg1.y + m3.y + reg2.y + m1.y +
m3.x + skew1.x + AR1.2.x + entropy.2.y + AR13.1.y + max.2.x + entropy.3.x,
data = data_no_correlation)
cv_per_user(data_no_correlation, qda.fit.15_no_cor_3)
##########################################################################################
##REMOVING OUTLIERS
##########################################################################################
##Now trying to remove the outliers and fit a QDA, only removing the most extreme outliers
##the remove_outliers function below is adapted from one we found online
remove_outliers <- function(x, na.rm = TRUE, ...) {
qnt <- quantile(x, probs=c(.25, .75), na.rm = na.rm, ...)
H <- 18 * IQR(x, na.rm = na.rm)
y <- x
y[x < (qnt[1] - H)] <- NA
y[x > (qnt[2] + H)] <- NA
y
}
data_no_outliers <- data_to_work_with %>% mutate_at(vars(m1.x:psd.3.y), remove_outliers) %>% drop_na()
qda.fit.10_no_outliers <- train(activity.x ~ ., data = data_no_outliers[,-1],
method = "stepQDA",
trControl = trainControl(method = "cv"),
tuneGrid = expand.grid(maxvar = 10, direction = "forward"))
qda.fit.10_no_outliers <- qda(activity.x ~ max.1.x + m2.x + q1_25.y + AR12.1.x + AR2.2.x + sd1.x + AR12.1.y +
fft.3.x + m2.y + entropy.1.x, data = data_no_outliers)
cv_per_user(data_no_outliers, qda.fit.10_no_outliers)
##########################################################################################
##SCALING DATA
##########################################################################################
preProcValues <- preProcess(data_to_work_with[,-c(1,2)], method = c("center", "scale"))
trainTransformed <- predict(preProcValues, data_to_work_with)
qda.fit.15_scaled <- train(activity.x ~ ., data = trainTransformed[,-1],
method = "stepQDA",
trControl = trainControl(method = "cv"),
tuneGrid = expand.grid(maxvar = 30, direction = "forward"))
##THIS WAS THE BEST MODEL IN THE END
qda.fit.15_scaled <- qda(activity.x ~ max.1.x + m2.x + AR2.2.x +AR12.1.x + m1.y + q1_25.x + m3.y + reg3.x +
reg1.y + reg2.y + psd.3.y + reg1.x + entropy.3.y + skew2.x +AR13.1.y, data = trainTransformed)
cv_per_user(trainTransformed, qda.fit.15_scaled)
##The QDA model using centered and scaled data gave us the best improvement in prediction accuracy
##########################################################################################
##Making the test dataset
##########################################################################################
filenames_acc_test <- dir("RawData/Test/", "^acc", full.names = TRUE)
filenames_gyro_test <- dir("RawData/Test/", "^gyr", full.names = TRUE)
myData_Acc_test = filenames_acc_test %>%
map_dfr(extractTimeDomainFeatures)
myData_Gryo_test <- filenames_gyro_test %>%
map_dfr(extractTimeDomainFeatures)
myData_Full_test <- left_join(myData_Acc_test, myData_Gryo_test, by = c("epoch", "user_id", "exp_id")) %>%
dplyr::select(-sample.y, -n.y, -n.x, -activity.x, -activity.y)
##########################################################################################
##Predicting on scaled and centered data
preProcValues <- preProcess(myData_Full_test[,-c(1,2)], method = c("center", "scale"))
testTransformed <- predict(preProcValues, myData_Full_test)
lda.pred=predict(qda.fit.15_scaled, testTransformed)
preds <- lda.pred$class
myData_Full_test$activity <- preds
myData_Full_test %>%
mutate(user_id = paste("user", user_id, sep=""), exp_id = paste("exp", exp_id, sep="")) %>%
unite(Id, user_id, exp_id, sample.x) %>%
dplyr::select(Id, Predicted = activity) %>%
write_csv("test_set_predictions10.csv")
file.show("test_set_predictions10.csv")
##########################################################################################
|
## Title ----
##
## Fetch geneset annotations.
##
## Description ----
##
## This script retrieves ID mappings (Ensembl to entrez)
## and KEGG pathway information.
##
## Details ----
##
## Ensembl to Entrez mappings are retrieved using biomaRt. KEGG pathways are retrieved
## directly from KEGG.
##
## Usage ----
##
## $ Rscript getGenesetAnnotations.R
## --ensemblversion=latest
## --species=mm
## --outdir=.
# Libraries ----
stopifnot(
require(optparse),
require(gsfisher)
)
# Options ----
option_list <- list(
make_option(
c("--ensemblversion"),
default="latest",
help="either latest or a specific number"
),
make_option(
c("--ensemblhost"),
default=NULL,
help="the ensembl host address"
),
make_option(
c("--species"),
default="none",
help="species - mm or hs"
),
make_option(
c("--outdir"),
default="none",
help="outdir")
)
opt <- parse_args(OptionParser(option_list=option_list))
cat("Running with options:\n")
print(opt)
# Fetch Ensembl to Entrez ID mappings ----
if(opt$ensemblversion=="latest")
{
version <- NULL
} else { version <- opt$ensemblversion }
if(is.null(opt$ensemblhost))
{
# use the default host (www.ensembl.org)
anno <- fetchAnnotation(species=opt$species,
ensembl_version=version)
} else {
anno <- fetchAnnotation(species=opt$species,
ensembl_version=version,
ensembl_host=opt$ensemblhost)
}
write.table(anno,
gzfile(file.path(opt$outdir,"ensembl.to.entrez.txt.gz")),
quote=FALSE,
row.names=FALSE,sep="\t")
# Fetch KEGG pathways ----
kegg_pathways <- fetchKEGG(species=opt$species)
saveRDS(kegg_pathways, file=file.path(opt$outdir,"kegg_pathways.rds"))
|
/R/fetch_geneset_annotations.R
|
permissive
|
crichgriffin/tenx
|
R
| false | false | 1,866 |
r
|
## Title ----
##
## Fetch geneset annotations.
##
## Description ----
##
## This script retrieves ID mappings (Ensembl to entrez)
## and KEGG pathway information.
##
## Details ----
##
## Ensembl to Entrez mappings are retrieved using biomaRt. KEGG pathways are retrieved
## directly from KEGG.
##
## Usage ----
##
## $ Rscript getGenesetAnnotations.R
## --ensemblversion=latest
## --species=mm
## --outdir=.
# Libraries ----
stopifnot(
require(optparse),
require(gsfisher)
)
# Options ----
option_list <- list(
make_option(
c("--ensemblversion"),
default="latest",
help="either latest or a specific number"
),
make_option(
c("--ensemblhost"),
default=NULL,
help="the ensembl host address"
),
make_option(
c("--species"),
default="none",
help="species - mm or hs"
),
make_option(
c("--outdir"),
default="none",
help="outdir")
)
opt <- parse_args(OptionParser(option_list=option_list))
cat("Running with options:\n")
print(opt)
# Fetch Ensembl to Entrez ID mappings ----
if(opt$ensemblversion=="latest")
{
version <- NULL
} else { version <- opt$ensemblversion }
if(is.null(opt$ensemblhost))
{
# use the default host (www.ensembl.org)
anno <- fetchAnnotation(species=opt$species,
ensembl_version=version)
} else {
anno <- fetchAnnotation(species=opt$species,
ensembl_version=version,
ensembl_host=opt$ensemblhost)
}
write.table(anno,
gzfile(file.path(opt$outdir,"ensembl.to.entrez.txt.gz")),
quote=FALSE,
row.names=FALSE,sep="\t")
# Fetch KEGG pathways ----
kegg_pathways <- fetchKEGG(species=opt$species)
saveRDS(kegg_pathways, file=file.path(opt$outdir,"kegg_pathways.rds"))
|
testlist <- list(a = 437976862L, b = 0L, x = c(1634740520L, 673869680L, 1948281198L, 673869680L, 1853060128L, 1685026146L, 1818568990L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610129900-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 198 |
r
|
testlist <- list(a = 437976862L, b = 0L, x = c(1634740520L, 673869680L, 1948281198L, 673869680L, 1853060128L, 1685026146L, 1818568990L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
#Clintri_NNDist
library("dplyr")
library("tidytext")
library("tidyr")
library("topicmodels")
library("ggplot2")
library("purrr")
library("Rtsne")
library("scatterpie")
#Preparing the data with LDA and t-SNE
CT_des1 <- clintri_descriptions_web("COVID", max_rnk = 50)
CT_des2 <- clintri_descriptions_web("Acute+lymphoblastic+leukemia", max_rnk = 50)
CT_des3 <- clintri_descriptions_web("Parkinson", max_rnk = 50)
CT_des4 <- clintri_descriptions_web("Flu", max_rnk = 50)
BDD <- bind_rows(CT_des1, CT_des2, CT_des3, CT_des4)
BDD <- BDD %>%
group_by(NCTid) %>%
mutate(text = paste0(textblock, collapse = " "))
BDD <- distinct(BDD[,-1])
BDD$text <- trimws(gsub("\\w*[0-9]+\\w*\\s*", "", BDD$text))
data("stop_words")
BDD_td <- BDD %>%
unnest_tokens(word, text, token = "words")%>%
anti_join(stop_words)
BDD_frq <- BDD_td %>%
count(NCTid, word)%>%
group_by(NCTid)
BDD_dtm <- BDD_frq %>%
cast_dtm(NCTid, word, n)
BDD_lda <- LDA(BDD_dtm, k = 4)
BDD_documents <- tidy(BDD_lda, matrix = "gamma")
gamma_spread <- BDD_documents %>%
mutate(topic = paste0("topic", topic)) %>%
spread(topic, gamma) %>%
filter(topic1 > .001 | topic2 > .001 | topic3 > .001 | topic4 > .001)
tsne_out1 <- Rtsne(gamma_spread, pca=F, perplexity = 10,theta = 0.0)
gamma_spread$x <- tsne_out1$Y[,1]
gamma_spread$y <- tsne_out1$Y[,2]
#scatterpie
BDD_scatterpie <- ggplot() +
geom_scatterpie(aes(x=x, y = y, group = document),data = gamma_spread,
cols = c("topic1", "topic2", "topic3", "topic4"),
color=NA,
alpha = 0.3) +
coord_equal()
BDD_scatterpie
#install.packages("spatstat")
library("spatstat")
# nndist: computes, for each point, the distance to its k-th nearest neighbour
# k = number of neighbours
data <- NULL
data$document <- gamma_spread$document
data$x <- tsne_out1$Y[,1]
data$y <- tsne_out1$Y[,2]
data <- as.data.frame(data)
View(data)
# test with number of neighbours = 5
dist <- nndist(data[c(2,3)], k=5)
# Test with the document in the 7th row
data[7,]
dist[7]
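# Hedged note (illustrative): with k = 5, nndist() returns, for every point, the distance to its
# 5th nearest neighbour, so dist[7] is the radius of the 5-neighbour ball around document 7.
if (FALSE) {
  head(cbind(data$document, dist))  # each document id next to its 5th-nearest-neighbour distance
}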
#Loop comparing each document's distance to document 7 against that neighbour distance
Neighbor <- NULL
for(i in 1:nrow(data)){
xN <- data[i,]$x
yN <- data[i,]$y
xA <- data[7,]$x
yA <- data[7,]$y
vec <- sqrt((xN-xA)^2+(yN-yA)^2)
if(vec <= dist[7]){
NCTid <- data[i,]$document
Neighbor$NCTid[i] <- NCTid
}
}
Neighbor$NCTid <- Neighbor$NCTid[!is.na(Neighbor$NCTid)]
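# Hedged alternative (illustrative only): the loop above can be written without a loop by
# computing all distances to document 7 at once; the names below are hypothetical.
if (FALSE) {
  d7 <- sqrt((data$x - data$x[7])^2 + (data$y - data$y[7])^2)
  neighbours_vec <- data$document[d7 <= dist[7]]
  identical(sort(neighbours_vec), sort(Neighbor$NCTid))  # expected to be TRUE
}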
NCTid.list <- as.data.frame(Neighbor)
NCTid.list
NCTid.list[1:nrow(NCTid.list),]
List <- NCTidToTitles_web(NCTid.list)
View(List)
|
/Clintri_NNDist.R
|
no_license
|
jgodet/TYJuG
|
R
| false | false | 2,524 |
r
|
#Clintri_NNDist
library("dplyr")
library("tidytext")
library("tidyr")
library("topicmodels")
library("ggplot2")
library("purrr")
library("Rtsne")
library("scatterpie")
#Preparing the data with LDA and t-SNE
CT_des1 <- clintri_descriptions_web("COVID", max_rnk = 50)
CT_des2 <- clintri_descriptions_web("Acute+lymphoblastic+leukemia", max_rnk = 50)
CT_des3 <- clintri_descriptions_web("Parkinson", max_rnk = 50)
CT_des4 <- clintri_descriptions_web("Flu", max_rnk = 50)
BDD <- bind_rows(CT_des1, CT_des2, CT_des3, CT_des4)
BDD <- BDD %>%
group_by(NCTid) %>%
mutate(text = paste0(textblock, collapse = " "))
BDD <- distinct(BDD[,-1])
BDD$text <- trimws(gsub("\\w*[0-9]+\\w*\\s*", "", BDD$text))
data("stop_words")
BDD_td <- BDD %>%
unnest_tokens(word, text, token = "words")%>%
anti_join(stop_words)
BDD_frq <- BDD_td %>%
count(NCTid, word)%>%
group_by(NCTid)
BDD_dtm <- BDD_frq %>%
cast_dtm(NCTid, word, n)
BDD_lda <- LDA(BDD_dtm, k = 4)
BDD_documents <- tidy(BDD_lda, matrix = "gamma")
gamma_spread <- BDD_documents %>%
mutate(topic = paste0("topic", topic)) %>%
spread(topic, gamma) %>%
filter(topic1 > .001 | topic2 > .001 | topic3 > .001 | topic4 > .001)
tsne_out1 <- Rtsne(gamma_spread, pca=F, perplexity = 10,theta = 0.0)
gamma_spread$x <- tsne_out1$Y[,1]
gamma_spread$y <- tsne_out1$Y[,2]
#scatterpie
BDD_scatterpie <- ggplot() +
geom_scatterpie(aes(x=x, y = y, group = document),data = gamma_spread,
cols = c("topic1", "topic2", "topic3", "topic4"),
color=NA,
alpha = 0.3) +
coord_equal()
BDD_scatterpie
#install.packages("spatstat")
library("spatstat")
# nndist: computes, for each point, the distance to its k-th nearest neighbour
# k = number of neighbours
data <- NULL
data$document <- gamma_spread$document
data$x <- tsne_out1$Y[,1]
data$y <- tsne_out1$Y[,2]
data <- as.data.frame(data)
View(data)
# test with number of neighbours = 5
dist <- nndist(data[c(2,3)], k=5)
# Test with the document in the 7th row
data[7,]
dist[7]
#Loop comparing each document's distance to document 7 against that neighbour distance
Neighbor <- NULL
for(i in 1:nrow(data)){
xN <- data[i,]$x
yN <- data[i,]$y
xA <- data[7,]$x
yA <- data[7,]$y
vec <- sqrt((xN-xA)^2+(yN-yA)^2)
if(vec <= dist[7]){
NCTid <- data[i,]$document
Neighbor$NCTid[i] <- NCTid
}
}
Neighbor$NCTid <- Neighbor$NCTid[!is.na(Neighbor$NCTid)]
NCTid.list <- as.data.frame(Neighbor)
NCTid.list
NCTid.list[1:nrow(NCTid.list),]
List <- NCTidToTitles_web(NCTid.list)
View(List)
|
# Load libraries
library(readr)
library(stringr)
library(stringi)
library(varhandle)
library(tm)      # assumed to provide removeWords(), used in deprofane() below
library(dplyr)
# Profanity filtering
deprofane <- function(phrase, badwords){
removeWords(phrase, badwords)
}
NumberOfWords <- function(samp) {
samp <- str_trim(gsub("\\s+", " ", samp))
sps <- gregexpr(" ", samp)[[1]]
csps <- length(sps[which(sps>0)]) + 1
csps
}
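# Hedged examples (illustrative): NumberOfWords collapses repeated whitespace before counting,
# so the following are expected to hold.
if (FALSE) {
  NumberOfWords("the quick brown fox")   # 4
  NumberOfWords("  hello   world  ")     # 2
  NumberOfWords("word")                  # 1
}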
GetLastWord <- function(phrase){
word(phrase, -1)
}
GetLastWords <- function(phrase, number){
word(phrase, -number, -1)
}
CutFirstWord <- function(phrase){
word(phrase, 2, NumberOfWords(phrase))
}
RemoveWhiteSpace <- function(phrase){
gsub("\\s+", " ", phrase)
}
LowerCase <- function(phrase){
phrase <- stri_trans_tolower(phrase)
}
# Load with no pre-loading
process_gram <- function(phrase, sample) {
# Count number of words
if (NumberOfWords(phrase) > 5) {
phrase <- GetLastWords(phrase, 5)
}
srchstr <- paste0(phrase, "[[:space:]][[:alnum:]]+")
poss <- grep(srchstr, sample, useBytes=TRUE, value=TRUE)
possl <- str_match(poss, srchstr)
if (length(possl) > 1) {
# passedl
freq <- as.data.frame(sort(table(possl), decreasing=TRUE))[1,1]
result <- GetLastWord(freq)
} else if (length(possl) == 1){
freq <- row.names(as.data.frame(sort(table(possl), decreasing=TRUE)))[1]
result <- GetLastWord(freq)
}
else if (length(possl) == 0 & NumberOfWords(phrase)>1) {
phrase <- CutFirstWord(phrase)
if (NumberOfWords(phrase) > 0) {
result <- process_gram(phrase, sample)
} else {
result <- "the"
}}
else if (length(possl) == 0 & NumberOfWords(phrase)==1){
result <- "the"
}
result
}
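# Hedged usage sketch (illustrative): given a cleaned phrase and the corpus `sample`,
# process_gram() returns the most frequent word observed right after the (up to 5-word)
# suffix of the phrase, backing off to shorter suffixes and finally to "the".
if (FALSE) {
  process_gram("thanks for the", sample)
}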
preprocess_phrase <- function(phrase) {
contractions$contraction <- LowerCase(contractions$contraction)
contractions$expanded <- LowerCase(contractions$expanded)
phrase <- LowerCase(phrase)
phrase <- stringi::stri_replace_all_regex(phrase,
contractions$contraction,
contractions$expanded,
vectorize_all=FALSE)
phrase <- deprofane(phrase, badwords)
phrase <- RemovePunctuation(phrase)
phrase <- RemoveNumbers(phrase)
phrase <- RemoveTwitter(phrase)
phrase <- RemoveWhiteSpace(phrase)
phrase
}
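# RemovePunctuation(), RemoveNumbers(), RemoveTwitter() and the `contractions` lookup table are
# referenced above but not defined in this file; presumably they live elsewhere in the project.
# Minimal sketches under that assumption (hedged, hypothetical implementations):
if (!exists("RemovePunctuation")) {
  RemovePunctuation <- function(phrase) gsub("[[:punct:]]+", " ", phrase)   # drop punctuation
}
if (!exists("RemoveNumbers")) {
  RemoveNumbers <- function(phrase) gsub("[[:digit:]]+", " ", phrase)       # drop digits
}
if (!exists("RemoveTwitter")) {
  RemoveTwitter <- function(phrase) gsub("[@#]\\w+|\\bRT\\b", " ", phrase)  # drop handles, hashtags, RT
}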
badwords <- read_lines("badwords.txt")
sample <- read_lines("SamplePreProcessed.txt")
|
/prediction.R
|
no_license
|
pnicewicz421/nlp
|
R
| false | false | 2,507 |
r
|
# Load libraries
library(readr)
library(stringr)
library(stringi)
library(varhandle)
library(tm)      # assumed to provide removeWords(), used in deprofane() below
library(dplyr)
# Profanity filtering
deprofane <- function(phrase, badwords){
removeWords(phrase, badwords)
}
NumberOfWords <- function(samp) {
samp <- str_trim(gsub("\\s+", " ", samp))
sps <- gregexpr(" ", samp)[[1]]
csps <- length(sps[which(sps>0)]) + 1
csps
}
GetLastWord <- function(phrase){
word(phrase, -1)
}
GetLastWords <- function(phrase, number){
word(phrase, -number, -1)
}
CutFirstWord <- function(phrase){
word(phrase, 2, NumberOfWords(phrase))
}
RemoveWhiteSpace <- function(phrase){
gsub("\\s+", " ", phrase)
}
LowerCase <- function(phrase){
phrase <- stri_trans_tolower(phrase)
}
# Load with no pre-loading
process_gram <- function(phrase, sample) {
# Count number of words
if (NumberOfWords(phrase) > 5) {
phrase <- GetLastWords(phrase, 5)
}
srchstr <- paste0(phrase, "[[:space:]][[:alnum:]]+")
poss <- grep(srchstr, sample, useBytes=TRUE, value=TRUE)
possl <- str_match(poss, srchstr)
if (length(possl) > 1) {
# passedl
freq <- as.data.frame(sort(table(possl), decreasing=TRUE))[1,1]
result <- GetLastWord(freq)
} else if (length(possl) == 1){
freq <- row.names(as.data.frame(sort(table(possl), decreasing=TRUE)))[1]
result <- GetLastWord(freq)
}
else if (length(possl) == 0 & NumberOfWords(phrase)>1) {
phrase <- CutFirstWord(phrase)
if (NumberOfWords(phrase) > 0) {
result <- process_gram(phrase, sample)
} else {
result <- "the"
}}
else if (length(possl) == 0 & NumberOfWords(phrase)==1){
result <- "the"
}
result
}
preprocess_phrase <- function(phrase) {
contractions$contraction <- LowerCase(contractions$contraction)
contractions$expanded <- LowerCase(contractions$expanded)
phrase <- LowerCase(phrase)
phrase <- stringi::stri_replace_all_regex(phrase,
contractions$contraction,
contractions$expanded,
vectorize_all=FALSE)
phrase <- deprofane(phrase, badwords)
phrase <- RemovePunctuation(phrase)
phrase <- RemoveNumbers(phrase)
phrase <- RemoveTwitter(phrase)
phrase <- RemoveWhiteSpace(phrase)
phrase
}
badwords <- read_lines("badwords.txt")
sample <- read_lines("SamplePreProcessed.txt")
|
mergesort <- function(m)
{
merge_ <- function(left, right)
{
result <- c()
while(length(left) > 0 && length(right) > 0)
{
if(left[1] <= right[1])
{
result <- c(result, left[1])
left <- left[-1]
} else
{
result <- c(result, right[1])
right <- right[-1]
}
}
if(length(left) > 0) result <- c(result, left)
if(length(right) > 0) result <- c(result, right)
result
}
len <- length(m)
if(len <= 1) m else
{
middle <- length(m) / 2
left <- m[1:floor(middle)]
right <- m[floor(middle+1):len]
left <- mergesort(left)
right <- mergesort(right)
if(left[length(left)] <= right[1])
{
c(left, right)
} else
{
merge_(left, right)
}
}
}
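# Hedged example (illustrative): quick sanity check of the recursive merge sort.
if (FALSE) {
  mergesort(c(5, 3, 8, 1))          # 1 3 5 8
  v <- runif(50)
  identical(mergesort(v), sort(v))  # expected TRUE
}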
n <- sample(1:10000,10000,replace = T)
n1 <- 10000:1
n2 <- 1:1000
# Random Numbers
mstart <- Sys.time()
mersortrandom <- mergesort(n)
mend <-Sys.time()
# Reverse Numbers
mstart1 <- Sys.time()
mersortrevrse <- mergesort(n1)
mend1 <- Sys.time()
# Sorted Numbers
mstart2 <- Sys.time()
mersorted<- mergesort(n2)
mend2 <- Sys.time()
# Time Difference
mdiff <- (mend-mstart)
mdiff1 <-(mend1-mstart1)
mdiff2 <-(mend2-mstart2)
mer <- c(mersortrandom,mersortrevrse,mersorted)
mna <- c("Random","Reverse","Sorted")
mdifftime <- c(mdiff,mdiff1,mdiff2)
mdf <- data.frame(mna,mdifftime)
|
/RStudioProject/SortingGraphs.R
|
no_license
|
neerajsh2008/RProgrames
|
R
| false | false | 1,365 |
r
|
mergesort <- function(m)
{
merge_ <- function(left, right)
{
result <- c()
while(length(left) > 0 && length(right) > 0)
{
if(left[1] <= right[1])
{
result <- c(result, left[1])
left <- left[-1]
} else
{
result <- c(result, right[1])
right <- right[-1]
}
}
if(length(left) > 0) result <- c(result, left)
if(length(right) > 0) result <- c(result, right)
result
}
len <- length(m)
if(len <= 1) m else
{
middle <- length(m) / 2
left <- m[1:floor(middle)]
right <- m[floor(middle+1):len]
left <- mergesort(left)
right <- mergesort(right)
if(left[length(left)] <= right[1])
{
c(left, right)
} else
{
merge_(left, right)
}
}
}
n <- sample(1:10000,10000,replace = T)
n1 <- 10000:1
n2 <- 1:1000
# Random Numbers
mstart <- Sys.time()
mersortrandom <- mergesort(n)
mend <-Sys.time()
# Reverse Numbers
mstart1 <- Sys.time()
mersortrevrse <- mergesort(n1)
mend1 <- Sys.time()
# Sorted Numbers
mstart2 <- Sys.time()
mersorted<- mergesort(n2)
mend2 <- Sys.time()
# Time Difference
mdiff <- (mend-mstart)
mdiff1 <-(mend1-mstart1)
mdiff2 <-(mend2-mstart2)
mer <- c(mersortrandom,mersortrevrse,mersorted)
mna <- c("Random","Reverse","Sorted")
mdifftime <- c(mdiff,mdiff1,mdiff2)
mdf <- data.frame(mna,mdifftime)
|
source("../../bin/WGSData.R")
source("../../bin/dermalNFData.R")
##now re-annotate based on synapse id.
##see if we can find missing WGS
mapping=synTableQuery('SELECT "Patient","DnaID","WGS" FROM syn5556216')@values
mapping=mapping[which(!is.na(mapping$WGS)),]
samps<-synapseQuery("select * from entity where parentId=='syn5522788'")
annotes<-sapply(samps$entity.id,function(x) synGet(x,downloadFile=F)@annotations)
names(annotes)<-samps$entity.id
##now i did some updates of the sequence analysis
all.muts<-getAllMutData(impact='HIGH')
#res=storeSomMutationFiles(impact='MODERATE')
res=getMutationStatsForGene("OR4C5")
res=getMutationStatsForGene("FOXD1")
res=getMutationStatsForGene("MUC6")
res=getMutationStatsForGene("NCAM1")
res=getMutationStatsForGene("SHANK3")
res=getMutationStatsForGene("FOXO6")
|
/analysis/2016-01-08/doGermlineAnalysis.R
|
no_license
|
allaway/dermalNF
|
R
| false | false | 812 |
r
|
source("../../bin/WGSData.R")
source("../../bin/dermalNFData.R")
##now re-annotate based on synapse id.
##see if we can find missing WGS
mapping=synTableQuery('SELECT "Patient","DnaID","WGS" FROM syn5556216')@values
mapping=mapping[which(!is.na(mapping$WGS)),]
samps<-synapseQuery("select * from entity where parentId=='syn5522788'")
annotes<-sapply(samps$entity.id,function(x) synGet(x,downloadFile=F)@annotations)
names(annotes)<-samps$entity.id
##now i did some updates of the sequence analysis
all.muts<-getAllMutData(impact='HIGH')
#res=storeSomMutationFiles(impact='MODERATE')
res=getMutationStatsForGene("OR4C5")
res=getMutationStatsForGene("FOXD1")
res=getMutationStatsForGene("MUC6")
res=getMutationStatsForGene("NCAM1")
res=getMutationStatsForGene("SHANK3")
res=getMutationStatsForGene("FOXO6")
|
# -------------------------------------
# Implement the proposed method CAM
# the wrapper function
wrbugs <- function(ri,ni,mu.L,V.L,ind.L,gphi,initials,nburnin=1,niter=30001,nthin=5,prior){
# ri: vector of observed correlations from primary studies
# mu.L: estimated vector of factor loadings for various measurement methods
# V.L: sampling covariance matrix of the estimated loading matrix
# ind.L: matrix of indices indicating which two measurement methods are used in the primary study
# gphi: vector of indices indicating nonzero residual correlations
# ni: within-study sample sizes
# initials: list of initial values
# nburnin: number of iterations for the burnin period
# niter: number of iterations
# nthin: thinning interval
#ri = r;mu.L=vL.obs; ind.L=indL;gphi=indP;ni=N;initials=inits;nburnin=1;niter = 30001;nthin=5
tryi = 1
fit = cam.bugs(ri,ni,mu.L,V.L,ind.L,gphi,initials,niter,prior)
fit = cam.bugs(ri,ni,mu.L,V.L,ind.L,gphi,fit$initials,niter,prior)
for(tryi in 2:20){
fit.coda <- mcmc(data = fit$mcmc.chain,start = nburnin+1,end = niter,thin = nthin)
conv = geweke.diag(fit.coda)[[1]]
if(sum(abs(conv)>1.96)==0){
break;
}else{
#print(fit$initials)
fit = cam.bugs(ri,ni,mu.L,V.L,ind.L,gphi,fit$initials,niter,prior)
}
}
return(list(tryi=tryi,fit.coda = fit.coda,fit = fit,conv = conv))
}
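# Hedged usage sketch (illustrative; the objects r, N, vL.obs, V.L, indL, indP, inits and prior
# are assumed to be prepared by the calling script, as in the commented debug line above):
if (FALSE) {
  res <- wrbugs(ri = r, ni = N, mu.L = vL.obs, V.L = V.L, ind.L = indL, gphi = indP,
                initials = inits, nburnin = 5000, niter = 30001, nthin = 5, prior = prior)
  summary(res$fit.coda)                      # posterior summaries for rho0, V.rho, sd.rho and Phi
  HPDinterval(res$fit.coda, prob = 0.95)     # 95% highest posterior density intervals
}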
# The core function that implements the Bayesian sampling algorithm for the proposed method
cam.bugs <- function(ri,ni,mu.L,V.L,ind.L,gphi,initials,niter,prior){
# uninformative prior is used
# ri: vector of observed correlations from primary studies
# mu.L: estimated vector of factor loadings for measurement methods
# V.L: sampling covariance matrix of the estimated loading matrix
# ind.L: matrix of indices indicating which measurement methods are used for the two factors
# gphi: vector of indices indicating nonzero residual correlations
# ni: within-study sample sizes
# initials: list of initial values
# niter: number of iterations
#ri = r;mu.L=vL.obs; ind.L=indL;gphi=indP;ni = N;initials = inits;niter = 30001
Nstudy = length(ri)
gphi.value = sort(unique(gphi))[-1]
nphi = length(gphi.value)
Iphii = matrix(0,Nstudy,nphi)
for(i in 1:nphi){
Iphii[which(gphi==gphi.value[i]),i] = 1
}
rho0 = c(initials$rho0,rep(NA,niter))
V.rho = c(initials$V.rho,rep(NA,niter))
sd.rho = c(sqrt(initials$V.rho),rep(NA,niter))
Phi = cbind(initials$Phi,matrix(NA,nphi,niter))
rhoi = cbind(initials$rhoi,matrix(NA,Nstudy,niter))
V.ri = (1-(ri^2))^2/(ni-1)
mup.Phi = prior$Phi$mu
Vp.Phi = prior$Phi$sigma
for(bi in 1:niter){
vL = my.mvrnorm(mu.L,V.L)
vL = apply(matrix(vL,ncol=1),1,function(x) max(min(x,1),-1))
rri = vL[ind.L[,1]]*vL[ind.L[,2]]
# update V.rho
# truncated gamma
a = Nstudy/2 - 1
b = sum((rhoi[,bi]-rho0[bi])^2)/2
V.rho[bi+1] = max(0.00001,1/rtgamma(1,shape = a,scale = 1/b,a = 1,b=10000))
sd.rho[bi+1] = sqrt(V.rho[bi+1])
# update rho0
# truncated normal
rho0[bi+1] = rtruncnorm(1,a=-1,b=1,mean=mean(rhoi[,bi]),sd=sqrt(V.rho[bi+1]/Nstudy))
# update Phi
# truncated normal
sig = 1/(t(Iphii)%*%(1/V.ri) + 1/Vp.Phi)
mu = (t(Iphii)%*%((ri-rri*rhoi[,bi])/V.ri) + mup.Phi/Vp.Phi)*sig
Phi[,bi+1] = apply(cbind(mu,sqrt(sig)),1,function(x) rtruncnorm(1,a=-1,b=1,mean=x[1],sd=x[2]))
# update rhoi
# unconstrained
numerator = rho0[bi+1]/V.rho[bi+1] + rri*(ri-Iphii%*%Phi[,bi+1])/V.ri
denominator = 1/V.rho[bi+1] + (rri^2)/V.ri
mu = numerator/denominator
sig = 1/denominator
rhoi[,bi+1] = apply(cbind(mu,sqrt(sig)),1,function(x) rnorm(1,mean=x[1],sd=x[2]))
}
simiseq = data.frame(rho0=rho0[-1],V.rho=V.rho[-1],sd.rho=sd.rho[-1],Phi=t(Phi[,-1]))
last.values = list(rho0=rho0[niter+1],V.rho = V.rho[niter+1],sd.rho = sd.rho[niter+1],
Phi = Phi[,niter+1],rhoi = rhoi[,niter+1])
return(list(mcmc.chain=simiseq,initials = last.values))
}
# Function that handles the square root of reliability estimates
# if we are 100% confident about our reliability estimates
# we can set their variances to be zero
my.mvrnorm <- function(m,V){
s = m
sel0 = which(diag(V)==0)
n0var = length(sel0)
if(n0var==0){ # all reliability variances > 0
s = mvrnorm(1,m,V)
}else if( n0var < length(s) ){ # some reliability variances > 0
seln0 = which(diag(V)>0)
s[seln0] = mvrnorm(1,m[-sel0],V[-sel0,-sel0])
}
return(s)
}
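# Hedged example (illustrative): components with zero sampling variance are returned as-is;
# only the remaining components are drawn from the multivariate normal.
if (FALSE) {
  m_demo <- c(0.8, 0.7, 0.6)
  V_demo <- diag(c(0, 0.01, 0.01))   # first loading treated as known exactly
  my.mvrnorm(m_demo, V_demo)         # first element stays 0.8; the other two are drawn jointly
}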
#--------------------------------------------------
# Data generation for the simulation
# Transform the mean and sd of the normal distribution for true effect sizes (correlation)
# to those of a truncated normal
# The resulting mean and sd can be quite different from the original values when
# the true average correlation is large and the heterogeneity is substantial
# Formulas are from wiki https://en.wikipedia.org/wiki/Truncated_normal_distribution
msd.Norm2Trunc <- function(pars,ll,ul){
# ll: lower limit of the truncated normal
# ul: upper limit of the truncated normal
mu <- pars[1] # mean of the normal distribution
sd <- pars[2] # sd of the normal distribution
Zll <- (ll-mu)/sd
Zul <- (ul-mu)/sd
Z <- max(pnorm(Zul)-pnorm(Zll),.00001)
mu.trunc <- mu + sd*(dnorm(Zll)-dnorm(Zul))/Z
sd.trunc <- sd*sqrt(1+(Zll*dnorm(Zll)-Zul*dnorm(Zul))/Z-((dnorm(Zll)-dnorm(Zul))/Z)^2)
return(c(mu.trunc,sd.trunc))
}
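# Hedged check (illustrative): when the untruncated density puts almost no mass outside (-1, 1),
# truncation barely changes the moments; with a large sd the truncated mean and sd shrink.
if (FALSE) {
  msd.Norm2Trunc(c(0.2, 0.05), -1, 1)  # approximately c(0.2, 0.05)
  msd.Norm2Trunc(c(0.5, 0.50), -1, 1)  # mean pulled below 0.5, sd noticeably below 0.5
}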
# From vector to matrix
v2m <- function(v,Mind){
M = Mind
for(i in 1:ncol(Mind)){ M[,i] = v[Mind[,i]] }
return(M)
}
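# Hedged example (illustrative): Mind holds indices into v, so v2m() just looks the values up
# column by column.
if (FALSE) {
  v2m(c(0.3, 0.7), matrix(c(1, 2, 2, 1), 2, 2))
  #      [,1] [,2]
  # [1,]  0.3  0.7
  # [2,]  0.7  0.3
}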
# Simulate Population parameter matrix
# v: vector of parameter values
# sdv: vector of between-study heterogeneity of the parameter
# M: Parameter matrix
# M.ind: indicator matrix to transform v to M
# ex.values: extra values in the parameter matrix; usually 0 or 1
simPar <- function(model.list,ex.values=0){
v = model.list$v
sdv = model.list$sdv
M.ind = model.list$M.ind
seln0 = which(sdv>0)
n.n0 = length(seln0)
if(n.n0>0){
if(n.n0==1){
vs <- msd.Norm2Trunc(c(v,sdv),-1,1)
v = rtruncnorm(1,a=-1,b=1,mean=vs[1],sd=vs[2])
}else if(n.n0 == length(v)){
ms = t(apply(cbind(v,sdv),1,msd.Norm2Trunc,ll=-1,ul=1))
v = apply(ms,1,function(x) rtruncnorm(1,a=-1,b=1,mean=x[1],sd=x[2]))
}else{
ms.sub = t(apply(cbind(v[seln0],sdv[seln0]),1,msd.Norm2Trunc,ll=-1,ul=1))
vsub = apply(ms.sub,1,function(x) rtruncnorm(1,a=-1,b=1,mean=x[1],sd=x[2]))
v[seln0] = vsub
}
}
if(is.null(M.ind) == 0){
M = v2m(c(v,ex.values),M.ind)
}else{M = NULL}
return(list(M=M,v=v))
}
# To obtain population correlation matrix for reliability estimation
# vFrho: vector of factor correlations
# sdFrho: between-study heterogeneity of factor correlations
# Frho: Factor correlation matrix
# Frho.ind: indicator matrix to transform vFrho to mFrho
# vL: vector of factor loadings
# sdL: between-study heterogeneity of factor loadings
# L: Factor loading matrix
# L.ind: indicator matrix to transform vL to L
# vPhi: vector of residual correlations
# sdPhi: between-study heterogeneity of the residual correlations
# Phi: residual covariance matrix
# Phi.ind: indicator matrix to transform vPhi to Phi
getPrr <- function(rr.model){
Frho = simPar(rr.model$Frho,1)$M # generate factor correlation matrix
L = simPar(rr.model$L,0)$M # generate factor loading matrix
P <- L%*%Frho%*%t(L)
varP = diag(P)
for(tryi in 1:50){ # generate residual correlation matrix
Phi = simPar(rr.model$Phi,c(0,1-varP))$M
if(sum(eigen(Phi)$values>0)==nrow(Phi)){break}
}
P.rr <- P + Phi # population correlation matrix
P.rr = as.matrix(nearPD(P.rr)$mat)
return(P.rr)
}
# Generate observed square root of reliabilities (factor loadings)
estL <- function(P.rr,myModel,N=500,extract.names){
p = nrow(P.rr)
rr.data = as.data.frame(mvrnorm(N,rep(0,p),P.rr))
colnames(rr.data) = paste('V',1:p,sep='')
nL = length(extract.names)
# model with residual correlations;
cfa.res = seq.CFA(rr.data,myModel,extract.names)
est = coef(cfa.res$fit)
Vest = try(vcov(cfa.res$fit))
if(inherits(Vest,'try-error')==0){
Lhat = est[extract.names]
VL = as.matrix(nearPD(Vest[extract.names,extract.names])$mat)
converged = 1
}else{
Lhat = rep(NA,nL)
VL = matrix(N,nL,nL)
converged = 0
}
return(list(Lhat = Lhat, VL = VL,converged = converged))
}
seq.CFA <- function(d,myM,extract.names){
n.M <- length(myM)
for(mi in 1:n.M){
fit <- cfa(model = myM[[mi]],data = d,std.lv = TRUE)
crit1 = fit@optim$converged
Lval <- c(coef(fit)[extract.names])
crit2 = sum(abs(Lval)>1)
Vest <- try(vcov(fit))
crit3 = abs(inherits(Vest,'try-error')-1)
crit4 = (sum(eigen(inspect(fit,"theta"))$values<0)==0)
crit.all = (crit1 == 1)*(crit2 == 0)*(crit3==1)*(crit4==1)
if(crit.all == 1){break}
}
crit12 = (crit1 == 1)*(crit2 == 0)
if(crit12==0){mi = mi+1}
return(list(mi = mi,fit = fit))
}
# meta.model: simulation setting for meta-analysis
# rr.model: model specification for reliability generation
GenData <- function(simi,meta.model,rr.model,CFAModel,filename){
Nstudy = meta.model$SZ$Nstudy
mu.N = meta.model$SZ$muN
indL = meta.model$indL
indPhi = meta.model$Phi$ind
Nrr = rr.model$N
Prr = rr.model$Prr
# Generate sample sizes per study
N <- rzinb(n = Nstudy, k = 0.4, lambda = mu.N*0.7, omega = 0)
N <- N + mu.N*0.3
# Generate true individual study reliability
vL.Per = simPar(rr.model$Personality$L,0)$v[c(1,3)]
vL.SWB = simPar(rr.model$SWB$L,0)$v
vL = c(vL.Per,vL.SWB)
rr = vL[indL[,1]]*vL[indL[,2]]
# Generate effect sizes
vtmp = msd.Norm2Trunc(c(meta.model$rho$v,meta.model$rho$sdv),-1,1)
rhoi = rtruncnorm(Nstudy,-1,1,mean=vtmp[1],sd=vtmp[2]) # individual study true correlations
rhoi.rr = rhoi*rr # attenuated population correlations
Phi.values = c(simPar(meta.model$Phi)$v,0)
vPhi = Phi.values[indPhi] # individual study residual correlations
rho.manifest <- rhoi.rr + vPhi # population correlation between manifest variables
# observed correlations for individual primary studies
r.data = rep(NA,Nstudy)
for(si in 1:Nstudy){
Sigma = matrix(rho.manifest[si],2,2)
diag(Sigma) = 1
Sigma = as.matrix(nearPD(Sigma)$mat)
data = mvrnorm(N[si],rep(0,2),Sigma) # generated data for primary studies
r.data[si] = cor(data)[2,1] # sampling error
}
r.data = round(r.data,3)
# Generate reliability estimates
L.personality = estL(Prr$Personality,CFAModel$Personality,Nrr,c('L1','L3'))
L.SWB = estL(Prr$SWB,CFAModel$SWB,Nrr,paste('L',1:6,sep=''))
vL.est = c(L.personality$Lhat,L.SWB$Lhat)
VL.est = rbind(cbind(L.personality$VL,matrix(0,2,6)),cbind(matrix(0,6,2),L.SWB$VL))
rr.est = round(vL.est[indL[,1]]*vL.est[indL[,2]],3)
LvalV = c(vL.est,VL.est[lower.tri(VL.est,diag = T)])
write.table(t(c(simi,r.data)),file = filename[1],append=T,quote=F,row.names=F,col.names=F)
write.table(t(c(simi,N)),file = filename[2],append=T,quote=F,row.names=F,col.names=F)
write.table(t(c(simi,rr.est)),file=filename[3],append=T,quote=F,row.names=F,col.names=F)
write.table(t(c(simi,LvalV)),file = filename[4],append=T,quote=F,row.names=F,col.names=F)
}
v2m.Lambda <- function(vVal,p){
vL <- vVal[1:p]
vV.L <- vVal[-c(1:p)]
mV.L <- matrix(0,p,p)
mV.L[lower.tri(mV.L,diag=T)] <- vV.L
mV.L <- mV.L + t(mV.L)
diag(mV.L) <- diag(mV.L)/2
mV.L <- as.matrix(nearPD(mV.L)$mat)
return(list(v = vL,V = mV.L))
}
org.res <- function(simi,res,out.fn,T.Values,prm){
fit = summary(res$fit.coda)
tryi = res$tryi
conv = res$conv
est <- c(simi,unlist(fit$quantiles[,'50%']))
sd <- c(simi,unlist(fit$statistics[,'SD']))
np = length(unlist(T.Values[prm]))
CI = cbind(HPDinterval(res$fit.coda,prob = .95),unlist(T.Values[prm]))
cover = c(simi,apply(CI,1,function(x) (x[1]< x[3]) * (x[3] < x[2])))
CI[,3] = rep(0,np)
sig = c(simi,1-apply(CI,1,function(x) (x[1]< x[3]) * (x[3] < x[2])))
conv.ex <- c(simi,tryi,conv)
write.table(t(est),paste(out.fn,'.Est.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(sd),paste(out.fn,'.SD.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(cover),paste(out.fn,'.CI.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(CI[,1]),paste(out.fn,'.CIL.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(CI[,2]),paste(out.fn,'.CIU.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(sig),paste(out.fn,'.Sig.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(conv.ex),paste(out.fn,'.Conv.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
}
org.res.meta4 <- function(simi,res,out.fn,T.Values){
np = sum(unlist(lapply(T.Values,length)))
est = c(simi,coef(res),res$tau2)
se = c(simi,res$se,res$se.tau2)
CI = cbind(res$ci.lb,res$ci.ub)
CI = rbind(CI,c(res$tau2-1.96*res$se.tau2,res$tau2+1.96*res$se.tau2))
CI = cbind(CI,unlist(T.Values))
cover = c(simi,apply(CI,1,function(x) (x[1]< x[3]) * (x[3] < x[2])))
CI[,3] = rep(0,np)
sig = c(simi,1-apply(CI,1,function(x) (x[1]< x[3]) * (x[3] < x[2])))
write.table(t(est),paste(out.fn,'.Est.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(se),paste(out.fn,'.SD.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(cover),paste(out.fn,'.CI.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(CI[,1]),paste(out.fn,'.CIL.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(CI[,2]),paste(out.fn,'.CIU.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(sig),paste(out.fn,'.Sig.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
}
org.resNA <- function(simi,out.fn,T.Values,prm){
np = sum(unlist(lapply(T.Values[prm],length)))
write.table(t(rep(NA,np+1)),paste(out.fn,'.Est.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(rep(NA,np+1)),paste(out.fn,'.SD.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(rep(NA,np+1)),paste(out.fn,'.CI.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(rep(NA,np+1)),paste(out.fn,'.CIL.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(rep(NA,np+1)),paste(out.fn,'.CIU.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(rep(NA,np+1)),paste(out.fn,'.Sig.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(c(simi,20,rep(NA,np))),paste(out.fn,'.Conv.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
}
summary.s <- function(mi,out.fn,T.Values,prm){
vTV = unlist(T.Values)
res <- vector('list',4)
names(res) <- c('est','sd','CI','conv')
res$est <- as.matrix(read.table(paste(out.fn,'.Est.out',sep='')))
res$sd <- as.matrix(read.table(paste(out.fn,'.SD.out',sep='')))
res$CI <- as.matrix(read.table(paste(out.fn,'.CI.out',sep='')))
res$sig <- as.matrix(read.table(paste(out.fn,'.Sig.out',sep='')))
id.nconv = rep(0,nrow(res$est))
if(mi > 2){
res$conv <- as.matrix(read.table(paste(out.fn,'.Conv.out',sep='')))
id.nconv <- which(res$conv[,2]>9)
}
if(sum(id.nconv)==0){
#print(id.nconv)
summary.r <- cbind(vTV,
apply(res$est[,-1],2,mean,na.rm = T),
apply(res$est[,-1],2,sd,na.rm = T),
apply(res$sd[,-1],2,mean,na.rm = T),
apply(res$CI[,-1],2,mean,na.rm = T),
apply(res$sig[,-1],2,mean,na.rm = T))
}else{
summary.r <- cbind(unlist(T.Values),
apply(res$est[-id.nconv,-1],2,mean,na.rm = T),
apply(res$est[-id.nconv,-1],2,sd,na.rm = T),
apply(res$sd[-id.nconv,-1],2,mean,na.rm = T),
apply(res$CI[-id.nconv,-1],2,mean,na.rm = T),
apply(res$sig[-id.nconv,-1],2,mean,na.rm = T))
}
if(mi==3){
summary.r[2,6] = mean(abs(res$est[,3]/res$sd[,3])>1.96,na.rm=T)
summary.r[3,6] = mean(abs(res$est[,4]/res$sd[,4])>1.96,na.rm=T)
}
colnames(summary.r) <- c('TrueValues','Est','ESE','ASE','CR','Sig')
rownames(summary.r) <- names(vTV)
print(summary.r)
return(summary.r)
}
|
/Simulations/RCode/RFuncs_Sim.R
|
no_license
|
zijunke/CAM
|
R
| false | false | 16,023 |
r
|
# -------------------------------------
# Implement the proposed method CAM
# the wrapper function
wrbugs <- function(ri,ni,mu.L,V.L,ind.L,gphi,initials,nburnin=1,niter=30001,nthin=5,prior){
# ri: vector of observed correlations from primary studies
# mu.L: estimated vector of factor loadings for various measurement methods
# V.L: sampling covariance matrix of the estimated loading matrix
# ind.L: matrix of indices indicating which two measurement methods are used in the primary study
# gphi: vector of indices indicating nonzero residual correlations
# ni: within-study sample sizes
# initials: list of initial values
# nburnin: number of iterations for the burnin period
# niter: number of iterations
# nthin: thinning interval
#ri = r;mu.L=vL.obs; ind.L=indL;gphi=indP;ni=N;initials=inits;nburnin=1;niter = 30001;nthin=5
tryi = 1
fit = cam.bugs(ri,ni,mu.L,V.L,ind.L,gphi,initials,niter,prior)
fit = cam.bugs(ri,ni,mu.L,V.L,ind.L,gphi,fit$initials,niter,prior)
for(tryi in 2:20){
fit.coda <- mcmc(data = fit$mcmc.chain,start = nburnin+1,end = niter,thin = nthin)
conv = geweke.diag(fit.coda)[[1]]
if(sum(abs(conv)>1.96)==0){
break;
}else{
#print(fit$initials)
fit = cam.bugs(ri,ni,mu.L,V.L,ind.L,gphi,fit$initials,niter,prior)
}
}
return(list(tryi=tryi,fit.coda = fit.coda,fit = fit,conv = conv))
}
# The core function that implements the Bayesian sampling algorithm for the proposed method
cam.bugs <- function(ri,ni,mu.L,V.L,ind.L,gphi,initials,niter,prior){
# uninformative prior is used
# ri: vector of observed correlations from primary studies
# mu.L: estimated vector of factor loadings for measurement methods
# V.L: sampling covariance matrix of the estimated loading matrix
# ind.L: matrix of indices indicating which measurement methods are used for the two factors
# gphi: vector of indices indicating nonzero residual correlations
# ni: within-study sample sizes
# initials: list of initial values
# niter: number of iterations
#ri = r;mu.L=vL.obs; ind.L=indL;gphi=indP;ni = N;initials = inits;niter = 30001
Nstudy = length(ri)
gphi.value = sort(unique(gphi))[-1]
nphi = length(gphi.value)
Iphii = matrix(0,Nstudy,nphi)
for(i in 1:nphi){
Iphii[which(gphi==gphi.value[i]),i] = 1
}
rho0 = c(initials$rho0,rep(NA,niter))
V.rho = c(initials$V.rho,rep(NA,niter))
sd.rho = c(sqrt(initials$V.rho),rep(NA,niter))
Phi = cbind(initials$Phi,matrix(NA,nphi,niter))
rhoi = cbind(initials$rhoi,matrix(NA,Nstudy,niter))
V.ri = (1-(ri^2))^2/(ni-1)
mup.Phi = prior$Phi$mu
Vp.Phi = prior$Phi$sigma
for(bi in 1:niter){
vL = my.mvrnorm(mu.L,V.L)
vL = apply(matrix(vL,ncol=1),1,function(x) max(min(x,1),-1))
rri = vL[ind.L[,1]]*vL[ind.L[,2]]
# update V.rho
# truncated gamma
a = Nstudy/2 - 1
b = sum((rhoi[,bi]-rho0[bi])^2)/2
V.rho[bi+1] = max(0.00001,1/rtgamma(1,shape = a,scale = 1/b,a = 1,b=10000))
sd.rho[bi+1] = sqrt(V.rho[bi+1])
# update rho0
# truncated normal
rho0[bi+1] = rtruncnorm(1,a=-1,b=1,mean=mean(rhoi[,bi]),sd=sqrt(V.rho[bi+1]/Nstudy))
# update Phi
# truncated normal
sig = 1/(t(Iphii)%*%(1/V.ri) + 1/Vp.Phi)
mu = (t(Iphii)%*%((ri-rri*rhoi[,bi])/V.ri) + mup.Phi/Vp.Phi)*sig
Phi[,bi+1] = apply(cbind(mu,sqrt(sig)),1,function(x) rtruncnorm(1,a=-1,b=1,mean=x[1],sd=x[2]))
# update rhoi
# unconstrained
numerator = rho0[bi+1]/V.rho[bi+1] + rri*(ri-Iphii%*%Phi[,bi+1])/V.ri
denominator = 1/V.rho[bi+1] + (rri^2)/V.ri
mu = numerator/denominator
sig = 1/denominator
rhoi[,bi+1] = apply(cbind(mu,sqrt(sig)),1,function(x) rnorm(1,mean=x[1],sd=x[2]))
}
simiseq = data.frame(rho0=rho0[-1],V.rho=V.rho[-1],sd.rho=sd.rho[-1],Phi=t(Phi[,-1]))
last.values = list(rho0=rho0[niter+1],V.rho = V.rho[niter+1],sd.rho = sd.rho[niter+1],
Phi = Phi[,niter+1],rhoi = rhoi[,niter+1])
return(list(mcmc.chain=simiseq,initials = last.values))
}
# Function that handles the square root of reliability estimates
# if we are 100% confident about our reliability estimates
# we can set their variances to be zero
my.mvrnorm <- function(m,V){
s = m
sel0 = which(diag(V)==0)
n0var = length(sel0)
if(n0var==0){ # all reliability variances > 0
s = mvrnorm(1,m,V)
}else if( n0var < length(s) ){ # some reliability variances > 0
seln0 = which(diag(V)>0)
s[seln0] = mvrnorm(1,m[-sel0],V[-sel0,-sel0])
}
return(s)
}
#--------------------------------------------------
# Data generation for the simulation
# Transform the mean and sd of the normal distribution for true effect sizes (correlation)
# to those of a truncated normal
# The resulting mean and sd can be quite different from the original values when
# the true average correlation is large and the heterogeneity is substantial
# Formulas are from wiki https://en.wikipedia.org/wiki/Truncated_normal_distribution
msd.Norm2Trunc <- function(pars,ll,ul){
# ll: lower limit of the truncated normal
# ul: upper limit of the truncated normal
mu <- pars[1] # mean of the normal distribution
sd <- pars[2] # sd of the normal distribution
Zll <- (ll-mu)/sd
Zul <- (ul-mu)/sd
Z <- max(pnorm(Zul)-pnorm(Zll),.00001)
mu.trunc <- mu + sd*(dnorm(Zll)-dnorm(Zul))/Z
sd.trunc <- sd*sqrt(1+(Zll*dnorm(Zll)-Zul*dnorm(Zul))/Z-((dnorm(Zll)-dnorm(Zul))/Z)^2)
return(c(mu.trunc,sd.trunc))
}
# From vector to matrix
v2m <- function(v,Mind){
M = Mind
for(i in 1:ncol(Mind)){ M[,i] = v[Mind[,i]] }
return(M)
}
# Simulate Population parameter matrix
# v: vector of parameter values
# sdv: vector of between-study heterogeneity of the parameter
# M: Parameter matrix
# M.ind: indicator matrix to transform v to M
# ex.values: extra values in the parameter matrix; usually 0 or 1
simPar <- function(model.list,ex.values=0){
v = model.list$v
sdv = model.list$sdv
M.ind = model.list$M.ind
seln0 = which(sdv>0)
n.n0 = length(seln0)
if(n.n0>0){
if(n.n0==1){
vs <- msd.Norm2Trunc(c(v,sdv),-1,1)
v = rtruncnorm(1,a=-1,b=1,mean=vs[1],sd=vs[2])
}else if(n.n0 == length(v)){
ms = t(apply(cbind(v,sdv),1,msd.Norm2Trunc,ll=-1,ul=1))
v = apply(ms,1,function(x) rtruncnorm(1,a=-1,b=1,mean=x[1],sd=x[2]))
}else{
ms.sub = t(apply(cbind(v[seln0],sdv[seln0]),1,msd.Norm2Trunc,ll=-1,ul=1))
vsub = apply(ms.sub,1,function(x) rtruncnorm(1,a=-1,b=1,mean=x[1],sd=x[2]))
v[seln0] = vsub
}
}
if(is.null(M.ind) == 0){
M = v2m(c(v,ex.values),M.ind)
}else{M = NULL}
return(list(M=M,v=v))
}
# To obtain population correlation matrix for reliability estimation
# vFrho: vector of factor correlations
# sdFrho: between-study heterogeneity of factor correlations
# Frho: Factor correlation matrix
# Frho.ind: indicator matrix to transform vFrho to mFrho
# vL: vector of factor loadings
# sdL: between-study heterogeneity of factor loadings
# L: Factor loading matrix
# L.ind: indicator matrix to transform vL to L
# vPhi: vector of residual correlations
# sdPhi: between-study heterogeneity of the residual correlations
# Phi: residual covariance matrix
# Phi.ind: indicator matrix to transform vPhi to Phi
getPrr <- function(rr.model){
Frho = simPar(rr.model$Frho,1)$M # generate factor correlation matrix
L = simPar(rr.model$L,0)$M # generate factor loading matrix
P <- L%*%Frho%*%t(L)
varP = diag(P)
for(tryi in 1:50){ # generate residual correlation matrix
Phi = simPar(rr.model$Phi,c(0,1-varP))$M
if(sum(eigen(Phi)$values>0)==nrow(Phi)){break}
}
P.rr <- P + Phi # population correlation matrix
P.rr = as.matrix(nearPD(P.rr)$mat)
return(P.rr)
}
# Generate observed square root of reliabilities (factor loadings)
estL <- function(P.rr,myModel,N=500,extract.names){
p = nrow(P.rr)
rr.data = as.data.frame(mvrnorm(N,rep(0,p),P.rr))
colnames(rr.data) = paste('V',1:p,sep='')
nL = length(extract.names)
# model with residual correlations;
cfa.res = seq.CFA(rr.data,myModel,extract.names)
est = coef(cfa.res$fit)
Vest = try(vcov(cfa.res$fit))
if(inherits(Vest,'try-error')==0){
Lhat = est[extract.names]
VL = as.matrix(nearPD(Vest[extract.names,extract.names])$mat)
converged = 1
}else{
Lhat = rep(NA,nL)
VL = matrix(N,nL,nL)
converged = 0
}
return(list(Lhat = Lhat, VL = VL,converged = converged))
}
seq.CFA <- function(d,myM,extract.names){
n.M <- length(myM)
for(mi in 1:n.M){
fit <- cfa(model = myM[[mi]],data = d,std.lv = TRUE)
crit1 = fit@optim$converged
Lval <- c(coef(fit)[extract.names])
crit2 = sum(abs(Lval)>1)
Vest <- try(vcov(fit))
crit3 = abs(inherits(Vest,'try-error')-1)
crit4 = (sum(eigen(inspect(fit,"theta"))$values<0)==0)
crit.all = (crit1 == 1)*(crit2 == 0)*(crit3==1)*(crit4==1)
if(crit.all == 1){break}
}
crit12 = (crit1 == 1)*(crit2 == 0)
if(crit12==0){mi = mi+1}
return(list(mi = mi,fit = fit))
}
# meta.model: simulation setting for meta-analysis
# rr.model: model specification for reliability generation
GenData <- function(simi,meta.model,rr.model,CFAModel,filename){
Nstudy = meta.model$SZ$Nstudy
mu.N = meta.model$SZ$muN
indL = meta.model$indL
indPhi = meta.model$Phi$ind
Nrr = rr.model$N
Prr = rr.model$Prr
# Generate sample sizes per study
N <- rzinb(n = Nstudy, k = 0.4, lambda = mu.N*0.7, omega = 0)
N <- N + mu.N*0.3
# Generate true individual study reliability
vL.Per = simPar(rr.model$Personality$L,0)$v[c(1,3)]
vL.SWB = simPar(rr.model$SWB$L,0)$v
vL = c(vL.Per,vL.SWB)
rr = vL[indL[,1]]*vL[indL[,2]]
# Generate effect sizes
vtmp = msd.Norm2Trunc(c(meta.model$rho$v,meta.model$rho$sdv),-1,1)
rhoi = rtruncnorm(Nstudy,-1,1,mean=vtmp[1],sd=vtmp[2]) # individual study true correlations
rhoi.rr = rhoi*rr # attenuated population correlations
Phi.values = c(simPar(meta.model$Phi)$v,0)
vPhi = Phi.values[indPhi] # individual study residual correlations
rho.manifest <- rhoi.rr + vPhi # population correlation between manifest variables
# observed correlations for individual primary studies
r.data = rep(NA,Nstudy)
for(si in 1:Nstudy){
Sigma = matrix(rho.manifest[si],2,2)
diag(Sigma) = 1
Sigma = as.matrix(nearPD(Sigma)$mat)
data = mvrnorm(N[si],rep(0,2),Sigma) # generated data for primary studies
r.data[si] = cor(data)[2,1] # sampling error
}
r.data = round(r.data,3)
# Generate reliability estimates
L.personality = estL(Prr$Personality,CFAModel$Personality,Nrr,c('L1','L3'))
L.SWB = estL(Prr$SWB,CFAModel$SWB,Nrr,paste('L',1:6,sep=''))
vL.est = c(L.personality$Lhat,L.SWB$Lhat)
VL.est = rbind(cbind(L.personality$VL,matrix(0,2,6)),cbind(matrix(0,6,2),L.SWB$VL))
rr.est = round(vL.est[indL[,1]]*vL.est[indL[,2]],3)
LvalV = c(vL.est,VL.est[lower.tri(VL.est,diag = T)])
write.table(t(c(simi,r.data)),file = filename[1],append=T,quote=F,row.names=F,col.names=F)
write.table(t(c(simi,N)),file = filename[2],append=T,quote=F,row.names=F,col.names=F)
write.table(t(c(simi,rr.est)),file=filename[3],append=T,quote=F,row.names=F,col.names=F)
write.table(t(c(simi,LvalV)),file = filename[4],append=T,quote=F,row.names=F,col.names=F)
}
v2m.Lambda <- function(vVal,p){
vL <- vVal[1:p]
vV.L <- vVal[-c(1:p)]
mV.L <- matrix(0,p,p)
mV.L[lower.tri(mV.L,diag=T)] <- vV.L
mV.L <- mV.L + t(mV.L)
diag(mV.L) <- diag(mV.L)/2
mV.L <- as.matrix(nearPD(mV.L)$mat)
return(list(v = vL,V = mV.L))
}
org.res <- function(simi,res,out.fn,T.Values,prm){
fit = summary(res$fit.coda)
tryi = res$tryi
conv = res$conv
est <- c(simi,unlist(fit$quantiles[,'50%']))
sd <- c(simi,unlist(fit$statistics[,'SD']))
np = length(unlist(T.Values[prm]))
CI = cbind(HPDinterval(res$fit.coda,prob = .95),unlist(T.Values[prm]))
cover = c(simi,apply(CI,1,function(x) (x[1]< x[3]) * (x[3] < x[2])))
CI[,3] = rep(0,np)
sig = c(simi,1-apply(CI,1,function(x) (x[1]< x[3]) * (x[3] < x[2])))
conv.ex <- c(simi,tryi,conv)
write.table(t(est),paste(out.fn,'.Est.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(sd),paste(out.fn,'.SD.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(cover),paste(out.fn,'.CI.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(CI[,1]),paste(out.fn,'.CIL.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(CI[,2]),paste(out.fn,'.CIU.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(sig),paste(out.fn,'.Sig.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(conv.ex),paste(out.fn,'.Conv.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
}
org.res.meta4 <- function(simi,res,out.fn,T.Values){
np = sum(unlist(lapply(T.Values,length)))
est = c(simi,coef(res),res$tau2)
se = c(simi,res$se,res$se.tau2)
CI = cbind(res$ci.lb,res$ci.ub)
CI = rbind(CI,c(res$tau2-1.96*res$se.tau2,res$tau2+1.96*res$se.tau2))
CI = cbind(CI,unlist(T.Values))
cover = c(simi,apply(CI,1,function(x) (x[1]< x[3]) * (x[3] < x[2])))
CI[,3] = rep(0,np)
sig = c(simi,1-apply(CI,1,function(x) (x[1]< x[3]) * (x[3] < x[2])))
write.table(t(est),paste(out.fn,'.Est.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(se),paste(out.fn,'.SD.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(cover),paste(out.fn,'.CI.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(CI[,1]),paste(out.fn,'.CIL.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(CI[,2]),paste(out.fn,'.CIU.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(sig),paste(out.fn,'.Sig.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
}
org.resNA <- function(simi,out.fn,T.Values,prm){
np = sum(unlist(lapply(T.Values[prm],length)))
write.table(t(rep(NA,np+1)),paste(out.fn,'.Est.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(rep(NA,np+1)),paste(out.fn,'.SD.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(rep(NA,np+1)),paste(out.fn,'.CI.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(rep(NA,np+1)),paste(out.fn,'.CIL.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(rep(NA,np+1)),paste(out.fn,'.CIU.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(rep(NA,np+1)),paste(out.fn,'.Sig.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
write.table(t(c(simi,20,rep(NA,np))),paste(out.fn,'.Conv.out',sep=''),append = T,
quote = F,row.names = F,col.names = F)
}
summary.s <- function(mi,out.fn,T.Values,prm){
vTV = unlist(T.Values)
res <- vector('list',4)
names(res) <- c('est','sd','CI','conv')
res$est <- as.matrix(read.table(paste(out.fn,'.Est.out',sep='')))
res$sd <- as.matrix(read.table(paste(out.fn,'.SD.out',sep='')))
res$CI <- as.matrix(read.table(paste(out.fn,'.CI.out',sep='')))
res$sig <- as.matrix(read.table(paste(out.fn,'.Sig.out',sep='')))
id.nconv = rep(0,nrow(res$est))
if(mi > 2){
res$conv <- as.matrix(read.table(paste(out.fn,'.Conv.out',sep='')))
id.nconv <- which(res$conv[,2]>9)
}
if(sum(id.nconv)==0){
#print(id.nconv)
summary.r <- cbind(vTV,
apply(res$est[,-1],2,mean,na.rm = T),
apply(res$est[,-1],2,sd,na.rm = T),
apply(res$sd[,-1],2,mean,na.rm = T),
apply(res$CI[,-1],2,mean,na.rm = T),
apply(res$sig[,-1],2,mean,na.rm = T))
}else{
summary.r <- cbind(unlist(T.Values),
apply(res$est[-id.nconv,-1],2,mean,na.rm = T),
apply(res$est[-id.nconv,-1],2,sd,na.rm = T),
apply(res$sd[-id.nconv,-1],2,mean,na.rm = T),
apply(res$CI[-id.nconv,-1],2,mean,na.rm = T),
apply(res$sig[-id.nconv,-1],2,mean,na.rm = T))
}
if(mi==3){
summary.r[2,6] = mean(abs(res$est[,3]/res$sd[,3])>1.96,na.rm=T)
summary.r[3,6] = mean(abs(res$est[,4]/res$sd[,4])>1.96,na.rm=T)
}
colnames(summary.r) <- c('TrueValues','Est','ESE','ASE','CR','Sig')
rownames(summary.r) <- names(vTV)
print(summary.r)
return(summary.r)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/opsworks_operations.R
\name{opsworks_describe_layers}
\alias{opsworks_describe_layers}
\title{Requests a description of one or more layers in a specified stack}
\usage{
opsworks_describe_layers(StackId, LayerIds)
}
\arguments{
\item{StackId}{The stack ID.}
\item{LayerIds}{An array of layer IDs that specify the layers to be described. If you
omit this parameter, \code{DescribeLayers} returns a description of every
layer in the specified stack.}
}
\description{
Requests a description of one or more layers in a specified stack.
This call accepts only one resource-identifying parameter.
\strong{Required Permissions}: To use this action, an IAM user must have a
Show, Deploy, or Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more information about
user permissions, see \href{https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html}{Managing User Permissions}.
}
\section{Request syntax}{
\preformatted{svc$describe_layers(
StackId = "string",
LayerIds = list(
"string"
)
)
}
}
\keyword{internal}
|
/paws/man/opsworks_describe_layers.Rd
|
permissive
|
sanchezvivi/paws
|
R
| false | true | 1,175 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Loggers.R
\name{addDefaultErrorReportLogger}
\alias{addDefaultErrorReportLogger}
\title{Add the default error report logger}
\usage{
addDefaultErrorReportLogger(
fileName = file.path(getwd(), "errorReportR.txt"),
name = "DEFAULT_ERRORREPORT_LOGGER"
)
}
\arguments{
\item{fileName}{The name of the file to write to.}
\item{name}{A name for the logger.}
}
\description{
Add the default error report logger
}
\details{
Creates a logger that writes to a file using the "FATAL" threshold and the
\code{\link{layoutErrorReport}} layout. The file will be overwritten if it is older than 60
seconds. The user will be notified that the error report has been created, and where to find it.
}
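% Minimal illustrative call added for clarity; it relies only on the defaults
% shown in the usage section above.
\examples{
\dontrun{
addDefaultErrorReportLogger()
}
}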
|
/man/addDefaultErrorReportLogger.Rd
|
permissive
|
cran/ParallelLogger
|
R
| false | true | 789 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AEI.R
\name{AEI}
\alias{AEI}
\title{Augmented Expected Improvement}
\usage{
AEI(x, model, new.noise.var = 0, y.min = NULL, type = "UK", envir = NULL)
}
\arguments{
\item{x}{the input vector at which one wants to evaluate the criterion}
\item{model}{a Kriging model of "km" class}
\item{new.noise.var}{the (scalar) noise variance of the future observation.}
\item{y.min}{The kriging predictor at the current best point (point with
smallest kriging quantile). If not provided, this quantity is evaluated.}
\item{type}{Kriging type: "SK" or "UK"}
\item{envir}{environment for saving intermediate calculations and reusing
them within AEI.grad}
}
\value{
Augmented Expected Improvement
}
\description{
Evaluation of the Augmented Expected Improvement (AEI) criterion, which is
a modification of the classical EI criterion for noisy functions. The AEI
consists of the regular EI multiplied by a penalization function that
accounts for the diminishing payoff of observation replicates. The current
minimum y.min is chosen as the kriging predictor of the observation with
smallest kriging quantile.
}
\examples{
##########################################################################
### AEI SURFACE ASSOCIATED WITH AN ORDINARY KRIGING MODEL ####
### OF THE BRANIN FUNCTION KNOWN AT A 12-POINT LATIN HYPERCUBE DESIGN ####
##########################################################################
set.seed(421)
# Set test problem parameters
doe.size <- 12
dim <- 2
test.function <- get("branin2")
lower <- rep(0,1,dim)
upper <- rep(1,1,dim)
noise.var <- 0.2
# Generate DOE and response
doe <- as.data.frame(matrix(runif(doe.size*dim),doe.size))
y.tilde <- rep(0, 1, doe.size)
for (i in 1:doe.size) {
y.tilde[i] <- test.function(doe[i,]) + sqrt(noise.var)*rnorm(n=1)
}
y.tilde <- as.numeric(y.tilde)
# Create kriging model
model <- km(y~1, design=doe, response=data.frame(y=y.tilde),
covtype="gauss", noise.var=rep(noise.var,1,doe.size),
lower=rep(.1,dim), upper=rep(1,dim), control=list(trace=FALSE))
# Compute actual function and criterion on a grid
n.grid <- 12 # Change to 21 for a nicer picture
x.grid <- y.grid <- seq(0,1,length=n.grid)
design.grid <- expand.grid(x.grid, y.grid)
nt <- nrow(design.grid)
crit.grid <- rep(0,1,nt)
func.grid <- rep(0,1,nt)
crit.grid <- apply(design.grid, 1, AEI, model=model, new.noise.var=noise.var)
func.grid <- apply(design.grid, 1, test.function)
# Compute kriging mean and variance on a grid
names(design.grid) <- c("V1","V2")
pred <- predict.km(model, newdata=design.grid, type="UK")
mk.grid <- pred$m
sk.grid <- pred$sd
# Plot actual function
z.grid <- matrix(func.grid, n.grid, n.grid)
filled.contour(x.grid,y.grid, z.grid, nlevels=50, color = rainbow,
plot.axes = {title("Actual function");
points(model@X[,1],model@X[,2],pch=17,col="blue");
axis(1); axis(2)})
# Plot Kriging mean
z.grid <- matrix(mk.grid, n.grid, n.grid)
filled.contour(x.grid,y.grid, z.grid, nlevels=50, color = rainbow,
plot.axes = {title("Kriging mean");
points(model@X[,1],model@X[,2],pch=17,col="blue");
axis(1); axis(2)})
# Plot Kriging variance
z.grid <- matrix(sk.grid^2, n.grid, n.grid)
filled.contour(x.grid,y.grid, z.grid, nlevels=50, color = rainbow,
plot.axes = {title("Kriging variance");
points(model@X[,1],model@X[,2],pch=17,col="blue");
axis(1); axis(2)})
# Plot AEI criterion
z.grid <- matrix(crit.grid, n.grid, n.grid)
filled.contour(x.grid,y.grid, z.grid, nlevels=50, color = rainbow,
plot.axes = {title("AEI");
points(model@X[,1],model@X[,2],pch=17,col="blue");
axis(1); axis(2)})
}
\references{
D. Huang, T.T. Allen, W.I. Notz, and N. Zeng (2006), Global Optimization of
Stochastic Black-Box Systems via Sequential Kriging Meta-Models,
\emph{Journal of Global Optimization}, 34, 441-466.
}
\author{
Victor Picheny
David Ginsbourger
}
\keyword{models}
|
/man/AEI.Rd
|
no_license
|
cran/DiceOptim
|
R
| false | true | 3,917 |
rd
|
## Note: usage instructions are documented on the project wiki
###################################
# Potthoff-Whittinghill statistic #
###################################
PWstat=function(cas,temoins,alternative=c("greater","two.sided")){
P=sum(cas)/sum(cas+temoins)
obs=cas+temoins
expected=P*obs
PW=sum(expected)*sum((cas*(cas-1)/expected))
mPW=sum(cas)*(sum(cas)-1)
vPW=2*(length(cas)-1)*mPW
sPW=(PW-mPW)/sqrt(vPW)
print(sPW)
if(alternative[1]=="two.sided") return(2*(1-pnorm(abs(sPW))))
else return((1-pnorm((sPW))))
}
replicatePW=function(ns=100,ra1=0.2,ra2=0.2,unif=FALSE){
obs=sample(corine$obs,ns,TRUE)
if(unif) cas=rbinom(ns,obs,runif(ns,ra1,ra2))
else cas=rbinom(ns,obs,c(rep(ra1,ns/2),rep(ra2,ns/2)))
return(PWstat(cas,obs-cas))
}
################################
# delta statistic #
################################
kPPV=function(x0,y0,xref,yref, k=3,seuil=Inf,nmin=0,check=TRUE){
d=sqrt((x0-xref)**2+(y0-yref)**2)
i=order(d)[1:k]
if(min(d)==0 & check) i=order(d)[2:(k+1)]
if(min(d)>seuil){
i=NULL
}
else{
i=i[which(d[i]<seuil)]
}
return(i)
}
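# Illustration (added): nearest-neighbour indices of the point (0,0) among the
# reference points (0,0), (1,1), (3,4); the point itself is skipped via `check`.
# kPPV(0, 0, c(0, 1, 3), c(0, 1, 4), k = 2)   # returns c(2, 3)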
deltaStat=function(data=corine,k=5,comparison=c("knn","random"),retour=c("pvalue","statistic","sum")){
P0=sum(data$cas)/sum(data$obs)
if(comparison[1]=="knn") deltaQ=sapply(1:nrow(data),function(i) sum((data$p[i] - data$p[kPPV(data$x[i],data$y[i],data$x,data$y,k = k)])**2/
(P0*(1-P0)*(1/data$obs[i]+1/data$obs[kPPV(data$x[i],data$y[i],data$x,data$y,k=k)] )) ))
else{
deltaQ=sapply(1:nrow(data),function(i){
cc=sample(c(1:nrow(data))[-i],k)
sum((data$p[i] - data$p[cc])**2/(P0*(1-P0)*(1/data$obs[i]+1/data$obs[cc] )) )})
}
if(retour[1]=="sum") return(sum(deltaQ))
else if(retour[1]=="statistic") return(deltaQ)
else return(1-pchisq(sum(deltaQ),k*nrow(data)))
}
deltaStatRandom=function(cas,obs,k=5){
P0=sum(cas)/sum(obs)
p=cas/obs
deltaQ=sapply(1:length(cas),function(i){
cc=sample(c(1:length(cas))[-i],k)
sum((p[i] - p[cc])**2/(P0*(1-P0)*(1/obs[i]+1/obs[cc] )) )})
return(1-pchisq(sum(deltaQ),k*length(cas)))
}
simDelta=function(p1=NA,p2=p1,pourc2=0, nsim=100,k=5,data=corine,retour=c("pvalue","statistic","sum")){
P0=sum(data$cas)/(sum(data$temoins)+sum(data$cas))
if(is.na(p1)) p1=P0
pv=sapply(1:nsim,function(j){
#pv=c()
#for(j in 1:nsim){
print(j)
data0=data
#print(data0)
if((p1-p2)*pourc2==0){
data0$cas=rbinom(nrow(data0),data0$obs,p1)
}
else{
ii=rep(0,nrow(data0))
i=sample(1:nrow(data0),floor(pourc2*nrow(data0)))
ii[i]=1
data0$cas=rbinom(nrow(data0),data0$obs,p1+ii*(p2-p1))
}
data0$temoins=data0$obs-data0$cas
data0$p=data0$cas/(data0$obs)
P0=sum(data0$cas)/sum(data0$obs)
deltaQ=sapply(1:nrow(data0),function(i) sum((data0$p[i] - data0$p[kPPV(data0$x[i],data0$y[i],data0$x,data0$y,k = k)])**2/
(P0*(1-P0)*(1/data0$obs[i]+1/data0$obs[kPPV(data0$x[i],data0$y[i],data0$x,data0$y,k=k)] )) ))
# hist(deltaQ,freq=F)
# xx=seq(0,60,by=0.01)
# lines(xx,dchisq(xx,df = k),col="green")
#pv=c(pv,1-pchisq(sum(deltaQ),k*nrow(data0)))
if(retour[1]=="sum") return(sum(deltaQ))
else if(retour[1]=="statistic") return(deltaQ)
else return(1-pchisq(sum(deltaQ),k*nrow(data0)))
#}
})
return(pv)
}
############
# Examples #
############
test.obs=rpois(1000,10000)
test.cas=rbinom(1000,test.obs,0.01)
test.temoins=test.obs-test.cas
PWstat(test.cas,test.temoins) #homogeneity
test.cas=rbinom(1000,test.obs,runif(1000,0.005,0.02))
test.temoins=test.obs-test.cas
PWstat(test.cas,test.temoins) #heterogeneity
test.obs=rpois(1000,10000)
test.cas=rbinom(1000,test.obs,0.2)
deltaStat(data.frame(cas=test.cas,obs=test.obs,p=test.cas/test.obs),comparison = "random",k=11)#homogeneity
test.cas=rbinom(1000,test.obs,sample(c(0.1,0.3),1000,T))
deltaStat(data.frame(cas=test.cas,obs=test.obs,p=test.cas/test.obs),comparison = "random",k=11)#heterogeneity
|
/homogeneityStatistics.R
|
no_license
|
lucasRemera/Clustering
|
R
| false | false | 4,040 |
r
|
## intermahp - International Model of Alcohol Harms and Policies
## Copyright (C) 2018 Canadian Institute for Substance Use Research
# --- Datasets UI --- #
tagList(
tabsetPanel(
id = "tabset_datasets",
tabPanel(
title = "Upload new datasets",
value = "tabset_datasets_new",
br(),
p("Upload datasets that satisfy the specifications outlined in the InterMAHP user guide."),
p("The sample datasets provided satisfy these specfications (see guide for more details)."),
br(),
fileInput(
inputId = "datasets_upload_pc",
label = div(
"Prevalence and consumption data",
br(),
downloadLink(
"samplePC",
div(
"Sample prevalence and consumption data sheet"
)
)
),
accept = c("text/csv", "text/comma-separated-values", "text/plain", ".csv")
),
fileInput(
inputId = "datasets_upload_rr",
label = div(
"Relative risk data",
br(),
downloadLink(
outputId = "sampleRR",
label = "Sample relative risk data sheet"
)
),
accept = c("text/csv", "text/comma-separated-values", "text/plain", ".csv")
),
fileInput(
inputId = "datasets_upload_mm",
label = div(
"Morbidity and mortality data",
downloadLink(
"sampleMM",
div(
"Sample morbidity and mortality data sheet"
)
)
),
accept = c("text/csv", "text/comma-separated-values", "text/plain", ".csv")
),
withBusyIndicator(
actionButton(
"datasets_new_upload_btn",
"Upload data",
class = "btn-primary"
)
)
),
tabPanel(
title = "Use sample datasets",
value = "tabsets_datasets_sample",
br(),
p("Use sample data to explore Canadian mortality between 2007 and 2016."),
p("To begin, select years of study and ischaemic heart disease treatment."),
br(),
uiOutput("datasets_sample_years_render"),
selectInput(
inputId = "datasets_sample_rr",
label = div(
"Ischaemic heart disease treatment",
popover(
content = "Ischaemic heart disease relative risk is stratified at the meta-analysis level by treatment of abstainer bias.
<br /><br />
Zhao explicitly controls for abstainer bias by selecting studies with no bias and other methods.
<br /><br />
Roerecke reweights relative risk results from studies which pooled former and never drinkers as abstainers using a standard methodology.
<br /><br />
For more information, refer to the articles themselves:<br />
<a href=https://www.jsad.com/doi/abs/10.15288/jsad.2017.78.375>Zhao</a>
<br />
<a href=https://onlinelibrary.wiley.com/doi/abs/10.1111/j.1360-0443.2012.03780.x>Roerecke</a>
",
pos = "right",
icon("info-circle")
)
),
choices = c("Zhao", "Roerecke")
),
withBusyIndicator(
actionButton(
"datasets_sample_load_btn",
"Load data",
class = "btn-primary"
)
)
),
tabPanel(
title = "Review loaded data",
value = "tabsets_loaded_data",
br(),
conditionalPanel(
condition = "output.dataChosen",
div(
id = "metadata_div",
div(
id = "pc_meta_div",
uiOutput("pc_metadata", inline = TRUE)
),
div(
id = "rr_meta_div",
uiOutput("rr_metadata", inline = TRUE)
),
div(
id = "mm_meta_div",
uiOutput("mm_metadata", inline = TRUE)
)
),
br(),
selectInput(
inputId = "loaded_raw_data",
label = "View loaded data",
choices = list(
"Prevalence and consumption" = "pc",
"Relative risks" = "rr",
"Morbidity and mortality" = "mm"
)
),
DT::dataTableOutput("datasets_summary_dt_render")
)
)
),
# Next step message ----
div(
id = "datasets_nextMsg",
class = "next-msg",
"Next, ",
actionLink("datasets_to_settings", "review and tweak settings"),
" or ",
actionLink("datasets_to_generate_estimates", "generate estimates.")
)
)
|
/ui/panel-datasets.R
|
permissive
|
uvic-cisur/intermahp
|
R
| false | false | 4,594 |
r
|
#Need most up-to-date version of R (>3.5)
library(lme4)
library(MASS)
library(emmeans)
library(ggplot2)
library(compute.es)
library(Hmisc)
setwd("/Users/dgari004/Google\ Drive/CC_RDI_NODDI/new\ contrast\ analyses/")
data<-read.csv("/Users/dgari004/Google\ Drive/CC_RDI_NODDI/new\ contrast\ analyses/hcp_842_rdi_within_subjects.csv")
attach(data)
#make plot. Adjust fo
cc.plot <- function(data){
theme_set(theme_light(base_size = 16))
p<-ggplot(mapping = aes(x = cc_region, y = rdi)) +
geom_point(position = position_jitter(0.1), shape=21, fill = "light blue", col = "grey", size=1, stroke = 1) +
stat_summary(geom = "point", fun.data = mean_cl_normal, color = "black", size = 3) +
stat_summary(geom = "errorbar", fun.data = mean_cl_normal, color = "black", width = .1) +
stat_summary(fun.y = mean, geom="line") + ylab('RDI') +
scale_x_discrete('Corpus Callosum Region', waiver(), labels = c("G1", "G2", "G3", "B1", "B2", "B3", "I", "S1", "S2", "S3"), c(0:9)) +
ggtitle('RDI Values by Corpus Callosum Region') + theme(panel.border = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(size = .5, linetype = "solid", colour = "black"),
axis.text = element_text(size = 18, colour = "black"), plot.title = element_text(hjust = .5)
)
return(p)
}
tiff("rdi_HCP.tiff", units = 'in', width = 6, height = 8, res = 200, compression = "lzw")
cc.plot(data)
dev.off()
# provide a boxplot. more for data inspection purposes. does not save plot
data$cc_region<- factor(data$cc_region, levels=unique(data$cc_region))
boxplot(rdi ~ cc_region,
data = data,
ylab="RDI",
xlab="CC_region")
levels(data$cc_region)
####IMPORTANT ADDITION FROM DEA; because number of observations exceeds 3000, must change limits like stated below
emm_options(pbkrtest.limit = 8400)
emm_options(lmerTest.limit = 8400)
#run the mixed effects model and calculate contrast
mod1 <- lmer(rdi ~ cc_region + (1 | subject), data = data, na.action = na.omit, REML=TRUE)
summary(mod1)
tapply(rdi, cc_region, mean, na.rm=TRUE) #summarize means of each condition
n<-tapply(rdi, cc_region, length)[1] #summarize n of each condition
means = emmeans(mod1, "cc_region") #complete summary with standard error estimates
Contrasts = list(contrast1 = c(-5.5,-3.5,-1.5,0.5,2.5,4.5,2.5,0.5,-0.5,0.5)) #specify the contrast
plot(c(1:10), Contrasts$contrast1)
print(contrast.test<-contrast(means, Contrasts)) #run the contrast
tval<-summary(contrast.test)$t.ratio
#compute Cohen's d. note that this is not ideal for repeated measures, but it is fine for comparing within the same dataset
#because the d's are computed across the same subjects. see
#http://jakewestfall.org/blog/index.php/2016/03/25/five-different-cohens-d-statistics-for-within-subject-designs/
cohend<-tval*sqrt(2/n)
###DONT RUN
##this runs the model within the repeated measures ANOVA framework. Gives a different answer, as expected. More for a check.
modelAOV <- aov(rdi~factor(cc_region)+Error(factor(subject)), data = data, na.action = na.omit)
meansAOV = emmeans(modelAOV, "cc_region")
contrast(meansAOV, Contrasts)
|
/cc_contrast_rdi_hcp.r
|
no_license
|
deagaric/AHEAD_project
|
R
| false | false | 3,129 |
r
|
## These functions cache the inverse of a matrix so that repeated requests
## for the inverse can reuse the stored result instead of recomputing it.

## makeCacheMatrix creates a special "matrix" object: a list of functions that
## set/get the underlying matrix and set/get its cached inverse.
makeCacheMatrix <- function(originalMatrix = matrix()) {
InverseMatrix <- NULL
set <- function(y) {
originalMatrix <<- y
InverseMatrix <<- NULL
}
get <- function() originalMatrix
setMatrixInverse <- function(solve) InverseMatrix <<- solve
getInverseMatrix <- function() InverseMatrix
list(set = set, get = get,
setMatrixInverse = setMatrixInverse,
getInverseMatrix = getInverseMatrix)
}
## cacheSolve returns the inverse of the special "matrix" created by
## makeCacheMatrix, computing it with solve() only when no cached value exists.
cacheSolve <- function(cacheableMatrix,...) {
## Return a matrix that is the inverse of 'x'
invertedMatrix <- cacheableMatrix$getInverseMatrix()
if(!is.null(invertedMatrix)) {
message("getting cached data")
return(invertedMatrix)
}
data <- cacheableMatrix$get()
invertedMatrix <- solve(data, ...)
cacheableMatrix$setMatrixInverse(invertedMatrix)
invertedMatrix
}
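## Illustrative usage (added; not part of the original assignment file):
# m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
# cacheSolve(m)   # computes, caches, and returns the inverse
# cacheSolve(m)   # prints "getting cached data" and returns the cached inverse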
|
/testGitRepo/ProgrammingAssignment2/cachematrix.R
|
no_license
|
chaitu0909/ProgrammingAssignment2
|
R
| false | false | 1,013 |
r
|
\name{aaacgo2022}
\docType{data}
\alias{aaacgo2022}
\title{Dataset of Adekoya, Akinseye, Antonakakis, Chatziantoniou, Gabauer and Oliyide (2022)}
\description{
For detailed information see: Adekoya, O. B., Akinseye, A., Antonakakis, N., Chatziantoniou, I., Gabauer, D., and Oliyide, J. A. (2021). Crude oil and Islamic sectoral stocks: Asymmetric connectedness and investment strategies. Available at SSRN.
}
\usage{data(aaacgo2022)}
\format{zoo data.frame}
\keyword{datasets}
|
/man/aaacgo2022.Rd
|
no_license
|
GabauerDavid/ConnectednessApproach
|
R
| false | false | 477 |
rd
|
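# node.map (summary comment added for readability): maps molecular data
# (gene/compound measurements) onto KEGG pathway nodes of the requested types,
# collapses multiple mapped IDs per node with the chosen `node.sum` function,
# and returns a per-node data.frame (kegg.names, labels, all.mapped, node
# geometry, and the summarised data columns) ready for plotting.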
node.map <-
function(mol.data=NULL, node.data, node.types=c("gene", "ortholog", "compound")[1], node.sum =c("sum","mean", "median", "max", "max.abs", "random")[1], entrez.gnodes=TRUE){
type.sel=node.data$type %in% node.types
if(sum(type.sel)<1){
message("Note: ", "No specified node types in the pathway!")
plot.data=NULL
return(plot.data)
}
node.data=lapply(node.data, "[", type.sel)
n.nodes=length(node.data$kegg.names)
spacials=as.matrix(as.data.frame(node.data[c("type", "x", "y", "width", "height")]))
if(node.types[1]=="gene"){
kng=node.data$kegg.names[node.data$type=="gene"]
kng.char=gsub("[0-9]", "", unlist(kng))
if(any(kng.char>"")) entrez.gnodes=FALSE
}
na.plot.data=function(){
sapply(1:n.nodes, function(i){
kns=node.data$kegg.names[[i]]
if(node.types[1]=="gene" & entrez.gnodes) items=as.numeric(kns)
else items=kns
ord=order(items)
items=items[ord]
kns=kns[ord]
return(c(kns[1],"", spacials[i,], NA))
})
}
if(is.null(mol.data)){
plot.data=na.plot.data()
} else{
#map gene data
if(is.character(mol.data)){
gd.names=mol.data
mol.data=rep(1, length(mol.data))
names(mol.data)=gd.names
}
mol.data=cbind(mol.data)
if(is.null(colnames(mol.data))) colnames(mol.data)=paste("ge", 1:ncol(mol.data),sep="")
mapped.mols <- intersect(unlist(node.data$kegg.names), row.names(mol.data))
if(length(mapped.mols)==0){
message("Warning: ", paste("None of the genes or compounds mapped to the pathway!",
"Argument gene.idtype or cpd.idtype may be wrong.", sep="\n"))
plot.data=na.plot.data()
} else{
if(node.types[1]=="gene" & entrez.gnodes) mapped.mols =as.numeric(mapped.mols)
plot.data=sapply(1:n.nodes, function(i){
kns=node.data$kegg.names[[i]]
if(node.types[1]=="gene" & entrez.gnodes) items=as.numeric(kns)
else items=kns
ord=order(items)
items=items[ord]
kns=kns[ord]
hit=items %in% mapped.mols
if(sum(hit)==0) {
return(c(kns[1], "", spacials[i,], rep(NA, ncol(mol.data))))
} else if(sum(hit)==1) {
edata=mol.data[as.character(items[hit]),]
return(c(kns[hit], kns[hit], spacials[i,], edata))
} else {
node.sum=eval(as.name(node.sum))
# edata=apply(cbind(mol.data[as.character(items[hit]),]), 2, node.sum, na.rm=T)
# edata=apply(cbind(mol.data[as.character(items[hit]),]), 2, function(x){
edata=apply(mol.data[as.character(items[hit]),,drop=F], 2, function(x){
x=x[!is.na(x)]
if(length(x)<1) return(NA)
else return(node.sum(x, na.rm=F))
})
return(c(kns[hit][1], paste(kns[hit],collapse=","), spacials[i,], edata))
}
})
}
}
colnames(plot.data)=names(node.data$kegg.names)
plot.data=as.data.frame(t(plot.data), stringsAsFactors = F)
plot.data$labels=node.data$labels
ncs=ncol(plot.data)
plot.data=plot.data[,c(1,ncs,2:(ncs-1))]
if(is.null(mol.data)) cns="mol.data" else cns=colnames(mol.data)
colnames(plot.data)[c(1,3,9:ncs)]=c("kegg.names","all.mapped",cns)#c(1,8:ncs)
for(ic in (1:ncol(plot.data))[-c(1:4)]) plot.data[,ic]=as.numeric(plot.data[,ic])#-c(1:3)
return(plot.data)
}
|
/R/node.map.R
|
no_license
|
khemlalnirmalkar/pathview
|
R
| false | false | 3,308 |
r
|
#Things to keep in mind
#1A
mean(AverageNoteDuration) #the declaration of that variable got deleted, so I can't run it
#1B
TestData[,1] #25 outputs #this required YOU as a person to manually look at the answer
#1D
plot((TestData[,"num_syllables"]), (TestData[,"bout_duration.ms."]))
#not really a mistake, but the parens are unnecessary; commas tell you that an argument has ended
#2A
index <- which(TestData[,"num_unique_syllables"] > 1)
mean(index)#this is the average of the indices, not the average of the num sylls
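#one way to get the intended average (added): use the index to subset the column first
mean(TestData[index, "num_unique_syllables"])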
#2B
TestData[1,"avg_note_duration.ms."] > 40
TestData[3,"avg_note_duration.ms."] > 40
TestData[4,"avg_note_duration.ms."] > 40
TestData[5,"avg_note_duration.ms."] > 40
TestData[12,"avg_note_duration.ms."] > 40
TestData[23,"avg_note_duration.ms."] > 40
#you can test all of the values at once!
TestData[c(1,3,4,5,12,23),"avg_note_duration.ms."] > 40
is.vector(TestData[c(1,3,4,5,12,23),"avg_note_duration.ms."] > 40)
#doesn't really answer the question
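#if the question is whether every one of these values exceeds 40, collapse the
#logical vector into a single answer with all() (added suggestion):
all(TestData[c(1,3,4,5,12,23), "avg_note_duration.ms."] > 40)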
|
/Initial-Data-Sources/101-mistakes.R
|
no_license
|
NeuroBio/Bird-Class
|
R
| false | false | 980 |
r
|
#' @family general
#' @keywords internal
get_header <- function(large_file,
colnames_only=TRUE,
n=2,
nThread=1){
### Reading in this way is more robust and able to handle bgz format.
header <- data.table::fread(text=readLines(con = large_file, n = n),nThread = nThread)
if(colnames_only) header <- colnames(header)
return(header)
}
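# Illustrative usage (added; the file path below is hypothetical):
# get_header("results/sumstats.tsv.gz")                           # column names only
# get_header("results/sumstats.tsv.gz", colnames_only = FALSE)    # first n rows as a data.table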
|
/R/get_header.R
|
permissive
|
UKDRI/echolocatoR
|
R
| false | false | 412 |
r
|
rm(list = ls())
e <- list(thing="hat", size=8.25)
k <- list(thing="hat", thing=8.25)
list(thing="hat", thing=8.25)
a1 <- e$thing
a2 <- e[[1]]
b1 <- e$size
g <- list(name="wk", age=25, e)
xx <- list(name="wk", age=25, list(add="bbk"))
yy <- list(name="wk", name="kw")
length(yy)
xx[[3]]$add
xx$name
|
/List.R
|
no_license
|
wk-j/hello-r
|
R
| false | false | 308 |
r
|
\name{plotclus}
\alias{plotclus}
\title{Cluster plot}
\description{
Plots the data points and a representation of the cluster located.
}
\usage{
plotclus(nomlst, m, limx = c(0, 100), limy = c(0, 100), col1 = 225,
rcex = 0.68, pop, k=floor((m+1)/2))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{nomlst}{A list corresponding to the "clus" function's return value.}
\item{m}{The number of breaks of the model to be plotted.}
\item{limx}{2 element vector containing the study area bounds of the X-axis.}
\item{limy}{2 element vector containing the study area bounds of the Y-axis.}
\item{col1}{The color to be used for the cluster representation.}
\item{rcex}{The size to be used for the disc surrounding the points localized in cluster.}
\item{pop}{A 2 column matrix containing the underlying population coordinates.}
\item{k}{The number of clusters to be displayed.}
}
\value{
A R-graphic window containing the plot.
}
\details{
A cluster groups together the points between two breaks with a low mean distance. The plotreg function allows choosing the number of clusters.
}
\examples{
library(spatstat)
data(chemist)
data(grille)
data(irislist)
# location and detection of spatial clusters
# adjusted for an inhomogeneous population density
RES <- clus(chemist,grille,limx=c(-6,8),limy=c(-7,7))
# plot of the cluster located
for (i in 1:30){
plot(irislist[[i]],xlim=c(-6,8),ylim=c(-7,7),main=" ",lty=3)
par(new=TRUE)
}
plotclus(RES,m=2,limx=c(-6,8),limy=c(-7,7),rcex=11.5,pop=grille)
}
\author{Christophe Dematte{\"\i} \email{demattei@iurc.montp.inserm.fr}
}
\keyword{spatial}
|
/man/plotclus.Rd
|
no_license
|
cran/spatclus
|
R
| false | false | 1,662 |
rd
|
library(dplyr)
library(ggplot2)
library(plotly)
# load the diamonds dataset from the ggplot2 package
data(diamonds)
diamonds
# plotly will try to guess the right plot type if given no extra information
plot_ly(diamonds, x = ~cut)
# or we can tell it what to plot
plot_ly(diamonds, x = ~cut) %>% add_histogram()
# basic style options
plot_ly(diamonds, x = ~cut, color = "red")
plot_ly(diamonds, x = ~cut, color = I("red"))
plot_ly(diamonds, x = ~cut, color = I("red"), stroke = I("black"))
plot_ly(diamonds, x = ~cut, color = I("red"), stroke = I("black"), span = I(5))
plot_ly(diamonds, x = ~cut, color = I("red"), stroke = I("black"), span = I(5), alpha = I(0.2))
# use the pipe
diamonds %>%
plot_ly(x = ~cut, color = I("red")) %>%
add_histogram() %>%
layout(title = "Number of diamonds per cut category")
|
/interactive/completed/01-ia-exercise.R
|
no_license
|
bahlemots/exploratory-data-analysis
|
R
| false | false | 786 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getAccounts.R
\name{getAccounts}
\alias{getAccounts}
\title{Get Accounts}
\usage{
getAccounts(server = "test", ...)
}
\arguments{
\item{server}{Test, beta, production, or other name in R.environ OR full url of server}
\item{...}{Optional page options to pass to processRequest}
}
\description{
Get details of accounts that you have access to. Parameters
(lti_guid|registration_settings) for this request have all been enabled and
can be subsetted out if not needed.
}
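% Hypothetical usage sketch: it assumes a "test" server entry is configured for this
% session and that per_page is a valid page option handed through to processRequest.
\examples{
\dontrun{
accounts <- getAccounts(server = "test", per_page = 100)
}
}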
|
/man/getAccounts.Rd
|
no_license
|
erikpal/bRush
|
R
| false | true | 549 |
rd
|
par(mfrow = c(1,3))
x <- 0:20
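# dbinom(x, 20, 0.1) returns P(X = x) for X ~ Binomial(n = 20, p = 0.1);
# type = "h" draws the probability mass function as vertical lines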
plot(x, dbinom(x, 20, 0.1), type = "h")
plot(x, dbinom(x, 20, 0.1), type = "h",
col = "red", ylim = c(0, 0.3))
plot(x, dbinom(x, 20, 0.1), type = "h",
col = "red" ,ylim = c(0, 0.3),
main = "B(20,0.1)", lwd = 2)
|
/3-1/3. Probability and Statistics (R)/Assignment 2/1.r
|
no_license
|
Hanbyeol-Jang/KW-Uni
|
R
| false | false | 257 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fortify_stats.R
\name{fortify.acf}
\alias{fortify.acf}
\title{Convert \code{stats::acf} to \code{data.frame}}
\usage{
\method{fortify}{acf}(model, data = NULL, conf.int = TRUE,
conf.int.value = 0.95, conf.int.type = "white", ...)
}
\arguments{
\item{model}{\code{stats::acf} instance}
\item{data}{original dataset, if needed}
\item{conf.int}{Logical flag indicating whether to attach confidence intervals}
\item{conf.int.value}{Coverage probability for confidence interval}
\item{conf.int.type}{Type of confidence interval: 'white' for white noise or 'ma' for an MA(k-1) model}
\item{...}{other arguments passed to methods}
}
\value{
data.frame
}
\description{
Convert \code{stats::acf} to \code{data.frame}
}
\examples{
fortify(stats::acf(AirPassengers))
fortify(stats::pacf(AirPassengers))
fortify(stats::ccf(AirPassengers, AirPassengers))
fortify(stats::acf(AirPassengers), conf.int = TRUE)
}
|
/man/fortify.acf.Rd
|
no_license
|
randallgrout/ggfortify
|
R
| false | true | 976 |
rd
|
# load the required libraries
library(shiny)
library(tidyverse)
library(data.table)
library(DT)
library(plotly)
library(dplyr)
library(plyr)
library(shinythemes)
library(odbc)
library(dbplyr)
library(sqldf)
library(promises)
library(future)
library(ipc)
library(shinycssloaders)
library(openxlsx)
library(fasttime)   # provides fastPOSIXct(), used when reading the cannibalisation file
# load the data needed up front by the UI
# sciezka <- dirname(rstudioapi::getActiveDocumentContext()$path)
p_grupy <- character(0)
#
# con <- dbConnect(odbc::odbc(),
# dsn = "Hive z LLAP Cloudera",
# driver = "Cloudera ODBC Driver for Apache Hive",
# dbname = "training",
# Trusted_Connection = "True",
# encoding = "CP1250",
# port = 10500)
#
# con_ross <- DBI::dbConnect(odbc::odbc(),
# dsn = "rossod32",
# database = "ROSSORA",
# encoding = "CP1250",
# uid = "hati",
# pwd = "hati")
#
#
wszystkie_sklepy <- character(0)
# as.data.table(dbGetQuery(con, paste("SELECT DISTINCT dic.id_store as id_store FROM dict.dic_stores dic WHERE dic.close_date IS NULL ORDER BY dic.id_store", sep ="")))[id_store<=9000]
artykuly <- character(0)
# as.data.table(dbGetQuery(con_ross, paste("SELECT ARTNR1, ORDERKZ FROM ROSSORA.DBO.ACCESS_ARTIKEL", sep="")))[, ARTNR1 := as.numeric(ARTNR1)][ORDERKZ == 1, ARTNR1]
#
#
# ## objects needed for the alerts tab
# lista_plikow_wyd <- list.files(sciezka)
# lista_plikow_wyd_48 <- grep("^ALERT_48_", lista_plikow_wyd, value = TRUE)
#
# lista_plikow_wyd_57 <- grep("^ALERT_57_", lista_plikow_wyd, value = TRUE)
#
# nazwa_48 <- sort(lista_plikow_wyd_48, decreasing = TRUE)[1]
# nazwa_57 <- sort(lista_plikow_wyd_57, decreasing = TRUE)[1]
#
# sklepy_z_alertem_48_read <- fread(paste(sciezka, nazwa_48, sep="/"))
# sklepy_z_alertem_48_d <- as.data.table(sklepy_z_alertem_48_read)
# sklepy_z_alertem_48_list <- as.list(sklepy_z_alertem_48_d[,'StoreId'])
#
# sklepy_z_alertem_57_read <- fread(paste(sciezka, nazwa_57, sep="/"))
# sklepy_z_alertem_57_d <- as.data.table(sklepy_z_alertem_57_read)
# sklepy_z_alertem_57_list <- as.list(sklepy_z_alertem_57_d[,'StoreId'])
ui <- fluidPage(
theme = shinytheme("united"),
  # build the layout, split into the analysis and correction tabs
navbarPage(title=
"Rossmann", id = 'menu',
tabPanel("Analiza sklepów SAS",
column(3,
wellPanel(
selectizeInput(inputId = "sklepy",
label = "Wybierz numery sklepów:",
multiple = TRUE,
choices = wszystkie_sklepy,
options = list(closeAfterSelect = T)),
dateInput(inputId = "data_prognozy",
label = "Wybierz datę prognozy:",
value = Sys.Date()-1),
actionButton(inputId = "generuj", label = "Generuj wykres", icon("line-chart"), style="color: #fff; background-color: #228B22; border-color: #228B22")
)
),
column(9,tabsetPanel(
tabPanel("Wykres", fluidRow(
plotlyOutput("prognoza_total")%>%withSpinner(type = 5, color = '#808080')
)),
tabPanel("Opis zakładki", fluidRow(
htmlOutput("opis_analizy")
))
))
),
tabPanel("Wprowadzanie korekt",
column(3,
wellPanel(
radioButtons(inputId = "automat", label = h3("Automatyczna korekta?"),
choices = c("tak", "nie"),
selected = "nie"),
conditionalPanel(condition = "input.automat == 'tak'",
radioButtons(inputId = "kierunek", label = h3("Ustal kierunek korekty"),
choices = c("dowolna", "zmniejszająca", "zwiększająca"),
selected = "dowolna"),
numericInput("ograniczenie",
label = "Podaj miesięczną wartość graniczną prognozy dla artykułu",
value = 10,
step = 1)),
selectizeInput(inputId = "sklep_input",
label = "Wybierz numer sklepu:",
choices = wszystkie_sklepy,
options = list(closeAfterSelect = T)
),
numericInput("korekta",
label = "Podaj rozmiar korekty",
value = 1,
step = 0.1),
selectizeInput(
inputId = "pgrupa",
label = "Wybierz p-grupę:",
choices = p_grupy,
multiple = TRUE,
options = list(closeAfterSelect = T)),
selectizeInput(
inputId = "anty_pgrupa",
label = "Odfiltruj p-grupę:",
choices = p_grupy,
multiple = TRUE,
options = list(closeAfterSelect = T)),
selectizeInput(
inputId = "wgrupa",
label = "Wybierz w-grupę:",
choices = p_grupy,
multiple = TRUE,
options = list(closeAfterSelect = T)),
selectizeInput(
inputId = "agrupa",
label = "Wybierz a-grupę:",
choices = p_grupy,
multiple = TRUE,
options = list(closeAfterSelect = T)),
radioButtons(inputId = "radio", label = h3("Sposób wybrania artykułów"),
choices = c("Wybierz z listy", "Wklej lub wpisz"),
selected = "Wybierz z listy"),
conditionalPanel(condition = "input.radio == 'Wybierz z listy'", selectInput(
inputId = "artykul",
label = "Wybierz numery artykułów:",
choices = p_grupy,
multiple = TRUE)),
conditionalPanel(condition = "input.radio == 'Wklej lub wpisz'", textAreaInput("lis_art",
"Lista artykułów", "",
cols = 1, resize = "vertical",
placeholder = "Wklej listę artykułów przedzielonych enterem"))
,
actionButton(inputId = "symuluj", label = "Symuluj", icon("line-chart"), style="color: #fff; background-color: #5BC85B; border-color: black"),
actionButton(inputId = "zatwierdz", label = "Zatwierdź", icon("clipboard-check"), style="color: #fff; background-color: green; border-color: black", width = 110),
actionButton(inputId = "cofnij", label = "Cofnij", icon("step-backward"), style="color: #fff; background-color: orange; border-color: black", width = 110),
actionButton(inputId = "wyczysc", label = "Wyczyść", icon("skull-crossbones"), style="color: #fff; background-color: red; border-color: black", width = 110),
downloadButton("zapisz", label = "Zapisz", style="color: #fff; background-color: blue; border-color: black", width = 110),
downloadButton("pobierz", label = "Pobierz dane", style="color: #fff; background-color: light-blue; border-color: black", width = 110)
)
),
column(9,tabsetPanel( tabPanel("Wykres i tabela", fluidRow(
conditionalPanel(condition = "input.automat == 'tak'",
sliderInput(inputId = "data_przedz_auto",
label = "Wybierz przedział czasowy, który ma zostać wzięty do wygenerowania korekty automatycznej:",
min = as.Date(Sys.Date()-460),
max = as.Date(Sys.Date()-1),
value = c(as.Date(Sys.Date()-32),as.Date(Sys.Date()-2)),
timeFormat="%Y-%m-%d",
width = 1550),
sliderInput(inputId = "korekta_przedz",
label = "Wybierz przedział korekty automatycznej: ",
step = 0.1,
min = 0,
max = 10,
value = c(0.6, 2),
width = 1550)),
sliderInput(inputId = "data_przedz",
label = "Wybierz przedział czasowy korekty:",
min = Sys.Date(),
max = as.Date(Sys.Date()+120),
value = c(Sys.Date(),as.Date(Sys.Date()+30)),
timeFormat="%Y-%m-%d",
width = 1550)),
radioButtons(inputId = "wybor_agregacji", label = h3("Wybierz sposób agregacji danych"),
choices = c("per dzień", "tygodniowa"),
selected = "per dzień"),
fluidRow(
plotlyOutput("prognoza_po_korektach")%>%withSpinner(type = 5, color = '#808080')),
fluidRow(
DT::dataTableOutput("tabela_z_korektami")%>%withSpinner(type = 5, color = '#808080'))
),
tabPanel("Opis zakładki", fluidRow(
htmlOutput("opis_korekty")
))
))
),
tabPanel("Weryfikacja prognoz",
column(3,
wellPanel(
helpText("Wybierz numer sklepu i daty prognoz.")
),
wellPanel(
selectInput(inputId = "sklep_porownanie",
label = "Wybierz sklep:",
multiple = F,
choices = wszystkie_sklepy),
dateInput(inputId = "data_prognozy_1",
label = "Wybierz pierwszą datę prognozy:",
value = Sys.Date()-1),
dateInput(inputId = "data_prognozy_2",
label = "Wybierz drugą datę prognozy:",
value = Sys.Date()-2),
actionButton(inputId = "przelicz_porownanie", label = "Przelicz dane", icon("calculator"),
style="color: #fff; background-color: #228B22; border-color: #228B22"),
selectizeInput(inputId = "pgrupa_por",
label = "Wybierz p-grupę:",
multiple = T,
choices = p_grupy,
options = list(closeAfterSelect = T)),
selectizeInput(inputId = "wgrupa_por",
label = "Wybierz w-grupę:",
multiple = T,
choices = p_grupy,
options = list(closeAfterSelect = T)),
actionButton(inputId = "generuj_porownanie", label = "Generuj wykres", icon("line-chart"),
style="color: #fff; background-color: #228B22; border-color: #228B22")
)
),
column(9, tabsetPanel(tabPanel("Wykres", fluidRow(sliderInput(inputId = "data_przedz_por",
label = "Wybierz przedział czasowy analizy:",
min = as.Date(Sys.Date()-460),
max = as.Date(Sys.Date()+60),
value = c(as.Date(Sys.Date()-120),as.Date(Sys.Date()+60)),
timeFormat="%Y-%m-%d",
width = 1550)),
fluidRow(
DT::dataTableOutput("tabela")%>%withSpinner(type = 5, color = '#808080')),
fluidRow(
plotlyOutput("wykres_porownanie")%>%withSpinner(type = 5, color = '#808080')
),
fluidRow(
plotlyOutput("wykres_porownanie_zl")%>%withSpinner(type = 5, color = '#808080')
)
),
tabPanel("Opis zakładki", fluidRow(
htmlOutput("opis_porownania")
)))
))
# ),
# tabPanel("Kanibalizacja",
# column(3,
# wellPanel(
# helpText("Wybierz numer sklepu i datę prognozy.")
# ),
# wellPanel(
# selectInput(inputId = "sklep_kanibalizacja",
# label = "Wybierz sklep:",
# multiple = F,
# choices = wszystkie_sklepy),
# actionButton(inputId = "generuj_kan", label = "Generuj wykres", icon("line-chart"),
# style="color: #fff; background-color: #228B22; border-color: #228B22")
# )
# ),
# column(9, tabsetPanel(tabPanel("Wykres", fluidRow(
# plotlyOutput("wykres_kan")%>%withSpinner(type = 5, color = '#808080')
# )),
# tabPanel("Opis zakładki", fluidRow(
# htmlOutput("opis_kan")
# )))
# )
# ),
# tabPanel("Alerty",
#
# column(3,
# wellPanel(
# helpText("Wybierz alert i numer sklepu.")
# ),
# wellPanel(
# selectInput(inputId = "wybor_alertu",
# label = "Wybierz alert:",
# multiple = F,
# choices = list('48', '57')),
# selectInput(inputId = "sklep_alert",
# label = "Wybierz numer sklepu z alertem:",
# multiple = F,
# choices = sklepy_z_alertem_48_list),
# actionButton(inputId = "generuj_alert", label = "Generuj wykres", icon("line-chart"),
# style="color: #fff; background-color: #228B22; border-color: #228B22")
# )
# ),
# column(9, tabsetPanel(tabPanel("Wykres", fluidRow(
# plotlyOutput("alert_wykres")%>%withSpinner(type = 5, color = '#808080')
# )),
# tabPanel("Opis zakładki", fluidRow(
# htmlOutput("opis_alertow")
# )))
# )
# )
))
server <- function(input, output, session){
  # tab descriptions
output$opis_analizy = renderText({
"<b>Założenia:</b> <br/>
Ta zakładka służy do przesłania zapytań do bazy i przeliczenia danych.<br/>
Wykres zawiera realizacje na 460 dni w tył od dzisiejszego dnia oraz prognozy na 60 dni wprzód, na wybranych sklepach(agregacja) przy danej dacie prognozy.<br/><br/>
<strong>Instrukcja obsługi:</strong><br/>
- <strong>Wybierz numery sklepów</strong> - Wybieramy nr sklepów na których chcemy pracować pamiętając, że wybranie większej liczby niż ok.5 może spowodować spowolnione działanie aplikacji.,<br/>
- <strong>Wybierz datę prognozy</strong> - Wybieramy datę prognozy,<br/>
- <strong>kliknij 'Generuj'</strong> - Nastąpi przeliczenie które może potrwać od 1 do 5 minut, po czym wygeneruje się zbiorczy wykres realizacji na 460 dni w tył od dzisiejszego dnia oraz prognozy na 60 dni wprzód.
Po ukazaniu się wykresu przejdź do zakładki 'Wprowadzanie korekt'.<br/><br/>
<br/>"
})
output$opis_korekty = renderText({
"<b>Założenia:</b> <br/>
Ta zakładka służy do filtrowania danych, symulowania korekt, zatwierdzania ich i zapisu.<br/>
Wykres zawiera wyfiltrowane dane oraz symulację wprowadzanej korekty.<br/>
W tabeli znajdują się korekty, posortowane w kolejności dodawania.<br/>
Pamiętaj o tym by wykonać najpierw przeliczenie na poprzedniej zakładce.<br/>
Na wykresie pojawiają sie informacje o realizacji, prognozie, prognozie po symulowanej korekcie, aktualnym stoku sklepowym oraz capacity.<br/>
Capacity jest oznaczone poziomą linią jako capacity w dniu poprzedzającym prognozę dla wybranej grupy artykułów. Miara ta jest jedynie przydatna dla pojedynczych artykułów lub niewielkich grup.<br/>
W przypadku p-grup i całych sklepów capacity nie będzie widoczne na wykresie.<br/>
Korekta automatyczna to korekta wyliczana automatycznie per artykuł na danym filtrowaniu, na podstawie wybranych okresów jako iloraz średnich realizacji i prognozy.</br>
Korekta jest w przedziałach [0.5, 0.95] i [1.05, 2], z buforem 0.1, ograniczeniem ze względu na wielkość prognozy w okresie wybranym jako przedział czasowy korekty, zaokrąglona do wielokrotności 0.05, z możliwością wybrania jej kierunku. <br/><br/>
<strong>Instrukcja obsługi:</strong><br/>
- <strong>Automatyczna korekta?</strong> - Wybieramy czy korekta ma się wygenerować automatycznie per artykuł, czy dokonujemy ręcznej korekty na danym filtrowaniu,<br/>
- <strong>Wybierz numer sklepu</strong> - Wybieramy nr sklepu,<br/>
- <strong>Wybierz przedział czasowy korekty</strong> - Wybieramy przedział od do nałożenia korekty, należy pamiętać o tym, żeby daty na sklepo-artykule dla różnych korekt nie nakładały się,<br/>
- <strong>Wybierz przedział czasowy, który ma zostać wzięty do wygenerowania korekty automatycznej:</strong> - Wybieramy przedział od do, który ma zostać wzięty do automatycznej korekty jako wzór realizacji,<br/>
- <strong>Podaj rozmiar korekty</strong> - Podajemy rozmiar korekty, możemy manipulować nim za pomocą 'strzałeczek' o 0,1,<br/>
- <strong>Ustal kierunek korekty</strong> - Ustalamy, czy automatyczna korekta ma się ograniczyć do któregoś kierunku czy pozostać 'dowolna',<br/>
- <strong>Podaj wartość graniczną prognozy dla artykułu</strong> - Podajemy wartość prognozy w wybranym okresie od której artykuły są brane do korekty,<br/>
- <strong>Wybierz p-grupę</strong> - Wybieramy p-grupy,<br/>
- <strong>Odfiltruj p-grupę</strong> - Wybieramy p-grupy, na których nie chcemy wprowadzać korekty,<br/>
- <strong>Wybierz w-grupę</strong> - Wybieramy w-grupy,<br/>
- <strong>Wybierz a-grupę</strong> - Wybieramy a-grupy,<br/>
- <strong>Sposób wybrania artykułów</strong> - Wybieramy czy chcemy wkleić lub wpisać artykuły czy może wybrać z listy wyfiltrowanej na podstawie wcześniejszych filtrów ,<br/>
- <strong>Wybierz sposób agregacji</strong> - Wybieramy w jaki sposób dane mają być agregowane na wykresie,<br/>
- <strong>Przycisk 'Symuluj'</strong> - Powoduje wygenerowania wykresu na wyfiltrowanyh danych,<br/>
- <strong>Przycisk 'Zatwierdź'</strong> - Powoduje zatwierdzenie korekty,<br/>
- <strong>Przycisk 'Cofnij'</strong> - Powoduje usunięcie ostatniej korekty z tabeli,<br/>
- <strong>Przycisk 'Wyczyść'</strong> - Powoduje usunięcie wszystkich korekt z tabeli,<br/>
- <strong>Przycisk 'Zapisz'</strong> - Powoduje zapisanie korekt do pliku xlsx.<br/>
- <strong>Przycisk 'Pobierz dane'</strong> - Powoduje zapisanie aktualnie wyfiltrowanych danych (po kliknięciu symuluj!) do pliku xlsx.<br/>
<br/>"
})
output$opis_porownania = renderText({
"<b>Założenia:</b> <br/>
Ta zakładka służy do porównania prognoz z różnymi datami przeliczeń na danym sklepie<br/>"
})
output$opis_kan = renderText({
"<b>Założenia:</b> <br/>
Ta zakładka służy do wizualizacji kanibalizacji na sklepie<br/>"
})
output$opis_alertow = renderText({
"<b>Założenia:</b> <br/>
Ta zakładka służy do wizualizacji prognoz i realizacji na sklepach z alertami 48 i 57<br/>"
})
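  # reactiveValues container for state shared across observers: the final correction table,
  # temporary helpers, store stock, capacity and the automatic-correction data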
rv <- reactiveValues(g = 0, h = 0, tabela_korekta_final = data.table(`Numer sklepu`=numeric(), `Numer artykułu`=numeric(), `Data od`=as.Date(character()), `Data do`=as.Date(character()), `Typ korekty` = numeric(), `Wartość korekty` = as.double(), `Typ prognozy` = numeric()),
temp1 = 0, wszystkie_artykuly = c(), tabela_korekta = data.table(), wszystkie_sklepy = list(),
stok_sklep = data.table(id_store = numeric(), id_article= numeric(), forecast_date = as.Date(character()), stok_sklepy=numeric()),
capacity = data.table(id_store = numeric(), id_article= numeric(), capacity=numeric()), capacity1 = data.table(id_store = numeric(), id_article= numeric(), capacity=numeric()),
dane_do_korekty = data.table(id_store = numeric(), id_article= numeric(), korekta=numeric()))
dane_real <- eventReactive(input$generuj, {
    # base table holding the recalculation results
    # 5 Hadoop queries: 1-2 forecast, 3 - realisation, 4 - stock, 5 - capacity
sklepy <- paste(shQuote(input$sklepy, type="csh"), collapse=", ")
s <- Sys.time()
process_step_run_id_full <- dbGetQuery(con, paste("SELECT calculation_date as calculation_date, max(process_step_run_id) as process_step_run_id, max(process_run_id) as process_run_id, max(calculation_id) as calculation_id FROM detail_sas.sales_forecast_hist WHERE calculation_date in ('", input$data_prognozy ,"') GROUP BY calculation_date", sep=""))
s1<- Sys.time()
print(s1-s)
process_step_run_id_full <- as.data.table(process_step_run_id_full)
s <- Sys.time()
dane_real <- dbGetQuery(con, paste("select
SAS.id_store as id_store,
SAS.id_article as id_article,
DICT.product_group as product_group,
DICT.type_group as type_group,
DICT.art_group as art_group,
SAS.forecast_date as forecast_date,
sum(SAS.forecast_quantity_stat) as forecast_quantity_stat,
sum(SAS.forecast_quantity_corr) as forecast_quantity_corr,
sum(SAS.forecast_value) as forecast_value
from detail_sas.sales_forecast_hist SAS
join dict.dic_articles DICT
ON SAS.id_article = DICT.id_article
WHERE SAS.id_store in (", sklepy ,")
AND SAS.process_step_run_id in (", process_step_run_id_full[, process_step_run_id], ")
and SAS.process_run_id in (", process_step_run_id_full[, process_run_id], ")
and SAS.calculation_id in (", process_step_run_id_full[, calculation_id], ")
AND SAS.forecast_date >= '", input$data_prognozy ,"'
AND SAS.forecast_date <= '", input$data_prognozy + 60 ,"'
group by SAS.id_store, SAS.id_article, DICT.product_group, DICT.type_group, DICT.art_group,
SAS.forecast_date;", sep=""))
s1<- Sys.time()
print(s1-s)
s <- Sys.time()
dane_real_2 <- dbGetQuery(con, paste("SELECT
REAL.id_store as id_store,
REAL.id_article as id_article,
DICT.product_group as product_group,
DICT.type_group as type_group,
DICT.art_group as art_group,
REAL.working_date as working_date,
sum(REAL.sale_cnt) as sale_cnt,
sum(REAL.sale_value) as sale_value
FROM detail_sales.tickets REAL
join dict.dic_articles DICT
ON REAL.id_article = DICT.id_article
WHERE REAL.id_store in (", sklepy ,") AND REAL.working_date >= '", Sys.Date() -460 ,"'
GROUP BY REAL.id_article, REAL.id_store, REAL.working_date, DICT.product_group,
DICT.type_group, DICT.art_group;", sep=""))
s1<- Sys.time()
print(s1-s)
s <- Sys.time()
rv$stok_sklep <- dbGetQuery(con, paste("SELECT STOK.id_store as id_store,
STOK.id_article as id_article, STOK.calculate_date as forecast_date,
sum(STOK.stan_999) as stok_sklepy
FROM detail_inventory.stock_article_store_daily STOK
where STOK.id_store in (", sklepy ,") AND STOK.calculate_date >='", Sys.Date() -30 ,
"'AND STOK.calculate_date <='", input$data_prognozy - 1 ,
"'GROUP BY STOK.id_store, STOK.id_article, STOK.calculate_date;", sep=""))
s1<- Sys.time()
print(s1-s)
rv$stok_sklep <- as.data.table(rv$stok_sklep)
rv$stok_sklep[, tydzien := format(forecast_date, "%Y-%V")]
rv$stok_sklep <- rv$stok_sklep[stok_sklepy >= 0]
rv$stok_sklep <- unique(rv$stok_sklep)
s <- Sys.time()
rv$capacity1 <- dbGetQuery(con, paste("SELECT id_store as id_store, id_article as id_article,
sum(capacity) as capacity FROM detail_snap.planogram_saa
WHERE id_store in (", sklepy ,") AND planogram_date ='", input$data_prognozy - 1 ,
"'GROUP BY id_store, id_article, planogram_date;", sep=""))
s1<- Sys.time()
print(s1-s)
dane_real <- as.data.table(dane_real)
dane_real <- na.omit(dane_real)
dane_real_2 <- as.data.table(dane_real_2)
dane_real_2 <- na.omit(dane_real_2)
dane_real <- merge(dane_real, dane_real_2, by.x=c("id_store", "id_article", "product_group", "forecast_date", "type_group", "art_group"),
by.y=c("id_store", "id_article", "product_group", "working_date", "type_group", "art_group"), all=TRUE)
dane_real[, tydzien := format(forecast_date, "%Y-%V")]
    # drop junk product groups
lista_niechcianych_pgrup <- list("Test", "Scanning", "Torby na zakupy", "Produkty wylistowane przed 01.01.2010", "Materiały reklamowe", "Artykuły testowe", "Stare i nieprzypisane", "Nieprzypisane", "Nieprzypięte", "Nieskategoryzowane", "Artykuły techniczne", "Wystrój sklepów", "Online-dom", "NF Traffic", "Koszty dostaw")
dane_real <- dane_real[!(product_group %in% lista_niechcianych_pgrup)]
print("zakończyłem obliczać dane_real")
print(dane_real)
return(dane_real)
})
tabela <- eventReactive(input$generuj, {
    # prepare the data for the summary chart
req(dane_real())
rv$g<- as.data.table(dane_real())
rv$h <- rv$g[, .(forecast_quantity_stat = sum(forecast_quantity_stat, na.rm = T), forecast_quantity_corr= sum(forecast_quantity_corr, na.rm = T), forecast_value = sum(forecast_value, na.rm = T), sale_cnt = sum(sale_cnt, na.rm = T), sale_value = sum(sale_value, na.rm = T)), by=c("forecast_date","product_group","type_group", "art_group", "id_store", "id_article")]
rv$h <- merge(data.table(forecast_date = seq(min(rv$h[, forecast_date]), max(rv$h[, forecast_date]), by="days")), rv$h, by=c("forecast_date"), all.x=TRUE)
for (i in names(rv$h)){
rv$h[is.na(get(i)), (i):=0]
}
tabela_wynik <- as.data.table(rv$h)
return(tabela_wynik)
})
wykres1 <- eventReactive(input$generuj,{
tabela_wynik<- tabela()[, .(forecast_quantity_stat = sum(forecast_quantity_stat, na.rm = T), forecast_quantity_corr= sum(forecast_quantity_corr, na.rm = T), forecast_value = sum(forecast_value, na.rm = T), sale_cnt = sum(sale_cnt, na.rm = T), sale_value = sum(sale_value, na.rm = T)) , by = "forecast_date"]
ay <- list(
overlaying = "y",
side = "right",
title = "sztuki",
range = c(min(tabela_wynik$sale_cnt, tabela_wynik$forecast_quantity_stat, tabela_wynik$forecast_quantity_corr) / max(tabela_wynik$sale_value, tabela_wynik$forecast_value) * min(tabela_wynik$sale_value, tabela_wynik$forecast_value), as.numeric(max(tabela_wynik$sale_cnt, tabela_wynik$forecast_quantity_stat, tabela_wynik$forecast_quantity_corr))+1)
)
w <- plot_ly(tabela_wynik, x = ~forecast_date) %>%
add_bars(y = ~sale_value, color = I("steelblue3"), name = "realizacja_zł", opacity = 0.3) %>%
add_bars(y = ~forecast_value, color = I("firebrick3"), name = "prognoza_zł", opacity = 0.3) %>%
add_lines(y = ~sale_cnt, color = I("steelblue2"), name = "realizacja_szt", yaxis = "y2") %>%
add_lines(y = ~forecast_quantity_stat, color = I("firebrick2"), name = "prognoza_szt", yaxis = "y2") %>%
add_lines(y = ~forecast_quantity_corr, color = I("orange"), name = "prognoza_szt_corr", yaxis = "y2") %>%
layout(
title = paste("Prognoza total per dzień", sep="") , yaxis2 = ay,
yaxis=list(range = c(as.numeric(min(tabela_wynik$sale_value, tabela_wynik$forecast_value)), as.numeric(max(tabela_wynik$sale_value, tabela_wynik$forecast_value))), title = "zł"),
barmode = "group", xaxis = list(title="data")
)
return(w)
}
)
output$prognoza_total <- renderPlotly({
wykres1()
})
  # observers keeping the select inputs in sync with the loaded data
observeEvent(input$menu ,{
updateSelectizeInput(session, "sklep_input",
choices = input$sklepy,
options = list(closeAfterSelect = T))
}, ignoreInit = T)
observe({
updateSelectizeInput(session, "pgrupa",
choices = unique(dane_real()[id_store==input$sklep_input][order(product_group)][, product_group]),
options = list(closeAfterSelect = T)
)
})
observe({
updateSelectizeInput(session, "anty_pgrupa",
choices = unique(dane_real()[id_store==input$sklep_input][order(product_group)][, product_group]),
options = list(closeAfterSelect = T)
)
})
observe({
if(is.null(input$pgrupa)){c =unique(dane_real()[id_store==input$sklep_input][order(type_group)][, type_group])}
else{c = unique(dane_real()[id_store==input$sklep_input][product_group %in% input$pgrupa][order(type_group)][, type_group])}
updateSelectizeInput(session, "wgrupa",
choices = c,
options = list(closeAfterSelect = T)
)
})
observe({
if(is.null(input$pgrupa)&is.null(input$wgrupa)){c =unique(dane_real()[id_store==input$sklep_input][order(art_group)][, art_group])}
else if((!is.null(input$pgrupa))&(is.null(input$wgrupa))){c = unique(dane_real()[id_store==input$sklep_input][product_group %in% input$pgrupa][order(art_group)][, art_group])}
else if((is.null(input$pgrupa))&(!is.null(input$wgrupa))){c = unique(dane_real()[id_store==input$sklep_input][type_group %in% input$wgrupa][order(art_group)][, art_group])}
else{c = unique(dane_real()[id_store==input$sklep_input][product_group %in% input$pgrupa][type_group %in% input$wgrupa][order(art_group)][, art_group])}
updateSelectizeInput(session, "agrupa",
choices = c,
options = list(closeAfterSelect = T)
)
})
observe({
if((!is.null(input$pgrupa))&(is.null(input$wgrupa))){c = unique(dane_real()[id_store==input$sklep_input][product_group %in% input$pgrupa][, id_article])}
else if (!is.null(input$pgrupa)&!is.null(input$wgrupa)&is.null(input$agrupa)){c = unique(dane_real()[id_store==input$sklep_input][product_group %in% input$pgrupa]
[type_group %in% input$wgrupa][, id_article])}
else if(!is.null(input$pgrupa)&!is.null(input$wgrupa)&!is.null(input$agrupa)){c = unique(dane_real()[id_store==input$sklep_input][product_group %in% input$pgrupa]
[type_group %in% input$wgrupa][art_group %in% input$agrupa][, id_article])}
else if(is.null(input$pgrupa)&is.null(input$wgrupa)&!is.null(input$agrupa)){c = unique(dane_real()[id_store==input$sklep_input]
[art_group %in% input$agrupa][, id_article])}
else if(is.null(input$pgrupa)&!is.null(input$wgrupa)&is.null(input$agrupa)){c = unique(dane_real()[id_store==input$sklep_input]
[type_group %in% input$wgrupa][, id_article])}
else{c = unique(dane_real()[id_store==input$sklep_input]
[type_group %in% input$wgrupa][art_group %in% input$agrupa][, id_article])}
updateSelectizeInput(session, "artykul",
choices = c,
options = list(closeAfterSelect = T)
)
})
observeEvent(input$cofnij, {
if(NROW(rv$tabela_korekta_final)>0){
showNotification("Bieżąca korekta została wycofana, działaj dalej :P", type = "warning")
rv$tabela_korekta_final <- setdiff(rv$tabela_korekta_final, rv$tabela_korekta)}
})
observeEvent(input$wyczysc, {
showNotification("Tabela z korektami została wyczyszczona, zacznij od nowa.", type = "warning")
rv$tabela_korekta_final = data.table(`Numer sklepu`=numeric(), `Numer artykułu`=numeric(), `Data od`=as.Date(character()), `Data do`=as.Date(character()), `Typ korekty` = numeric(), `Wartość korekty` = as.double(), `Typ prognozy` = numeric())
rv$wszystkie_artykuly = c()
})
updated_artykul <- eventReactive(input$symuluj, {
if(input$radio=='Wybierz z listy'){
return(input$artykul)}
else{lista_art <- gsub(" ", "", input$lis_art)
lista_art <- strsplit(lista_art, "\n")[[1]]
return(lista_art)}})
tabela_filtr <- eventReactive(input$symuluj, {
    # prepare and filter the data at the selected aggregation level (daily or weekly)
rv$g<- as.data.table(dane_real())
if(input$wybor_agregacji=="tygodniowa"){
rv$h <- rv$g[, .(forecast_quantity_stat = sum(forecast_quantity_stat, na.rm = T), forecast_quantity_corr= sum(forecast_quantity_corr, na.rm = T), forecast_value = sum(forecast_value, na.rm = T), sale_cnt = sum(sale_cnt, na.rm = T), sale_value = sum(sale_value, na.rm = T)),
by=c("tydzien", "product_group","type_group", "art_group", "id_store","id_article")]
rv$h <- merge(data.table(tydzien = format(seq(min(dane_real()[, forecast_date], na.rm = T), max(dane_real()[, forecast_date], na.rm = T), by="week"),"%Y-%V")), rv$h, by=c("tydzien"), all.x=TRUE)
for (i in names(rv$h)){
rv$h[is.na(get(i)), (i):=0]
}
tabela_filtr <- rv$h[, korekta := NULL]
tabela_filtr <- tabela_filtr[, korekta_final := 1]
temp <- data.table(tydzien=seq(as.Date(input$data_przedz[1]),as.Date(input$data_przedz[2]), by="week"))
temp[, tydzien := format(tydzien, "%Y-%V")]
tabela_filtr <- merge(tabela_filtr, temp, by=c("tydzien"), all.x=TRUE)
daty <- data.table(tydzien = format(seq(min(rv$stok_sklep[, forecast_date], na.rm = T), max(rv$stok_sklep[, forecast_date], na.rm = T), by="week"),"%Y-%V"))
stok_sklep <- rv$stok_sklep[, .(stok_sklepy = sum(stok_sklepy, na.rm = T)), by=c("tydzien", "id_store","id_article")]
stok_sklep <- merge(stok_sklep, daty, by='tydzien', all.y = T)
tabela_filtr <- merge(tabela_filtr, stok_sklep, by = c("id_store", "id_article", "tydzien"), all = T)
}
else{
rv$h <- rv$g[, .(forecast_quantity_stat = sum(forecast_quantity_stat, na.rm = T), forecast_quantity_corr= sum(forecast_quantity_corr, na.rm = T), forecast_value = sum(forecast_value, na.rm = T), sale_cnt = sum(sale_cnt, na.rm = T), sale_value = sum(sale_value, na.rm = T)),
by=c("forecast_date", "product_group","type_group", "art_group", "id_store","id_article")]
rv$h <- merge(data.table(forecast_date = seq(min(dane_real()[, forecast_date], na.rm = T), max(dane_real()[, forecast_date], na.rm = T), by="days")), rv$h, by=c("forecast_date"), all.x=TRUE)
for (i in names(rv$h)){
rv$h[is.na(get(i)), (i):=0]
}
tabela_filtr <- rv$h[, korekta := NULL]
tabela_filtr <- tabela_filtr[, korekta_final := 1]
temp <- data.table(forecast_date=seq(as.Date(input$data_przedz[1]),as.Date(input$data_przedz[2]), by="days"))
tabela_filtr <- merge(tabela_filtr, temp, by=c("forecast_date"), all.x=TRUE)
daty <- data.table(forecast_date = seq(min(rv$stok_sklep[, forecast_date], na.rm = T), max(rv$stok_sklep[, forecast_date], na.rm = T), by="days"))
stok_sklep <- rv$stok_sklep[, .(stok_sklepy = sum(stok_sklepy, na.rm = T)), by=c("forecast_date", "id_store","id_article")]
stok_sklep <- merge(stok_sklep, daty, by='forecast_date', all.y = T)
tabela_filtr <- merge(tabela_filtr, stok_sklep, by = c("id_store", "id_article", "forecast_date"), all = T)
}
    # filters
tabela_filtr1 <- tabela_filtr[id_store == input$sklep_input]
if(input$radio=="Wybierz z listy"){
if(!is.null(input$pgrupa)){tabela_filtr1 <- tabela_filtr1[product_group %in% input$pgrupa]}
if(!is.null(input$anty_pgrupa)){tabela_filtr1 <- tabela_filtr1[!product_group %in% input$anty_pgrupa]}
if(!is.null(input$wgrupa)){tabela_filtr1 <- tabela_filtr1[type_group %in% input$wgrupa]}
if(!is.null(input$agrupa)){tabela_filtr1 <- tabela_filtr1[art_group %in% input$agrupa]}}
if(!is.null(updated_artykul())){tabela_filtr1 <- tabela_filtr1[id_article %in% updated_artykul()]}
rv$capacity <- as.data.table(rv$capacity1)
rv$capacity <- rv$capacity[, capacity := as.numeric(capacity)]
rv$capacity <- rv$capacity[id_store %in% tabela_filtr1[, id_store]]
rv$capacity <- rv$capacity[id_article %in% tabela_filtr1[, id_article]]
return(tabela_filtr1)
})
  # snapshot the input values with eventReactive so the chart only refreshes when "Symuluj" is clicked
updated_pgrupa <- eventReactive(input$symuluj, {input$pgrupa})
updated_anty_pgrupa <- eventReactive(input$symuluj, {input$anty_pgrupa})
updated_wgrupa <- eventReactive(input$symuluj, {input$wgrupa})
updated_agrupa <- eventReactive(input$symuluj, {input$agrupa})
updated_sklep <- eventReactive(input$symuluj, {input$sklep_input})
updated_wybor_agregacji <- eventReactive(input$symuluj, {input$wybor_agregacji})
updated_korekta <- eventReactive(input$symuluj, {input$korekta})
updated_automat <- eventReactive(input$symuluj, {input$automat})
updated_kierunek <- eventReactive(input$symuluj, {input$kierunek})
updated_ograniczenie <- eventReactive(input$symuluj, {input$ograniczenie})
output$prognoza_po_korektach <- renderPlotly({
    # prepare the data for the chart, including applying the automatic correction
    # the result is a chart of the filtered data with the simulated correction overlaid
req(tabela_filtr())
tytul_pgrupa <- if(length(updated_pgrupa())<=3){paste(updated_pgrupa(), collapse=", ")}
else{paste(updated_pgrupa()[1], updated_pgrupa()[2], updated_pgrupa()[3], "...", sep = ", ")}
tytul_wgrupa<-if(length(updated_wgrupa())<=3){paste(updated_wgrupa(), collapse=", ")}
else{paste(updated_wgrupa()[1], updated_wgrupa()[2], updated_wgrupa()[3], "...", sep = ", ")}
tytul_agrupa<-if(length(updated_agrupa())<=3){paste(updated_agrupa(), collapse=", ")}
else{paste(updated_agrupa()[1], updated_agrupa()[2], updated_agrupa()[3], "...", sep = ", ")}
tabela_filtr_wykres <- as.data.table(tabela_filtr())
if(updated_wybor_agregacji()=="tygodniowa"){
if(updated_automat()=="tak"){
      # helper tables with the reference data for the automatic correction: realisation summed over the comparison period and forecast summed over the correction period
real_do_korekty <- tabela_filtr_wykres[tydzien %in% format(seq(as.Date(input$data_przedz_auto[1]),as.Date(input$data_przedz_auto[2]), by="week"),"%Y-%V"), .(sale_cnt = sum(sale_cnt, na.rm = T)), by = c("id_store", "id_article")]
prog_do_korekty <- tabela_filtr_wykres[tydzien %in% format(seq(as.Date(input$data_przedz[1]),as.Date(input$data_przedz[2]), by="week"),"%Y-%V"), .(forecast_quantity_stat = sum(forecast_quantity_stat, na.rm = T)), by = c("id_store", "id_article")]
dane_do_korekty <- merge(real_do_korekty, prog_do_korekty, by = c("id_store", "id_article"), all = T)
print(dane_do_korekty)
      # keep only articles present in both realisation and forecast, with a forecast in the correction period above the chosen threshold (default 10 units)
dane_do_korekty <- dane_do_korekty[id_article %in% real_do_korekty[, id_article]&id_article %in% prog_do_korekty[, id_article]]
dane_do_korekty <- dane_do_korekty[!(forecast_quantity_stat<updated_ograniczenie())]
      # period lengths used to turn the sums into mean realisation and mean forecast before comparing them
okres_prog <- NROW(as.data.table(format(seq(as.Date(input$data_przedz[1]),as.Date(input$data_przedz[2]), by="week"),"%Y-%V")))-1
okres_real <- NROW(as.data.table(format(seq(as.Date(input$data_przedz_auto[1]),as.Date(input$data_przedz_auto[2]), by="week"),"%Y-%V")))-1
      # the automatic correction for each article is the mean realisation from the chosen period divided by the mean forecast from the period the correction is applied to
      # parameters:
      # - corrections between 0.9 and 1.1 are dropped (treated as 1)
      # - a buffer of 0.1 is applied to decreasing corrections and 0.2 to increasing ones
      # - the correction is capped at a minimum and maximum value, default: 0.6 and 2.0
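      # worked example (illustrative numbers only): a mean weekly realisation of 12 against a
      # mean weekly forecast of 20 gives 12/20 = 0.60; that is outside [0.9, 1.1], so the 0.1
      # buffer raises it to 0.70, which stays inside the default [0.6, 2] bounds and is later
      # rounded to a multiple of 0.05, giving a final automatic correction of 0.70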
dane_do_korekty[, korekta := round(as.numeric(sale_cnt/okres_real)/as.numeric(forecast_quantity_stat/okres_prog),2)][, korekta := ifelse(korekta<=1.1&korekta>=0.9, 1, korekta)][, korekta := ifelse(korekta>1, korekta-0.2, korekta+0.1)][, korekta := ifelse(korekta<=1.1&korekta>=0.9, 1, korekta)][, korekta := ifelse(korekta<input$korekta_przedz[1], input$korekta_przedz[1], korekta)][, korekta := ifelse(korekta>input$korekta_przedz[2], input$korekta_przedz[2], korekta)]
dane_do_korekty <- dane_do_korekty[, korekta, by = c("id_store", "id_article")]
dane_do_korekty <- na.omit(dane_do_korekty)
dane_do_korekty <- dane_do_korekty[!is.na(korekta)][!(korekta==1)]
      # keep only corrections matching the selected direction
if(updated_kierunek()=="zmniejszająca"){
dane_do_korekty <- dane_do_korekty[!(`korekta`>1)]
}else if(updated_kierunek()=="zwiększająca"){
dane_do_korekty <- dane_do_korekty[!(`korekta`<1)]
}
rv$dane_do_korekty <- dane_do_korekty[, korekta := round_any(korekta, 0.05)]
tabela_filtr_wykres <- merge(tabela_filtr_wykres, rv$dane_do_korekty, by = c("id_store", "id_article"), all.x = T)
}else{
tabela_filtr_wykres[, korekta := updated_korekta()]
}
      # multiply the forecast by the correction
tabela_filtr_wykres[, forecast_quantity_stat_new := ifelse(!is.na(korekta), forecast_quantity_stat * korekta, forecast_quantity_stat)]
      # prepare the data for the chart
tabela_filtr_wykres <- as.data.table(tabela_filtr_wykres[, .(stok_sklepy = sum(stok_sklepy, na.rm = T), forecast_quantity_stat = sum(forecast_quantity_stat, na.rm = T), forecast_quantity_stat_new = sum(forecast_quantity_stat_new, na.rm = T), forecast_quantity_corr= sum(forecast_quantity_corr, na.rm = T), forecast_value = sum(forecast_value, na.rm = T), sale_cnt = sum(sale_cnt, na.rm = T), sale_value = sum(sale_value, na.rm = T)), by=c("tydzien")])
daty <- data.table(tydzien = format(seq(min(dane_real()[, forecast_date], na.rm = T), max(dane_real()[, forecast_date], na.rm = T), by="week"),"%Y-%V"))
tabela_filtr_wykres <- merge(tabela_filtr_wykres, daty, by='tydzien', all.y = T)
tabela_filtr_wykres[is.na(tabela_filtr_wykres)] <- 0
tabela_filtr_wykres[!(tydzien %in% format(seq(as.Date(input$data_przedz[1]),as.Date(input$data_przedz[2]), by="week"),"%Y-%V")), forecast_quantity_stat_new := 0]
      # horizontal line with the latest capacity of the selected articles
capa <- sum(rv$capacity[, capacity], na.rm = T)
print(paste("capacity: ",capa, sep=""))
ay <- list(
overlaying = "y",
side = "right",
title = "stok",
range = c(as.numeric(max(tabela_filtr_wykres$stok_sklepy) / max(tabela_filtr_wykres$sale_cnt) * min(tabela_filtr_wykres$sale_cnt)),
as.numeric(max(tabela_filtr_wykres[, stok_sklepy])))
)
plot_ly(data = tabela_filtr_wykres, x = ~tydzien) %>%
add_lines(y = ~sale_cnt, color = I("steelblue2"), name = "realizacja_szt") %>%
add_lines(y = ~forecast_quantity_stat, color = I("firebrick2"), name = "prognoza_szt") %>%
add_lines(y = ~forecast_quantity_corr, color = I("orange"), name = "prognoza_szt_corr") %>%
add_lines(y = ~forecast_quantity_stat_new, color = I("green"), name = "prognoza_szt_stat_new") %>%
add_lines(y = ~stok_sklepy, color = I("wheat"), name = "stok sklepowy", opacity = 0.7, yaxis = 'y2') %>%
add_lines(y = capa, color = I("turquoise1"), name = "capacity", opacity = 0.7, yaxis = 'y2', if(is.null(updated_artykul())) {visible=FALSE}) %>%
layout(
title = if(is.null(updated_pgrupa())){paste("Prognoza, sklep: ",updated_sklep(), sep="")}
else{paste("Prognoza: ", tytul_pgrupa, tytul_wgrupa, tytul_agrupa, ", sklep: ", updated_sklep(), sep=" ")},
yaxis = list(title="sztuki", range = c(min(tabela_filtr_wykres$sale_cnt), max(max(tabela_filtr_wykres$sale_cnt),max(tabela_filtr_wykres$forecast_quantity_stat_new)))), xaxis = list(title="data"), yaxis2 = ay
)
}else{
if(updated_automat()=="tak"){
      # helper tables with the reference data for the automatic correction: realisation summed over the comparison period and forecast summed over the correction period
real_do_korekty <- tabela_filtr_wykres[forecast_date %in% seq(as.Date(input$data_przedz_auto[1]),as.Date(input$data_przedz_auto[2]), by="days"), .(sale_cnt = sum(sale_cnt, na.rm = T)), by = c("id_store", "id_article")]
prog_do_korekty <- tabela_filtr_wykres[forecast_date %in% seq(as.Date(input$data_przedz[1]),as.Date(input$data_przedz[2]), by="days"), .(forecast_quantity_stat = sum(forecast_quantity_stat, na.rm = T)), by = c("id_store", "id_article")]
dane_do_korekty <- merge(real_do_korekty, prog_do_korekty, by = c("id_store", "id_article"), all = T)
      # keep only articles present in both realisation and forecast, with a forecast in the correction period above the chosen threshold (default 10 units)
dane_do_korekty <- dane_do_korekty[id_article %in% real_do_korekty[, id_article]][id_article %in% prog_do_korekty[, id_article]]
dane_do_korekty <- dane_do_korekty[!(forecast_quantity_stat<updated_ograniczenie())]
      # period lengths used to turn the sums into mean realisation and mean forecast before comparing them
okres_prog <- NROW(as.data.table(seq(as.Date(input$data_przedz[1]),as.Date(input$data_przedz[2]), by="days")))-1
okres_real <- NROW(as.data.table(seq(as.Date(input$data_przedz_auto[1]),as.Date(input$data_przedz_auto[2]), by="days")))-1
      # the automatic correction for each article is the mean realisation from the chosen period divided by the mean forecast from the period the correction is applied to
      # parameters (see the worked example in the weekly branch above):
      # - corrections between 0.9 and 1.1 are dropped (treated as 1)
      # - a buffer of 0.1 is applied to decreasing corrections and 0.2 to increasing ones
      # - the correction is capped at a minimum and maximum value, default: 0.6 and 2.0
dane_do_korekty[, korekta := round(as.numeric(sale_cnt/okres_real)/as.numeric(forecast_quantity_stat/okres_prog),2)][, korekta := ifelse(korekta<=1.1&korekta>=0.9, 1, korekta)][, korekta := ifelse(korekta>1, korekta-0.2, korekta+0.1)][, korekta := ifelse(korekta<=1.1&korekta>=0.9, 1, korekta)][, korekta := ifelse(korekta<input$korekta_przedz[1], input$korekta_przedz[1], korekta)][, korekta := ifelse(korekta>input$korekta_przedz[2], input$korekta_przedz[2], korekta)]
dane_do_korekty <- dane_do_korekty[, korekta, by = c("id_store", "id_article")]
dane_do_korekty <- na.omit(dane_do_korekty)
dane_do_korekty <- dane_do_korekty[!is.na(korekta)][!(korekta==1)]
      # keep only corrections matching the selected direction
if(updated_kierunek()=="zmniejszająca"){
dane_do_korekty <- dane_do_korekty[!(`korekta`>1)]
}else if(updated_kierunek()=="zwiększająca"){
dane_do_korekty <- dane_do_korekty[!(`korekta`<1)]
}
rv$dane_do_korekty <- dane_do_korekty[, korekta := round_any(korekta, 0.05)]
tabela_filtr_wykres <- merge(tabela_filtr_wykres, rv$dane_do_korekty, by = c("id_store", "id_article"), all.x = T)
}else{
tabela_filtr_wykres[, korekta := updated_korekta()]
}
      # multiply the forecast by the correction
tabela_filtr_wykres[, forecast_quantity_stat_new := ifelse(!is.na(korekta), forecast_quantity_stat * korekta, forecast_quantity_stat)]
      # prepare the data for the chart
tabela_filtr_wykres <- as.data.table(tabela_filtr_wykres[, .(stok_sklepy = sum(stok_sklepy, na.rm = T), forecast_quantity_stat = sum(forecast_quantity_stat, na.rm = T), forecast_quantity_stat_new = sum(forecast_quantity_stat_new, na.rm = T), forecast_quantity_corr= sum(forecast_quantity_corr, na.rm = T), forecast_value = sum(forecast_value, na.rm = T), sale_cnt = sum(sale_cnt, na.rm = T), sale_value = sum(sale_value, na.rm = T)), by=c("forecast_date")])
daty <- data.table(forecast_date = seq(min(dane_real()[, forecast_date], na.rm = T), max(dane_real()[, forecast_date], na.rm = T), by="days"))
tabela_filtr_wykres <- merge(tabela_filtr_wykres, daty, by="forecast_date", all.y = T)
tabela_filtr_wykres[is.na(tabela_filtr_wykres)] <- 0
tabela_filtr_wykres[!(forecast_date %in% seq(as.Date(input$data_przedz[1]),as.Date(input$data_przedz[2]), by="days")), forecast_quantity_stat_new := 0]
      # horizontal line with the latest capacity of the selected articles
capa <- sum(rv$capacity[, capacity], na.rm = T)
print(paste("capacity: ",capa, sep=""))
ay <- list(
overlaying = "y",
side = "right",
title = "stok",
range = c(as.numeric(max(tabela_filtr_wykres$stok_sklepy) / max(tabela_filtr_wykres$sale_cnt) * min(tabela_filtr_wykres$sale_cnt)),
as.numeric(max(tabela_filtr_wykres[, stok_sklepy])))
)
plot_ly(data = tabela_filtr_wykres, x = ~forecast_date) %>%
add_lines(y = ~sale_cnt, color = I("steelblue2"), name = "realizacja_szt") %>%
add_lines(y = ~forecast_quantity_stat, color = I("firebrick2"), name = "prognoza_szt") %>%
add_lines(y = ~forecast_quantity_corr, color = I("orange"), name = "prognoza_szt_corr") %>%
add_lines(y = ~forecast_quantity_stat_new, color = I("green"), name = "prognoza_szt_stat_new") %>%
add_lines(y = ~stok_sklepy, color = I("wheat"), name = "stok sklepowy", opacity = 0.7, yaxis = 'y2') %>%
add_lines(y = capa, color = I("turquoise1"), name = "capacity", opacity = 0.7, yaxis = 'y2', if(is.null(updated_artykul())) {visible=FALSE}) %>%
layout(
title = if(is.null(updated_pgrupa())){paste("Prognoza, sklep: ",updated_sklep(), sep="")}
else{paste("Prognoza: ", tytul_pgrupa, tytul_wgrupa, tytul_agrupa, ", sklep: ", updated_sklep(), sep=" ")},
yaxis = list(title="sztuki", range = c(min(tabela_filtr_wykres$sale_cnt), max(max(tabela_filtr_wykres$sale_cnt),max(tabela_filtr_wykres$forecast_quantity_stat_new)))), xaxis = list(title="data"), yaxis2 = ay
)
}
})
tabela_pobierz <- eventReactive(input$symuluj,{
tabela_pobierz <- merge(tabela_filtr(), rv$capacity, by = c("id_store", "id_article"), all = T)
View(tabela_pobierz)
return(tabela_pobierz)
})
output$pobierz <- downloadHandler(
filename = function() {
paste(sciezka, "/dane_sklep_",updated_sklep(), ".xlsx", sep="")},
content = function(file) {
write.xlsx(tabela_pobierz(), file, sep=";", dec=",")
}
)
observeEvent(input$zatwierdz,{
    # build the final correction table
print("jestem w ostatnim bloku")
data_od <- input$data_przedz[1]
data_do <- input$data_przedz[2]
rv$temp1 <- dane_real()[, .(min1 = min(forecast_date), max1=max(forecast_date)), by=c("id_store", "id_article")]
rv$g <- as.data.table(dane_real())
if(input$radio=="Wybierz z listy"){
if(!is.null(updated_pgrupa())){rv$g <- as.data.table(rv$g)[product_group %in% updated_pgrupa()]}
if(!is.null(updated_anty_pgrupa())){rv$g <- as.data.table(rv$g)[!product_group %in% updated_anty_pgrupa()]}
if(!is.null(updated_wgrupa())){rv$g <- as.data.table(rv$g)[type_group %in% updated_wgrupa()]}
if(!is.null(updated_agrupa())){rv$g <- as.data.table(rv$g)[art_group %in% updated_agrupa()]}}
if(!is.null(updated_artykul())){rv$g <- as.data.table(rv$g)[id_article %in% updated_artykul()]}
rv$temp1 <- rv$temp1[id_store %in% updated_sklep()]
rv$tabela_korekta <- data.table()
rv$tabela_korekta <- as.data.table(unique(rv$g[forecast_date >= Sys.Date() & forecast_date <= data_do, .(id_store, id_article)]))
rv$tabela_korekta <- merge(rv$tabela_korekta, rv$temp1, by=c("id_article", "id_store"))
if(updated_automat()=="tak"){
rv$tabela_korekta <- merge(rv$tabela_korekta, rv$dane_do_korekty, by=c("id_article", "id_store"), all.x = T)}
else{rv$tabela_korekta <- rv$tabela_korekta[, korekta := updated_korekta()]}
rv$tabela_korekta <- rv$tabela_korekta[, .(`Numer sklepu` = as.numeric(updated_sklep()), `Numer artykułu` = id_article, `Data od` = as.Date(data_od, origin = "1970-01-01"), `Data do`= as.Date(data_do, origin = "1970-01-01"), `Typ korekty` = 1, `Wartość korekty` = as.numeric(korekta), `Typ prognozy` = 0)]
if(updated_kierunek()=="zmniejszająca"){
rv$tabela_korekta <- rv$tabela_korekta[!(`Wartość korekty`>1)]
}else if(updated_kierunek()=="zwiększająca"){
rv$tabela_korekta <- rv$tabela_korekta[!(`Wartość korekty`<1)]
}
rv$tabela_korekta <- rv$tabela_korekta[!is.na(`Wartość korekty`)]
rv$tabela_korekta <- rv$tabela_korekta[!(`Wartość korekty`==1)]
    # handle the error case of overlapping correction dates
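    # two ordered date ranges overlap exactly when start.new <= end.old and end.new >= start.old;
    # the flag below is an expanded form of that rule applied to the .new/.old correction dates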
temp_check <- merge(rv$tabela_korekta, rv$tabela_korekta_final, by=c("Numer sklepu", "Numer artykułu"), allow.cartesian=T, suffixes = c('.new','.old' ))
temp_check_flag <- temp_check[, flag := ifelse(((`Data od.new`<=`Data do.old`&`Data do.new`>=`Data od.old`)|(`Data do.new`>=`Data od.old`&`Data od.new`<=`Data do.old`)|(`Data od.new`>=`Data od.old`&`Data do.new`<=`Data do.old`)|(`Data do.new`==`Data od.old`&`Data od.new`==`Data do.old`)), 1, 0)]
if(NROW(temp_check_flag[`Numer artykułu` %in% rv$tabela_korekta$`Numer artykułu`][flag==1])==0){
showNotification(paste("Korekta została zatwierdzona. Ilość dodanych wierszy: ", NROW(rv$tabela_korekta), ", średni wpis: ", round(mean(rv$tabela_korekta[, `Wartość korekty`]), 2), sep = ""), type = "message", duration = 20)
rv$tabela_korekta_final <- rbind(rv$tabela_korekta, rv$tabela_korekta_final)
}else{
showNotification(paste("Daty na sklepo-artykule nakładają się. Np. na: Sklepie ", temp_check_flag[flag==1][1, `Numer sklepu`],
", artykule ", temp_check_flag[flag==1][1, `Numer artykułu`], ", przy następujących datach poprzedniej korekty: ",
rv$tabela_korekta_final[1, `Data od`],", ", rv$tabela_korekta_final[1, `Data do`], sep=""), type = "error", duration = NULL)
rv$tabela_korekta <- rv$tabela_korekta[!(`Numer artykułu` %in% temp_check_flag$`Numer artykułu`)]
}
rv$wszystkie_artykuly <- unique(append(rv$wszystkie_artykuly, rv$tabela_korekta[, `Numer artykułu`]))
print(rv$wszystkie_artykuly)
rv$tabela_korekta_final <- rv$tabela_korekta_final[`Numer artykułu` %in% rv$wszystkie_artykuly]
rv$tabela_korekta_final <- unique(rv$tabela_korekta_final)
rv$wszystkie_sklepy <- unique(rv$tabela_korekta_final[, `Numer sklepu`])
print(rv$tabela_korekta_final)
})
output$tabela_z_korektami = DT::renderDataTable({
DT::datatable(rv$tabela_korekta_final, options = list(paging = TRUE, searching = FALSE))
})
output$zapisz <- downloadHandler(
filename = function() {
paste(sciezka, "/plik_korekty_", paste(rv$wszystkie_sklepy, collapse="_") ,"_", Sys.Date(),".xlsx", sep="")},
content = function(file) {
write.xlsx(rv$tabela_korekta_final, file, sep=";", dec=",")
}
)
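  # rv2 stores the older and newer calculation dates found by dane_porownanie(); they are used
  # to split the two forecast lines on the comparison charts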
rv2 <- reactiveValues(stare_calc_date = 0, nowe_calc_date = 0)
dane_porownanie <- eventReactive(input$przelicz_porownanie, {
sklepy <- paste(shQuote(input$sklep_porownanie, type="csh"), collapse=", ")
daty_prognozy <- c(input$data_prognozy_1, input$data_prognozy_2)
daty_prognozy <- paste(shQuote(daty_prognozy, type="csh"), collapse=", ")
s <- Sys.time()
process_step_run_id_full <- dbGetQuery(con, paste("SELECT count(*) maximum , calculation_date, process_step_run_id, process_run_id, calculation_id
FROM detail_sas.sales_forecast_hist
WHERE calculation_date in (", daty_prognozy ,")
GROUP BY calculation_date, process_step_run_id, process_run_id, calculation_id
order by maximum desc;", sep=""))
s1<- Sys.time()
print(s1-s)
process_step_run_id_full <- as.data.table(process_step_run_id_full)
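    # for each calculation_date keep the run/calculation id combination with the most rows,
    # i.e. the fullest available calculation for that forecast date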
process_step_run_id_full <- setDT(process_step_run_id_full)[order(-maximum)][,.SD[1,], by = .(calculation_date)]
calculation_date <- paste(shQuote(process_step_run_id_full[, calculation_date], type="csh"), collapse=", ")
process_step_run_id <- paste(shQuote(process_step_run_id_full[,process_step_run_id], type="csh"), collapse=", ")
process_run_id <- paste(shQuote(process_step_run_id_full[, process_run_id], type="csh"), collapse=", ")
calculation_id <- paste(shQuote(process_step_run_id_full[, calculation_id], type="csh"), collapse=", ")
first_date = as.Date(substring(calculation_date, 16,25))
second_date <- first_date + 60
    # query extracting the forecast table
start_time <- Sys.time()
prognozy_dla_sklepow <- dbGetQuery(con, paste("select SAS.calculation_date as calculation_date,
SAS.id_store as id_store,
SAS.id_article as id_article,
SAS.forecast_date as forecast_date,
DICT.product_group as product_group,
DICT.type_group as type_group,
sum(SAS.forecast_quantity_stat) as forecast_quantity_stat,
sum(SAS.forecast_quantity_corr) as forecast_quantity_corr,
sum(SAS.forecast_value) as forecast_value
from detail_sas.sales_forecast_hist SAS
join dict.dic_articles DICT
ON SAS.id_article = DICT.id_article
WHERE id_store in (", sklepy, ")
AND process_step_run_id in (", process_step_run_id, ")
and process_run_id in (", process_run_id, ")
and calculation_id in (", calculation_id, ")
AND forecast_date >= '", first_date ,"'
AND forecast_date <= '", second_date ,"'
group by SAS.id_store, DICT.product_group, DICT.type_group, SAS.id_article,
SAS.forecast_date, SAS.calculation_date;", sep=""))
end_time <- Sys.time()
print(end_time-start_time)
start_time <- Sys.time()
    # query extracting the realisation table
realizacje_dla_sklepow <- dbGetQuery(con, paste("SELECT
REAL.id_store as id_store,
REAL.id_article as id_article,
DICT.product_group as product_group,
DICT.type_group as type_group,
REAL.working_date as working_date,
sum(REAL.sale_cnt) as sale_cnt,
sum(REAL.sale_value) as sale_value
FROM detail_sales.tickets REAL
join dict.dic_articles DICT
ON REAL.id_article = DICT.id_article
WHERE REAL.id_store in (", sklepy ,") AND REAL.working_date >= '", Sys.Date() -460 ,"'
GROUP BY REAL.id_article, REAL.id_store, REAL.working_date, DICT.product_group,
DICT.type_group;", sep=""))
end_time <- Sys.time()
print(end_time-start_time)
    # convert the forecast and realisation tables to data.table and drop missing rows
prognozy_dla_sklepow <- as.data.table(prognozy_dla_sklepow)
prognozy_dla_sklepow <- na.omit(prognozy_dla_sklepow)
realizacje_dla_sklepow <- as.data.table(realizacje_dla_sklepow)
realizacje_dla_sklepow <- na.omit(realizacje_dla_sklepow)
    # join the tables
prognozy_i_realizacje <- merge(prognozy_dla_sklepow, realizacje_dla_sklepow,
by.x=c("id_store","product_group", "id_article", "type_group", "forecast_date"), by.y=c("id_store", "product_group", "id_article", "type_group", "working_date"), all=TRUE)
rv2$stare_calc_date <-min(process_step_run_id_full[, calculation_date])
rv2$nowe_calc_date <-max(process_step_run_id_full[, calculation_date])
print("zakończyłem obliczać dane_porownanie")
return(prognozy_i_realizacje)
})
output$tabela = DT::renderDataTable({
req(dane_porownanie())
DT::datatable(NULL)
})
observe({
updateSelectizeInput(session, "pgrupa_por",
choices = unique(dane_porownanie()[id_store==input$sklep_porownanie][order(product_group)][, product_group]),
options = list(closeAfterSelect = T)
)
})
observe({
if(is.null(input$pgrupa_por)){c =unique(dane_porownanie()[id_store==input$sklep_porownanie][order(type_group)][, type_group])}
else{c = unique(dane_porownanie()[id_store==input$sklep_porownanie][product_group %in% input$pgrupa_por][order(type_group)][, type_group])}
updateSelectizeInput(session, "wgrupa_por",
choices = c,
options = list(closeAfterSelect = T)
)
})
tabela_filtr_por <- eventReactive(input$generuj_porownanie, {
req(dane_porownanie())
g<- as.data.table(dane_porownanie())
h <- g[, .(forecast_quantity_stat = sum(forecast_quantity_stat, na.rm = T), forecast_quantity_corr= sum(forecast_quantity_corr, na.rm = T),
forecast_value = sum(forecast_value, na.rm = T), sale_cnt = sum(sale_cnt, na.rm = T), sale_value = sum(sale_value, na.rm = T)),
by=c("forecast_date", "calculation_date","product_group", "id_article", "type_group", "id_store")]
h <- merge(data.table(forecast_date = seq(input$data_przedz_por[1], input$data_przedz_por[2], by="days")), h, by=c("forecast_date"), all.x=TRUE)
for (i in names(h)){
h[is.na(get(i)), (i):=0]
}
tabela_filtr <- as.data.table(h)
tabela_filtr1 <- tabela_filtr[id_store == input$sklep_porownanie]
print(tabela_filtr1)
if(!is.null(input$pgrupa_por)){tabela_filtr1 <- tabela_filtr1[product_group %in% input$pgrupa_por]}
if(!is.null(input$wgrupa_por)){tabela_filtr1 <- tabela_filtr1[type_group %in% input$wgrupa_por]}
print("zakończyłem obliczać tabelę filtr")
return(tabela_filtr1)
})
updated_sklep_porownanie <- eventReactive(input$generuj_porownanie, {input$sklep_porownanie})
output$wykres_porownanie <- renderPlotly({
req(tabela_filtr_por())
tab_wykres_por <- as.data.table(tabela_filtr_por())
tab_wykres_por <- tab_wykres_por[, .(sale_cnt = sum(sale_cnt, na.rm = T), forecast_quantity_corr= sum(forecast_quantity_corr, na.rm = T)), by = c("forecast_date", "calculation_date")]
tab_wykres_por[is.na(tab_wykres_por)] <- 0
sklep = updated_sklep_porownanie()
    #build the chart; the data is split by "calculation_date" so each forecast vintage is drawn as a separate line
    # data.table approach
plot_ly() %>%
add_lines(data = tab_wykres_por, x = ~forecast_date,
y = ~sale_cnt, color = I("steelblue2"), name = "realizacja") %>%
add_lines(data = tab_wykres_por[calculation_date==rv2$stare_calc_date],
x = ~forecast_date, y = ~forecast_quantity_corr, color = I("firebrick2"), name = "prognoza 2") %>%
add_lines(data = tab_wykres_por[calculation_date==rv2$nowe_calc_date],
x = ~forecast_date, y = ~forecast_quantity_corr, color = I("orange"), name = "prognoza 1") %>%
layout(
title = paste("Weryfikacja prognoz na sztukach dla sklepu nr: ", sklep, sep=""), yaxis = list(range = c(0, as.numeric(max(tab_wykres_por[, sale_cnt])))))
})
output$wykres_porownanie_zl <- renderPlotly({
req(tabela_filtr_por())
tab_wykres_por <- as.data.table(tabela_filtr_por())
tab_wykres_por <- tab_wykres_por[, .(sale_value = sum(sale_value, na.rm = T), forecast_value= sum(forecast_value, na.rm = T)), by = c("forecast_date", "calculation_date")]
tab_wykres_por[is.na(tab_wykres_por)] <- 0
sklep = updated_sklep_porownanie()
    #build the chart; the data is split by "calculation_date" so each forecast vintage is drawn as a separate line
    # data.table approach
plot_ly() %>%
add_lines(data = tab_wykres_por, x = ~forecast_date,
y = ~sale_value, color = I("steelblue2"), name = "realizacja") %>%
add_lines(data = tab_wykres_por[calculation_date==rv2$stare_calc_date],
x = ~forecast_date, y = ~forecast_value, color = I("firebrick2"), name = "prognoza 2") %>%
add_lines(data = tab_wykres_por[calculation_date==rv2$nowe_calc_date],
x = ~forecast_date, y = ~forecast_value, color = I("orange"), name = "prognoza 1") %>%
layout(
title = paste("Weryfikacja prognoz na obrocie dla sklepu nr: ", sklep, sep=""), yaxis = list(range = c(0, as.numeric(max(tab_wykres_por[, sale_value])))))
})
dane_kanibalizm <- eventReactive(input$generuj_kan,{
sciezka <- "Z:/USERS/DZ_Magazyn/DZIAL ANALIZ/SEKCJA PROGNOZOWANIA/ML_Forecasts"
kanibalizacja <- fread(paste(sciezka, "/Dane_do_generatorów/15_perfect_distance.csv", sep=""))
kanibalizacja <- kanibalizacja[, data := as.Date(fastPOSIXct(data))]
sklep <- input$sklep_kanibalizacja
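    # pull daily turnover for the selected store and join it with the neighbour-distance metrics;
    # days with (near) zero turnover are dropped before plotting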
obrot <- dbGetQuery(con, paste("SELECT REAL.id_store as id_store, sum(REAL.sale_value) as sale_value, REAL.working_date as working_date
FROM detail_sales.tickets REAL WHERE REAL.id_store in (", sklep ,") AND REAL.working_date >= '2017-01-01'
GROUP BY REAL.id_store, REAL.working_date", sep=""))
kanibalizacja_podglad <- kanibalizacja[id_store == sklep]
kanibalizacja_podglad <- merge(kanibalizacja_podglad, obrot, by.x=c("data"), by.y=c("working_date"))
kanibalizacja_podglad <- kanibalizacja_podglad[sale_value > 0.01]
return(kanibalizacja_podglad)
})
updated_sklep_kan <- eventReactive(input$generuj_kan, {input$sklep_kanibalizacja})
output$wykres_kan <- renderPlotly({
req(dane_kanibalizm())
sklep <- updated_sklep_kan()
ay <- list(
tickfont = list(color = "red"),
overlaying = "y",
side = "right",
title = "obrót"
)
plot_ly(dane_kanibalizm(), x = ~data) %>%
add_lines(y = ~closest_neighbour, name = 'closest_neighbour',mode = 'lines', color = I('purple'), type = 'scatter') %>%
add_lines(y = ~neighbours_r_1km, name = 'neighbours_r_1km',mode = 'lines', color = I('cornflowerblue'), type = 'scatter') %>%
add_lines(y = ~neighbours_r_3km, name = 'neighbours_r_3km',mode = 'lines', color = I('green'), type = 'scatter') %>%
add_lines(y = ~neighbours_r_5km, name = 'neighbours_r_5km',mode = 'lines', color = I('red'), type = 'scatter') %>%
add_lines(y = ~sale_value, name = 'obrót',mode = 'lines', color = I('orange'), type = 'scatter', yaxis = "y2", opacity = 0.3) %>%
layout(
title = paste("Sklepy w pobliżu, a obrót dla sklepu: ", sklep, sep="") , yaxis2 = ay,
xaxis = list(title="x")
)
})
dane_alert <- eventReactive(input$generuj_alert,{
if(input$wybor_alertu=='48'){
sklepy_z_alertem_48 <- paste(shQuote(input$sklep_alert, type="csh"), collapse=", ")
data_alertu <- substring(nazwa_48, 10,17)
data_prognozy <- as.Date(data_alertu,format = "%Y %m %d")
data_prognozy_30dni <- data_prognozy - 30
daty_prognozy_48 <- append(data_prognozy,data_prognozy_30dni)
daty_prognozy_48 <- paste(shQuote(daty_prognozy_48, type="csh"), collapse=", ")
    #Send the query that retrieves the "process_step_run_id_full" table with the identifiers of the specific forecast runs, then reshape it
start_time <- Sys.time()
process_step_run_id_full <- dbGetQuery(con, paste("SELECT count(*) maximum , calculation_date, process_step_run_id, process_run_id, calculation_id
FROM detail_sas.sales_forecast_hist
WHERE calculation_date in (", daty_prognozy_48 ,")
GROUP BY calculation_date, process_step_run_id, process_run_id, calculation_id
order by maximum desc;", sep=""))
end_time <- Sys.time()
print(end_time-start_time)
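    # for each calculation date keep the run with the largest row count - presumably the main
    # production run for that day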
process_step_run_id_full <- as.data.table(process_step_run_id_full)
process_step_run_id_full <- setDT(process_step_run_id_full)[order(-maximum)][,.SD[1,], by = .(calculation_date)]
rv$process_step_run_id_full_48 <- as.data.table(process_step_run_id_full)
calculation_date <- paste(shQuote(process_step_run_id_full[, calculation_date], type="csh"), collapse=", ")
process_step_run_id <- paste(shQuote(process_step_run_id_full[,process_step_run_id], type="csh"), collapse=", ")
process_run_id <- paste(shQuote(process_step_run_id_full[, process_run_id], type="csh"), collapse=", ")
calculation_id <- paste(shQuote(process_step_run_id_full[, calculation_id], type="csh"), collapse=", ")
first_date = as.Date(substring(calculation_date, 16,25))
second_date <- first_date + 60
    #query retrieving the forecasts table
start_time <- Sys.time()
prognozy_dla_sklepow_z_alertem <- dbGetQuery(con, paste("select SAS.calculation_date,
SAS.id_store as id_store,
SAS.forecast_date as forecast_date,
sum(SAS.forecast_quantity_stat) as forecast_quantity_stat,
sum(SAS.forecast_quantity_corr) as forecast_quantity_corr,
sum(SAS.forecast_value) as forecast_value
from detail_sas.sales_forecast_hist SAS
WHERE id_store in (", sklepy_z_alertem_48, ")
AND process_step_run_id in (", process_step_run_id, ")
and process_run_id in (", process_run_id, ")
and calculation_id in (", calculation_id, ")
AND forecast_date >= '", first_date ,"'
AND forecast_date <= '", second_date ,"'
group by SAS.id_store,
SAS.forecast_date, SAS.calculation_date;", sep=""))
end_time <- Sys.time()
print(end_time-start_time)
prognozy_dla_sklepow_z_alertem
start_time <- Sys.time()
    #query retrieving the actuals table
realizacje_dla_sklepow_z_alertem <- dbGetQuery(con, paste("SELECT
REAL.id_store as id_store,
REAL.date_tran_warsaw_d,
sum(REAL.sale_cnt) as sale_cnt,
sum(REAL.sale_value) as sale_value
FROM detail_sales.tickets REAL
WHERE id_store in (", sklepy_z_alertem_48 ,") AND REAL.date_tran_warsaw_d >= '", first_date ,"'
GROUP BY REAL.id_store, REAL.date_tran_warsaw_d"))
end_time <- Sys.time()
print(end_time-start_time)
realizacje_dla_sklepow_z_alertem
    #reshape the forecast and actuals tables
prognozy_dla_sklepow_z_alertem <- as.data.table(prognozy_dla_sklepow_z_alertem)
prognozy_dla_sklepow_z_alertem <- na.omit(prognozy_dla_sklepow_z_alertem)
realizacje_dla_sklepow_z_alertem <- as.data.table(realizacje_dla_sklepow_z_alertem)
realizacje_dla_sklepow_z_alertem <- na.omit(realizacje_dla_sklepow_z_alertem)
    #join the tables
prognozy_i_realizacje <- merge(prognozy_dla_sklepow_z_alertem, realizacje_dla_sklepow_z_alertem,
by.x=c("id_store", "forecast_date"), by.y=c("id_store", "date_tran_warsaw_d"), all=TRUE)
return(prognozy_i_realizacje)}else{
################################################57###############################################################
sklepy_z_alertem_57 <- paste(shQuote(input$sklep_alert, type="csh"), collapse=", ")
data_alertu_57 <- substring(nazwa_57, 10,17)
data_prognozy_57 <- as.Date(data_alertu_57,format = "%Y %m %d")
data_prognozy_wczoraj <- data_prognozy_57 - 1
daty_prognozy_57 <- append(data_prognozy_57,data_prognozy_wczoraj)
daty_prognozy_57 <- paste(shQuote(daty_prognozy_57, type="csh"), collapse=", ")
    #Reshape the alert-57 table so that the relevant dates can be extracted from diffDays
sklepy_z_alertem_57_d[, dlugosc := str_count(diffDays, ',')+1]
vector_lengths <- ((sklepy_z_alertem_57_d[,4]))
maximum_length <- lapply(vector_lengths, function(x) x[which.max(abs(x))])
maximum_length <- as.integer(maximum_length)
maximum_length[1]
sklepy_z_alertem_57_d <- setDT(sklepy_z_alertem_57_d)[,paste0("diffDays_", 1:maximum_length[1]) := tstrsplit(diffDays, ",")][,-"diffDays", with=F]
colA = paste("diffDays_", 1:maximum_length, sep = "")
sklepy_z_alertem_57_unpivot <- melt(sklepy_z_alertem_57_d, id.vars = c("StoreId"),
measure.vars = colA)
sklepy_z_alertem_57_unpivot <- sklepy_z_alertem_57_unpivot[, variable :=NULL]
sklepy_z_alertem_57_unpivot <- na.omit(sklepy_z_alertem_57_unpivot)
str(sklepy_z_alertem_57_unpivot)
sklepy_z_alertem_57_unpivot[, Daty:= as.Date(value)]
str(sklepy_z_alertem_57_unpivot[, Daty])
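    # at this point sklepy_z_alertem_57_unpivot holds one row per store and alerted day (column Daty);
    # it is joined to the forecasts further below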
    #Send the query that retrieves the "process_step_run_id_full" table with the identifiers of the specific forecast runs, then reshape it
start_time <- Sys.time()
process_step_run_id_full <-dbGetQuery(con, paste(
"SELECT count(*) maximum , calculation_date, process_step_run_id, process_run_id, calculation_id
FROM detail_sas.sales_forecast_hist
WHERE calculation_date in (", daty_prognozy_57 ,")
GROUP BY calculation_date, process_step_run_id, process_run_id, calculation_id
order by maximum desc;", sep=""))
end_time <- Sys.time()
print(end_time-start_time)
process_step_run_id_full <- as.data.table(process_step_run_id_full)
process_step_run_id_full<- setDT(process_step_run_id_full)[order(-maximum)][,.SD[1,], by = .(calculation_date)]
rv$process_step_run_id_full_57 <- as.data.table(process_step_run_id_full)
calculation_date <- paste(shQuote(process_step_run_id_full[, calculation_date], type="csh"), collapse=", ")
process_step_run_id <- paste(shQuote(process_step_run_id_full[,process_step_run_id], type="csh"), collapse=", ")
process_run_id <- paste(shQuote(process_step_run_id_full[, process_run_id], type="csh"), collapse=", ")
calculation_id <- paste(shQuote(process_step_run_id_full[, calculation_id], type="csh"), collapse=", ")
first_date = as.Date(substring(calculation_date, 16,25))
second_date <- first_date + 60
process_step_run_id_full
start_time <- Sys.time()
    #query retrieving the forecasts table
prognozy_dla_sklepow_z_alertem_57 <- dbGetQuery(con, paste("select SAS.calculation_date,
SAS.id_store as id_store,
SAS.forecast_date as forecast_date,
sum(SAS.forecast_quantity_stat) as forecast_quantity_stat,
sum(SAS.forecast_quantity_corr) as forecast_quantity_corr,
sum(SAS.forecast_value) as forecast_value
from detail_sas.sales_forecast_hist SAS
WHERE id_store in (", sklepy_z_alertem_57 ,")
AND process_step_run_id in (", process_step_run_id, ")
and process_run_id in (", process_run_id, ")
and calculation_id in (", calculation_id, ")
AND forecast_date >= '", first_date ,"'
AND forecast_date <= '", second_date ,"'
group by SAS.id_store,
SAS.forecast_date, SAS.calculation_date;", sep=""))
end_time <- Sys.time()
print(end_time-start_time)
    #reshape the tables before joining
prognozy_dla_sklepow_z_alertem_57 <- as.data.table(prognozy_dla_sklepow_z_alertem_57)
prognozy_dla_sklepow_z_alertem_57 <- na.omit(prognozy_dla_sklepow_z_alertem_57)
sklepy_z_alertem_57_unpivot = subset(sklepy_z_alertem_57_unpivot, select = -c(value))
    #join the tables
prognozy_i_daty_alertow_57 <- merge(prognozy_dla_sklepow_z_alertem_57, sklepy_z_alertem_57_unpivot,
by.x=c("id_store"), by.y=c("StoreId"), all.x=TRUE, allow.cartesian = TRUE)
return(prognozy_i_daty_alertow_57)}
})
updated_alert <- eventReactive(input$generuj_alert, {input$wybor_alertu})
updated_sklep_alert <- eventReactive(input$generuj_alert, {input$sklep_alert})
output$alert_wykres <- renderPlotly({
req(dane_alert())
    #extract the old and new calculation dates ##
if (updated_alert()=='48'){
stare_calc_date <-min(rv$process_step_run_id_full_48[, calculation_date])
nowe_calc_date <-max(rv$process_step_run_id_full_48[, calculation_date])
sklep = updated_sklep_alert()
      #build the chart; the data is split by "calculation_date" so each forecast vintage is drawn as a separate line
      # data.table approach
plot_ly() %>%
add_lines(data = dane_alert()[calculation_date==stare_calc_date & id_store==sklep], x = ~forecast_date,
y = ~sale_cnt, color = I("steelblue2"), name = "realizacja") %>%
add_lines(data = dane_alert()[calculation_date==stare_calc_date & id_store==sklep],
x = ~forecast_date, y = ~forecast_quantity_corr, color = I("firebrick2"), name = "prognoza sprzed 30 dni") %>%
add_lines(data = dane_alert()[calculation_date==nowe_calc_date & id_store==sklep],
x = ~forecast_date, y = ~forecast_quantity_corr, color = I("firebrick3"), name = "prognoza dzisiejsza") %>%
layout(
title = paste("Prognozy i realizacje z alertem nr 48 dla sklepu nr: ", sklep, sep=""), yaxis = list(range= c(0, as.numeric(max(dane_alert()[, forecast_quantity_corr]))))
)
}else{
stare_calc_date <-min(rv$process_step_run_id_full_57[, calculation_date])
nowe_calc_date <-max(rv$process_step_run_id_full_57[, calculation_date])
sklep = updated_sklep_alert()
      #build the chart; the data is split by "calculation_date" so each forecast vintage is drawn as a separate line
      # data.table approach
ay <- list(
overlaying = "y",
side = "right",
title = "alert_57",
range = c(0:1),
visible = FALSE
)
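      # the alert-57 days are drawn as bars on a hidden secondary axis with range 0..1,
      # so they do not distort the scale of the forecast lines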
plot_ly(data = dane_alert()[calculation_date==stare_calc_date & id_store==sklep], x = ~forecast_date) %>%
add_lines(data = dane_alert()[calculation_date==stare_calc_date & id_store==sklep],
y = ~forecast_quantity_corr, color = I("firebrick2"), name = "prognoza wczorajsza") %>%
add_lines(data = dane_alert()[calculation_date==nowe_calc_date & id_store==sklep]
, y = ~forecast_quantity_corr, color = I("mediumpurple1"),line= list(dash="dashdot"), name = "prognoza dzisiejsza") %>%
add_trace(data = dane_alert()[forecast_date==Daty & id_store==sklep],
color = I("yellow2"), type="bar",
x = ~forecast_date, y=1, hoverinfo = 'y', name= "dni z alertem 57", opacity = 0.3, yaxis = "y2")%>%
layout(
title = paste("prognozy i daty alertów 57 dla sklepu nr:", sklep, sep=""), yaxis2 = ay ,
xaxis= list(title="Data"), yaxis=list(title="Sztuki", range = c(0, as.numeric(max(dane_alert()[, forecast_quantity_corr])))), bargap = 0.9)
}
})
}
shinyApp(ui=ui, server = server)
|
/Rossmann/Aplikacja_do_korekt_SAS_artykul_dev.r
|
permissive
|
kazzzz65/myRepository
|
R
| false | false | 85,605 |
r
|
# load the required libraries
library(shiny)
library(tidyverse)
library(data.table)
library(DT)
library(plotly)
library(dplyr)
library(plyr)
library(shinythemes)
library(odbc)
library(dbplyr)
library(sqldf)
library(promises)
library(future)
library(ipc)
library(shinycssloaders)
library(openxlsx)
#load the data needed up front by the UI
# sciezka <- dirname(rstudioapi::getActiveDocumentContext()$path)
p_grupy <- character(0)
#
# con <- dbConnect(odbc::odbc(),
# dsn = "Hive z LLAP Cloudera",
# driver = "Cloudera ODBC Driver for Apache Hive",
# dbname = "training",
# Trusted_Connection = "True",
# encoding = "CP1250",
# port = 10500)
#
# con_ross <- DBI::dbConnect(odbc::odbc(),
# dsn = "rossod32",
# database = "ROSSORA",
# encoding = "CP1250",
# uid = "hati",
# pwd = "hati")
#
#
wszystkie_sklepy <- character(0)
# as.data.table(dbGetQuery(con, paste("SELECT DISTINCT dic.id_store as id_store FROM dict.dic_stores dic WHERE dic.close_date IS NULL ORDER BY dic.id_store", sep ="")))[id_store<=9000]
artykuly <- character(0)
# as.data.table(dbGetQuery(con_ross, paste("SELECT ARTNR1, ORDERKZ FROM ROSSORA.DBO.ACCESS_ARTIKEL", sep="")))[, ARTNR1 := as.numeric(ARTNR1)][ORDERKZ == 1, ARTNR1]
#
#
# ##elementy potrzebne do zakładki z alertami
# lista_plikow_wyd <- list.files(sciezka)
# lista_plikow_wyd_48 <- grep("^ALERT_48_", lista_plikow_wyd, value = TRUE)
#
# lista_plikow_wyd_57 <- grep("^ALERT_57_", lista_plikow_wyd, value = TRUE)
#
# nazwa_48 <- sort(lista_plikow_wyd_48, decreasing = TRUE)[1]
# nazwa_57 <- sort(lista_plikow_wyd_57, decreasing = TRUE)[1]
#
# sklepy_z_alertem_48_read <- fread(paste(sciezka, nazwa_48, sep="/"))
# sklepy_z_alertem_48_d <- as.data.table(sklepy_z_alertem_48_read)
# sklepy_z_alertem_48_list <- as.list(sklepy_z_alertem_48_d[,'StoreId'])
#
# sklepy_z_alertem_57_read <- fread(paste(sciezka, nazwa_57, sep="/"))
# sklepy_z_alertem_57_d <- as.data.table(sklepy_z_alertem_57_read)
# sklepy_z_alertem_57_list <- as.list(sklepy_z_alertem_57_d[,'StoreId'])
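# NOTE (dev version): the DB connections and the alert-file loading above are commented out, so
# wszystkie_sklepy, artykuly and p_grupy start empty and the select inputs have no choices until
# those objects are populated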
ui <- fluidPage(
theme = shinytheme("united"),
  # build the layout, split into the analysis and correction tabs
navbarPage(title=
"Rossmann", id = 'menu',
tabPanel("Analiza sklepów SAS",
column(3,
wellPanel(
selectizeInput(inputId = "sklepy",
label = "Wybierz numery sklepów:",
multiple = TRUE,
choices = wszystkie_sklepy,
options = list(closeAfterSelect = T)),
dateInput(inputId = "data_prognozy",
label = "Wybierz datę prognozy:",
value = Sys.Date()-1),
actionButton(inputId = "generuj", label = "Generuj wykres", icon("line-chart"), style="color: #fff; background-color: #228B22; border-color: #228B22")
)
),
column(9,tabsetPanel(
tabPanel("Wykres", fluidRow(
plotlyOutput("prognoza_total")%>%withSpinner(type = 5, color = '#808080')
)),
tabPanel("Opis zakładki", fluidRow(
htmlOutput("opis_analizy")
))
))
),
tabPanel("Wprowadzanie korekt",
column(3,
wellPanel(
radioButtons(inputId = "automat", label = h3("Automatyczna korekta?"),
choices = c("tak", "nie"),
selected = "nie"),
conditionalPanel(condition = "input.automat == 'tak'",
radioButtons(inputId = "kierunek", label = h3("Ustal kierunek korekty"),
choices = c("dowolna", "zmniejszająca", "zwiększająca"),
selected = "dowolna"),
numericInput("ograniczenie",
label = "Podaj miesięczną wartość graniczną prognozy dla artykułu",
value = 10,
step = 1)),
selectizeInput(inputId = "sklep_input",
label = "Wybierz numer sklepu:",
choices = wszystkie_sklepy,
options = list(closeAfterSelect = T)
),
numericInput("korekta",
label = "Podaj rozmiar korekty",
value = 1,
step = 0.1),
selectizeInput(
inputId = "pgrupa",
label = "Wybierz p-grupę:",
choices = p_grupy,
multiple = TRUE,
options = list(closeAfterSelect = T)),
selectizeInput(
inputId = "anty_pgrupa",
label = "Odfiltruj p-grupę:",
choices = p_grupy,
multiple = TRUE,
options = list(closeAfterSelect = T)),
selectizeInput(
inputId = "wgrupa",
label = "Wybierz w-grupę:",
choices = p_grupy,
multiple = TRUE,
options = list(closeAfterSelect = T)),
selectizeInput(
inputId = "agrupa",
label = "Wybierz a-grupę:",
choices = p_grupy,
multiple = TRUE,
options = list(closeAfterSelect = T)),
radioButtons(inputId = "radio", label = h3("Sposób wybrania artykułów"),
choices = c("Wybierz z listy", "Wklej lub wpisz"),
selected = "Wybierz z listy"),
conditionalPanel(condition = "input.radio == 'Wybierz z listy'", selectInput(
inputId = "artykul",
label = "Wybierz numery artykułów:",
choices = p_grupy,
multiple = TRUE)),
conditionalPanel(condition = "input.radio == 'Wklej lub wpisz'", textAreaInput("lis_art",
"Lista artykułów", "",
cols = 1, resize = "vertical",
placeholder = "Wklej listę artykułów przedzielonych enterem"))
,
actionButton(inputId = "symuluj", label = "Symuluj", icon("line-chart"), style="color: #fff; background-color: #5BC85B; border-color: black"),
actionButton(inputId = "zatwierdz", label = "Zatwierdź", icon("clipboard-check"), style="color: #fff; background-color: green; border-color: black", width = 110),
actionButton(inputId = "cofnij", label = "Cofnij", icon("step-backward"), style="color: #fff; background-color: orange; border-color: black", width = 110),
actionButton(inputId = "wyczysc", label = "Wyczyść", icon("skull-crossbones"), style="color: #fff; background-color: red; border-color: black", width = 110),
downloadButton("zapisz", label = "Zapisz", style="color: #fff; background-color: blue; border-color: black", width = 110),
downloadButton("pobierz", label = "Pobierz dane", style="color: #fff; background-color: light-blue; border-color: black", width = 110)
)
),
column(9,tabsetPanel( tabPanel("Wykres i tabela", fluidRow(
conditionalPanel(condition = "input.automat == 'tak'",
sliderInput(inputId = "data_przedz_auto",
label = "Wybierz przedział czasowy, który ma zostać wzięty do wygenerowania korekty automatycznej:",
min = as.Date(Sys.Date()-460),
max = as.Date(Sys.Date()-1),
value = c(as.Date(Sys.Date()-32),as.Date(Sys.Date()-2)),
timeFormat="%Y-%m-%d",
width = 1550),
sliderInput(inputId = "korekta_przedz",
label = "Wybierz przedział korekty automatycznej: ",
step = 0.1,
min = 0,
max = 10,
value = c(0.6, 2),
width = 1550)),
sliderInput(inputId = "data_przedz",
label = "Wybierz przedział czasowy korekty:",
min = Sys.Date(),
max = as.Date(Sys.Date()+120),
value = c(Sys.Date(),as.Date(Sys.Date()+30)),
timeFormat="%Y-%m-%d",
width = 1550)),
radioButtons(inputId = "wybor_agregacji", label = h3("Wybierz sposób agregacji danych"),
choices = c("per dzień", "tygodniowa"),
selected = "per dzień"),
fluidRow(
plotlyOutput("prognoza_po_korektach")%>%withSpinner(type = 5, color = '#808080')),
fluidRow(
DT::dataTableOutput("tabela_z_korektami")%>%withSpinner(type = 5, color = '#808080'))
),
tabPanel("Opis zakładki", fluidRow(
htmlOutput("opis_korekty")
))
))
),
tabPanel("Weryfikacja prognoz",
column(3,
wellPanel(
helpText("Wybierz numer sklepu i daty prognoz.")
),
wellPanel(
selectInput(inputId = "sklep_porownanie",
label = "Wybierz sklep:",
multiple = F,
choices = wszystkie_sklepy),
dateInput(inputId = "data_prognozy_1",
label = "Wybierz pierwszą datę prognozy:",
value = Sys.Date()-1),
dateInput(inputId = "data_prognozy_2",
label = "Wybierz drugą datę prognozy:",
value = Sys.Date()-2),
actionButton(inputId = "przelicz_porownanie", label = "Przelicz dane", icon("calculator"),
style="color: #fff; background-color: #228B22; border-color: #228B22"),
selectizeInput(inputId = "pgrupa_por",
label = "Wybierz p-grupę:",
multiple = T,
choices = p_grupy,
options = list(closeAfterSelect = T)),
selectizeInput(inputId = "wgrupa_por",
label = "Wybierz w-grupę:",
multiple = T,
choices = p_grupy,
options = list(closeAfterSelect = T)),
actionButton(inputId = "generuj_porownanie", label = "Generuj wykres", icon("line-chart"),
style="color: #fff; background-color: #228B22; border-color: #228B22")
)
),
column(9, tabsetPanel(tabPanel("Wykres", fluidRow(sliderInput(inputId = "data_przedz_por",
label = "Wybierz przedział czasowy analizy:",
min = as.Date(Sys.Date()-460),
max = as.Date(Sys.Date()+60),
value = c(as.Date(Sys.Date()-120),as.Date(Sys.Date()+60)),
timeFormat="%Y-%m-%d",
width = 1550)),
fluidRow(
DT::dataTableOutput("tabela")%>%withSpinner(type = 5, color = '#808080')),
fluidRow(
plotlyOutput("wykres_porownanie")%>%withSpinner(type = 5, color = '#808080')
),
fluidRow(
plotlyOutput("wykres_porownanie_zl")%>%withSpinner(type = 5, color = '#808080')
)
),
tabPanel("Opis zakładki", fluidRow(
htmlOutput("opis_porownania")
)))
))
# ),
# tabPanel("Kanibalizacja",
# column(3,
# wellPanel(
# helpText("Wybierz numer sklepu i datę prognozy.")
# ),
# wellPanel(
# selectInput(inputId = "sklep_kanibalizacja",
# label = "Wybierz sklep:",
# multiple = F,
# choices = wszystkie_sklepy),
# actionButton(inputId = "generuj_kan", label = "Generuj wykres", icon("line-chart"),
# style="color: #fff; background-color: #228B22; border-color: #228B22")
# )
# ),
# column(9, tabsetPanel(tabPanel("Wykres", fluidRow(
# plotlyOutput("wykres_kan")%>%withSpinner(type = 5, color = '#808080')
# )),
# tabPanel("Opis zakładki", fluidRow(
# htmlOutput("opis_kan")
# )))
# )
# ),
# tabPanel("Alerty",
#
# column(3,
# wellPanel(
# helpText("Wybierz alert i numer sklepu.")
# ),
# wellPanel(
# selectInput(inputId = "wybor_alertu",
# label = "Wybierz alert:",
# multiple = F,
# choices = list('48', '57')),
# selectInput(inputId = "sklep_alert",
# label = "Wybierz numer sklepu z alertem:",
# multiple = F,
# choices = sklepy_z_alertem_48_list),
# actionButton(inputId = "generuj_alert", label = "Generuj wykres", icon("line-chart"),
# style="color: #fff; background-color: #228B22; border-color: #228B22")
# )
# ),
# column(9, tabsetPanel(tabPanel("Wykres", fluidRow(
# plotlyOutput("alert_wykres")%>%withSpinner(type = 5, color = '#808080')
# )),
# tabPanel("Opis zakładki", fluidRow(
# htmlOutput("opis_alertow")
# )))
# )
# )
))
server <- function(input, output, session){
  # tab description texts
output$opis_analizy = renderText({
"<b>Założenia:</b> <br/>
Ta zakładka służy do przesłania zapytań do bazy i przeliczenia danych.<br/>
Wykres zawiera realizacje na 460 dni w tył od dzisiejszego dnia oraz prognozy na 60 dni wprzód, na wybranych sklepach(agregacja) przy danej dacie prognozy.<br/><br/>
<strong>Instrukcja obsługi:</strong><br/>
- <strong>Wybierz numery sklepów</strong> - Wybieramy nr sklepów na których chcemy pracować pamiętając, że wybranie większej liczby niż ok.5 może spowodować spowolnione działanie aplikacji.,<br/>
- <strong>Wybierz datę prognozy</strong> - Wybieramy datę prognozy,<br/>
- <strong>kliknij 'Generuj'</strong> - Nastąpi przeliczenie które może potrwać od 1 do 5 minut, po czym wygeneruje się zbiorczy wykres realizacji na 460 dni w tył od dzisiejszego dnia oraz prognozy na 60 dni wprzód.
Po ukazaniu się wykresu przejdź do zakładki 'Wprowadzanie korekt'.<br/><br/>
<br/>"
})
output$opis_korekty = renderText({
"<b>Założenia:</b> <br/>
Ta zakładka służy do filtrowania danych, symulowania korekt, zatwierdzania ich i zapisu.<br/>
Wykres zawiera wyfiltrowane dane oraz symulację wprowadzanej korekty.<br/>
W tabeli znajdują się korekty, posortowane w kolejności dodawania.<br/>
Pamiętaj o tym by wykonać najpierw przeliczenie na poprzedniej zakładce.<br/>
Na wykresie pojawiają sie informacje o realizacji, prognozie, prognozie po symulowanej korekcie, aktualnym stoku sklepowym oraz capacity.<br/>
Capacity jest oznaczone poziomą linią jako capacity w dniu poprzedzającym prognozę dla wybranej grupy artykułów. Miara ta jest jedynie przydatna dla pojedynczych artykułów lub niewielkich grup.<br/>
W przypadku p-grup i całych sklepów capacity nie będzie widoczne na wykresie.<br/>
Korekta automatyczna to korekta wyliczana automatycznie per artykuł na danym filtrowaniu, na podstawie wybranych okresów jako iloraz średnich realizacji i prognozy.</br>
Korekta jest w przedziałach [0.5, 0.95] i [1.05, 2], z buforem 0.1, ograniczeniem ze względu na wielkość prognozy w okresie wybranym jako przedział czasowy korekty, zaokrąglona do wielokrotności 0.05, z możliwością wybrania jej kierunku. <br/><br/>
<strong>Instrukcja obsługi:</strong><br/>
- <strong>Automatyczna korekta?</strong> - Wybieramy czy korekta ma się wygenerować automatycznie per artykuł, czy dokonujemy ręcznej korekty na danym filtrowaniu,<br/>
- <strong>Wybierz numer sklepu</strong> - Wybieramy nr sklepu,<br/>
- <strong>Wybierz przedział czasowy korekty</strong> - Wybieramy przedział od do nałożenia korekty, należy pamiętać o tym, żeby daty na sklepo-artykule dla różnych korekt nie nakładały się,<br/>
- <strong>Wybierz przedział czasowy, który ma zostać wzięty do wygenerowania korekty automatycznej:</strong> - Wybieramy przedział od do, który ma zostać wzięty do automatycznej korekty jako wzór realizacji,<br/>
- <strong>Podaj rozmiar korekty</strong> - Podajemy rozmiar korekty, możemy manipulować nim za pomocą 'strzałeczek' o 0,1,<br/>
- <strong>Ustal kierunek korekty</strong> - Ustalamy, czy automatyczna korekta ma się ograniczyć do któregoś kierunku czy pozostać 'dowolna',<br/>
- <strong>Podaj wartość graniczną prognozy dla artykułu</strong> - Podajemy wartość prognozy w wybranym okresie od której artykuły są brane do korekty,<br/>
- <strong>Wybierz p-grupę</strong> - Wybieramy p-grupy,<br/>
- <strong>Odfiltruj p-grupę</strong> - Wybieramy p-grupy, na których nie chcemy wprowadzać korekty,<br/>
- <strong>Wybierz w-grupę</strong> - Wybieramy w-grupy,<br/>
- <strong>Wybierz a-grupę</strong> - Wybieramy a-grupy,<br/>
- <strong>Sposób wybrania artykułów</strong> - Wybieramy czy chcemy wkleić lub wpisać artykuły czy może wybrać z listy wyfiltrowanej na podstawie wcześniejszych filtrów ,<br/>
- <strong>Wybierz sposób agregacji</strong> - Wybieramy w jaki sposób dane mają być agregowane na wykresie,<br/>
- <strong>Przycisk 'Symuluj'</strong> - Powoduje wygenerowania wykresu na wyfiltrowanyh danych,<br/>
- <strong>Przycisk 'Zatwierdź'</strong> - Powoduje zatwierdzenie korekty,<br/>
- <strong>Przycisk 'Cofnij'</strong> - Powoduje usunięcie ostatniej korekty z tabeli,<br/>
- <strong>Przycisk 'Wyczyść'</strong> - Powoduje usunięcie wszystkich korekt z tabeli,<br/>
- <strong>Przycisk 'Zapisz'</strong> - Powoduje zapisanie korekt do pliku xlsx.<br/>
- <strong>Przycisk 'Pobierz dane'</strong> - Powoduje zapisanie aktualnie wyfiltrowanych danych (po kliknięciu symuluj!) do pliku xlsx.<br/>
<br/>"
})
output$opis_porownania = renderText({
"<b>Założenia:</b> <br/>
Ta zakładka służy do porównania prognoz z różnymi datami przeliczeń na danym sklepie<br/>"
})
output$opis_kan = renderText({
"<b>Założenia:</b> <br/>
Ta zakładka służy do wizualizacji kanibalizacji na sklepie<br/>"
})
output$opis_alertow = renderText({
"<b>Założenia:</b> <br/>
Ta zakładka służy do wizualizacji prognoz i realizacji na sklepach z alertami 48 i 57<br/>"
})
rv <- reactiveValues(g = 0, h = 0, tabela_korekta_final = data.table(`Numer sklepu`=numeric(), `Numer artykułu`=numeric(), `Data od`=as.Date(character()), `Data do`=as.Date(character()), `Typ korekty` = numeric(), `Wartość korekty` = as.double(), `Typ prognozy` = numeric()),
temp1 = 0, wszystkie_artykuly = c(), tabela_korekta = data.table(), wszystkie_sklepy = list(),
stok_sklep = data.table(id_store = numeric(), id_article= numeric(), forecast_date = as.Date(character()), stok_sklepy=numeric()),
capacity = data.table(id_store = numeric(), id_article= numeric(), capacity=numeric()), capacity1 = data.table(id_store = numeric(), id_article= numeric(), capacity=numeric()),
dane_do_korekty = data.table(id_store = numeric(), id_article= numeric(), korekta=numeric()))
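  # reactive working storage for the session: filtered data (g, h), the approved corrections
  # (tabela_korekta_final), store stock, capacity snapshots and the automatically computed
  # per-article corrections (dane_do_korekty)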
dane_real <- eventReactive(input$generuj, {
    # base table holding the results of the recalculation
    # 5 Hadoop queries: 1-2 forecast, 3 - actuals, 4 - stock, 5 - capacity
sklepy <- paste(shQuote(input$sklepy, type="csh"), collapse=", ")
s <- Sys.time()
process_step_run_id_full <- dbGetQuery(con, paste("SELECT calculation_date as calculation_date, max(process_step_run_id) as process_step_run_id, max(process_run_id) as process_run_id, max(calculation_id) as calculation_id FROM detail_sas.sales_forecast_hist WHERE calculation_date in ('", input$data_prognozy ,"') GROUP BY calculation_date", sep=""))
s1<- Sys.time()
print(s1-s)
process_step_run_id_full <- as.data.table(process_step_run_id_full)
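    # the identifiers above pin the forecast snapshot (run) for the chosen calculation date and
    # are reused in the forecast query below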
s <- Sys.time()
dane_real <- dbGetQuery(con, paste("select
SAS.id_store as id_store,
SAS.id_article as id_article,
DICT.product_group as product_group,
DICT.type_group as type_group,
DICT.art_group as art_group,
SAS.forecast_date as forecast_date,
sum(SAS.forecast_quantity_stat) as forecast_quantity_stat,
sum(SAS.forecast_quantity_corr) as forecast_quantity_corr,
sum(SAS.forecast_value) as forecast_value
from detail_sas.sales_forecast_hist SAS
join dict.dic_articles DICT
ON SAS.id_article = DICT.id_article
WHERE SAS.id_store in (", sklepy ,")
AND SAS.process_step_run_id in (", process_step_run_id_full[, process_step_run_id], ")
and SAS.process_run_id in (", process_step_run_id_full[, process_run_id], ")
and SAS.calculation_id in (", process_step_run_id_full[, calculation_id], ")
AND SAS.forecast_date >= '", input$data_prognozy ,"'
AND SAS.forecast_date <= '", input$data_prognozy + 60 ,"'
group by SAS.id_store, SAS.id_article, DICT.product_group, DICT.type_group, DICT.art_group,
SAS.forecast_date;", sep=""))
s1<- Sys.time()
print(s1-s)
s <- Sys.time()
dane_real_2 <- dbGetQuery(con, paste("SELECT
REAL.id_store as id_store,
REAL.id_article as id_article,
DICT.product_group as product_group,
DICT.type_group as type_group,
DICT.art_group as art_group,
REAL.working_date as working_date,
sum(REAL.sale_cnt) as sale_cnt,
sum(REAL.sale_value) as sale_value
FROM detail_sales.tickets REAL
join dict.dic_articles DICT
ON REAL.id_article = DICT.id_article
WHERE REAL.id_store in (", sklepy ,") AND REAL.working_date >= '", Sys.Date() -460 ,"'
GROUP BY REAL.id_article, REAL.id_store, REAL.working_date, DICT.product_group,
DICT.type_group, DICT.art_group;", sep=""))
s1<- Sys.time()
print(s1-s)
s <- Sys.time()
rv$stok_sklep <- dbGetQuery(con, paste("SELECT STOK.id_store as id_store,
STOK.id_article as id_article, STOK.calculate_date as forecast_date,
sum(STOK.stan_999) as stok_sklepy
FROM detail_inventory.stock_article_store_daily STOK
where STOK.id_store in (", sklepy ,") AND STOK.calculate_date >='", Sys.Date() -30 ,
"'AND STOK.calculate_date <='", input$data_prognozy - 1 ,
"'GROUP BY STOK.id_store, STOK.id_article, STOK.calculate_date;", sep=""))
s1<- Sys.time()
print(s1-s)
rv$stok_sklep <- as.data.table(rv$stok_sklep)
rv$stok_sklep[, tydzien := format(forecast_date, "%Y-%V")]
rv$stok_sklep <- rv$stok_sklep[stok_sklepy >= 0]
rv$stok_sklep <- unique(rv$stok_sklep)
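    # rv$stok_sklep now holds deduplicated, non-negative daily store stock with a year-week key
    # used later for the weekly aggregation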
s <- Sys.time()
rv$capacity1 <- dbGetQuery(con, paste("SELECT id_store as id_store, id_article as id_article,
sum(capacity) as capacity FROM detail_snap.planogram_saa
WHERE id_store in (", sklepy ,") AND planogram_date ='", input$data_prognozy - 1 ,
"'GROUP BY id_store, id_article, planogram_date;", sep=""))
s1<- Sys.time()
print(s1-s)
dane_real <- as.data.table(dane_real)
dane_real <- na.omit(dane_real)
dane_real_2 <- as.data.table(dane_real_2)
dane_real_2 <- na.omit(dane_real_2)
dane_real <- merge(dane_real, dane_real_2, by.x=c("id_store", "id_article", "product_group", "forecast_date", "type_group", "art_group"),
by.y=c("id_store", "id_article", "product_group", "working_date", "type_group", "art_group"), all=TRUE)
dane_real[, tydzien := format(forecast_date, "%Y-%V")]
    #drop junk product groups
lista_niechcianych_pgrup <- list("Test", "Scanning", "Torby na zakupy", "Produkty wylistowane przed 01.01.2010", "Materiały reklamowe", "Artykuły testowe", "Stare i nieprzypisane", "Nieprzypisane", "Nieprzypięte", "Nieskategoryzowane", "Artykuły techniczne", "Wystrój sklepów", "Online-dom", "NF Traffic", "Koszty dostaw")
dane_real <- dane_real[!(product_group %in% lista_niechcianych_pgrup)]
print("zakończyłem obliczać dane_real")
print(dane_real)
return(dane_real)
})
tabela <- eventReactive(input$generuj, {
    #prepare the data for the summary chart
req(dane_real())
rv$g<- as.data.table(dane_real())
rv$h <- rv$g[, .(forecast_quantity_stat = sum(forecast_quantity_stat, na.rm = T), forecast_quantity_corr= sum(forecast_quantity_corr, na.rm = T), forecast_value = sum(forecast_value, na.rm = T), sale_cnt = sum(sale_cnt, na.rm = T), sale_value = sum(sale_value, na.rm = T)), by=c("forecast_date","product_group","type_group", "art_group", "id_store", "id_article")]
rv$h <- merge(data.table(forecast_date = seq(min(rv$h[, forecast_date]), max(rv$h[, forecast_date]), by="days")), rv$h, by=c("forecast_date"), all.x=TRUE)
for (i in names(rv$h)){
rv$h[is.na(get(i)), (i):=0]
}
tabela_wynik <- as.data.table(rv$h)
return(tabela_wynik)
})
wykres1 <- eventReactive(input$generuj,{
tabela_wynik<- tabela()[, .(forecast_quantity_stat = sum(forecast_quantity_stat, na.rm = T), forecast_quantity_corr= sum(forecast_quantity_corr, na.rm = T), forecast_value = sum(forecast_value, na.rm = T), sale_cnt = sum(sale_cnt, na.rm = T), sale_value = sum(sale_value, na.rm = T)) , by = "forecast_date"]
ay <- list(
overlaying = "y",
side = "right",
title = "sztuki",
range = c(min(tabela_wynik$sale_cnt, tabela_wynik$forecast_quantity_stat, tabela_wynik$forecast_quantity_corr) / max(tabela_wynik$sale_value, tabela_wynik$forecast_value) * min(tabela_wynik$sale_value, tabela_wynik$forecast_value), as.numeric(max(tabela_wynik$sale_cnt, tabela_wynik$forecast_quantity_stat, tabela_wynik$forecast_quantity_corr))+1)
)
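    # secondary axis for unit counts; its lower bound is scaled against the value axis so the
    # unit and value series start from a roughly comparable baseline (apparently a visual tweak)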
w <- plot_ly(tabela_wynik, x = ~forecast_date) %>%
add_bars(y = ~sale_value, color = I("steelblue3"), name = "realizacja_zł", opacity = 0.3) %>%
add_bars(y = ~forecast_value, color = I("firebrick3"), name = "prognoza_zł", opacity = 0.3) %>%
add_lines(y = ~sale_cnt, color = I("steelblue2"), name = "realizacja_szt", yaxis = "y2") %>%
add_lines(y = ~forecast_quantity_stat, color = I("firebrick2"), name = "prognoza_szt", yaxis = "y2") %>%
add_lines(y = ~forecast_quantity_corr, color = I("orange"), name = "prognoza_szt_corr", yaxis = "y2") %>%
layout(
title = paste("Prognoza total per dzień", sep="") , yaxis2 = ay,
yaxis=list(range = c(as.numeric(min(tabela_wynik$sale_value, tabela_wynik$forecast_value)), as.numeric(max(tabela_wynik$sale_value, tabela_wynik$forecast_value))), title = "zł"),
barmode = "group", xaxis = list(title="data")
)
return(w)
}
)
output$prognoza_total <- renderPlotly({
wykres1()
})
  # observers driving the select inputs
observeEvent(input$menu ,{
updateSelectizeInput(session, "sklep_input",
choices = input$sklepy,
options = list(closeAfterSelect = T))
}, ignoreInit = T)
observe({
updateSelectizeInput(session, "pgrupa",
choices = unique(dane_real()[id_store==input$sklep_input][order(product_group)][, product_group]),
options = list(closeAfterSelect = T)
)
})
observe({
updateSelectizeInput(session, "anty_pgrupa",
choices = unique(dane_real()[id_store==input$sklep_input][order(product_group)][, product_group]),
options = list(closeAfterSelect = T)
)
})
observe({
if(is.null(input$pgrupa)){c =unique(dane_real()[id_store==input$sklep_input][order(type_group)][, type_group])}
else{c = unique(dane_real()[id_store==input$sklep_input][product_group %in% input$pgrupa][order(type_group)][, type_group])}
updateSelectizeInput(session, "wgrupa",
choices = c,
options = list(closeAfterSelect = T)
)
})
observe({
if(is.null(input$pgrupa)&is.null(input$wgrupa)){c =unique(dane_real()[id_store==input$sklep_input][order(art_group)][, art_group])}
else if((!is.null(input$pgrupa))&(is.null(input$wgrupa))){c = unique(dane_real()[id_store==input$sklep_input][product_group %in% input$pgrupa][order(art_group)][, art_group])}
else if((is.null(input$pgrupa))&(!is.null(input$wgrupa))){c = unique(dane_real()[id_store==input$sklep_input][type_group %in% input$wgrupa][order(art_group)][, art_group])}
else{c = unique(dane_real()[id_store==input$sklep_input][product_group %in% input$pgrupa][type_group %in% input$wgrupa][order(art_group)][, art_group])}
updateSelectizeInput(session, "agrupa",
choices = c,
options = list(closeAfterSelect = T)
)
})
observe({
if((!is.null(input$pgrupa))&(is.null(input$wgrupa))){c = unique(dane_real()[id_store==input$sklep_input][product_group %in% input$pgrupa][, id_article])}
else if (!is.null(input$pgrupa)&!is.null(input$wgrupa)&is.null(input$agrupa)){c = unique(dane_real()[id_store==input$sklep_input][product_group %in% input$pgrupa]
[type_group %in% input$wgrupa][, id_article])}
else if(!is.null(input$pgrupa)&!is.null(input$wgrupa)&!is.null(input$agrupa)){c = unique(dane_real()[id_store==input$sklep_input][product_group %in% input$pgrupa]
[type_group %in% input$wgrupa][art_group %in% input$agrupa][, id_article])}
else if(is.null(input$pgrupa)&is.null(input$wgrupa)&!is.null(input$agrupa)){c = unique(dane_real()[id_store==input$sklep_input]
[art_group %in% input$agrupa][, id_article])}
else if(is.null(input$pgrupa)&!is.null(input$wgrupa)&is.null(input$agrupa)){c = unique(dane_real()[id_store==input$sklep_input]
[type_group %in% input$wgrupa][, id_article])}
else{c = unique(dane_real()[id_store==input$sklep_input]
[type_group %in% input$wgrupa][art_group %in% input$agrupa][, id_article])}
updateSelectizeInput(session, "artykul",
choices = c,
options = list(closeAfterSelect = T)
)
})
observeEvent(input$cofnij, {
if(NROW(rv$tabela_korekta_final)>0){
showNotification("Bieżąca korekta została wycofana, działaj dalej :P", type = "warning")
rv$tabela_korekta_final <- setdiff(rv$tabela_korekta_final, rv$tabela_korekta)}
})
observeEvent(input$wyczysc, {
showNotification("Tabela z korektami została wyczyszczona, zacznij od nowa.", type = "warning")
rv$tabela_korekta_final = data.table(`Numer sklepu`=numeric(), `Numer artykułu`=numeric(), `Data od`=as.Date(character()), `Data do`=as.Date(character()), `Typ korekty` = numeric(), `Wartość korekty` = as.double(), `Typ prognozy` = numeric())
rv$wszystkie_artykuly = c()
})
updated_artykul <- eventReactive(input$symuluj, {
if(input$radio=='Wybierz z listy'){
return(input$artykul)}
else{lista_art <- gsub(" ", "", input$lis_art)
lista_art <- strsplit(lista_art, "\n")[[1]]
return(lista_art)}})
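  # the pasted list is parsed by stripping spaces and splitting on newlines,
  # e.g. "123 \n 456" becomes c("123", "456")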
tabela_filtr <- eventReactive(input$symuluj, {
    # prepare and filter the data at the chosen aggregation level
rv$g<- as.data.table(dane_real())
if(input$wybor_agregacji=="tygodniowa"){
rv$h <- rv$g[, .(forecast_quantity_stat = sum(forecast_quantity_stat, na.rm = T), forecast_quantity_corr= sum(forecast_quantity_corr, na.rm = T), forecast_value = sum(forecast_value, na.rm = T), sale_cnt = sum(sale_cnt, na.rm = T), sale_value = sum(sale_value, na.rm = T)),
by=c("tydzien", "product_group","type_group", "art_group", "id_store","id_article")]
rv$h <- merge(data.table(tydzien = format(seq(min(dane_real()[, forecast_date], na.rm = T), max(dane_real()[, forecast_date], na.rm = T), by="week"),"%Y-%V")), rv$h, by=c("tydzien"), all.x=TRUE)
for (i in names(rv$h)){
rv$h[is.na(get(i)), (i):=0]
}
tabela_filtr <- rv$h[, korekta := NULL]
tabela_filtr <- tabela_filtr[, korekta_final := 1]
temp <- data.table(tydzien=seq(as.Date(input$data_przedz[1]),as.Date(input$data_przedz[2]), by="week"))
temp[, tydzien := format(tydzien, "%Y-%V")]
tabela_filtr <- merge(tabela_filtr, temp, by=c("tydzien"), all.x=TRUE)
daty <- data.table(tydzien = format(seq(min(rv$stok_sklep[, forecast_date], na.rm = T), max(rv$stok_sklep[, forecast_date], na.rm = T), by="week"),"%Y-%V"))
stok_sklep <- rv$stok_sklep[, .(stok_sklepy = sum(stok_sklepy, na.rm = T)), by=c("tydzien", "id_store","id_article")]
stok_sklep <- merge(stok_sklep, daty, by='tydzien', all.y = T)
tabela_filtr <- merge(tabela_filtr, stok_sklep, by = c("id_store", "id_article", "tydzien"), all = T)
}
else{
rv$h <- rv$g[, .(forecast_quantity_stat = sum(forecast_quantity_stat, na.rm = T), forecast_quantity_corr= sum(forecast_quantity_corr, na.rm = T), forecast_value = sum(forecast_value, na.rm = T), sale_cnt = sum(sale_cnt, na.rm = T), sale_value = sum(sale_value, na.rm = T)),
by=c("forecast_date", "product_group","type_group", "art_group", "id_store","id_article")]
rv$h <- merge(data.table(forecast_date = seq(min(dane_real()[, forecast_date], na.rm = T), max(dane_real()[, forecast_date], na.rm = T), by="days")), rv$h, by=c("forecast_date"), all.x=TRUE)
for (i in names(rv$h)){
rv$h[is.na(get(i)), (i):=0]
}
tabela_filtr <- rv$h[, korekta := NULL]
tabela_filtr <- tabela_filtr[, korekta_final := 1]
temp <- data.table(forecast_date=seq(as.Date(input$data_przedz[1]),as.Date(input$data_przedz[2]), by="days"))
tabela_filtr <- merge(tabela_filtr, temp, by=c("forecast_date"), all.x=TRUE)
daty <- data.table(forecast_date = seq(min(rv$stok_sklep[, forecast_date], na.rm = T), max(rv$stok_sklep[, forecast_date], na.rm = T), by="days"))
stok_sklep <- rv$stok_sklep[, .(stok_sklepy = sum(stok_sklepy, na.rm = T)), by=c("forecast_date", "id_store","id_article")]
stok_sklep <- merge(stok_sklep, daty, by='forecast_date', all.y = T)
tabela_filtr <- merge(tabela_filtr, stok_sklep, by = c("id_store", "id_article", "forecast_date"), all = T)
}
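    # both branches above build the same table at weekly or daily grain: forecasts, actuals and
    # store stock merged onto a full calendar, with korekta_final initialised to 1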
    #filters
tabela_filtr1 <- tabela_filtr[id_store == input$sklep_input]
if(input$radio=="Wybierz z listy"){
if(!is.null(input$pgrupa)){tabela_filtr1 <- tabela_filtr1[product_group %in% input$pgrupa]}
if(!is.null(input$anty_pgrupa)){tabela_filtr1 <- tabela_filtr1[!product_group %in% input$anty_pgrupa]}
if(!is.null(input$wgrupa)){tabela_filtr1 <- tabela_filtr1[type_group %in% input$wgrupa]}
if(!is.null(input$agrupa)){tabela_filtr1 <- tabela_filtr1[art_group %in% input$agrupa]}}
if(!is.null(updated_artykul())){tabela_filtr1 <- tabela_filtr1[id_article %in% updated_artykul()]}
rv$capacity <- as.data.table(rv$capacity1)
rv$capacity <- rv$capacity[, capacity := as.numeric(capacity)]
rv$capacity <- rv$capacity[id_store %in% tabela_filtr1[, id_store]]
rv$capacity <- rv$capacity[id_article %in% tabela_filtr1[, id_article]]
return(tabela_filtr1)
})
  # snapshot the input values with eventReactive so the chart does not refresh on every input change
updated_pgrupa <- eventReactive(input$symuluj, {input$pgrupa})
updated_anty_pgrupa <- eventReactive(input$symuluj, {input$anty_pgrupa})
updated_wgrupa <- eventReactive(input$symuluj, {input$wgrupa})
updated_agrupa <- eventReactive(input$symuluj, {input$agrupa})
updated_sklep <- eventReactive(input$symuluj, {input$sklep_input})
updated_wybor_agregacji <- eventReactive(input$symuluj, {input$wybor_agregacji})
updated_korekta <- eventReactive(input$symuluj, {input$korekta})
updated_automat <- eventReactive(input$symuluj, {input$automat})
updated_kierunek <- eventReactive(input$symuluj, {input$kierunek})
updated_ograniczenie <- eventReactive(input$symuluj, {input$ograniczenie})
output$prognoza_po_korektach <- renderPlotly({
    # prepare the chart data, including computing the automatic correction
    # the result is the chart after filtering and applying the correction
req(tabela_filtr())
tytul_pgrupa <- if(length(updated_pgrupa())<=3){paste(updated_pgrupa(), collapse=", ")}
else{paste(updated_pgrupa()[1], updated_pgrupa()[2], updated_pgrupa()[3], "...", sep = ", ")}
tytul_wgrupa<-if(length(updated_wgrupa())<=3){paste(updated_wgrupa(), collapse=", ")}
else{paste(updated_wgrupa()[1], updated_wgrupa()[2], updated_wgrupa()[3], "...", sep = ", ")}
tytul_agrupa<-if(length(updated_agrupa())<=3){paste(updated_agrupa(), collapse=", ")}
else{paste(updated_agrupa()[1], updated_agrupa()[2], updated_agrupa()[3], "...", sep = ", ")}
tabela_filtr_wykres <- as.data.table(tabela_filtr())
if(updated_wybor_agregacji()=="tygodniowa"){
if(updated_automat()=="tak"){
      # build helper tables with the reference data for the automatic correction: summed actuals for the chosen period and the summed forecast
real_do_korekty <- tabela_filtr_wykres[tydzien %in% format(seq(as.Date(input$data_przedz_auto[1]),as.Date(input$data_przedz_auto[2]), by="week"),"%Y-%V"), .(sale_cnt = sum(sale_cnt, na.rm = T)), by = c("id_store", "id_article")]
prog_do_korekty <- tabela_filtr_wykres[tydzien %in% format(seq(as.Date(input$data_przedz[1]),as.Date(input$data_przedz[2]), by="week"),"%Y-%V"), .(forecast_quantity_stat = sum(forecast_quantity_stat, na.rm = T)), by = c("id_store", "id_article")]
dane_do_korekty <- merge(real_do_korekty, prog_do_korekty, by = c("id_store", "id_article"), all = T)
print(dane_do_korekty)
      # keep only articles present in both the actuals and the forecast, and with a forecast in the correction window above the configured threshold (default 10 units)
dane_do_korekty <- dane_do_korekty[id_article %in% real_do_korekty[, id_article]&id_article %in% prog_do_korekty[, id_article]]
dane_do_korekty <- dane_do_korekty[!(forecast_quantity_stat<updated_ograniczenie())]
      # period lengths used to compute and compare the average actuals and the average forecast
okres_prog <- NROW(as.data.table(format(seq(as.Date(input$data_przedz[1]),as.Date(input$data_przedz[2]), by="week"),"%Y-%V")))-1
okres_real <- NROW(as.data.table(format(seq(as.Date(input$data_przedz_auto[1]),as.Date(input$data_przedz_auto[2]), by="week"),"%Y-%V")))-1
      # the automatic correction for each article is the average actuals from the reference period divided by the average forecast from the period the correction will be applied to
      # parameters:
      # - corrections between 0.9 and 1.1 are disabled (set to 1)
      # - buffer of 0.1 for decreasing corrections and 0.2 for increasing ones
      # - lower and upper bounds on the correction value, default: 0.6 and 2.0
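      # illustrative example (assumed numbers): average weekly actuals of 12 vs an average weekly
      # forecast of 8 gives a ratio of 1.5; it lies outside [0.9, 1.1], so the 0.2 buffer is
      # subtracted -> 1.3, which stays within the default [0.6, 2] bounds and is already a
      # multiple of 0.05, so 1.3 is the stored correction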
dane_do_korekty[, korekta := round(as.numeric(sale_cnt/okres_real)/as.numeric(forecast_quantity_stat/okres_prog),2)][, korekta := ifelse(korekta<=1.1&korekta>=0.9, 1, korekta)][, korekta := ifelse(korekta>1, korekta-0.2, korekta+0.1)][, korekta := ifelse(korekta<=1.1&korekta>=0.9, 1, korekta)][, korekta := ifelse(korekta<input$korekta_przedz[1], input$korekta_przedz[1], korekta)][, korekta := ifelse(korekta>input$korekta_przedz[2], input$korekta_przedz[2], korekta)]
dane_do_korekty <- dane_do_korekty[, korekta, by = c("id_store", "id_article")]
dane_do_korekty <- na.omit(dane_do_korekty)
dane_do_korekty <- dane_do_korekty[!is.na(korekta)][!(korekta==1)]
      # filter the corrections by the chosen direction
if(updated_kierunek()=="zmniejszająca"){
dane_do_korekty <- dane_do_korekty[!(`korekta`>1)]
}else if(updated_kierunek()=="zwiększająca"){
dane_do_korekty <- dane_do_korekty[!(`korekta`<1)]
}
rv$dane_do_korekty <- dane_do_korekty[, korekta := round_any(korekta, 0.05)]
tabela_filtr_wykres <- merge(tabela_filtr_wykres, rv$dane_do_korekty, by = c("id_store", "id_article"), all.x = T)
}else{
tabela_filtr_wykres[, korekta := updated_korekta()]
}
      # multiply the forecast by the correction
tabela_filtr_wykres[, forecast_quantity_stat_new := ifelse(!is.na(korekta), forecast_quantity_stat * korekta, forecast_quantity_stat)]
      # prepare the data for the chart
tabela_filtr_wykres <- as.data.table(tabela_filtr_wykres[, .(stok_sklepy = sum(stok_sklepy, na.rm = T), forecast_quantity_stat = sum(forecast_quantity_stat, na.rm = T), forecast_quantity_stat_new = sum(forecast_quantity_stat_new, na.rm = T), forecast_quantity_corr= sum(forecast_quantity_corr, na.rm = T), forecast_value = sum(forecast_value, na.rm = T), sale_cnt = sum(sale_cnt, na.rm = T), sale_value = sum(sale_value, na.rm = T)), by=c("tydzien")])
daty <- data.table(tydzien = format(seq(min(dane_real()[, forecast_date], na.rm = T), max(dane_real()[, forecast_date], na.rm = T), by="week"),"%Y-%V"))
tabela_filtr_wykres <- merge(tabela_filtr_wykres, daty, by='tydzien', all.y = T)
tabela_filtr_wykres[is.na(tabela_filtr_wykres)] <- 0
tabela_filtr_wykres[!(tydzien %in% format(seq(as.Date(input$data_przedz[1]),as.Date(input$data_przedz[2]), by="week"),"%Y-%V")), forecast_quantity_stat_new := 0]
      # horizontal line with the latest capacity for the selected articles
capa <- sum(rv$capacity[, capacity], na.rm = T)
print(paste("capacity: ",capa, sep=""))
ay <- list(
overlaying = "y",
side = "right",
title = "stok",
range = c(as.numeric(max(tabela_filtr_wykres$stok_sklepy) / max(tabela_filtr_wykres$sale_cnt) * min(tabela_filtr_wykres$sale_cnt)),
as.numeric(max(tabela_filtr_wykres[, stok_sklepy])))
)
plot_ly(data = tabela_filtr_wykres, x = ~tydzien) %>%
add_lines(y = ~sale_cnt, color = I("steelblue2"), name = "realizacja_szt") %>%
add_lines(y = ~forecast_quantity_stat, color = I("firebrick2"), name = "prognoza_szt") %>%
add_lines(y = ~forecast_quantity_corr, color = I("orange"), name = "prognoza_szt_corr") %>%
add_lines(y = ~forecast_quantity_stat_new, color = I("green"), name = "prognoza_szt_stat_new") %>%
add_lines(y = ~stok_sklepy, color = I("wheat"), name = "stok sklepowy", opacity = 0.7, yaxis = 'y2') %>%
add_lines(y = capa, color = I("turquoise1"), name = "capacity", opacity = 0.7, yaxis = 'y2', if(is.null(updated_artykul())) {visible=FALSE}) %>%
layout(
title = if(is.null(updated_pgrupa())){paste("Prognoza, sklep: ",updated_sklep(), sep="")}
else{paste("Prognoza: ", tytul_pgrupa, tytul_wgrupa, tytul_agrupa, ", sklep: ", updated_sklep(), sep=" ")},
yaxis = list(title="sztuki", range = c(min(tabela_filtr_wykres$sale_cnt), max(max(tabela_filtr_wykres$sale_cnt),max(tabela_filtr_wykres$forecast_quantity_stat_new)))), xaxis = list(title="data"), yaxis2 = ay
)
}else{
if(updated_automat()=="tak"){
      # build helper tables with the reference data for the automatic correction: summed actuals for the chosen period and the summed forecast
real_do_korekty <- tabela_filtr_wykres[forecast_date %in% seq(as.Date(input$data_przedz_auto[1]),as.Date(input$data_przedz_auto[2]), by="days"), .(sale_cnt = sum(sale_cnt, na.rm = T)), by = c("id_store", "id_article")]
prog_do_korekty <- tabela_filtr_wykres[forecast_date %in% seq(as.Date(input$data_przedz[1]),as.Date(input$data_przedz[2]), by="days"), .(forecast_quantity_stat = sum(forecast_quantity_stat, na.rm = T)), by = c("id_store", "id_article")]
dane_do_korekty <- merge(real_do_korekty, prog_do_korekty, by = c("id_store", "id_article"), all = T)
      # keep only articles present in both the actuals and the forecast, and with a forecast in the correction window above the configured threshold (default 10 units)
dane_do_korekty <- dane_do_korekty[id_article %in% real_do_korekty[, id_article]][id_article %in% prog_do_korekty[, id_article]]
dane_do_korekty <- dane_do_korekty[!(forecast_quantity_stat<updated_ograniczenie())]
      # period lengths used to compute and compare the average actuals and the average forecast
okres_prog <- NROW(as.data.table(seq(as.Date(input$data_przedz[1]),as.Date(input$data_przedz[2]), by="days")))-1
okres_real <- NROW(as.data.table(seq(as.Date(input$data_przedz_auto[1]),as.Date(input$data_przedz_auto[2]), by="days")))-1
      # the automatic correction for each article is the average actuals from the reference period divided by the average forecast from the period the correction will be applied to
      # parameters:
      # - corrections between 0.9 and 1.1 are disabled (set to 1)
      # - buffer of 0.1 for decreasing corrections and 0.2 for increasing ones
      # - lower and upper bounds on the correction value, default: 0.6 and 2.0
dane_do_korekty[, korekta := round(as.numeric(sale_cnt/okres_real)/as.numeric(forecast_quantity_stat/okres_prog),2)][, korekta := ifelse(korekta<=1.1&korekta>=0.9, 1, korekta)][, korekta := ifelse(korekta>1, korekta-0.2, korekta+0.1)][, korekta := ifelse(korekta<=1.1&korekta>=0.9, 1, korekta)][, korekta := ifelse(korekta<input$korekta_przedz[1], input$korekta_przedz[1], korekta)][, korekta := ifelse(korekta>input$korekta_przedz[2], input$korekta_przedz[2], korekta)]
dane_do_korekty <- dane_do_korekty[, korekta, by = c("id_store", "id_article")]
dane_do_korekty <- na.omit(dane_do_korekty)
dane_do_korekty <- dane_do_korekty[!is.na(korekta)][!(korekta==1)]
      # filter the corrections by the chosen direction
if(updated_kierunek()=="zmniejszająca"){
dane_do_korekty <- dane_do_korekty[!(`korekta`>1)]
}else if(updated_kierunek()=="zwiększająca"){
dane_do_korekty <- dane_do_korekty[!(`korekta`<1)]
}
rv$dane_do_korekty <- dane_do_korekty[, korekta := round_any(korekta, 0.05)]
tabela_filtr_wykres <- merge(tabela_filtr_wykres, rv$dane_do_korekty, by = c("id_store", "id_article"), all.x = T)
}else{
tabela_filtr_wykres[, korekta := updated_korekta()]
}
      # multiply the forecast by the correction
tabela_filtr_wykres[, forecast_quantity_stat_new := ifelse(!is.na(korekta), forecast_quantity_stat * korekta, forecast_quantity_stat)]
      # prepare the data for the chart
tabela_filtr_wykres <- as.data.table(tabela_filtr_wykres[, .(stok_sklepy = sum(stok_sklepy, na.rm = T), forecast_quantity_stat = sum(forecast_quantity_stat, na.rm = T), forecast_quantity_stat_new = sum(forecast_quantity_stat_new, na.rm = T), forecast_quantity_corr= sum(forecast_quantity_corr, na.rm = T), forecast_value = sum(forecast_value, na.rm = T), sale_cnt = sum(sale_cnt, na.rm = T), sale_value = sum(sale_value, na.rm = T)), by=c("forecast_date")])
daty <- data.table(forecast_date = seq(min(dane_real()[, forecast_date], na.rm = T), max(dane_real()[, forecast_date], na.rm = T), by="days"))
tabela_filtr_wykres <- merge(tabela_filtr_wykres, daty, by="forecast_date", all.y = T)
tabela_filtr_wykres[is.na(tabela_filtr_wykres)] <- 0
tabela_filtr_wykres[!(forecast_date %in% seq(as.Date(input$data_przedz[1]),as.Date(input$data_przedz[2]), by="days")), forecast_quantity_stat_new := 0]
      # horizontal line with the latest capacity for the selected articles
capa <- sum(rv$capacity[, capacity], na.rm = T)
print(paste("capacity: ",capa, sep=""))
ay <- list(
overlaying = "y",
side = "right",
title = "stok",
range = c(as.numeric(max(tabela_filtr_wykres$stok_sklepy) / max(tabela_filtr_wykres$sale_cnt) * min(tabela_filtr_wykres$sale_cnt)),
as.numeric(max(tabela_filtr_wykres[, stok_sklepy])))
)
plot_ly(data = tabela_filtr_wykres, x = ~forecast_date) %>%
add_lines(y = ~sale_cnt, color = I("steelblue2"), name = "realizacja_szt") %>%
add_lines(y = ~forecast_quantity_stat, color = I("firebrick2"), name = "prognoza_szt") %>%
add_lines(y = ~forecast_quantity_corr, color = I("orange"), name = "prognoza_szt_corr") %>%
add_lines(y = ~forecast_quantity_stat_new, color = I("green"), name = "prognoza_szt_stat_new") %>%
add_lines(y = ~stok_sklepy, color = I("wheat"), name = "stok sklepowy", opacity = 0.7, yaxis = 'y2') %>%
add_lines(y = capa, color = I("turquoise1"), name = "capacity", opacity = 0.7, yaxis = 'y2', if(is.null(updated_artykul())) {visible=FALSE}) %>%
layout(
title = if(is.null(updated_pgrupa())){paste("Prognoza, sklep: ",updated_sklep(), sep="")}
else{paste("Prognoza: ", tytul_pgrupa, tytul_wgrupa, tytul_agrupa, ", sklep: ", updated_sklep(), sep=" ")},
yaxis = list(title="sztuki", range = c(min(tabela_filtr_wykres$sale_cnt), max(max(tabela_filtr_wykres$sale_cnt),max(tabela_filtr_wykres$forecast_quantity_stat_new)))), xaxis = list(title="data"), yaxis2 = ay
)
}
})
tabela_pobierz <- eventReactive(input$symuluj,{
tabela_pobierz <- merge(tabela_filtr(), rv$capacity, by = c("id_store", "id_article"), all = T)
View(tabela_pobierz)
return(tabela_pobierz)
})
output$pobierz <- downloadHandler(
filename = function() {
paste(sciezka, "/dane_sklep_",updated_sklep(), ".xlsx", sep="")},
content = function(file) {
write.xlsx(tabela_pobierz(), file, sep=";", dec=",")
}
)
observeEvent(input$zatwierdz,{
# build the final correction table
print("jestem w ostatnim bloku")
data_od <- input$data_przedz[1]
data_do <- input$data_przedz[2]
rv$temp1 <- dane_real()[, .(min1 = min(forecast_date), max1=max(forecast_date)), by=c("id_store", "id_article")]
rv$g <- as.data.table(dane_real())
if(input$radio=="Wybierz z listy"){
if(!is.null(updated_pgrupa())){rv$g <- as.data.table(rv$g)[product_group %in% updated_pgrupa()]}
if(!is.null(updated_anty_pgrupa())){rv$g <- as.data.table(rv$g)[!product_group %in% updated_anty_pgrupa()]}
if(!is.null(updated_wgrupa())){rv$g <- as.data.table(rv$g)[type_group %in% updated_wgrupa()]}
if(!is.null(updated_agrupa())){rv$g <- as.data.table(rv$g)[art_group %in% updated_agrupa()]}}
if(!is.null(updated_artykul())){rv$g <- as.data.table(rv$g)[id_article %in% updated_artykul()]}
rv$temp1 <- rv$temp1[id_store %in% updated_sklep()]
rv$tabela_korekta <- data.table()
rv$tabela_korekta <- as.data.table(unique(rv$g[forecast_date >= Sys.Date() & forecast_date <= data_do, .(id_store, id_article)]))
rv$tabela_korekta <- merge(rv$tabela_korekta, rv$temp1, by=c("id_article", "id_store"))
if(updated_automat()=="tak"){
rv$tabela_korekta <- merge(rv$tabela_korekta, rv$dane_do_korekty, by=c("id_article", "id_store"), all.x = T)}
else{rv$tabela_korekta <- rv$tabela_korekta[, korekta := updated_korekta()]}
rv$tabela_korekta <- rv$tabela_korekta[, .(`Numer sklepu` = as.numeric(updated_sklep()), `Numer artykułu` = id_article, `Data od` = as.Date(data_od, origin = "1970-01-01"), `Data do`= as.Date(data_do, origin = "1970-01-01"), `Typ korekty` = 1, `Wartość korekty` = as.numeric(korekta), `Typ prognozy` = 0)]
if(updated_kierunek()=="zmniejszająca"){
rv$tabela_korekta <- rv$tabela_korekta[!(`Wartość korekty`>1)]
}else if(updated_kierunek()=="zwiększająca"){
rv$tabela_korekta <- rv$tabela_korekta[!(`Wartość korekty`<1)]
}
rv$tabela_korekta <- rv$tabela_korekta[!is.na(`Wartość korekty`)]
rv$tabela_korekta <- rv$tabela_korekta[!(`Wartość korekty`==1)]
# handle the issue of overlapping correction date ranges on a store-article
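    # Note: the four clauses in the flag below reduce to the standard interval
    # overlap test `Data od.new <= Data do.old & Data do.new >= Data od.old`.
    # Illustrative sketch (not executed), with hypothetical dates:
    if (FALSE) {
      overlaps <- function(start_new, end_new, start_old, end_old) {
        start_new <= end_old & end_new >= start_old
      }
      overlaps(as.Date("2020-01-10"), as.Date("2020-01-20"),
               as.Date("2020-01-15"), as.Date("2020-01-25"))  # TRUE
    }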
temp_check <- merge(rv$tabela_korekta, rv$tabela_korekta_final, by=c("Numer sklepu", "Numer artykułu"), allow.cartesian=T, suffixes = c('.new','.old' ))
temp_check_flag <- temp_check[, flag := ifelse(((`Data od.new`<=`Data do.old`&`Data do.new`>=`Data od.old`)|(`Data do.new`>=`Data od.old`&`Data od.new`<=`Data do.old`)|(`Data od.new`>=`Data od.old`&`Data do.new`<=`Data do.old`)|(`Data do.new`==`Data od.old`&`Data od.new`==`Data do.old`)), 1, 0)]
if(NROW(temp_check_flag[`Numer artykułu` %in% rv$tabela_korekta$`Numer artykułu`][flag==1])==0){
showNotification(paste("Korekta została zatwierdzona. Ilość dodanych wierszy: ", NROW(rv$tabela_korekta), ", średni wpis: ", round(mean(rv$tabela_korekta[, `Wartość korekty`]), 2), sep = ""), type = "message", duration = 20)
rv$tabela_korekta_final <- rbind(rv$tabela_korekta, rv$tabela_korekta_final)
}else{
showNotification(paste("Daty na sklepo-artykule nakładają się. Np. na: Sklepie ", temp_check_flag[flag==1][1, `Numer sklepu`],
", artykule ", temp_check_flag[flag==1][1, `Numer artykułu`], ", przy następujących datach poprzedniej korekty: ",
rv$tabela_korekta_final[1, `Data od`],", ", rv$tabela_korekta_final[1, `Data do`], sep=""), type = "error", duration = NULL)
rv$tabela_korekta <- rv$tabela_korekta[!(`Numer artykułu` %in% temp_check_flag$`Numer artykułu`)]
}
rv$wszystkie_artykuly <- unique(append(rv$wszystkie_artykuly, rv$tabela_korekta[, `Numer artykułu`]))
print(rv$wszystkie_artykuly)
rv$tabela_korekta_final <- rv$tabela_korekta_final[`Numer artykułu` %in% rv$wszystkie_artykuly]
rv$tabela_korekta_final <- unique(rv$tabela_korekta_final)
rv$wszystkie_sklepy <- unique(rv$tabela_korekta_final[, `Numer sklepu`])
print(rv$tabela_korekta_final)
})
output$tabela_z_korektami = DT::renderDataTable({
DT::datatable(rv$tabela_korekta_final, options = list(paging = TRUE, searching = FALSE))
})
output$zapisz <- downloadHandler(
filename = function() {
paste(sciezka, "/plik_korekty_", paste(rv$wszystkie_sklepy, collapse="_") ,"_", Sys.Date(),".xlsx", sep="")},
content = function(file) {
write.xlsx(rv$tabela_korekta_final, file, sep=";", dec=",")
}
)
rv2 <- reactiveValues(stare_calc_date = 0, nowe_calc_date = 0)
dane_porownanie <- eventReactive(input$przelicz_porownanie, {
sklepy <- paste(shQuote(input$sklep_porownanie, type="csh"), collapse=", ")
daty_prognozy <- c(input$data_prognozy_1, input$data_prognozy_2)
daty_prognozy <- paste(shQuote(daty_prognozy, type="csh"), collapse=", ")
s <- Sys.time()
process_step_run_id_full <- dbGetQuery(con, paste("SELECT count(*) maximum , calculation_date, process_step_run_id, process_run_id, calculation_id
FROM detail_sas.sales_forecast_hist
WHERE calculation_date in (", daty_prognozy ,")
GROUP BY calculation_date, process_step_run_id, process_run_id, calculation_id
order by maximum desc;", sep=""))
s1<- Sys.time()
print(s1-s)
process_step_run_id_full <- as.data.table(process_step_run_id_full)
process_step_run_id_full <- setDT(process_step_run_id_full)[order(-maximum)][,.SD[1,], by = .(calculation_date)]
calculation_date <- paste(shQuote(process_step_run_id_full[, calculation_date], type="csh"), collapse=", ")
process_step_run_id <- paste(shQuote(process_step_run_id_full[,process_step_run_id], type="csh"), collapse=", ")
process_run_id <- paste(shQuote(process_step_run_id_full[, process_run_id], type="csh"), collapse=", ")
calculation_id <- paste(shQuote(process_step_run_id_full[, calculation_id], type="csh"), collapse=", ")
first_date = as.Date(substring(calculation_date, 16,25))
second_date <- first_date + 60
# query that retrieves the forecast table
start_time <- Sys.time()
prognozy_dla_sklepow <- dbGetQuery(con, paste("select SAS.calculation_date as calculation_date,
SAS.id_store as id_store,
SAS.id_article as id_article,
SAS.forecast_date as forecast_date,
DICT.product_group as product_group,
DICT.type_group as type_group,
sum(SAS.forecast_quantity_stat) as forecast_quantity_stat,
sum(SAS.forecast_quantity_corr) as forecast_quantity_corr,
sum(SAS.forecast_value) as forecast_value
from detail_sas.sales_forecast_hist SAS
join dict.dic_articles DICT
ON SAS.id_article = DICT.id_article
WHERE id_store in (", sklepy, ")
AND process_step_run_id in (", process_step_run_id, ")
and process_run_id in (", process_run_id, ")
and calculation_id in (", calculation_id, ")
AND forecast_date >= '", first_date ,"'
AND forecast_date <= '", second_date ,"'
group by SAS.id_store, DICT.product_group, DICT.type_group, SAS.id_article,
SAS.forecast_date, SAS.calculation_date;", sep=""))
end_time <- Sys.time()
print(end_time-start_time)
start_time <- Sys.time()
    # query that retrieves the actuals (realisation) table
realizacje_dla_sklepow <- dbGetQuery(con, paste("SELECT
REAL.id_store as id_store,
REAL.id_article as id_article,
DICT.product_group as product_group,
DICT.type_group as type_group,
REAL.working_date as working_date,
sum(REAL.sale_cnt) as sale_cnt,
sum(REAL.sale_value) as sale_value
FROM detail_sales.tickets REAL
join dict.dic_articles DICT
ON REAL.id_article = DICT.id_article
WHERE REAL.id_store in (", sklepy ,") AND REAL.working_date >= '", Sys.Date() -460 ,"'
GROUP BY REAL.id_article, REAL.id_store, REAL.working_date, DICT.product_group,
DICT.type_group;", sep=""))
end_time <- Sys.time()
print(end_time-start_time)
    # reshape the forecast and actuals tables
prognozy_dla_sklepow <- as.data.table(prognozy_dla_sklepow)
prognozy_dla_sklepow <- na.omit(prognozy_dla_sklepow)
realizacje_dla_sklepow <- as.data.table(realizacje_dla_sklepow)
realizacje_dla_sklepow <- na.omit(realizacje_dla_sklepow)
    # join the tables
prognozy_i_realizacje <- merge(prognozy_dla_sklepow, realizacje_dla_sklepow,
by.x=c("id_store","product_group", "id_article", "type_group", "forecast_date"), by.y=c("id_store", "product_group", "id_article", "type_group", "working_date"), all=TRUE)
rv2$stare_calc_date <-min(process_step_run_id_full[, calculation_date])
rv2$nowe_calc_date <-max(process_step_run_id_full[, calculation_date])
print("zakończyłem obliczać dane_porownanie")
return(prognozy_i_realizacje)
})
output$tabela = DT::renderDataTable({
req(dane_porownanie())
DT::datatable(NULL)
})
observe({
updateSelectizeInput(session, "pgrupa_por",
choices = unique(dane_porownanie()[id_store==input$sklep_porownanie][order(product_group)][, product_group]),
options = list(closeAfterSelect = T)
)
})
observe({
if(is.null(input$pgrupa_por)){c =unique(dane_porownanie()[id_store==input$sklep_porownanie][order(type_group)][, type_group])}
else{c = unique(dane_porownanie()[id_store==input$sklep_porownanie][product_group %in% input$pgrupa_por][order(type_group)][, type_group])}
updateSelectizeInput(session, "wgrupa_por",
choices = c,
options = list(closeAfterSelect = T)
)
})
tabela_filtr_por <- eventReactive(input$generuj_porownanie, {
req(dane_porownanie())
g<- as.data.table(dane_porownanie())
h <- g[, .(forecast_quantity_stat = sum(forecast_quantity_stat, na.rm = T), forecast_quantity_corr= sum(forecast_quantity_corr, na.rm = T),
forecast_value = sum(forecast_value, na.rm = T), sale_cnt = sum(sale_cnt, na.rm = T), sale_value = sum(sale_value, na.rm = T)),
by=c("forecast_date", "calculation_date","product_group", "id_article", "type_group", "id_store")]
h <- merge(data.table(forecast_date = seq(input$data_przedz_por[1], input$data_przedz_por[2], by="days")), h, by=c("forecast_date"), all.x=TRUE)
for (i in names(h)){
h[is.na(get(i)), (i):=0]
}
tabela_filtr <- as.data.table(h)
tabela_filtr1 <- tabela_filtr[id_store == input$sklep_porownanie]
print(tabela_filtr1)
if(!is.null(input$pgrupa_por)){tabela_filtr1 <- tabela_filtr1[product_group %in% input$pgrupa_por]}
if(!is.null(input$wgrupa_por)){tabela_filtr1 <- tabela_filtr1[type_group %in% input$wgrupa_por]}
print("zakończyłem obliczać tabelę filtr")
return(tabela_filtr1)
})
updated_sklep_porownanie <- eventReactive(input$generuj_porownanie, {input$sklep_porownanie})
output$wykres_porownanie <- renderPlotly({
req(tabela_filtr_por())
tab_wykres_por <- as.data.table(tabela_filtr_por())
tab_wykres_por <- tab_wykres_por[, .(sale_cnt = sum(sale_cnt, na.rm = T), forecast_quantity_corr= sum(forecast_quantity_corr, na.rm = T)), by = c("forecast_date", "calculation_date")]
tab_wykres_por[is.na(tab_wykres_por)] <- 0
sklep = updated_sklep_porownanie()
    # generate the chart; the transformations below separate forecasts with different "calculation_date"
    # data.table approach
plot_ly() %>%
add_lines(data = tab_wykres_por, x = ~forecast_date,
y = ~sale_cnt, color = I("steelblue2"), name = "realizacja") %>%
add_lines(data = tab_wykres_por[calculation_date==rv2$stare_calc_date],
x = ~forecast_date, y = ~forecast_quantity_corr, color = I("firebrick2"), name = "prognoza 2") %>%
add_lines(data = tab_wykres_por[calculation_date==rv2$nowe_calc_date],
x = ~forecast_date, y = ~forecast_quantity_corr, color = I("orange"), name = "prognoza 1") %>%
layout(
title = paste("Weryfikacja prognoz na sztukach dla sklepu nr: ", sklep, sep=""), yaxis = list(range = c(0, as.numeric(max(tab_wykres_por[, sale_cnt])))))
})
output$wykres_porownanie_zl <- renderPlotly({
req(tabela_filtr_por())
tab_wykres_por <- as.data.table(tabela_filtr_por())
tab_wykres_por <- tab_wykres_por[, .(sale_value = sum(sale_value, na.rm = T), forecast_value= sum(forecast_value, na.rm = T)), by = c("forecast_date", "calculation_date")]
tab_wykres_por[is.na(tab_wykres_por)] <- 0
sklep = updated_sklep_porownanie()
    # generate the chart; the transformations below separate forecasts with different "calculation_date"
    # data.table approach
plot_ly() %>%
add_lines(data = tab_wykres_por, x = ~forecast_date,
y = ~sale_value, color = I("steelblue2"), name = "realizacja") %>%
add_lines(data = tab_wykres_por[calculation_date==rv2$stare_calc_date],
x = ~forecast_date, y = ~forecast_value, color = I("firebrick2"), name = "prognoza 2") %>%
add_lines(data = tab_wykres_por[calculation_date==rv2$nowe_calc_date],
x = ~forecast_date, y = ~forecast_value, color = I("orange"), name = "prognoza 1") %>%
layout(
title = paste("Weryfikacja prognoz na obrocie dla sklepu nr: ", sklep, sep=""), yaxis = list(range = c(0, as.numeric(max(tab_wykres_por[, sale_value])))))
})
dane_kanibalizm <- eventReactive(input$generuj_kan,{
sciezka <- "Z:/USERS/DZ_Magazyn/DZIAL ANALIZ/SEKCJA PROGNOZOWANIA/ML_Forecasts"
kanibalizacja <- fread(paste(sciezka, "/Dane_do_generatorów/15_perfect_distance.csv", sep=""))
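    # fastPOSIXct() on the next line comes from the fasttime package (assumed
    # to be loaded elsewhere in the app)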
kanibalizacja <- kanibalizacja[, data := as.Date(fastPOSIXct(data))]
sklep <- input$sklep_kanibalizacja
obrot <- dbGetQuery(con, paste("SELECT REAL.id_store as id_store, sum(REAL.sale_value) as sale_value, REAL.working_date as working_date
FROM detail_sales.tickets REAL WHERE REAL.id_store in (", sklep ,") AND REAL.working_date >= '2017-01-01'
GROUP BY REAL.id_store, REAL.working_date", sep=""))
kanibalizacja_podglad <- kanibalizacja[id_store == sklep]
kanibalizacja_podglad <- merge(kanibalizacja_podglad, obrot, by.x=c("data"), by.y=c("working_date"))
kanibalizacja_podglad <- kanibalizacja_podglad[sale_value > 0.01]
return(kanibalizacja_podglad)
})
updated_sklep_kan <- eventReactive(input$generuj_kan, {input$sklep_kanibalizacja})
output$wykres_kan <- renderPlotly({
req(dane_kanibalizm())
sklep <- updated_sklep_kan()
ay <- list(
tickfont = list(color = "red"),
overlaying = "y",
side = "right",
title = "obrót"
)
plot_ly(dane_kanibalizm(), x = ~data) %>%
add_lines(y = ~closest_neighbour, name = 'closest_neighbour',mode = 'lines', color = I('purple'), type = 'scatter') %>%
add_lines(y = ~neighbours_r_1km, name = 'neighbours_r_1km',mode = 'lines', color = I('cornflowerblue'), type = 'scatter') %>%
add_lines(y = ~neighbours_r_3km, name = 'neighbours_r_3km',mode = 'lines', color = I('green'), type = 'scatter') %>%
add_lines(y = ~neighbours_r_5km, name = 'neighbours_r_5km',mode = 'lines', color = I('red'), type = 'scatter') %>%
add_lines(y = ~sale_value, name = 'obrót',mode = 'lines', color = I('orange'), type = 'scatter', yaxis = "y2", opacity = 0.3) %>%
layout(
title = paste("Sklepy w pobliżu, a obrót dla sklepu: ", sklep, sep="") , yaxis2 = ay,
xaxis = list(title="x")
)
})
dane_alert <- eventReactive(input$generuj_alert,{
if(input$wybor_alertu=='48'){
sklepy_z_alertem_48 <- paste(shQuote(input$sklep_alert, type="csh"), collapse=", ")
data_alertu <- substring(nazwa_48, 10,17)
data_prognozy <- as.Date(data_alertu,format = "%Y %m %d")
data_prognozy_30dni <- data_prognozy - 30
daty_prognozy_48 <- append(data_prognozy,data_prognozy_30dni)
daty_prognozy_48 <- paste(shQuote(daty_prognozy_48, type="csh"), collapse=", ")
    # send a query to retrieve the "process_step_run_id_full" table with the information identifying specific forecasts, then reshape it
start_time <- Sys.time()
process_step_run_id_full <- dbGetQuery(con, paste("SELECT count(*) maximum , calculation_date, process_step_run_id, process_run_id, calculation_id
FROM detail_sas.sales_forecast_hist
WHERE calculation_date in (", daty_prognozy_48 ,")
GROUP BY calculation_date, process_step_run_id, process_run_id, calculation_id
order by maximum desc;", sep=""))
end_time <- Sys.time()
print(end_time-start_time)
process_step_run_id_full <- as.data.table(process_step_run_id_full)
process_step_run_id_full <- setDT(process_step_run_id_full)[order(-maximum)][,.SD[1,], by = .(calculation_date)]
rv$process_step_run_id_full_48 <- as.data.table(process_step_run_id_full)
calculation_date <- paste(shQuote(process_step_run_id_full[, calculation_date], type="csh"), collapse=", ")
process_step_run_id <- paste(shQuote(process_step_run_id_full[,process_step_run_id], type="csh"), collapse=", ")
process_run_id <- paste(shQuote(process_step_run_id_full[, process_run_id], type="csh"), collapse=", ")
calculation_id <- paste(shQuote(process_step_run_id_full[, calculation_id], type="csh"), collapse=", ")
first_date = as.Date(substring(calculation_date, 16,25))
second_date <- first_date + 60
    # query that retrieves the forecast table
start_time <- Sys.time()
prognozy_dla_sklepow_z_alertem <- dbGetQuery(con, paste("select SAS.calculation_date,
SAS.id_store as id_store,
SAS.forecast_date as forecast_date,
sum(SAS.forecast_quantity_stat) as forecast_quantity_stat,
sum(SAS.forecast_quantity_corr) as forecast_quantity_corr,
sum(SAS.forecast_value) as forecast_value
from detail_sas.sales_forecast_hist SAS
WHERE id_store in (", sklepy_z_alertem_48, ")
AND process_step_run_id in (", process_step_run_id, ")
and process_run_id in (", process_run_id, ")
and calculation_id in (", calculation_id, ")
AND forecast_date >= '", first_date ,"'
AND forecast_date <= '", second_date ,"'
group by SAS.id_store,
SAS.forecast_date, SAS.calculation_date;", sep=""))
end_time <- Sys.time()
print(end_time-start_time)
prognozy_dla_sklepow_z_alertem
start_time <- Sys.time()
    # query that retrieves the actuals table
realizacje_dla_sklepow_z_alertem <- dbGetQuery(con, paste("SELECT
REAL.id_store as id_store,
REAL.date_tran_warsaw_d,
sum(REAL.sale_cnt) as sale_cnt,
sum(REAL.sale_value) as sale_value
FROM detail_sales.tickets REAL
WHERE id_store in (", sklepy_z_alertem_48 ,") AND REAL.date_tran_warsaw_d >= '", first_date ,"'
GROUP BY REAL.id_store, REAL.date_tran_warsaw_d"))
end_time <- Sys.time()
print(end_time-start_time)
realizacje_dla_sklepow_z_alertem
    # reshape the forecast and actuals tables
prognozy_dla_sklepow_z_alertem <- as.data.table(prognozy_dla_sklepow_z_alertem)
prognozy_dla_sklepow_z_alertem <- na.omit(prognozy_dla_sklepow_z_alertem)
realizacje_dla_sklepow_z_alertem <- as.data.table(realizacje_dla_sklepow_z_alertem)
realizacje_dla_sklepow_z_alertem <- na.omit(realizacje_dla_sklepow_z_alertem)
    # join the tables
prognozy_i_realizacje <- merge(prognozy_dla_sklepow_z_alertem, realizacje_dla_sklepow_z_alertem,
by.x=c("id_store", "forecast_date"), by.y=c("id_store", "date_tran_warsaw_d"), all=TRUE)
return(prognozy_i_realizacje)}else{
################################################57###############################################################
sklepy_z_alertem_57 <- paste(shQuote(input$sklep_alert, type="csh"), collapse=", ")
data_alertu_57 <- substring(nazwa_57, 10,17)
data_prognozy_57 <- as.Date(data_alertu_57,format = "%Y %m %d")
data_prognozy_wczoraj <- data_prognozy_57 - 1
daty_prognozy_57 <- append(data_prognozy_57,data_prognozy_wczoraj)
daty_prognozy_57 <- paste(shQuote(daty_prognozy_57, type="csh"), collapse=", ")
    # reshape the alert-57 table to extract the relevant dates from diffDays
sklepy_z_alertem_57_d[, dlugosc := str_count(diffDays, ',')+1]
vector_lengths <- ((sklepy_z_alertem_57_d[,4]))
maximum_length <- lapply(vector_lengths, function(x) x[which.max(abs(x))])
maximum_length <- as.integer(maximum_length)
maximum_length[1]
sklepy_z_alertem_57_d <- setDT(sklepy_z_alertem_57_d)[,paste0("diffDays_", 1:maximum_length[1]) := tstrsplit(diffDays, ",")][,-"diffDays", with=F]
colA = paste("diffDays_", 1:maximum_length, sep = "")
sklepy_z_alertem_57_unpivot <- melt(sklepy_z_alertem_57_d, id.vars = c("StoreId"),
measure.vars = colA)
sklepy_z_alertem_57_unpivot <- sklepy_z_alertem_57_unpivot[, variable :=NULL]
sklepy_z_alertem_57_unpivot <- na.omit(sklepy_z_alertem_57_unpivot)
str(sklepy_z_alertem_57_unpivot)
sklepy_z_alertem_57_unpivot[, Daty:= as.Date(value)]
str(sklepy_z_alertem_57_unpivot[, Daty])
    # send a query to retrieve the "process_step_run_id_full" table with the information identifying specific forecasts, then reshape it
start_time <- Sys.time()
process_step_run_id_full <-dbGetQuery(con, paste(
"SELECT count(*) maximum , calculation_date, process_step_run_id, process_run_id, calculation_id
FROM detail_sas.sales_forecast_hist
WHERE calculation_date in (", daty_prognozy_57 ,")
GROUP BY calculation_date, process_step_run_id, process_run_id, calculation_id
order by maximum desc;", sep=""))
end_time <- Sys.time()
print(end_time-start_time)
process_step_run_id_full <- as.data.table(process_step_run_id_full)
process_step_run_id_full<- setDT(process_step_run_id_full)[order(-maximum)][,.SD[1,], by = .(calculation_date)]
rv$process_step_run_id_full_57 <- as.data.table(process_step_run_id_full)
calculation_date <- paste(shQuote(process_step_run_id_full[, calculation_date], type="csh"), collapse=", ")
process_step_run_id <- paste(shQuote(process_step_run_id_full[,process_step_run_id], type="csh"), collapse=", ")
process_run_id <- paste(shQuote(process_step_run_id_full[, process_run_id], type="csh"), collapse=", ")
calculation_id <- paste(shQuote(process_step_run_id_full[, calculation_id], type="csh"), collapse=", ")
first_date = as.Date(substring(calculation_date, 16,25))
second_date <- first_date + 60
process_step_run_id_full
start_time <- Sys.time()
    # query that retrieves the forecast table
prognozy_dla_sklepow_z_alertem_57 <- dbGetQuery(con, paste("select SAS.calculation_date,
SAS.id_store as id_store,
SAS.forecast_date as forecast_date,
sum(SAS.forecast_quantity_stat) as forecast_quantity_stat,
sum(SAS.forecast_quantity_corr) as forecast_quantity_corr,
sum(SAS.forecast_value) as forecast_value
from detail_sas.sales_forecast_hist SAS
WHERE id_store in (", sklepy_z_alertem_57 ,")
AND process_step_run_id in (", process_step_run_id, ")
and process_run_id in (", process_run_id, ")
and calculation_id in (", calculation_id, ")
AND forecast_date >= '", first_date ,"'
AND forecast_date <= '", second_date ,"'
group by SAS.id_store,
SAS.forecast_date, SAS.calculation_date;", sep=""))
end_time <- Sys.time()
print(end_time-start_time)
    # reshape the tables as needed before joining
prognozy_dla_sklepow_z_alertem_57 <- as.data.table(prognozy_dla_sklepow_z_alertem_57)
prognozy_dla_sklepow_z_alertem_57 <- na.omit(prognozy_dla_sklepow_z_alertem_57)
sklepy_z_alertem_57_unpivot = subset(sklepy_z_alertem_57_unpivot, select = -c(value))
    # join the tables
prognozy_i_daty_alertow_57 <- merge(prognozy_dla_sklepow_z_alertem_57, sklepy_z_alertem_57_unpivot,
by.x=c("id_store"), by.y=c("StoreId"), all.x=TRUE, allow.cartesian = TRUE)
return(prognozy_i_daty_alertow_57)}
})
updated_alert <- eventReactive(input$generuj_alert, {input$wybor_alertu})
updated_sklep_alert <- eventReactive(input$generuj_alert, {input$sklep_alert})
output$alert_wykres <- renderPlotly({
req(dane_alert())
    # extract calc_date: old and new ##
if (updated_alert()=='48'){
stare_calc_date <-min(rv$process_step_run_id_full_48[, calculation_date])
nowe_calc_date <-max(rv$process_step_run_id_full_48[, calculation_date])
sklep = updated_sklep_alert()
    # generate the chart; the transformations below separate forecasts with different "calculation_date"
    # data.table approach
plot_ly() %>%
add_lines(data = dane_alert()[calculation_date==stare_calc_date & id_store==sklep], x = ~forecast_date,
y = ~sale_cnt, color = I("steelblue2"), name = "realizacja") %>%
add_lines(data = dane_alert()[calculation_date==stare_calc_date & id_store==sklep],
x = ~forecast_date, y = ~forecast_quantity_corr, color = I("firebrick2"), name = "prognoza sprzed 30 dni") %>%
add_lines(data = dane_alert()[calculation_date==nowe_calc_date & id_store==sklep],
x = ~forecast_date, y = ~forecast_quantity_corr, color = I("firebrick3"), name = "prognoza dzisiejsza") %>%
layout(
title = paste("Prognozy i realizacje z alertem nr 48 dla sklepu nr: ", sklep, sep=""), yaxis = list(range= c(0, as.numeric(max(dane_alert()[, forecast_quantity_corr]))))
)
}else{
stare_calc_date <-min(rv$process_step_run_id_full_57[, calculation_date])
nowe_calc_date <-max(rv$process_step_run_id_full_57[, calculation_date])
sklep = updated_sklep_alert()
    # generate the chart; the transformations below separate forecasts with different "calculation_date"
    # data.table approach
ay <- list(
overlaying = "y",
side = "right",
title = "alert_57",
range = c(0:1),
visible = FALSE
)
plot_ly(data = dane_alert()[calculation_date==stare_calc_date & id_store==sklep], x = ~forecast_date) %>%
add_lines(data = dane_alert()[calculation_date==stare_calc_date & id_store==sklep],
y = ~forecast_quantity_corr, color = I("firebrick2"), name = "prognoza wczorajsza") %>%
add_lines(data = dane_alert()[calculation_date==nowe_calc_date & id_store==sklep]
, y = ~forecast_quantity_corr, color = I("mediumpurple1"),line= list(dash="dashdot"), name = "prognoza dzisiejsza") %>%
add_trace(data = dane_alert()[forecast_date==Daty & id_store==sklep],
color = I("yellow2"), type="bar",
x = ~forecast_date, y=1, hoverinfo = 'y', name= "dni z alertem 57", opacity = 0.3, yaxis = "y2")%>%
layout(
title = paste("prognozy i daty alertów 57 dla sklepu nr:", sklep, sep=""), yaxis2 = ay ,
xaxis= list(title="Data"), yaxis=list(title="Sztuki", range = c(0, as.numeric(max(dane_alert()[, forecast_quantity_corr])))), bargap = 0.9)
}
})
}
shinyApp(ui=ui, server = server)
|
##Helper functions for visualization app
##Victor Veitch
##03/11/2015
#for now these should just be run by the programmer to make the relevant lists for the app,
#but they'll hopefully be useful if/when I get around to allowing data upload
#returns the names of all the categorical variables (factors) in a dataframe
factorNames <- function(data){
names(data)[sapply(names(data), function(n) class(data[[n]])=="factor")]
}
#returns the names of all the quantitative variables (non-factors) in a dataframe
nonfactorNames <- function(data){
names(data)[sapply(names(data), function(n) class(data[[n]])!="factor")]
}
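
#example usage (illustrative addition, not part of the original script):
#split the columns of a data frame by type, e.g. on the built-in iris data
if (FALSE) {
  factorNames(iris)     # "Species"
  nonfactorNames(iris)  # "Sepal.Length" "Sepal.Width" "Petal.Length" "Petal.Width"
}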
|
/summarising_data/helpers.R
|
no_license
|
vveitch/OO_R_Shiny_dev
|
R
| false | false | 623 |
r
|
|
# 7_26 designcon.R
# Read in the data
data <- read.csv("design.con", header = FALSE, stringsAsFactors = FALSE)
data2 <- data$V1[5:length(data$V1)]
data2 <- data.frame(cbind(data2), stringsAsFactors = FALSE)
# A for loop is needed to enclose the code below
for (i in 1:2) {
ncols <- length(strsplit(data2$data2[1], " ")[[1]]) # Configure columns number
ori <- data2$data2[1:length(data2$data2)]
# If there is one covariate, rep is 2 times
# If there are two covariate, rep is 4 times
EV1 <- c(ori, rep(paste(rep("0", ncols), collapse = " "), 2))
EV2 <- c(rep("0", length(data2$data2)), 1, -1) # need update
data2 <- data.frame(cbind(EV1,EV2))
cols <- names(data2)
data2$x <- apply(data2, 1, paste, collapse = " ")
data2 <- data2[,!(names(data2) %in% cols)]
data2 <- data.frame(data2, stringsAsFactors = FALSE)
}
# Need to configure headings
# Configure NW, NC, PP
makehead <- function(NW = ncols + 1, NC = 4, PP = "1 1") { # A function creating the heading
NumWaves <- paste("/NumWaves", NW)
NumContrasts <- paste("/NumContrasts", NC)
PPheights <- paste("/PPheights", PP)
Matrix <- "/Matrix"
heading <- c(NumWaves, NumContrasts, PPheights, Matrix)
return(heading)
}
heading <- makehead()
# Combine heading and updated matrix
data3 <- cbind(c(heading, data2$data2))
data3 <- data.frame(data3)
# Export data
filename <- "Hellocon"
write.table(data3, paste(filename,".con", sep = ""), quote = FALSE, row.names = FALSE, col.names = FALSE)
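# Example (illustrative addition, not part of the original script): a heading
# for a design with 5 EVs and 2 contrasts; PPheights is passed as one string
if (FALSE) {
  makehead(NW = 5, NC = 2, PP = "1 1")
  # "/NumWaves 5" "/NumContrasts 2" "/PPheights 1 1" "/Matrix"
}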
|
/7_26_designcon_draft.R
|
no_license
|
yukaizou2015/designmat
|
R
| false | false | 1,572 |
r
|
|
####################################
## Prepare Ireland CORINE landcover data for use at hectad scale
##
## author: Willson Gaul wgaul@hotmail.com (based on a script by Hannah White
## vasc_fres_all.R shared with wg via GitHub in April 2018)
## created: 2 May 2018
## last modified: 23 Dec 2019 - remove water from proportion calculations
## 6 Jan 2020 - add 1km raster also
####################################
## packages required by this script (added here so it runs standalone; in the
## original project they may be loaded by a wrapper script)
library(raster)  # raster(), rasterToPoints(); also attaches sp for CRS(), spTransform()
library(readr)   # read_csv()
library(dplyr)   # left_join()
library(Hmisc)   # %nin% (adjust if %nin% is defined elsewhere in the project)
corine <- raster("./data/CORINE_IE.grd")
c_legend <- read_csv("./data/clc_legend.csv")
hecs <- read_csv("./data/Irish_land_hectads.csv")
irish_1km_raster <- raster(xmn = 10000, xmx = 380000,ymn = -30000,ymx = 500000,
crs = CRS("+init=epsg:29903"), vals = 1)
res(irish_1km_raster) <- 1000
df_1km <- data.frame(rasterToPoints(irish_1km_raster, spatial = F))
colnames(df_1km) <- c("eastings", "northings")
corine_sp <- rasterToPoints(corine, fun = function(x){x!=0}, spatial = T)
corine_sp <- spTransform(corine_sp, CRS("+init=epsg:29903")) # transform to IE grid
corine_df <- as.data.frame(corine_sp)
# add land cover text descriptions
corine_df <- left_join(corine_df, c_legend,
by = c("g100_clc12_V18_5" = "GRID_CODE"))
# convert land cover text to valid names (to be used for column headers later)
corine_df$LABEL1 <- make.names(corine_df$LABEL1, unique = FALSE)
corine_df$LABEL2 <- make.names(corine_df$LABEL2, unique = FALSE)
#corine_df$LABEL3 <- make.names(corine_df$LABEL3, unique = FALSE)
# make array to hold the number of Label1-level land use classes in each hectad
clc_l1_counts_hecs <- array(0, dim = c(nrow(hecs),
length(unique(c_legend$LABEL1))))
colnames(clc_l1_counts_hecs) <- make.names(unique(c_legend$LABEL1), unique = T)
# remove water bodies column (for millipedes I don't think the proportion of
# water in a grid square matters much)
clc_l1_counts_hecs <- clc_l1_counts_hecs[, colnames(clc_l1_counts_hecs) !=
"Water.bodies"]
# make array to hold the number of Label1-level land use classes in each 1km square
clc_l1_counts_1km <- array(0, dim = c(nrow(df_1km),
length(unique(c_legend$LABEL1))))
colnames(clc_l1_counts_1km) <- make.names(unique(c_legend$LABEL1), unique = T)
# remove water bodies column (for millipedes I don't think the proportion of
# water in a grid square matters much)
clc_l1_counts_1km <- clc_l1_counts_1km[, colnames(clc_l1_counts_1km) !=
"Water.bodies"]
# make array to hold proportion of each Label1-level land use class in hectads
clc_l1_props_hecs <- array(0, dim = c(nrow(hecs),
ncol(clc_l1_counts_hecs)))
colnames(clc_l1_props_hecs) <- make.names(colnames(clc_l1_counts_hecs),
unique = T)
# make array to hold proportion of each Label1-level land use class in 1 km squares
clc_l1_props_1km <- array(0, dim = c(nrow(df_1km),
ncol(clc_l1_counts_1km)))
colnames(clc_l1_props_1km) <- make.names(colnames(clc_l1_counts_1km),
unique = T)
# make array to hold the number of LABEL2-level land use classes in each hectad
clc_l2_counts_hecs <- array(0, dim = c(nrow(hecs),
length(unique(c_legend$LABEL2))))
colnames(clc_l2_counts_hecs) <- make.names(unique(c_legend$LABEL2), unique = T)
clc_l2_counts_hecs <- clc_l2_counts_hecs[, colnames(clc_l2_counts_hecs) %nin%
c("Inland.waters", "Marine.waters",
"UNCLASSIFIED.WATER.BODIES")]
# make array to hold proportion of each LABEL2-level land use class in hectads
clc_l2_props_hecs <- array(0, dim = c(nrow(hecs), ncol(clc_l2_counts_hecs)))
colnames(clc_l2_props_hecs) <- make.names(colnames(clc_l2_counts_hecs),
unique = T)
# make array to hold the number of LABEL2-level land use classes in each 1km square
clc_l2_counts_1km <- array(0, dim = c(nrow(df_1km),
length(unique(c_legend$LABEL2))))
colnames(clc_l2_counts_1km) <- make.names(unique(c_legend$LABEL2), unique = T)
clc_l2_counts_1km <- clc_l2_counts_1km[, colnames(clc_l2_counts_1km) %nin%
c("Inland.waters", "Marine.waters",
"UNCLASSIFIED.WATER.BODIES")]
# make array to hold proportion of each LABEL2-level land use class in 1km squares
clc_l2_props_1km <- array(0, dim = c(nrow(df_1km), ncol(clc_l2_counts_1km)))
colnames(clc_l2_props_1km) <- make.names(colnames(clc_l2_counts_1km),
unique = T)
for (i in 1:nrow(hecs)) {
# find centers of corine raster cells that lie within each hectad
# (some cells may cross hectad boundary but this is ignored)
corine_sub = corine_df[which(corine_df$x >= hecs$eastings[i] &
corine_df$x < hecs$eastings[i] + 10^4 &
corine_df$y >= hecs$northings[i] &
corine_df$y < hecs$northings[i] + 10^4), ]
## LABEL1 classes
# count number of corine LABEL1 classes within this hectad
clc_l1_counts_hecs[i, ] <- sapply(colnames(clc_l1_counts_hecs),
FUN = function(x) {
sum(x == corine_sub$LABEL1)})
# calculate proportions of corine LABEL1 classes (if no land cover classes in
# this grid cell, then set all proportions to zero)
if(sum(clc_l1_counts_hecs[i, ]) == 0) {
clc_l1_props_hecs[i, ] <- 0} else {
clc_l1_props_hecs[i, ] <- clc_l1_counts_hecs[i, ] / sum(
clc_l1_counts_hecs[i, ])
}
## LABEL2 classes
# count number of corine LABEL2 classes within this hectad
clc_l2_counts_hecs[i, ] <- sapply(colnames(clc_l2_counts_hecs),
FUN = function(x) {sum(
x == corine_sub$LABEL2)})
# calculate proportions of corine LABEL1 classes
if(sum(clc_l2_counts_hecs[i, ]) == 0) {
clc_l2_props_hecs[i, ] <- 0
} else {
clc_l2_props_hecs[i, ] <- clc_l2_counts_hecs[i, ] / sum(
clc_l2_counts_hecs[i, ])
}
}
for (i in 1:nrow(df_1km)) {
# find centers of corine raster cells that lie within each 1km square
# (some cells may cross square boundary but this is ignored)
corine_sub = corine_df[which(corine_df$x >= df_1km$eastings[i] &
corine_df$x < df_1km$eastings[i] + 10^3 &
corine_df$y >= df_1km$northings[i] &
corine_df$y < df_1km$northings[i] + 10^3), ]
## LABEL1 classes
# count number of corine LABEL1 classes within this 1km cell
clc_l1_counts_1km[i, ] <- sapply(colnames(clc_l1_counts_1km),
FUN = function(x) {
sum(x == corine_sub$LABEL1)})
# calculate proportions of corine LABEL1 classes (if no land cover classes in
# this grid cell, then set all proportions to zero)
if(sum(clc_l1_counts_1km[i, ]) == 0) {
clc_l1_props_1km[i, ] <- 0} else {
clc_l1_props_1km[i, ] <- clc_l1_counts_1km[i, ] / sum(
clc_l1_counts_1km[i, ])
}
## LABEL2 classes
# count number of corine LABEL2 classes within this 1km square
clc_l2_counts_1km[i, ] <- sapply(colnames(clc_l2_counts_1km),
FUN = function(x) {sum(
x == corine_sub$LABEL2)})
# calculate proportions of corine LABEL1 classes
if(sum(clc_l2_counts_1km[i, ]) == 0) {
clc_l2_props_1km[i, ] <- 0
} else {
clc_l2_props_1km[i, ] <- clc_l2_counts_1km[i, ] / sum(
clc_l2_counts_1km[i, ])
}
}
clc_l1_counts_hecs <- cbind(hecs, clc_l1_counts_hecs)
clc_l1_props_hecs <- cbind(hecs, clc_l1_props_hecs)
clc_l2_counts_hecs <- cbind(hecs, clc_l2_counts_hecs)
clc_l2_props_hecs <- cbind(hecs, clc_l2_props_hecs)
clc_l1_counts_1km <- cbind(df_1km, clc_l1_counts_1km)
clc_l1_props_1km <- cbind(df_1km, clc_l1_props_1km)
clc_l2_counts_1km <- cbind(df_1km, clc_l2_counts_1km)
clc_l2_props_1km <- cbind(df_1km, clc_l2_props_1km)
## promote results to spatial objects -----------------------------------------
coordinates(clc_l1_props_hecs) <- as.matrix(
clc_l1_props_hecs[, c("eastings", "northings")])
coordinates(clc_l2_props_hecs) <- as.matrix(
clc_l2_props_hecs[, c("eastings", "northings")])
proj4string(clc_l1_props_hecs) <- CRS("+init=epsg:29903")
proj4string(clc_l2_props_hecs) <- CRS("+init=epsg:29903")
gridded(clc_l1_props_hecs) <- TRUE # promote to SpatialPixelsDataFrame
gridded(clc_l2_props_hecs) <- TRUE
clc_l1_props_hecs <- as(clc_l1_props_hecs, "SpatialGridDataFrame")
clc_l2_props_hecs <- as(clc_l2_props_hecs, "SpatialGridDataFrame")
coordinates(clc_l1_props_1km) <- as.matrix(
clc_l1_props_1km[, c("eastings", "northings")])
coordinates(clc_l2_props_1km) <- as.matrix(
clc_l2_props_1km[, c("eastings", "northings")])
proj4string(clc_l1_props_1km) <- CRS("+init=epsg:29903")
proj4string(clc_l2_props_1km) <- CRS("+init=epsg:29903")
gridded(clc_l1_props_1km) <- TRUE # promote to SpatialPixelsDataFrame
gridded(clc_l2_props_1km) <- TRUE
clc_l1_props_1km <- as(clc_l1_props_1km, "SpatialGridDataFrame")
clc_l2_props_1km <- as(clc_l2_props_1km, "SpatialGridDataFrame")
## Make rasters of each variable --------------------------------------------
## make a raster for each land cover class giving the proportion of the hectad
# (raster cell) that is that class.
# These 5 (or 16) rasters will be the predictor variables.
# LABEL 1 level
artificial_surfaces_l1_rast <- raster::raster(
clc_l1_props_hecs["Artificial.surfaces"])
agricultural_l1_rast <- raster::raster(
clc_l1_props_hecs["Agricultural.areas"])
forest_seminatural_l1_rast <- raster::raster(
clc_l1_props_hecs["Forest.and.semi.natural.areas"])
wetlands_l1_rast <- raster::raster(
clc_l1_props_hecs["Wetlands"])
artificial_surfaces_l1_rast_1km <- raster::raster(
clc_l1_props_1km["Artificial.surfaces"])
agricultural_l1_rast_1km <- raster::raster(
clc_l1_props_1km["Agricultural.areas"])
forest_seminatural_l1_rast_1km <- raster::raster(
clc_l1_props_1km["Forest.and.semi.natural.areas"])
wetlands_l1_rast_1km <- raster::raster(
clc_l1_props_1km["Wetlands"])
# LABEL2 level
urban_fabric_l2_rast <- raster::raster(
clc_l2_props_hecs["Urban.fabric"])
industrial_commercial_transport_l2_rast <- raster::raster(
clc_l2_props_hecs["Industrial..commercial.and.transport.units"])
mine_dump_construction_l2_rast <- raster::raster(
clc_l2_props_hecs["Mine..dump.and.construction.sites"])
artificial_non_ag_vegetated_l2_rast <- raster::raster(
clc_l2_props_hecs["Artificial..non.agricultural.vegetated.areas"])
arable_land_l2_rast <- raster::raster(
clc_l2_props_hecs["Arable.land"])
permanent_crops_l2_rast <- raster::raster(
clc_l2_props_hecs["Permanent.crops"])
pasture_l2_rast <- raster::raster(
clc_l2_props_hecs["Pastures"])
heterogeneous_ag_l2_rast <- raster::raster(
clc_l2_props_hecs["Heterogeneous.agricultural.areas"])
forest_l2_rast <- raster::raster(
clc_l2_props_hecs["Forests"])
scrub_herbaceous_l2_rast <- raster::raster(
clc_l2_props_hecs["Scrub.and.or.herbaceous.vegetation.associations"])
open_space_no_veg_l2_rast <- raster::raster(
clc_l2_props_hecs["Open.spaces.with.little.or.no.vegetation"])
inland_wetlands_l2_rast <- raster::raster(
clc_l2_props_hecs["Inland.wetlands"])
maritime_wetlands_l2_rast <- raster::raster(
clc_l2_props_hecs["Maritime.wetlands"])
urban_fabric_l2_rast_1km <- raster::raster(
clc_l2_props_1km["Urban.fabric"])
industrial_commercial_transport_l2_rast_1km <- raster::raster(
clc_l2_props_1km["Industrial..commercial.and.transport.units"])
mine_dump_construction_l2_rast_1km <- raster::raster(
clc_l2_props_1km["Mine..dump.and.construction.sites"])
artificial_non_ag_vegetated_l2_rast_1km <- raster::raster(
clc_l2_props_1km["Artificial..non.agricultural.vegetated.areas"])
arable_land_l2_rast_1km <- raster::raster(
clc_l2_props_1km["Arable.land"])
permanent_crops_l2_rast_1km <- raster::raster(
clc_l2_props_1km["Permanent.crops"])
pasture_l2_rast_1km <- raster::raster(
clc_l2_props_1km["Pastures"])
heterogeneous_ag_l2_rast_1km <- raster::raster(
clc_l2_props_1km["Heterogeneous.agricultural.areas"])
forest_l2_rast_1km <- raster::raster(
clc_l2_props_1km["Forests"])
scrub_herbaceous_l2_rast_1km <- raster::raster(
clc_l2_props_1km["Scrub.and.or.herbaceous.vegetation.associations"])
open_space_no_veg_l2_rast_1km <- raster::raster(
clc_l2_props_1km["Open.spaces.with.little.or.no.vegetation"])
inland_wetlands_l2_rast_1km <- raster::raster(
clc_l2_props_1km["Inland.wetlands"])
maritime_wetlands_l2_rast_1km <- raster::raster(
clc_l2_props_1km["Maritime.wetlands"])
## end make rasters -----------------------------------------------------------
## clean workspace except for rasters and results dfs
rm(list = c("c_legend", "corine_df", "corine_sub", "hecs", "df_1km", "corine",
"corine_sp", "i"))
## save outputs --------------------------------------------------------------
## write out RData with rasters and results dfs
save(clc_l1_props_hecs, artificial_surfaces_l1_rast, agricultural_l1_rast,
forest_seminatural_l1_rast, wetlands_l1_rast,
file = "corine_label_1_hectad.RData")
save(clc_l1_props_1km, artificial_surfaces_l1_rast_1km, agricultural_l1_rast_1km,
forest_seminatural_l1_rast_1km, wetlands_l1_rast_1km,
file = "corine_label_1_1km.RData")
save(urban_fabric_l2_rast, industrial_commercial_transport_l2_rast,
mine_dump_construction_l2_rast, artificial_non_ag_vegetated_l2_rast,
arable_land_l2_rast, permanent_crops_l2_rast, pasture_l2_rast,
heterogeneous_ag_l2_rast, forest_l2_rast, scrub_herbaceous_l2_rast,
open_space_no_veg_l2_rast, inland_wetlands_l2_rast,
maritime_wetlands_l2_rast,
file = "corine_label_2_hectad.RData")
save(urban_fabric_l2_rast_1km, industrial_commercial_transport_l2_rast_1km,
mine_dump_construction_l2_rast_1km, artificial_non_ag_vegetated_l2_rast_1km,
arable_land_l2_rast_1km, permanent_crops_l2_rast_1km, pasture_l2_rast_1km,
heterogeneous_ag_l2_rast_1km, forest_l2_rast_1km,
scrub_herbaceous_l2_rast_1km,
open_space_no_veg_l2_rast_1km, inland_wetlands_l2_rast_1km,
maritime_wetlands_l2_rast_1km,
file = "corine_label_2_1km.RData")
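## example of downstream use (illustrative addition, not part of the original
## script): reload the hectad-level LABEL1 layers and combine them into a stack
if (FALSE) {
  load("corine_label_1_hectad.RData")
  l1_stack <- raster::stack(artificial_surfaces_l1_rast, agricultural_l1_rast,
                            forest_seminatural_l1_rast, wetlands_l1_rast)
  raster::plot(l1_stack)
}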
|
/prep_corine.R
|
permissive
|
wgaul/millipede_spatial_undersampling
|
R
| false | false | 14,621 |
r
|
|
dat = read.table(file="WeekInt.txt",header=T)
library("car")
attach(dat)
cm10_dif = diff( cm10 )
aaa_dif = diff( aaa )
cm30_dif = diff( cm30 )
ff_dif = diff( ff )
postscript("cm10aaa.ps",width=6,height=5)
plot(cm10_dif,aaa_dif,xlab="change in 10YR T rate",
ylab="change in AAA rate")
graphics.off()
postscript("weekint_splotm.ps",width=6,height=5)
plot(as.data.frame(cbind(aaa_dif,cm10_dif,cm30_dif,ff_dif)))
graphics.off()
fit1 = lm(aaa_dif ~ cm10_dif)
fit2 = lm(aaa_dif~cm10_dif+cm30_dif)
fit3 = lm(aaa_dif~cm10_dif+cm30_dif+ff_dif)
fit4 = lm(aaa_dif~cm30_dif)
summary(lm(aaa_dif~ff_dif))
cor(aaa_dif,ff_dif)
options(digits=3)
anova(lm(aaa_dif~ff_dif+cm30_dif+cm10_dif))
anova(lm(aaa_dif~cm10_dif+cm30_dif+ff_dif))
anova(lm(aaa_dif~cm30_dif+cm10_dif+ff_dif))
postscript("weekint_partialresidual.ps",width=6,height=5)
par(mfrow=c(2,2))
cr.plot(fit2,var="cm10_dif",main="(a)",smooth=F,lty=1,lwd=2,col="black")
cr.plot(fit2,var="cm30_dif",main="(b)",smooth=F,lty=1,lwd=2,col="black")
plot(cm10_dif,aaa_dif,main="(c)")
reg.line(fit1,col="black",lwd=2,lty="dashed")
plot(cm30_dif,aaa_dif,main="(d)")
reg.line(fit4,col="black",lwd=2,lty="dashed")
graphics.off()
# experimenting with av plots.
par(mfrow=c(3,2))
cr.plot(fit2,var="cm10_dif",main="(a)",smooth=F,lty=1,lwd=2,col="black")
cr.plot(fit2,var="cm30_dif",main="(b)",smooth=F,lty=1,lwd=2,col="black")
av.plot(fit2,var="cm10_dif",main="(a)",smooth=F,lty=1,lwd=2,col="black")
av.plot(fit2,var="cm30_dif",main="(b)",smooth=F,lty=1,lwd=2,col="black")
plot(cm10_dif,aaa_dif,main="(c)")
reg.line(fit1,col="black",lwd=2,lty="dashed")
plot(cm30_dif,aaa_dif,main="(d)")
reg.line(fit4,col="black",lwd=2,lty="dashed")
library(faraway)
vif(fit3)
options(digits=2)
summary(fit1)
summary(fit2)
summary(fit3)
library(leaps)
subsets = regsubsets(aaa_dif~.,
data=as.data.frame(cbind(cm10_dif,cm30_dif,ff_dif)),nbest=1)
b=summary(subsets)
postscript("WeekInt_model_selection.ps",width=6,height=3)
par(mfrow=c(1,3),lab=c(2,5,3),pch=19)
plot(1:3,b$bic,type="b",xlab="number of variables",
ylab="BIC",cex=2.5)
plot(1:3,b$cp,type="b",xlab="number of variables",
ylab="Cp",cex=2.5)
plot(1:3,b$adjr2,type="b",xlab="number of variables",
ylab="adjusted R2")
graphics.off()
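# Hedged add-on (not in the original script): programmatically pick the
# best-BIC subset found by regsubsets() above and refit it with lm().
# Assumes `b` (the regsubsets summary) and the *_dif series still exist.
best_size = which.min(b$bic)
best_vars = names(which(b$which[best_size, -1]))
best_fit = lm(reformulate(best_vars, response = "aaa_dif"),
              data = as.data.frame(cbind(aaa_dif, cm10_dif, cm30_dif, ff_dif)))
summary(best_fit)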
|
/Statistics_and_Data_Analysis_for_Financial_Engineering_AER/RPrograms/Chapter 13/WeekInt.R
|
no_license
|
burakbayramli/books
|
R
| false | false | 2,346 |
r
|
dat = read.table(file="WeekInt.txt",header=T)
library("car")
attach(dat)
cm10_dif = diff( cm10 )
aaa_dif = diff( aaa )
cm30_dif = diff( cm30 )
ff_dif = diff( ff )
postscript("cm10aaa.ps",width=6,height=5)
plot(cm10_dif,aaa_dif,xlab="change in 10YR T rate",
ylab="change in AAA rate")
graphics.off()
postscript("weekint_splotm.ps",width=6,height=5)
plot(as.data.frame(cbind(aaa_dif,cm10_dif,cm30_dif,ff_dif)))
graphics.off()
fit1 = lm(aaa_dif ~ cm10_dif)
fit2 = lm(aaa_dif~cm10_dif+cm30_dif)
fit3 = lm(aaa_dif~cm10_dif+cm30_dif+ff_dif)
fit4 = lm(aaa_dif~cm30_dif)
summary(lm(aaa_dif~ff_dif))
cor(aaa_dif,ff_dif)
options(digits=3)
anova(lm(aaa_dif~ff_dif+cm30_dif+cm10_dif))
anova(lm(aaa_dif~cm10_dif+cm30_dif+ff_dif))
anova(lm(aaa_dif~cm30_dif+cm10_dif+ff_dif))
postscript("weekint_partialresidual.ps",width=6,height=5)
par(mfrow=c(2,2))
cr.plot(fit2,var="cm10_dif",main="(a)",smooth=F,lty=1,lwd=2,col="black")
cr.plot(fit2,var="cm30_dif",main="(b)",smooth=F,lty=1,lwd=2,col="black")
plot(cm10_dif,aaa_dif,main="(c)")
reg.line(fit1,col="black",lwd=2,lty="dashed")
plot(cm30_dif,aaa_dif,main="(d)")
reg.line(fit4,col="black",lwd=2,lty="dashed")
graphics.off()
# experimenting with av plots.
par(mfrow=c(3,2))
cr.plot(fit2,var="cm10_dif",main="(a)",smooth=F,lty=1,lwd=2,col="black")
cr.plot(fit2,var="cm30_dif",main="(b)",smooth=F,lty=1,lwd=2,col="black")
av.plot(fit2,var="cm10_dif",main="(a)",smooth=F,lty=1,lwd=2,col="black")
av.plot(fit2,var="cm30_dif",main="(b)",smooth=F,lty=1,lwd=2,col="black")
plot(cm10_dif,aaa_dif,main="(c)")
reg.line(fit1,col="black",lwd=2,lty="dashed")
plot(cm30_dif,aaa_dif,main="(d)")
reg.line(fit4,col="black",lwd=2,lty="dashed")
library(faraway)
vif(fit3)
options(digits=2)
summary(fit1)
summary(fit2)
summary(fit3)
library(leaps)
subsets = regsubsets(aaa_dif~.,
data=as.data.frame(cbind(cm10_dif,cm30_dif,ff_dif)),nbest=1)
b=summary(subsets)
postscript("WeekInt_model_selection.ps",width=6,height=3)
par(mfrow=c(1,3),lab=c(2,5,3),pch=19)
plot(1:3,b$bic,type="b",xlab="number of variables",
ylab="BIC",cex=2.5)
plot(1:3,b$cp,type="b",xlab="number of variables",
ylab="Cp",cex=2.5)
plot(1:3,b$adjr2,type="b",xlab="number of variables",
ylab="adjusted R2")
graphics.off()
|
##ASSOCIATION RULE##
library(arules)
library(arulesViz)
#Loading the data
books<-read.csv("E:/Data science Excelr/Assigments/Association/book.csv")
databook<-data.frame(as.matrix(books)) #Converting the dataset to a data frame
barplot(sapply(databook,sum),col = 1:2000)
#RULES
options(max.print = 9999)
rules <- apriori(as.matrix(databook),parameter = list(support=0.005,confidence=0.9,minlen= 5))#907 rules done
arules::inspect(rules) # show the support, lift and confidence for all rules
plot(rules)
rules_conf <- sort (rules, by="confidence", decreasing=TRUE) # 'high-confidence' rules.
arules::inspect(rules_conf)
plot(rules_conf)# show the support, lift and confidence for all rules
#The rules with confidence of 1 imply that, whenever the LHS item was purchased,the RHS item was also purchased 100% of the time.
rules_lift <- sort (rules, by="lift", decreasing=TRUE) # 'high-lift' rules.
arules::inspect(rules_lift) # show the support, lift and confidence for all rules
#To get ‘strong‘ rules,we increase the value of ‘conf’ parameter.
#To get ‘longer‘ rules, increase ‘maxlen’.
subsetRules <- which(colSums(is.subset(rules, rules)) > 1) # get subset rules in vector
length(subsetRules)
rules <- rules[-subsetRules] # remove subset rules.
########Rule 2##########
rules <- apriori (as.matrix(databook), parameter=list (supp=0.01,conf = 0.9,minlen=2), appearance = list (default="lhs",rhs="ChildBks"), control = list (verbose=T)) # get rules that lead to buying "childbks"
rules_conf <- sort (rules, by="confidence", decreasing=TRUE) # 'high-confidence' rules.
arules::inspect(rules_conf)
rules_lift <- sort (rules, by="lift", decreasing=TRUE) # 'high-lift' rules.
arules::inspect(rules_lift)
#People who read youthbks, geogbks, Refbks and italsbks also tend to read childbks
######Visualization######
library(arulesViz)
plot(rules,method = "scatterplot", jitter=0 )
plot(rules,method = "grouped")
plot(rules,method="paracoord",control=list(reorder=TRUE))
plot(rules,method = "graph")
#plot(rules,method = "mosaic") #this plot dont work for all rules together it can visualize one rule at one time
plot(rules,method="two-key plot", jitter=0)
top4rules <- head(rules, n = 10, by = "confidence")
plot(top4rules, method = "graph", engine = "htmlwidget") #an interactive plot
#######Trying random values of support and confidence
rules <- apriori(as.matrix(databook),parameter = list(support=0.01,confidence=0.8))#678 rules done
rules_conf <- sort (rules, by="confidence", decreasing=TRUE) # 'high-confidence' rules.
arules::inspect(rules_conf)# show the support, lift and confidence for all rules
rules <- apriori(as.matrix(databook),parameter = list(support=0.06,confidence=0.9)) #3 rules
rules_conf <- sort (rules, by="confidence", decreasing=TRUE) # 'high-confidence' rules.
arules::inspect(rules_conf)# show the support, lift and confidence for all rules
########Changing the minimum length in apriori algorithm
rules1 <- apriori (as.matrix(databook), parameter=list (supp=0.01,conf = 0.6,minlen=4,maxlen = 15), control = list (verbose=T)) #1026 Rules
rules_conf1 <- sort (rules1, by="confidence", decreasing=TRUE) # 'high-confidence' rules.
arules::inspect(rules_conf1)
plot(rules1,method = "scatterplot", jitter=0 )
|
/3. Algorithms/Unsupervised Machine Learning Algorithms/Association Rule/book/book.R
|
no_license
|
Nikita9779/Datascience_R_codes
|
R
| false | false | 3,264 |
r
|
#' Union Select Columns From Multiple Datasets
#'
#' This function will union the records from multiple data sets returning only the requested columns (all of which are
#' assumed to be named the same between data sets).
#'
#' @param .data A `list()` of `data.frame`s or `tbl_spark`s.
#' @param ... <[`tidy-select`][dplyr_tidy_select]> One or more unquoted expressions separated by commas. Variable names
#' can be used as if they were positions in the `data.frame`, so expressions like `x:y` can be used to select a range
#' of variables.
#' @param .all `logical(1)`. Whether to keep duplicate records (def: `TRUE`) or not (`FALSE`).
#'
#' @return
#' A `tbl_spark` or a `data.frame` depending on the input, `.data`.
#'
#' @examples
#' a <- data.frame(col1 = c(1:10, 10), col2 = 6)
#' b <- data.frame(col1 = c(1:5, 5), col2 = 4)
#' c <- data.frame(col1 = c(0, 1, 1, 2, 3, 5, 8))
#'
#' # You can union specific columns
#' union_select(.data = list(a, b, c), "col1")
#'
#' # And you can remove duplicate records
#' union_select(.data = list(a, b, c), ends_with("1"), .all = FALSE)
#'
#' @export
union_select <- function(.data, ..., .all = TRUE) {
if (!inherits(.data, "list")) stop("`.data` should be provided as a `list(n)`")
if (!is.logical(.all) || length(.all) != 1L) stop("`.all` should be a `logical(1)` vector")
if (!missing(...)) .data <- lapply(.data, function(x, ...) dplyr::select(x, ...), ...)
res <- .data[[1]]
if (length(.data) == 1L) return(res)
for (i in seq_along(.data)[-1]) {
res <- if (.all) dplyr::union_all(res, .data[[i]]) else dplyr::union(res, .data[[i]])
}
res
}
|
/R/union_select.R
|
no_license
|
nathaneastwood/flicker
|
R
| false | false | 1,607 |
r
|
# Setting the working directory to a temporary directory
setwd(Sys.getenv("TEMP"))
# Setting locale
Sys.setlocale("LC_TIME", "English")
# Downloading and unzipping file
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
destfile <- ".\\project1data.zip"
download.file(url,destfile)
unzip(destfile)
# Reading file and changing variable formats
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", as.is = TRUE)
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
data$Time <- strptime(data$Time, format = "%H:%M:%S")
data$Global_active_power <- as.numeric(data$Global_active_power)
data$Global_reactive_power <- as.numeric(data$Global_reactive_power)
data$Global_intensity <- as.numeric(data$Global_intensity)
data$Voltage <- as.numeric(data$Voltage)
data$Sub_metering_1 <- as.numeric(data$Sub_metering_1)
data$Sub_metering_2 <- as.numeric(data$Sub_metering_2)
data$Sub_metering_3 <- as.numeric(data$Sub_metering_3)
# Subsetting and pasting data
library(lubridate)
dataSub <- data[data$Date == "2007-02-01" | data$Date == "2007-02-02",]
dataSub$Time <- ymd_hm(paste0(dataSub$Date,format((dataSub$Time)," %H:%M")))
# Creating PNG
png("plot4.png", width = 480, height = 480)
par(mfrow=c(2,2))
plot(dataSub$Time,dataSub$Global_active_power,type="l", ylab = "Global Active Power (kilowatts)", xlab = "")
plot(dataSub$Time,dataSub$Voltage,type="l",xlab="datetime",ylab="Voltage")
plot(dataSub$Time,dataSub$Sub_metering_1,type="l", ylab = "Energy sub metering", xlab = "")
lines(dataSub$Time,dataSub$Sub_metering_2, col=2 )
lines(dataSub$Time,dataSub$Sub_metering_3, col=4 )
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col = c(1,2,4), lty = 1, cex = 0.75)
plot(dataSub$Time,dataSub$Global_reactive_power,type="l",xlab="datetime",ylab="Global_reactive_power")
dev.off()
|
/plot4.R
|
no_license
|
MariaOCruz/Exploratory-Data-Analysis
|
R
| false | false | 1,863 |
r
|
#The main EM routine.
require(mvtnorm)
require(matrixcalc)
#' dcem_cluster (multivariate data): Part of DCEM package.
#'
#' Implements the Expectation Maximization algorithm for multivariate data. This function is called
#' by the dcem_train routine.
#'
#' @param data A matrix: The dataset provided by the user.
#'
#' @param meu (matrix): The matrix containing the initial meu(s).
#'
#' @param sigma (list): A list containing the initial covariance matrices.
#'
#' @param prior (vector): A vector containing the initial prior.
#'
#' @param num_clusters (numeric): The number of clusters specified by the user. Default value is 2.
#'
#' @param iteration_count (numeric): The number of iterations for which the algorithm should run, if the
#' convergence is not achieved then the algorithm stops. Default: 200.
#'
#' @param threshold (numeric): A small value to check for convergence (if the estimated meu are within this
#' specified threshold then the algorithm stops and exit).
#'
#' \strong{Note: Choosing a very small value (0.0000001) for threshold can increase the runtime substantially
#' and the algorithm may not converge. On the other hand, choosing a larger value (0.1)
#' can lead to sub-optimal clustering. Default: 0.00001}.
#'
#' @param num_data (numeric): The total number of observations in the data.
#'
#' @return
#' A list of objects. This list contains parameters associated with the
#' Gaussian(s) (posterior probabilities, meu, co-variance and prior)
#'
#'\enumerate{
#' \item (1) Posterior Probabilities: \strong{prob} :A matrix of
#' posterior-probabilities.
#'
#' \item (2) Meu: \strong{meu}: It is a matrix of meu(s). Each row in
#' the matrix corresponds to one meu.
#'
#' \item (3) Sigma: Co-variance matrices: \strong{sigma}
#'
#' \item (4) prior: \strong{prior}: A vector of prior.
#'
#' \item (5) Membership: \strong{membership}: A vector of
#' cluster membership for data.
#' }
#'
#' @usage
#' dcem_cluster_mv(data, meu, sigma, prior, num_clusters, iteration_count,
#' threshold, num_data)
#'
#' @references
#' Using data to build a better EM: EM* for big data.
#'
#' Hasan Kurban, Mark Jenne, Mehmet M. Dalkilic
#' (2016) <https://doi.org/10.1007/s41060-017-0062-1>.
#'
#' @author Parichit Sharma \email{parishar@iu.edu}, Hasan Kurban, Mark Jenne, Mehmet Dalkilic
#'
#' This work is partially supported by NCI Grant 1R01CA213466-01.
dcem_cluster_mv <-
function(data,
meu,
sigma,
prior,
num_clusters,
iteration_count,
threshold,
num_data)
{
# Initialize the parameters
counter = 1
weights <- matrix(0,
nrow = num_clusters,
ncol = num_data,
byrow = TRUE)
# Get the machine tolerance for checking null values
    # in the likelihood matrix
tolerance <- .Machine$double.eps
init_attempt = 1
# Checking for empty partitions
# and re-attempt initialization.
    while (init_attempt <= 5) {
# Expectation
weights = expectation_mv(data,
weights,
meu,
sigma,
prior,
num_clusters,
tolerance)
part_size = apply(weights, 2, which.max)
      # Break if no empty partitions
      if (length(unique(part_size)) == num_clusters) {
        break
      }
      # Re-initialise meu and retry while attempts remain
      else if (init_attempt < 5) {
        print(paste("Retrying on empty partition, attempt: ", init_attempt))
        meu <- meu_mv(data, num_clusters)
        init_attempt = init_attempt + 1
      }
      # Inform the user if non-empty clusters could not be
      # found in 5 attempts.
      else {
        cat("The specified number of clusters:", num_clusters, "results in",
            num_clusters - length(unique(part_size)), "empty clusters.",
            "\nThe data may have fewer modalities. Please retry with a smaller number of clusters.\n")
        stop("Exiting...")
      }
}
# Repeat till convergence threshold or iteration whichever is earlier.
while (counter <= iteration_count) {
# Store the current meu
old_meu <- meu
# Initialize the weight matrix
weights <- matrix(0,
nrow = num_clusters,
ncol = num_data,
byrow = TRUE)
# Expectation
weights = expectation_mv(data,
weights,
meu,
sigma,
prior,
num_clusters,
tolerance)
# Maximisation
out = maximisation_mv(data, weights, meu, sigma, prior, num_clusters, num_data)
meu = out$meu
sigma = out$sigma
prior = out$prior
# Find the difference in the old and estimated meu values
meu_diff <- sqrt(sum((meu - old_meu) ^ 2))
# Check convergence
if (!is.na(meu_diff) && round(meu_diff, 4) < threshold) {
print((paste(
"Convergence at iteration number: ", counter
)))
break
}
# Check iterations
else if (counter == iteration_count) {
print("Iteration threshold crossed. Stoping the execution.")
break
}
counter = counter + 1
}
# Prepare output list
output = list(
prob = weights,
'meu' = meu,
'sigma' = sigma,
'prior' = prior,
'count' = counter,
'membership' = apply(weights, 2, which.max)
)
return(output)
}
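# Hedged illustration (not part of the DCEM package source): a minimal sketch of
# the E-step that expectation_mv() is assumed to perform for a Gaussian mixture --
# the responsibility of cluster k for point i is proportional to
# prior[k] * N(x_i | meu[k, ], sigma[[k]]), normalised across clusters.
# Note the package stores the transpose (clusters in rows, points in columns).
# Wrapped in `if (FALSE)` so it never runs on package load.
if (FALSE) {
  set.seed(1)
  toy <- rbind(mvtnorm::rmvnorm(50, mean = c(0, 0)),
               mvtnorm::rmvnorm(50, mean = c(4, 4)))
  meu_toy <- rbind(c(0, 0), c(4, 4))
  sigma_toy <- list(diag(2), diag(2))
  prior_toy <- c(0.5, 0.5)
  resp <- sapply(1:2, function(k)
    prior_toy[k] * mvtnorm::dmvnorm(toy, mean = meu_toy[k, ], sigma = sigma_toy[[k]]))
  resp <- resp / rowSums(resp)  # each row now sums to 1
  head(resp)
}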
|
/R/dcem_cluster_mv.R
|
no_license
|
hasankurban/DCEM
|
R
| false | false | 5,730 |
r
|
##Load functions and code
source("/Users/jrigdon/Box sync/Rigdon/Useful Functions/Tables.R")
source("/Users/jrigdon/Box sync/Rigdon/Useful Functions/Figures.R")
source("/Users/jrigdon/Box sync/Rigdon/Useful Functions/Functions.R")
source("/Users/jrigdon/Box sync/Rigdon/Sanjay/CHIVES/HEI/Code/NDSR_hei.R")
source("/Users/jrigdon/Box sync/Rigdon/Sanjay/CHIVES/AHEI/Code/NDSR_ahei.R")
foods = read.csv("/Users/jrigdon/Box sync/Rigdon/Sanjay/CHIVES/HEI/Code/foods.csv", header=TRUE)
##Average recalls within person, merge w/ diet, and summarize
perID = function(dta) {
  dta2 = aggregate(dta[, 3:16], by=list(dta$ID), function(x) mean(x, na.rm=TRUE)) #columns 3:16 for HEI
names(dta2)[1] = "ID"
dta2
}
perID2 = function(dta) {
  dta2 = aggregate(dta[, 3:14], by=list(dta$ID), function(x) mean(x, na.rm=TRUE)) #columns 3:14 for AHEI (perID uses 3:16 for HEI)
names(dta2)[1] = "ID"
dta2
}
##Read in NDSR data
bl4 = read.csv("/Users/jrigdon/Box sync/Rigdon/Sanjay/CHIVES/Data/Baseline_NDSR_4_2018-07-02.csv", header=TRUE)
bl9 = read.csv("/Users/jrigdon/Box sync/Rigdon/Sanjay/CHIVES/Data/Baseline_NDSR_9_2018-07-02.csv", header=TRUE)
s4 = read.csv("/Users/jrigdon/Box sync/Rigdon/Sanjay/CHIVES/Data/6_Month_NDSR_4_2018-07-02.csv", header=TRUE)
s9 = read.csv("/Users/jrigdon/Box sync/Rigdon/Sanjay/CHIVES/Data/6_Month_NDSR_9_2018-07-02.csv", header=TRUE)
##Calculate HEI scores
blh = perID(hei_ndsr(r4=bl4, r9=bl9))
blh$time = "a.Baseline"
m6h = perID(hei_ndsr(r4=s4, r9=s9))
m6h$time = "b.6M"
hei = rbind(blh, m6h)
##Calculate AHEI scores
bla = perID2(ahei_ndsr(r4=bl4, r9=bl9))
bla$time = "a.Baseline"
m6a = perID2(ahei_ndsr(r4=s4, r9=s9))
m6a$time = "b.6M"
ahei = rbind(bla, m6a)
##Merge with randomization groups
d1 = read.csv("/Users/jrigdon/Box sync/Rigdon/Sanjay/CHIVES/Data/DATA_2018-03-22.csv", header=TRUE)
d2 = read.csv("/Users/jrigdon/Box sync/Rigdon/Sanjay/CHIVES/Data/Groups_2018-03-22.csv", header=TRUE)
d3 = merge(d1, d2[, names(d2) %in% c("record_id", "randomize_group")], by="record_id")
hei2 = hei[hei$ID %in% d3$chives_id, ]
names(hei2)[1] = "chives_id"
ahei2 = ahei[ahei$ID %in% d3$chives_id, ]
names(ahei2)[1] = "chives_id"
hei3 = merge(hei2, d3[, names(d3) %in% c("chives_id", "randomize_group")], by="chives_id", all.x=TRUE)
ahei3 = merge(ahei2, d3[, names(d3) %in% c("chives_id", "randomize_group")], by="chives_id", all.x=TRUE)
##Re-order groups
table(hei3$randomize_group)
hei3$group = NA
hei3$group[hei3$randomize_group==4] = "a.FVweekly"
hei3$group[hei3$randomize_group==1] = "b.FVmonthly"
hei3$group[hei3$randomize_group==3] = "c.UNweekly"
hei3$group[hei3$randomize_group==2] = "d.UNmonthly"
table(hei3$group, hei3$randomize_group, exclude=NULL)
table(ahei3$randomize_group)
ahei3$group = NA
ahei3$group[ahei3$randomize_group==4] = "a.FVweekly"
ahei3$group[ahei3$randomize_group==1] = "b.FVmonthly"
ahei3$group[ahei3$randomize_group==3] = "c.UNweekly"
ahei3$group[ahei3$randomize_group==2] = "d.UNmonthly"
table(ahei3$group, ahei3$randomize_group, exclude=NULL)
##Save data
write.csv(hei3, "/Users/jrigdon/Box sync/Rigdon/Sanjay/CHIVES/Data/HEI_2018-07-20.csv", row.names=FALSE)
write.csv(ahei3, "/Users/jrigdon/Box sync/Rigdon/Sanjay/CHIVES/Data/AHEI_2018-07-20.csv", row.names=FALSE)
##Using data loaded in above and date ranges, calculate by week (1 and 4) scores for HEI and AHEI
##HEI
blh2 = hei_ndsr(r4=bl4, r9=bl9)
blh2$date = as.Date(blh2[, names(blh2)=="date"], format="%m/%d/%y")
blh2$week = "week4"
blh2$week[substr(blh2$date, 9, 9)=="0"] = "week1"
blh2$time = "a.BL"
m6h2 = hei_ndsr(r4=s4, r9=s9)
m6h2$date = as.Date(m6h2[, names(m6h2)=="date"], format="%m/%d/%y")
m6h2$week = "week4"
m6h2$week[substr(m6h2$date, 9, 9)=="0"] = "week1"
m6h2$time = "b.6m"
##Take averages within time and week
hei2 = rbind(blh2, m6h2)
hei2$unique = paste(hei2$ID, paste(hei2$time, hei2$week, sep="_"), sep="_")
hei3 = aggregate(hei2[, 3:16], by=list(hei2$unique), function(x) mean(x, na.rm=TRUE))
##Save for studying consumption cycle
hei3$chives_id = substr(hei3$Group.1, 1, 6)
hei3$time = substr(hei3$Group.1, 8, 11)
hei3$week = substr(hei3$Group.1, 13, 17)
hei4 = hei3[, c(16:18, 2:15)]
hei5 = hei4[hei4$chives_id %in% d3$chives_id, ]
write.csv(hei5, "/Users/jrigdon/Box sync/Rigdon/Sanjay/CHIVES/Data/HEI_weekly_2018-08-22.csv", row.names=FALSE)
##AHEI
bla2 = ahei_ndsr(r4=bl4, r9=bl9)
bla2$date = as.Date(bla2[, names(bla2)=="date"], format="%m/%d/%y")
bla2$week = "week4"
bla2$week[substr(bla2$date, 9, 9)=="0"] = "week1"
bla2$time = "a.BL"
m6a2 = ahei_ndsr(r4=s4, r9=s9)
m6a2$date = as.Date(m6a2[, names(m6a2)=="date"], format="%m/%d/%y")
m6a2$week = "week4"
m6a2$week[substr(m6a2$date, 9, 9)=="0"] = "week1"
m6a2$time = "b.6m"
##Take averages within time and week
ahei2 = rbind(bla2, m6a2)
ahei2$unique = paste(ahei2$ID, paste(ahei2$time, ahei2$week, sep="_"), sep="_")
ahei3 = aggregate(ahei2[, 3:14], by=list(ahei2$unique), function(x) mean(x, na.rm=TRUE))
##Save for studying consumption cycle
ahei3$chives_id = substr(ahei3$Group.1, 1, 6)
ahei3$time = substr(ahei3$Group.1, 8, 11)
ahei3$week = substr(ahei3$Group.1, 13, 17)
ahei4 = ahei3[, c(14:16, 2:13)]
ahei5 = ahei4[ahei4$chives_id %in% d3$chives_id, ]
write.csv(ahei5, "/Users/jrigdon/Box sync/Rigdon/Sanjay/CHIVES/Data/AHEI_weekly_2018-08-22.csv", row.names=FALSE)
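##NOTE: the lines below repeat the AHEI averaging from above and reference `bl`
##and `s` objects that are never created in this script (leftover exploratory
##code; `bl` and `s` were presumably built interactively from the NDSR files)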
bla = perID2(ahei_ndsr(r4=bl4, r9=bl9))
bla$time = "a.Baseline"
m6a = perID2(ahei_ndsr(r4=s4, r9=s9))
m6a$time = "b.6M"
ahei = rbind(bla, m6a)
bl$date = as.Date(bl[, names(bl)=="Date.of.Intake"], format="%m/%d/%y")
s$date = as.Date(s[, names(s)=="Date.of.Intake"], format="%m/%d/%y")
bl$week = "week4"
bl$week[substr(bl$date, 9, 9)=="0"] = "week1"
table(bl$week, exclude=NULL)
s$week = "week4"
s$week[substr(s$date, 9, 9)=="0"] = "week1"
table(s$week, exclude=NULL)
|
/Indices.R
|
no_license
|
joerigdon/CHIVES
|
R
| false | false | 5,746 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R
\docType{class}
\name{intsp-class}
\alias{intsp-class}
\alias{intsp}
\title{An interval extension of a SpatialPointsDataFrame}
\description{
An interval extension of a SpatialPointsDataFrame
}
\section{Slots}{
\describe{
\item{\code{interval}}{A matrix of two columns representing the lower and upper
endpoints of an interval.}
}}
|
/fuzzedpackages/intkrige/man/intsp-class.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false | true | 423 |
rd
|
#' Fitting Count Regression Models with Latent Covariates
#'
#' This function is the main function of the package and can be used to estimate
#' latent variable count regression models in one or multiple group(s).
#'
#' @param forml An object of class \code{\link[stats]{formula}} (or one
#' that can be coerced to that class): a symbolic description of the model to
#' be fitted. The details of model specification are given under Details.
#' @param data A data frame containing all variables specified in \code{forml}
#' and/or indicators of the latent variables specified in \code{lv} (if
#' applicable).
#' @param lv A named list, where names of elements represent the names of the
#' latent variables and each element consists of a character vector containing
#' variable names of indicators for the respective latent variable, e.g.,
#' \code{list(eta1 = c("z1", "z2", "z3"))}.
#' @param group A group variable. If specified, the regression model specified
#' in \code{forml} is estimated as multi-group model (i.e., within each group).
#' @param family A character indicating the family of the generalized linear
#' model to be estimated. At the moment, \code{"poisson"} (for Poisson
#' regression; default) or \code{"nbinom"} (for negative binomial regression)
#' are available.
#' @param silent Logical. Should information about the estimation process
#' be suppressed? (Defaults to FALSE)
#' @param se Logical. Should standard errors be computed? Defaults to TRUE.
#' (Can take a while for complex models)
#' @param creg_options optional list of additional options for the estimation
#' procedure
#' @return An object of type \code{lavacreg}. Use \code{summary(object)} to
#' print results containing parameter estimates and their standard errors.
#' @examples
#' fit <- countreg(forml = "dv ~ z11", data = example01, family = "poisson")
#' summary(fit)
#' \donttest{
#' fit <- countreg(
#' forml = "dv ~ eta1 + z11 + z21",
#' lv = list(eta1 = c("z41", "z42", "z43")),
#' group = "treat",
#' data = example01,
#' family = "poisson"
#' )
#' summary(fit)
#' }
#'
#' @importFrom methods new
#' @export
countreg <- function(forml,
data,
lv = NULL,
group = NULL,
family = "poisson",
silent = FALSE,
se = TRUE,
creg_options = NULL) {
# Initialize new lavacreg object to store and process information
object <- new("lavacreg")
# Create, process and save function input
object@input <- creg_create_input(
forml = forml,
lv = lv,
group = group,
data = data,
family = family,
silent = silent,
se = se,
creg_options = creg_options
)
# Create datalist
    # i.e., split the data into group-conditional datasets of the DV and covariates
object@dataobj <- creg_create_datalist(object, data)
# Start estimation process
    # TODO: separate model and standard error estimation
object <- creg_fit_model(object)
# Return all information back to the user
return(object)
}
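# Hedged usage sketch (not part of the package source): the documented
# family = "nbinom" option combined with a latent covariate, reusing the
# `example01` data and indicator names from the roxygen examples above.
# Wrapped in `if (FALSE)` so it never runs on package load.
if (FALSE) {
  fit_nb <- countreg(
    forml = "dv ~ eta1 + z11",
    lv = list(eta1 = c("z41", "z42", "z43")),
    data = example01,
    family = "nbinom"
  )
  summary(fit_nb)
}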
|
/R/countreg.R
|
no_license
|
cran/lavacreg
|
R
| false | false | 3,142 |
r
|
#!/usr/bin/env Rscript
setwd('/home/sabeiro/lav/media/')
##install.packages(c('textcat','svglite'))
source('src/R/graphEnv.R')
library('svglite')
require(stats)
require(dplyr)
library(grid)
library(sqldf)
library(rjson)
if(FALSE){
fs <- read.csv('raw/audienceNielsen.csv',stringsAsFactor=FALSE)
fMap <- read.csv('raw/audienceNielsenMap.csv',stringsAsFactor=FALSE)
fs <- fs[,c("Campaign.Name","Demo.Segment","Computer","Mobile","Digital..C.M.")]
fs <- melt(fs,id.vars=c("Campaign.Name","Demo.Segment"))
colnames(fs) <- c("Campaign.Name","Demo.Segment","Platform","unique")
fs$aud <- fs$Campaign.Name
unique(fs$aud)
unique(fs$Campaign.Name)
fs$source <- "interest"
for(i in 1:nrow(fMap)){
fs[fs$Campaign.Name == fMap$name1[i],"aud"] <- fMap$name2[i]
fs[fs$Campaign.Name == fMap$name1[i],"source"] <- fMap$group[i]
}
fs <- fs[!grepl("Total",fs$Demo.Segment),]
fs$seg <- fs$aud %>% sub("pub ","",.) %>% gsub("[[:punct:]]"," ",.) %>% sub(" v","",.) %>% sub(" z","",.) %>% sub(" e","",.) %>% sub(" 1st","",.) %>% sub(" beha","",.) %>% sub("BZ SE ","",.) %>% sub("BZ SD ","",.) %>% sub("BZ SU ","",.) %>% sub("Pub ","",.)
uniT <- fs#ddply(fs,.(aud,seg,Demo.Segment,source),summarise,unique=sum(Total.Digital,na.rm=T))
write.csv(uniT,paste("out/audComp","Nielsen",".csv",sep=""))
}
if(TRUE){
## fs <- read.csv('raw/audCompAll.csv',stringsAsFactor=FALSE)[,c("Campaign.Name","Demo.Segment","Placement","Platform.Device","Unique.Audience")]
## fs1 <- read.csv('raw/audCompAll3.csv',stringsAsFactor=FALSE)[,c("Campaign.Name","Demo.Segment","Placement","Platform.Device","Unique.Audience")]
## fs1 <- fs1[fs1$Campaign.Name=="post z",]
## fs <- rbind(fs,fs1)
fs <- read.csv('raw/audienceNielsen2.csv',stringsAsFactor=FALSE)[,c("Campaign.Name","Demo.Segment","Placement","Platform.Device","Unique.Audience")]
fs = fs[fs$Platform.Device %in% c("Computer","Mobile","Digital (C/M)"),]
fs$Unique.Audience = fs$Unique.Audience %>% gsub(",","",.) %>% as.numeric(.)
fs$Platform <- fs$Platform.Device
## fs$Placement[fs$Placement=="test bk 1 dinamic"] <- "mediamond_plc0001"
#fs$Platform[fs$Platform=="Digital (C/M)"] <- "Total Digital"
## fs <- fs[fs1$Country=="ITALY",]
fs <- ddply(fs,.(Campaign.Name,Placement,Demo.Segment,Platform),summarise,unique=sum(Unique.Audience,na.rm=T))
fMap <- read.csv('raw/audCampListNie.csv',stringsAsFactor=FALSE)
cMap <- ddply(fMap,.(source,camp),summarise,imps=1)
fs$source = "rest"
for(i in 1:nrow(cMap)){fs[fs$Campaign.Name == cMap$camp[i],"source"] <- cMap$source[i]}
cMap <- ddply(fMap,.(source,pc,camp),summarise,name=head(name,1))
fs$aud = "rest"
for(i in 1:nrow(cMap)){
#set <- fs$Placement == cMap$pc[i] & fs$source == cMap$source[i]
set <- fs$Placement == cMap$pc[i] & fs$Campaign.Name == cMap$camp[i]
fs[set,"aud"] <- cMap$name[i]
}
table(fs$aud)
fs$aud <- gsub("pub ","",fs$aud) %>% gsub("Pub ","",.)
fs$source[grepl("beha",fs$aud)] = "zalando beha"
fs1 <- read.csv('raw/audCompBanzai.csv',stringsAsFactor=FALSE)
fs1 <- ddply(fs1,.(Campaign.Name,Placement,Demo.Segment,Platform),summarise,unique=sum(Unique.Audience,na.rm=T))
## fs1 <- fs1[fs1$Country=="ITALY",]
fs1$aud <- fs1$Placement
fs1$source <- "banzai"
fs1$aud <- fs1$aud %>% gsub("BZ - ","",.) %>% gsub("ALL","",.) %>% gsub("SD","F",.) %>% gsub("SU","M",.) %>% gsub("SE ","",.) %>% gsub("\\_","",.)
fs <- rbind(fs,fs1)
fs <- fs[!grepl("Total",fs$Demo.Segment),]
## fs <- fs[!(fs$Site=="All"),]
fs <- fs[!fs$Placement=="",]
##
fs$seg <- fs$aud %>% sub("pub ","",.) %>% gsub("[[:punct:]]"," ",.) %>% sub(" v","",.) %>% sub(" z","",.) %>% sub(" e","",.) %>% sub(" 1st","",.) %>% sub(" beha","",.) %>% sub("BZ SE ","",.) %>% sub("BZ SD ","",.) %>% sub("BZ SU ","",.) %>% sub("Pub ","",.)
segMap <- read.csv('raw/audCompSegMap.csv',row.names=1)
segMap <- segMap[!grepl("Total",rownames(segMap)),]
segMap2 <- read.csv('raw/audCompSegMap2.csv',row.names=1)
segMap2 <- ifelse(segMap2==1,TRUE,FALSE)
audL <- colnames(segMap2) %>% gsub("^X","",.) %>% gsub("[[:punct:]]"," ",.)
segN <- colnames(segMap) %>% gsub("pub ","",.) %>% gsub("X","",.) %>% gsub("[[:punct:]]"," ",.) %>% gsub(" $","",.)
audM <- audL %>% sub("pub ","",.) %>% gsub("[[:punct:]]"," ",.) %>% sub(" v","",.) %>% sub(" z","",.) %>% sub(" 1st","",.) %>% sub(" beha","",.) %>% sub("BZ SE ","",.) %>% sub("BZ SD ","",.) %>% sub("BZ SU ","",.)
audSeg <- match(audM,segN)
fs[grepl("Cultura",fs$aud),"source"] = "first i-t 2"
fs[grepl("Ecologiaambiente",fs$aud),"source"] = "first i-t 2"
fs[grepl("Scienza",fs$aud),"source"] = "first i-t 2"
fs[grepl("Sciure",fs$aud),"source"] = "first i-t 2"
fs[grepl("Musica",fs$aud),"source"] = "first i-t 2"
fs[grepl("Dinamici",fs$aud),"source"] = "first i-t 2"
uniT <- fs
write.csv(uniT,paste("out/audComp","PostVal",".csv",sep=""))
ab <- read.csv("raw/audCompBenchmarkGraph.csv")
ab$percent <- ab$percent/100
ab$target <- ab$target %>% gsub("[[:punct:]]"," ",.)
ab$device <- ab$device %>% gsub(" Only","",.) %>% gsub("Total ","",.)
}
##----------------------benchmark-----------------------------
## mMap <- melt(read.csv('raw/audCompSegMap.csv'),id.vars="X")
## mMap$variable <- mMap$variable %>% gsub("X","",.) %>% gsub("[[:punct:]]"," ",.) %>% gsub(" $","",.)
## tmp <- sqldf("SELECT * FROM fs AS f LEFT JOIN mMap AS s ON (t.Var2 = s.seg) AND (t.source = s.source)")
devL <- c("Digital","Computer","Mobile")
sourceL <- unique(fs$source)
meltTarget <- NULL
reachTarget <- NULL
sour = "none"
sour <- sourceL[8]
for(sour in c(sourceL[!grepl("i-t",sourceL)],"all","none")){
inTarget <- NULL
tReach <- NULL
d <- devL[1]
for(d in devL){
print(d)
set <- TRUE
if (sour=="all"){
set <- grepl(d,uniT$Platform) & (uniT$source %in% c("vodafone s-d","zalando s-d","first s-d"))
} else if (sour=="none"){
set <- grepl(d,uniT$Platform)
} else {
set <- grepl(d,uniT$Platform) & uniT$source==sour
}
uniD <- uniT[set,]
inTarget1 <- NULL
tReach1 <- NULL
i <- 4
for(i in 1:length(audL)){
set2 <- segMap2[,audL[i] == colnames(segMap2) %>% gsub("X","",.) %>% gsub("[[:punct:]]"," ",.)]
set3 <- FALSE
for(s in names(set2[set2])){set3 <- set3 | grepl(s,uniD$seg)}
if(sour=="none"){set3 <- TRUE}
uniD1 <- uniD[set3,]
setC <- unique(uniD1[,"seg"])
sel <- segMap[,audL[i] == segN]
selT <- rep(sel,length(setC))
inTarget1[i] <- NA
if(!any(audL[i] == segN)){next}
inTarget1[i] <- sum(uniD1[selT,"unique"],na.rm=T)/sum(uniD1[,"unique"],na.rm=T)
tReach1[i] <- sum(uniD1[,"unique"],na.rm=T)
}
inTarget <- rbind(inTarget,inTarget1)
tReach <- rbind(tReach,tReach1)
}
colnames(inTarget) <- audL
rownames(inTarget) <- devL
colnames(tReach) <- audL
rownames(tReach) <- devL
meltTarget <- rbind(meltTarget,cbind(melt(inTarget),source=sour))
reachTarget <- rbind(reachTarget,cbind(melt(tReach),source=sour))
}
meltTarget$reach <- reachTarget$value
breakN = unique(c(0,quantile(meltTarget$reach,seq(1,5)/5,na.rm=T)))
meltTarget$accuracy <- as.numeric(cut(meltTarget$reach,breaks=breakN,labels=1:(length(breakN)-1)))
write.csv(meltTarget,"out/audCompInTarget.csv")
##write.csv(as.data.frame.matrix(xtabs("value ~ Var2 + Var1",data=meltTarget)),"out/audCompInTarget.csv",sep=",")
write.csv(fs,"out/audCompAll.csv")
|
/R/report/audienceInTargetLoad.R
|
no_license
|
sabeiro/screw_drawer
|
R
| false | false | 7,757 |
r
|
#!/usr/bin/env Rscript
setwd('/home/sabeiro/lav/media/')
##install.packages(c('textcat','svglite'))
source('src/R/graphEnv.R')
library('svglite')
require(stats)
require(dplyr)
library(grid)
library(sqldf)
library(rjson)
if(FALSE){
fs <- read.csv('raw/audienceNielsen.csv',stringsAsFactor=FALSE)
fMap <- read.csv('raw/audienceNielsenMap.csv',stringsAsFactor=FALSE)
fs <- fs[,c("Campaign.Name","Demo.Segment","Computer","Mobile","Digital..C.M.")]
fs <- melt(fs,id.vars=c("Campaign.Name","Demo.Segment"))
colnames(fs) <- c("Campaign.Name","Demo.Segment","Platform","unique")
fs$aud <- fs$Campaign.Name
unique(fs$aud)
unique(fs$Campaign.Name)
fs$source <- "interest"
for(i in 1:nrow(fMap)){
fs[fs$Campaign.Name == fMap$name1[i],"aud"] <- fMap$name2[i]
fs[fs$Campaign.Name == fMap$name1[i],"source"] <- fMap$group[i]
}
fs <- fs[!grepl("Total",fs$Demo.Segment),]
fs$seg <- fs$aud %>% sub("pub ","",.) %>% gsub("[[:punct:]]"," ",.) %>% sub(" v","",.) %>% sub(" z","",.) %>% sub(" e","",.) %>% sub(" 1st","",.) %>% sub(" beha","",.) %>% sub("BZ SE ","",.) %>% sub("BZ SD ","",.) %>% sub("BZ SU ","",.) %>% sub("Pub ","",.)
uniT <- fs#ddply(fs,.(aud,seg,Demo.Segment,source),summarise,unique=sum(Total.Digital,na.rm=T))
write.csv(uniT,paste("out/audComp","Nielsen",".csv",sep=""))
}
if(TRUE){
## fs <- read.csv('raw/audCompAll.csv',stringsAsFactor=FALSE)[,c("Campaign.Name","Demo.Segment","Placement","Platform.Device","Unique.Audience")]
## fs1 <- read.csv('raw/audCompAll3.csv',stringsAsFactor=FALSE)[,c("Campaign.Name","Demo.Segment","Placement","Platform.Device","Unique.Audience")]
## fs1 <- fs1[fs1$Campaign.Name=="post z",]
## fs <- rbind(fs,fs1)
fs <- read.csv('raw/audienceNielsen2.csv',stringsAsFactor=FALSE)[,c("Campaign.Name","Demo.Segment","Placement","Platform.Device","Unique.Audience")]
fs = fs[fs$Platform.Device %in% c("Computer","Mobile","Digital (C/M)"),]
fs$Unique.Audience = fs$Unique.Audience %>% gsub(",","",.) %>% as.numeric(.)
fs$Platform <- fs$Platform.Device
## fs$Placement[fs$Placement=="test bk 1 dinamic"] <- "mediamond_plc0001"
#fs$Platform[fs$Platform=="Digital (C/M)"] <- "Total Digital"
## fs <- fs[fs1$Country=="ITALY",]
fs <- ddply(fs,.(Campaign.Name,Placement,Demo.Segment,Platform),summarise,unique=sum(Unique.Audience,na.rm=T))
fMap <- read.csv('raw/audCampListNie.csv',stringsAsFactor=FALSE)
cMap <- ddply(fMap,.(source,camp),summarise,imps=1)
fs$source = "rest"
for(i in 1:nrow(cMap)){fs[fs$Campaign.Name == cMap$camp[i],"source"] <- cMap$source[i]}
cMap <- ddply(fMap,.(source,pc,camp),summarise,name=head(name,1))
fs$aud = "rest"
for(i in 1:nrow(cMap)){
#set <- fs$Placement == cMap$pc[i] & fs$source == cMap$source[i]
set <- fs$Placement == cMap$pc[i] & fs$Campaign.Name == cMap$camp[i]
fs[set,"aud"] <- cMap$name[i]
}
table(fs$aud)
fs$aud <- gsub("pub ","",fs$aud) %>% gsub("Pub ","",.)
fs$source[grepl("beha",fs$aud)] = "zalando beha"
fs1 <- read.csv('raw/audCompBanzai.csv',stringsAsFactor=FALSE)
fs1 <- ddply(fs1,.(Campaign.Name,Placement,Demo.Segment,Platform),summarise,unique=sum(Unique.Audience,na.rm=T))
## fs1 <- fs1[fs1$Country=="ITALY",]
fs1$aud <- fs1$Placement
fs1$source <- "banzai"
fs1$aud <- fs1$aud %>% gsub("BZ - ","",.) %>% gsub("ALL","",.) %>% gsub("SD","F",.) %>% gsub("SU","M",.) %>% gsub("SE ","",.) %>% gsub("\\_","",.)
fs <- rbind(fs,fs1)
fs <- fs[!grepl("Total",fs$Demo.Segment),]
## fs <- fs[!(fs$Site=="All"),]
fs <- fs[!fs$Placement=="",]
##
fs$seg <- fs$aud %>% sub("pub ","",.) %>% gsub("[[:punct:]]"," ",.) %>% sub(" v","",.) %>% sub(" z","",.) %>% sub(" e","",.) %>% sub(" 1st","",.) %>% sub(" beha","",.) %>% sub("BZ SE ","",.) %>% sub("BZ SD ","",.) %>% sub("BZ SU ","",.) %>% sub("Pub ","",.)
segMap <- read.csv('raw/audCompSegMap.csv',row.names=1)
segMap <- segMap[!grepl("Total",rownames(segMap)),]
segMap2 <- read.csv('raw/audCompSegMap2.csv',row.names=1)
segMap2 <- ifelse(segMap2==1,TRUE,FALSE)
audL <- colnames(segMap2) %>% gsub("^X","",.) %>% gsub("[[:punct:]]"," ",.)
segN <- colnames(segMap) %>% gsub("pub ","",.) %>% gsub("X","",.) %>% gsub("[[:punct:]]"," ",.) %>% gsub(" $","",.)
audM <- audL %>% sub("pub ","",.) %>% gsub("[[:punct:]]"," ",.) %>% sub(" v","",.) %>% sub(" z","",.) %>% sub(" 1st","",.) %>% sub(" beha","",.) %>% sub("BZ SE ","",.) %>% sub("BZ SD ","",.) %>% sub("BZ SU ","",.)
audSeg <- match(audM,segN)
fs[grepl("Cultura",fs$aud),"source"] = "first i-t 2"
fs[grepl("Ecologiaambiente",fs$aud),"source"] = "first i-t 2"
fs[grepl("Scienza",fs$aud),"source"] = "first i-t 2"
fs[grepl("Sciure",fs$aud),"source"] = "first i-t 2"
fs[grepl("Musica",fs$aud),"source"] = "first i-t 2"
fs[grepl("Dinamici",fs$aud),"source"] = "first i-t 2"
uniT <- fs
write.csv(uniT,paste("out/audComp","PostVal",".csv",sep=""))
ab <- read.csv("raw/audCompBenchmarkGraph.csv")
ab$percent <- ab$percent/100
ab$target <- ab$target %>% gsub("[[:punct:]]"," ",.)
ab$device <- ab$device %>% gsub(" Only","",.) %>% gsub("Total ","",.)
}
##----------------------benchmark-----------------------------
## mMap <- melt(read.csv('raw/audCompSegMap.csv'),id.vars="X")
## mMap$variable <- mMap$variable %>% gsub("X","",.) %>% gsub("[[:punct:]]"," ",.) %>% gsub(" $","",.)
## tmp <- sqldf("SELECT * FROM fs AS f LEFT JOIN mMap AS s ON (t.Var2 = s.seg) AND (t.source = s.source)")
devL <- c("Digital","Computer","Mobile")
sourceL <- unique(fs$source)
meltTarget <- NULL
reachTarget <- NULL
sour = "none"
sour <- sourceL[8]
for(sour in c(sourceL[!grepl("i-t",sourceL)],"all","none")){
inTarget <- NULL
tReach <- NULL
d <- devL[1]
for(d in devL){
print(d)
set <- TRUE
if (sour=="all"){
set <- grepl(d,uniT$Platform) & (uniT$source %in% c("vodafone s-d","zalando s-d","first s-d"))
} else if (sour=="none"){
set <- grepl(d,uniT$Platform)
} else {
set <- grepl(d,uniT$Platform) & uniT$source==sour
}
uniD <- uniT[set,]
inTarget1 <- NULL
tReach1 <- NULL
i <- 4
for(i in 1:length(audL)){
set2 <- segMap2[,audL[i] == colnames(segMap2) %>% gsub("X","",.) %>% gsub("[[:punct:]]"," ",.)]
set3 <- FALSE
for(s in names(set2[set2])){set3 <- set3 | grepl(s,uniD$seg)}
if(sour=="none"){set3 <- TRUE}
uniD1 <- uniD[set3,]
setC <- unique(uniD1[,"seg"])
sel <- segMap[,audL[i] == segN]
selT <- rep(sel,length(setC))
inTarget1[i] <- NA
if(!any(audL[i] == segN)){next}
inTarget1[i] <- sum(uniD1[selT,"unique"],na.rm=T)/sum(uniD1[,"unique"],na.rm=T)
tReach1[i] <- sum(uniD1[,"unique"],na.rm=T)
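      # in-target share = unique audience counted in the demo cells that define
      # this target / total unique audience reached; tReach1 keeps the total
      # reach so it can be used later to grade how solid each estimate is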
}
inTarget <- rbind(inTarget,inTarget1)
tReach <- rbind(tReach,tReach1)
}
colnames(inTarget) <- audL
rownames(inTarget) <- devL
colnames(tReach) <- audL
rownames(tReach) <- devL
meltTarget <- rbind(meltTarget,cbind(melt(inTarget),source=sour))
reachTarget <- rbind(reachTarget,cbind(melt(tReach),source=sour))
}
meltTarget$reach <- reachTarget$value
breakN = unique(c(0,quantile(meltTarget$reach,seq(1,5)/5,na.rm=T)))
meltTarget$accuracy <- as.numeric(cut(meltTarget$reach,breaks=breakN,labels=1:(length(breakN)-1)))
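# accuracy = reach quintile (1-5) of the unique-audience base behind each
# in-target estimate, presumably so estimates backed by more panel reach can be
# treated as more reliable downstream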
write.csv(meltTarget,"out/audCompInTarget.csv")
##write.csv(as.data.frame.matrix(xtabs("value ~ Var2 + Var1",data=meltTarget)),"out/audCompInTarget.csv",sep=",")
write.csv(fs,"out/audCompAll.csv")
|
#' Calculates log counts per million reads (CPM)
#'
#' @param X raw data matrix
#' @param const.mult a constant to multiply with
#' @param prior.count prior count to be added to the zeroes
#'
#' @return a normalized, log-transformed data matrix (log CPM)
#' @importFrom edgeR calcNormFactors
calculateCPM <- function(X, const.mult, prior.count){
norm.factors = edgeR::calcNormFactors(X)*colSums(X)
cpm = X %*% diag(1/(norm.factors)) * const.mult
colnames(cpm) = colnames(X)
log(cpm + prior.count)
}
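# Illustrative call (object names and values are examples, not part of the package):
#   logcpm <- calculateCPM(counts, const.mult = 1e6, prior.count = 1)
# const.mult = 1e6 yields counts per million; prior.count keeps log() finite for zero counts.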
|
/R/calculateCPM.R
|
no_license
|
CenterForStatistics-UGent/SPsimSeq
|
R
| false | false | 510 |
r
|
library(plyr)
# Change to your working directory for the course project
setwd("C:/Rproject/Emmanuel/R_WD_Coursera/CourseProject3")
# Download dataset for project
download.file('https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip', dest="projectdataset.zip", mode="wb")
# unzip dataset
# unzip ("projectdataset.zip", exdir = ".")
# Step 1. of the course project
# Merge the training and test sets to create one data set
# Reading training tables:
x_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
# Reading test tables:
x_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
# Create 'x' dataset
x_data <- rbind(x_train, x_test)
# create 'y' dataset
y_data <- rbind(y_train, y_test)
# create 'subject' dataset
subject_data <- rbind(subject_train, subject_test)
# Step 2
# Extract only the measurements on the mean and standard deviation for each measurement
features <- read.table('./UCI HAR Dataset/features.txt')
# Get only the columns with mean() or std() in their names
Needed_mean_and_std_features <- grep("-(mean|std)\\(\\)", features[, 2])
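# (the pattern keeps only features whose names contain "-mean()" or "-std()",
#  i.e. the mean and standard deviation measurements)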
# Subset the desired columns
x_data <- x_data[, Needed_mean_and_std_features]
# correct the column names
names(x_data) <- features[Needed_mean_and_std_features, 2]
# Step 3
# Use descriptive activity names to name the activities in the data set
activityLabels = read.table('./UCI HAR Dataset/activity_labels.txt')
# update values with correct activity names
y_data[, 1] <- activityLabels[y_data[, 1], 2]
# update column name
names(y_data) <- "activity"
# Step 4
# Appropriately label the data set with descriptive variable names
# update column name
names(subject_data) <- "subject"
# bind all the data into a single dataset
all_dataset <- cbind(x_data, y_data, subject_data)
# Step 5
# Create a second, independent tidy data set with the average of each variable
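# columns 1:66 are the mean()/std() measurements kept in step 2 (66 of them in
# this dataset); subject and activity serve as the grouping variables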
tidy_data_avg <- ddply(all_dataset, .(subject, activity), function(x) colMeans(x[, 1:66]))
write.table(tidy_data_avg, "tidy_data_avg.txt", row.name=FALSE)
|
/run_analysis.R
|
no_license
|
eedekobi/datasciencecoursera
|
R
| false | false | 2,419 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pet.R
\docType{class}
\name{Pet}
\alias{Pet}
\title{Pet}
\format{
An \code{R6Class} generator object
}
\description{
Pet Class
}
\details{
OpenAPI Petstore
This is a sample server Petstore server. For this sample, you can use the api key `special-key` to test the authorization filters.
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{id}}{integer [optional]}
\item{\code{category}}{\link{Category} [optional]}
\item{\code{name}}{character}
\item{\code{photoUrls}}{list( character )}
\item{\code{tags}}{list( \link{Tag} ) [optional]}
\item{\code{status}}{character [optional]}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-Pet-new}{\code{Pet$new()}}
\item \href{#method-Pet-toJSON}{\code{Pet$toJSON()}}
\item \href{#method-Pet-fromJSON}{\code{Pet$fromJSON()}}
\item \href{#method-Pet-toJSONString}{\code{Pet$toJSONString()}}
\item \href{#method-Pet-fromJSONString}{\code{Pet$fromJSONString()}}
\item \href{#method-Pet-validateJSON}{\code{Pet$validateJSON()}}
\item \href{#method-Pet-toString}{\code{Pet$toString()}}
\item \href{#method-Pet-clone}{\code{Pet$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-Pet-new"></a>}}
\if{latex}{\out{\hypertarget{method-Pet-new}{}}}
\subsection{Method \code{new()}}{
Initialize a new Pet class.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Pet$new(
name,
photoUrls,
id = NULL,
category = NULL,
tags = NULL,
status = NULL,
...
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{name}}{name}
\item{\code{photoUrls}}{photoUrls}
\item{\code{id}}{id}
\item{\code{category}}{category}
\item{\code{tags}}{tags}
\item{\code{status}}{pet status in the store}
\item{\code{...}}{Other optional arguments.}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-Pet-toJSON"></a>}}
\if{latex}{\out{\hypertarget{method-Pet-toJSON}{}}}
\subsection{Method \code{toJSON()}}{
To JSON String
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Pet$toJSON()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
Pet in JSON format
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-Pet-fromJSON"></a>}}
\if{latex}{\out{\hypertarget{method-Pet-fromJSON}{}}}
\subsection{Method \code{fromJSON()}}{
Deserialize JSON string into an instance of Pet
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Pet$fromJSON(input_json)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{input_json}}{the JSON input}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
the instance of Pet
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-Pet-toJSONString"></a>}}
\if{latex}{\out{\hypertarget{method-Pet-toJSONString}{}}}
\subsection{Method \code{toJSONString()}}{
To JSON String
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Pet$toJSONString()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
Pet in JSON format
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-Pet-fromJSONString"></a>}}
\if{latex}{\out{\hypertarget{method-Pet-fromJSONString}{}}}
\subsection{Method \code{fromJSONString()}}{
Deserialize JSON string into an instance of Pet
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Pet$fromJSONString(input_json)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{input_json}}{the JSON input}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
the instance of Pet
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-Pet-validateJSON"></a>}}
\if{latex}{\out{\hypertarget{method-Pet-validateJSON}{}}}
\subsection{Method \code{validateJSON()}}{
Validate JSON input with respect to Pet and throw an exception if invalid
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Pet$validateJSON(input)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{input}}{the JSON input}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-Pet-toString"></a>}}
\if{latex}{\out{\hypertarget{method-Pet-toString}{}}}
\subsection{Method \code{toString()}}{
To string (JSON format)
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Pet$toString()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
String representation of Pet
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-Pet-clone"></a>}}
\if{latex}{\out{\hypertarget{method-Pet-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Pet$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
/samples/client/petstore/R-httr2/man/Pet.Rd
|
permissive
|
OpenAPITools/openapi-generator
|
R
| false | true | 5,155 |
rd
|
#plot2.R
#copy the files summarySCC_PM25.rds and Source_Classification_Code.rds
#in your work directory
#set your work directory with setwd
#setwd("E:/__DScienceJohnHopkins/ExploratoryDataAnalysis/tema2")
#load the data in R
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
library(dplyr)
#prepare data for graphic visualisation
#keep just PM2.5 emissions from Baltimore (fips == "24510")
#break the dataset down into groups (by year)
#compute the sum of emissions for each group
#creates a new column named txt, which it contains the value
#from column Total converted to string
dataG <- NEI %>%
filter(fips == "24510") %>%
group_by(year) %>%
summarise(Total=sum(Emissions, na.rm=TRUE)) %>%
mutate(txt=sprintf("%.2f", round(Total,2)))
##open png device
##create file plot2.png
png(width=480, height=480, filename="plot2.png")
#set the margins the graph
par(mar=c(5,5,4,1))
#create and save a barplot
bp<-barplot(dataG$Total,
col=c("orange","blue", "red", "green"),
xlab="Year",
ylab=expression("Emissions of PM"[2.5]* " (tons)"),
cex.main=1.2,
cex.lab=1.2,
cex.axis=1.2)
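# bp holds the bar midpoints returned by barplot(); they are reused below to
# position the value labels and the year ticks on the x-axis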
#put the text from column dataG$txt on each bar from barplot
text(bp, dataG$Total, labels=dataG$txt,pos=3,xpd=NA)
#write text into the top margin of the graph
mtext(side=3,
expression("Total emissions from PM"[2.5]* " in Baltimore City (1999-2008)"),
line=2,
cex=1.2)
#creates axis x and put labels on it
axis(1, at=bp, labels=dataG$year)
# Close the PNG file
dev.off()
|
/plot2.R
|
no_license
|
constantin345/ExData_Plotting2
|
R
| false | false | 1,603 |
r
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(plotly)
# Define UI for the logistic growth application
ui <- shinyUI(fluidPage(
# Application title
titlePanel("Logistic growth model"),
    # Sidebar with sliders for the model parameters
sidebarLayout(
sidebarPanel(
sliderInput("population",
"Initiall population",
min = 1,
max = 1000,
value = 40),
sliderInput('r',
                  'Biotic potential',
min = 0.1,
max = 1,
value = 0.4),
sliderInput('time',
'Number of generations',
min = 1,
max = 100,
value = 10
),
sliderInput('k',
                  'Carrying capacity (environment strength)',
min = 1,
max = 10000,
value = 200
)
),
      # Show a plot of the simulated growth curve
mainPanel(
plotlyOutput("distPlot")
)
)
))
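# The server below evaluates the closed-form logistic growth solution
#   N(t) = K * N0 * exp(r * t) / (K + N0 * (exp(r * t) - 1))
# with N0 = initial population, r = biotic potential and K the carrying
# capacity set by the last slider; logistic.growth() implements exactly this.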
# Define server logic that simulates and plots the logistic growth curve
server <- shinyServer(function(input, output) {
   output$distPlot <- renderPlotly({
      logistic.growth <- function (xs) {
        top <- (exp(1) ^ (input$r * xs)) * input$population * input$k
        down <- input$k + (input$population * ((exp(1) ^ (input$r * xs)) - 1))
        return(top / down)
      }
      # evaluate the model at generations 0..input$time
      x <- seq(0, input$time, 1)
      y <- unlist(Map(logistic.growth, x))
      data <- list(time = x, population = y)
      # draw the population trajectory
      plot_ly(data, x = time, y = population, type = 'scatter', mode = 'lines')
   })
})
# Run the application
shinyApp(ui = ui, server = server)
|
/logistic_growth/app.R
|
no_license
|
Shamash2014/eco-modeling
|
R
| false | false | 2,308 |
r
|
#!/usr/bin/Rscript --vanilla
# this script creates an easily readable html file containing stock symbol buy/sell recommendations
# for each analyzed version
# input: matches list from /home/voellenk/tronador/dailyreport/Nuggets-YYYY-MM-DD.Rdata
# output: html file in Dropbox/constellation/dailyReport/Nuggets-YYYY-MM-DD.html
runtime <- Sys.time()
# load required packages
suppressPackageStartupMessages(library(optparse))
suppressPackageStartupMessages(library(knitr))
# global parameters follow
inputDir <- "/home/voellenk/tronador/dailyReport"
outputDir <- "/home/voellenk/Dropbox/constellation/dailyReport"
knitrfileDir <- "/home/voellenk/tronador_workdir/tronador/knitrfiles"
option_list <- list(
make_option(c("-v", "--verbose"), action="store_true", default=FALSE,
help="Print extra output [default]")
)
opt <- parse_args(OptionParser(option_list=option_list))
args <- commandArgs(trailingOnly=TRUE)
setwd(knitrfileDir)
# load matches list from current nugget file
load(paste0(inputDir, "/Nuggets-", format(runtime, format="%Y-%m-%d"), ".Rdata"))
versions <- names(matches)
# create sorted table of best signals for each version
tbllist <- list()
for(i in 1:length(matches)) {
this.version <- names(matches)[i]
if (nrow(matches[[this.version]]) > 0) {
this.match <- matches[[this.version]]
this.match$sym <- as.character(this.match$sym) # transform factor to character
this.match$ID <- as.character(this.match$ID) # transform factor to character
dun <- unique(this.match[,c("sym", "date", "Close", "ATR20", "signal", "ID")])
dun <- cbind(dun, n3=NA, n5=NA, n10=NA, n20=NA)
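    # n3/n5/n10/n20 will hold mean.BS - mean.SS over 3/5/10/20-period horizons,
    # taken from the matches table (the exact meaning of the BS/SS columns is
    # assumed from their names)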
for (n in c(3,5,10,20)) {
for(r in 1:nrow(dun)) {
line <- this.match[this.match$sym == dun[r,"sym"] &
this.match$date == dun[r,"date"] &
this.match$signal == dun[r,"signal"] &
this.match$ID == dun[r,"ID"] &
this.match$period == n ,]
if (nrow(line) == 1) {
dun[r,c(paste0("n", n))] <- line[1,"mean.BS"] - line[1,"mean.SS"]
#dun <- dun[with(dun, order(-n10, sym, date)), ] # order findings
} else if (nrow(line) > 1) {
stop ("Error: Filter condition lead to more than one line. This must not happen.")
}
}
}
tbllist[[this.version]] <- dun
} else { # no obs found for this version
tbllist[[this.version]] <- data.frame()
}
}
if (opt$verbose == TRUE) {
print ("The tbllist is:")
tbllist
}
clean.tbllist <- list()
# tbllist still may contain multiple IDs for distinct symbols. needs to get filtered out.
filtered.tbllist <- list()
for (i in 1:length(names(tbllist))) {
dun <- tbllist[[names(tbllist)[i]]]
if (nrow(dun) > 0) { # omitting versions with no observations
filtered.tbllist[[names(tbllist)[i]]] <-
do.call(rbind,lapply(split(dun, dun$sym), function(chunk) chunk[which.max(chunk$n10),]))
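      # (per symbol, keep only the ID/row with the highest n10 score)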
}
}
if (opt$verbose == TRUE) {
print ("The filtered.tbllist is:")
filtered.tbllist
}
if (length(filtered.tbllist) > 0) {
  # round numeric columns to 3 decimals
omit <- c("sym", "date", "signal", "ID")
for (i in 1:length(names(filtered.tbllist))) {
dun <- filtered.tbllist[[names(filtered.tbllist)[i]]]
leave <- match(omit, names(dun))
out <- dun
out[-leave] <- round(dun[-leave], digits=3)
out <- out[with(out, order(-n10, sym, date)), ] # order findings
clean.tbllist[[names(filtered.tbllist)[i]]] <- out
}
if (opt$verbose == TRUE) {
print ("The clean.tbllist is:")
clean.tbllist
}
} else {
print ("found no nuggets for today!")
}
knit2html("dailyReport.Rmd", output="temp.html")
# move the finished html file to the Dropbox folder
file.copy("temp.html", paste0(outputDir, "/Nuggets-", format(runtime, format="%Y-%m-%d"), ".html"), overwrite=TRUE)
file.remove("temp.html")
|
/night/Rscript/nightlyRun_makeKnitrReport.R
|
permissive
|
wotuzu17/tronador
|
R
| false | false | 3,892 |
r
|
url.exists = function(x, ...) {
if(requireNamespace2("RCurl"))
RCurl::url.exists(x, ...)
else {
con = url(x)
on.exit(close(con))
res = tryCatch(readLines(con), error = function(e) e)
if(is(res, "error"))
FALSE
else
TRUE
}
}
##' make file url
##'
##' @param path The path to wrap in a file:// URL
##' @return A valid file URL
##' @export
makeFileURL = function(path) {
if(Sys.info()["sysname"] == "Windows") {
.winFileURL(path)
} else
paste0("file://", normalizePath2(path))
}
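## e.g. on a Unix-alike makeFileURL("/tmp/myrepo") gives "file:///tmp/myrepo"
## (illustrative path; Windows paths are handled by .winFileURL below)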
isWindows = function( ) tolower(Sys.info()["sysname"]) == "windows"
##' Get path from file URL
##'
##' @param fileurl A file url (beginning in file://)
##' @return The system directory path that \code{fileurl} points to
##' @export
fileFromFileURL = function(fileurl) {
if(!isWindows())
ret = gsub("file://", "" , fileurl, fixed=TRUE)
else
ret = .winFileFromURL(fileurl)
}
.winFileFromURL = function(fileurl) {
if(grepl("file:///", fileurl))
ret = gsub("^file:///", "", fileurl)
else if (grepl("^file://[[:alpha:]]", fileurl))
ret = gsub("^file:", "", fileurl)
ret = gsub("%20", " ", ret)
normalizePath2(ret)
}
.winFileURL = function(path) {
path = normalizePath2(path, winslash="/")
path = gsub(" ", "%20", path)
if(grepl("^//", path))
paste0("file:", path)
else
paste0("file:///", path)
}
remOtherPkgVersions = function(pkgname, version, repodir, storagedir, verbose=FALSE) {
if(is.na(version))
return()
tballpat = paste0(pkgname, "_")
allinrepo = list.files(repodir, pattern = tballpat, full.names=TRUE)
wrongvers = !grepl(version, allinrepo, fixed=TRUE)
if(all(!wrongvers))
return()
if(verbose)
message(sprintf("found %d other versions of the package in the repo directory (%s). Moving them to the storage directory (%s)",
sum(wrongvers), repodir, storagedir))
file.rename(allinrepo[wrongvers],
file.path(storagedir, basename(allinrepo[wrongvers])))
}
fileFromBuiltPkg = function(archive, files, ...) {
filname = basename(archive)
ext = gsub(".*_[^[:alpha:]]*(\\..*)$", "\\1", filname)
fun = switch(ext,
".zip" = unzip,
".tar.gz"= untar,
"tgz" = untar,
stop(sprintf("unrecognized extension %s", ext))
)
fun(archive, files = files, ...)
}
##' R executable
##' @param cmd the R CMD to run. "build", "check", "INSTALL", or "" (for none)
##' @param options The options to pass to the command
##' @export
Rcmd = function(cmd = c("build", "check", "INSTALL", ""), options) {
cmd = match.arg(arg=cmd)
cmdpart = if(nchar(cmd)) paste("CMD", cmd) else ""
paste(file.path(R.home("bin"), "R"), cmdpart, options)
}
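## e.g. Rcmd("build", "mypkg") only builds the string "<R_HOME>/bin/R CMD build mypkg";
## running it is left to the caller ("mypkg" is an illustrative options string)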
##'Check if a directory contains package sources
##' @param dir The directory
##' @export
checkIsPkgDir = function (dir)
{
fils = list.files(dir)
any(grepl("^DESCRIPTION$", fils))
}
##'Find a package directory within an SCM checkout
##' @param rootdir The directory of the checkout
##' @param branch The branch to navigate to
##' @param subdir The subdirectory to navigate to
##' @param param a SwitchrParam object
##' @return A path to the Package sources
##' @export
findPkgDir = function(rootdir, branch, subdir,param)
{
if(!length(subdir))
subdir = "."
ret = NULL
name = basename(rootdir)
#does it have the trunk, branches, tags layout?
if(checkStdSVN(rootdir))
{
if(is.null(branch) || branch %in% c("master", "trunk"))
{
ret = file.path(rootdir, "trunk")
} else {
ret = file.path(rootdir, "branches", branch)
}
} else if(file.exists(file.path(rootdir, ".git"))) {
if(is.null(branch) || branch == "trunk")
branch = "master"
gitChangeBranch(rootdir, branch, param = param)
ret = rootdir
} else if ( is.null(branch) || branch %in% c("master", "trunk")) {
ret = rootdir
} else {
warning(paste0("The svn repository at ", rootdir,
" does not appear to have branches. ",
"Unable to process this source."))
logfun(param)(name, paste("The SCM repository does not appear to have",
"branches and a non-trunk/non-master branch",
"was selected"), type="both")
return(NULL)
}
ret = file.path(ret, subdir)
##we somehow got a return file that doesn't exist on the file system.
##This is a problem with GRAN logic, not with packages/user activity
if(!file.exists(ret))
{
logfun(param)(name, paste("Unable to find subdirectory", subdir,
"in branch", branch), type="both")
warning(paste0("Constructed temporary package directory",ret,
" doesn't appear to exist after svn checkout. ",
"Missing branch?"))
return(NULL)
}
    ##Check that the resulting directory actually contains an R package
##we could be more general and allow people to specify subdirectories...
if(!checkIsPkgDir(ret))
{
logfun(param)(name, paste("Specified branch/subdirectory combination",
"does not appear to contain an R package"),
type="both")
ret = NULL
}
ret
}
makeUserFun = function(scm_auth, url)
{
ind = sapply(names(scm_auth), function(pat) grepl(pat, url, fixed=TRUE))
if(any(ind))
scm_auth[[which(ind)]][1]
else
""
}
makePwdFun = function(scm_auth, url)
{
ind = sapply(names(scm_auth), function(pat) grepl(pat, url, fixed=TRUE))
if(any(ind))
scm_auth[[which(ind)]][2]
else
""
}
##' Create a PkgSource object for a package
##' @param url The url of the package sources
##' @param type The source type.
##' @param user A function which, when called, returns the username to use when
##' checking the sources out
##' @param password A function which returns the password to use when checking
##' out the sources
##' @param scm_auth A list of username-password pairs, named with regular
##' expressions to match against url when constructing the
##' defaults for \code{user} and \code{password}
##' @param prefer_svn Currently unused.
##' @param \dots Passed directly to constructors for PkgSource superclasses
##' @export
makeSource = function(url, type, user, password, scm_auth, prefer_svn = FALSE, ...) {
if(is.na(type))
type = "unknown"
type = tolower(type)
if(missing(user))
user = makeUserFun(scm_auth = scm_auth, url = url)
if(missing(password))
password = makePwdFun(scm_auth= scm_auth, url = url)
if(type == "git" && grepl("github", url))
type = "github"
ret = switch(type,
svn = new("SVNSource", location = url, user = user,
password = password, ...),
local = new("LocalSource", location = url, user = user,
password= password, ...),
git = new("GitSource", location = url, user = user,
password = password, ...),
github = new("GithubSource", location = url, user = user,
password = password, ...),
cran = new("CRANSource", location = url, user = "", password = "", ...),
bioc = new("BiocSource", location = url, user = "readonly", password = "readonly", ...),
tarball = new("TarballSource", location = url, user = "", password ="", ...),
stop("unsupported source type")
)
if( (type=="git" || type == "github") && is.na(ret@branch))
ret@branch = "master"
else if (type=="svn" && (!length(ret@branch) || is.na(ret@branch)))
ret@branch = "trunk"
if(is(ret, "GitSource") && prefer_svn) {
ret2 = tryCatch(as(ret, "SVNSource"), error = function(x) x)
if(!is(ret2, "error"))
ret = ret2
}
ret
}
##' Construct package directory path
##' @param basepath The parent directory for the package directory
##' @param name The name of the package
##' @param subdir The subdirectory within a package source that
##' the actual package root directory will reside in.
##' @param scm_type The type of scm the package sources will be
##' checked out from
##' @param branch The branch from which the package will be retrieved.
##' @return A path
##' @note Unlike \code{\link{findPkgDir}} this does not look for existing
##' package source directories. It only constructs the path.
##'
##' @export
getPkgDir = function(basepath,name, subdir, scm_type, branch)
{
basepath = normalizePath2(basepath)
if(!file.exists(file.path(basepath, name)))
stop("directory not found")
##svn
## if(file.exists(file.path(basepath, name, ".svn")))
if(scm_type == "svn")
{
if(checkStdSVN(file.path(basepath, name)))
{
if(is.na(branch) || branch == "trunk" || branch == "master")
brdir = "trunk"
else
brdir = file.path("branches", branch)
} else {
brdir = "."
}
} else ## git or local, neither have explicit dirs for branching
brdir = "."
normalizePath2(file.path(basepath,name, brdir, subdir))
}
##' normalizePath2
##'
##' Attempt to normalize a relative path to an absolute one. Optionally without
##' resolving symlinks on non-Windows systems
##' @param path The path to normalize
##' @param follow.symlinks Should symlinks (other than . and ..)
##' be resolved to their physical locations? (FALSE)
##' @param winslash The value of winslash to be passed down to normalizePath
##' on windows systems
##' @param mustWork logical. Passed to normalizePath on windows. Ignored otherwise.
##' @return The normalized path.
##' @export
normalizePath2 = function(path, follow.symlinks=FALSE, winslash = "\\", mustWork = NA)
{
if(follow.symlinks || Sys.info()["sysname"]=="Windows")
return(normalizePath(path, winslash = winslash, mustWork = mustWork))
else {
if(substr(path, 1, 1) == "~")
path = path.expand(path)
##paths starting with / for example
else if(substr(path, 1, 1) == .Platform$file.sep)
path = path
else if (substr(path, 1, 2) == "..") {
tmppath = getwd()
while(substr(path, 1, 2) == "..") {
tmppath = dirname(tmppath)
path = substr(path, 3, nchar(path))
if(substr(path, 1, 1) == .Platform$file.sep)
path = substr(path, 2, nchar(path))
}
path = file.path(tmppath, path)
} else if(grepl("^\\.*[[:alnum:]]", path))
path = file.path(getwd(), path)
else if (substr(path, 1,1) == ".")
path = file.path(getwd(), substr(path,2, nchar(path)))
path = gsub(paste(rep(.Platform$file.sep, 2), collapse=""), .Platform$file.sep, path, fixed=TRUE)
path
}
}
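## e.g. with getwd() == "/home/user/proj", normalizePath2("../data") gives
## "/home/user/data" without resolving symlinks (illustrative paths)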
##source an initialization script (e.g. .bashrc) if specified
## in sh_init_script(repo)
##' system_w_init
##'
##' Run a system command with an optional initialization script (e.g. a .bashrc
##' sourced first).
##' @param cmd The text of the command, or a character vector of commands to run sequentially.
##' @param dir The directory that the command should be executed in. The working directory will be temporarily changed to this dir,
##' but will be changed back upon exit of system_w_init.
##' @param init (optional) a character value indicating the
##' location of an initialization shell script.
##' @param \dots additional parameters passed directly to \code{\link{system}}.
##' @param param A SwitchrParam object. The shell initialization
##' script associated with this object is used when \code{init} is
##' not specified (length 0).
##' @return Depends, see \code{\link{system}} for details.
##' @export
system_w_init = function(cmd, dir,
init = character(), ..., param = SwitchrParam())
{
pause = shell_timing(param) > 0
if(!(pause||isWindows()) && length(cmd) > 1)
cmd = paste(cmd, collapse=" ; ")
if(!length(init) && !is.null(param))
init = sh_init_script(param)
if(length(init) && nchar(init))
cmd = paste(paste("source", init, ";"), cmd)
if(!missing(dir)) {
oldwd = getwd()
setwd(dir)
on.exit(setwd(oldwd))
}
if(length(cmd) > 1) {
res = sapply(cmd, function(x, ...) {
res = system(x, ...)
Sys.sleep(shell_timing(param))
res
}, ...)
tail(res, 1)
} else {
system(cmd, ...)
}
}
highestVs = c(9, 14, 2)
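## last minor release for each Bioconductor major version known to this code
## (1.9, 2.14, 3.2), indexed by major version number in the helpers below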
decrBiocVersion = function(biocVers) {
vals = strsplit(biocVers, ".", fixed=TRUE)[[1]]
vals = as.numeric(vals)
if(identical(vals, c(1,0))) {
NULL
} else if (vals[2] == 0) {
vals[1] = vals[1] - 1 #decrement major version
vals[2] = highestVs[ vals[1] ] #set to highest minor version for that major
} else {
vals[2] = vals[2] - 1
}
paste(vals, collapse=".")
}
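## e.g. decrBiocVersion("3.1") returns "3.0", decrBiocVersion("3.0") returns "2.14",
## and decrBiocVersion("1.0") returns NULL (cannot decrement below 1.0)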
decrBiocRepo = function(repos, vers = biocVersFromRepo(repos)) {
if(!is.character(vers))
vers = as.character(vers)
pieces = strsplit(repos, vers, fixed=TRUE)
newvers = decrBiocVersion(vers)
if(is.null(newvers)) {
warning("Cannot decrement bioc repo version below 1.0")
return(NULL)
}
sapply(pieces, function(x) paste0(x, collapse = newvers))
}
biocVersFromRepo = function(repos) gsub(".*/([0-9][^/]*)/.*", "\\1", repos[1])
biocReposFromVers = function(vers = develVers) {
if(!requireNamespace2("BiocInstaller"))
stop("Unable to manipulate bioc versions without BiocInstaller installed")
repos = head(BiocInstaller::biocinstallRepos(), -1)
bef= gsub("(.*/)[0-9][^/]*/.*", "\\1", repos)
af = gsub(".*/[0-9][^/]*(/.*)", "\\1", repos)
paste0(bef, vers, af)
}
highestBiocVers = function(repos){
if(!requireNamespace2("BiocInstaller"))
stop("Unable to determine bioc versions without BiocInstaller installed")
else if(missing(repos))
## head -1 removes the last element
repos =head(BiocInstaller::biocinstallRepos(), -1)
majvers = length(highestVs)
vers = paste(majvers, highestVs[majvers], sep=".")
biocReposFromVers(vers = vers)
}
#system(..., intern=TRUE) throws an error if the the command fails,
#and has attr(out, "status") > 0 if the called program returns non-zero status.
##' Identify error states from R or external programs
##' @param out An R object representing output
##' @return TRUE if out is an error object, or has an attribute called "status" which is > 0
##' @export
errorOrNonZero = function(out)
{
if(is(out, "error") ||
(!is.null(attr(out, "status")) && attr(out, "status") > 0))
TRUE
else
FALSE
}
## NB this will give different behavior in R versions that
## provide requireNamespace and those that don't re the search path.
## Not ideal, but otherwise switchr will fail to install at all.
requireNamespace2 = function(...) {
if(exists("requireNamespace"))
requireNamespace(...)
else
require(...)
}
## this will give identical behavior but will be less efficient
## when paste0 doesn't exist.
if(!exists("paste0"))
paste0 = function(...) paste(..., sep="")
sourceFromManifest = function(pkg, manifest, scm_auths = list(bioconductor=c("readonly", "readonly")), ...) {
mandf = manifest_df(manifest)
manrow = mandf[mandf$name == pkg, ]
##https://github.com/gmbecker/ProteinVis/archive/IndelsOverlay.zip
## for IndelsOverlay branch
src = makeSource(name = pkg,
type = manrow$type,
url = manrow$url, branch = manrow$branch,
subdir = manrow$subdir,
scm_auth = scm_auths,...)
src
}
isWindows = function() {
Sys.info()["sysname"] == "Windows"
}
haveGit = function() {
res = tryCatch(system2("git", args = "--version"), error = function(e) e)
st = attr(res, "status")
if(is(res, "error") || (!is.null(st) && st > 0))
FALSE
else
TRUE
}
|
/switchr/R/Utilities.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 16,443 |
r
|
url.exists = function(x, ...) {
if(requireNamespace2("RCurl"))
RCurl::url.exists(x, ...)
else {
con = url(x)
on.exit(close(con))
res = tryCatch(readLines(con), error = function(e) e)
if(is(res, "error"))
FALSE
else
TRUE
}
}
##' make file url
##'
##' @param path The path to wrap in a file:// URL
##' @return A valid file URL
##' @export
makeFileURL = function(path) {
if(Sys.info()["sysname"] == "Windows") {
.winFileURL(path)
} else
paste0("file://", normalizePath2(path))
}
isWindows = function( ) tolower(Sys.info()["sysname"]) == "windows"
##' Get path from file URL
##'
##' @param fileurl A file url (beginning in file://)
##' @return The system directory path that \code{fileurl} points to
##' @export
fileFromFileURL = function(fileurl) {
if(!isWindows())
ret = gsub("file://", "" , fileurl, fixed=TRUE)
else
ret = .winFileFromURL(fileurl)
}
.winFileFromURL = function(fileurl) {
if(grepl("file:///", fileurl))
ret = gsub("^file:///", "", fileurl)
else if (grepl("^file://[[:alpha:]]", fileurl))
ret = gsub("^file:", "", fileurl)
ret = gsub("%20", " ", ret)
normalizePath2(ret)
}
.winFileURL = function(path) {
path = normalizePath2(path, winslash="/")
path = gsub(" ", "%20", path)
if(grepl("^//", path))
paste0("file:", path)
else
paste0("file:///", path)
}
remOtherPkgVersions = function(pkgname, version, repodir, storagedir, verbose=FALSE) {
if(is.na(version))
return()
tballpat = paste0(pkgname, "_")
allinrepo = list.files(repodir, pattern = tballpat, full.names=TRUE)
wrongvers = !grepl(version, allinrepo, fixed=TRUE)
if(all(!wrongvers))
return()
if(verbose)
message(sprintf("found %d other versions of the package in the repo directory (%s). Moving them to the storage directory (%s)",
sum(wrongvers), repodir, storagedir))
file.rename(allinrepo[wrongvers],
file.path(storagedir, basename(allinrepo[wrongvers])))
}
fileFromBuiltPkg = function(archive, files, ...) {
filname = basename(archive)
ext = gsub(".*_[^[:alpha:]]*(\\..*)$", "\\1", filname)
fun = switch(ext,
".zip" = unzip,
".tar.gz"= untar,
"tgz" = untar,
stop(sprintf("unrecognized extension %s", ext))
)
fun(archive, files = files, ...)
}
##' R executable
##' @param cmd the R CMD to run. "build", "check", "INSTALL", or "" (for none)
##' @param options The options to pass to the command
##' @export
Rcmd = function(cmd = c("build", "check", "INSTALL", ""), options) {
cmd = match.arg(arg=cmd)
cmdpart = if(nchar(cmd)) paste("CMD", cmd) else ""
paste(file.path(R.home("bin"), "R"), cmdpart, options)
}
##'Check if a directory contains package sources
##' @param dir The directory
##' @export
checkIsPkgDir = function (dir)
{
fils = list.files(dir)
any(grepl("^DESCRIPTION$", fils))
}
##'Find a package directory within an SCM checkout
##' @param rootdir The directory of the checkout
##' @param branch The branch to navigate to
##' @param subdir The subdirectory to navigate to
##' @param param a SwitchrParam object
##' @return A path to the Package sources
##' @export
findPkgDir = function(rootdir, branch, subdir,param)
{
if(!length(subdir))
subdir = "."
ret = NULL
name = basename(rootdir)
#does it have the trunk, branches, tags layout?
if(checkStdSVN(rootdir))
{
if(is.null(branch) || branch %in% c("master", "trunk"))
{
ret = file.path(rootdir, "trunk")
} else {
ret = file.path(rootdir, "branches", branch)
}
} else if(file.exists(file.path(rootdir, ".git"))) {
if(is.null(branch) || branch == "trunk")
branch = "master"
gitChangeBranch(rootdir, branch, param = param)
ret = rootdir
} else if ( is.null(branch) || branch %in% c("master", "trunk")) {
ret = rootdir
} else {
warning(paste0("The svn repository at ", rootdir,
" does not appear to have branches. ",
"Unable to process this source."))
logfun(param)(name, paste("The SCM repository does not appear to have",
"branches and a non-trunk/non-master branch",
"was selected"), type="both")
return(NULL)
}
ret = file.path(ret, subdir)
##we somehow got a return file that doesn't exist on the file system.
##This is a problem with GRAN logic, not with packages/user activity
if(!file.exists(ret))
{
logfun(param)(name, paste("Unable to find subdirectory", subdir,
"in branch", branch), type="both")
warning(paste0("Constructed temporary package directory",ret,
" doesn't appear to exist after svn checkout. ",
"Missing branch?"))
return(NULL)
}
##Find a package. First look in ret, then in ret/package and ret/pkg
##we could be more general and allow people to specify subdirectories...
if(!checkIsPkgDir(ret))
{
logfun(param)(name, paste("Specified branch/subdirectory combination",
"does not appear to contain an R package"),
type="both")
ret = NULL
}
ret
}
makeUserFun = function(scm_auth, url)
{
ind = sapply(names(scm_auth), function(pat) grepl(pat, url, fixed=TRUE))
if(any(ind))
scm_auth[[which(ind)]][1]
else
""
}
makePwdFun = function(scm_auth, url)
{
ind = sapply(names(scm_auth), function(pat) grepl(pat, url, fixed=TRUE))
if(any(ind))
scm_auth[[which(ind)]][2]
else
""
}
##' Create a PkgSource object for a package
##' @param url The url of the package sources
##' @param type The source type.
##' @param user A function which, when called, returns the username to use when
##' when checking the soources out
##' @param password A function which returns the password to use when checking
##' out the sources
##' @param scm_auth A list of username-password pairs, named with regular
##' expressions to match against url when constructing the
##' defaults for \code{user} and \code{password}
##' @param prefer_svn Currently unused.
##' @param \dots Passed directly to constructors for PkgSource superclasses
##' @export
makeSource = function(url, type, user, password, scm_auth, prefer_svn = FALSE, ...) {
if(is.na(type))
type = "unknown"
type = tolower(type)
if(missing(user))
user = makeUserFun(scm_auth = scm_auth, url = url)
if(missing(password))
password = makePwdFun(scm_auth= scm_auth, url = url)
if(type == "git" && grepl("github", url))
type = "github"
ret = switch(type,
svn = new("SVNSource", location = url, user = user,
password = password, ...),
local = new("LocalSource", location = url, user = user,
password= password, ...),
git = new("GitSource", location = url, user = user,
password = password, ...),
github = new("GithubSource", location = url, user = user,
password = password, ...),
cran = new("CRANSource", location = url, user = "", password = "", ...),
bioc = new("BiocSource", location = url, user = "readonly", password = "readonly", ...),
tarball = new("TarballSource", location = url, user = "", password ="", ...),
stop("unsupported source type")
)
if( (type=="git" || type == "github") && is.na(ret@branch))
ret@branch = "master"
else if (type=="svn" && (!length(ret@branch) || is.na(ret@branch)))
ret@branch = "trunk"
if(is(ret, "GitSource") && prefer_svn) {
ret2 = tryCatch(as(ret, "SVNSource"), error = function(x) x)
if(!is(ret2, "error"))
ret = ret2
}
ret
}
##' Construct pockage directory path
##' @param basepath The parent directory for the package directory
##' @param name The name of the package
##' @param subdir The subdirectory within a package source that
##' the actual package root directory will reside in.
##' @param scm_type Tye type of scm the package sources will be
##' checked out from
##' @param branch The branch from which the package will be retrieved.
##' @return A path
##' @note Unlike \code{\link{findPkgDir}} this does not look for existing
##' package source directories. It only constructs the path.
##'
##' @export
getPkgDir = function(basepath,name, subdir, scm_type, branch)
{
basepath = normalizePath2(basepath)
if(!file.exists(file.path(basepath, name)))
stop("directory not found")
##svn
## if(file.exists(file.path(basepath, name, ".svn")))
if(scm_type == "svn")
{
if(checkStdSVN(file.path(basepath, name)))
{
if(is.na(branch) || branch == "trunk" || branch == "master")
brdir = "trunk"
else
brdir = file.path("branches", branch)
} else {
brdir = "."
}
} else ## git or local, neither have explicit dirs for branching
brdir = "."
normalizePath2(file.path(basepath,name, brdir, subdir))
}
##' normalizePath2
##'
##' Attempt to normalize a relative path to an absolute one. Optionally without
##' resolving symlinks on non-Windows systems
##' @param path The path to normalize
##' @param follow.symlinks Should symlinks (other than . and ..)
##' be resolved to their physical locations? (FALSE)
##' @param winslash The value of winslash to be passed down to normalizePath
##' on windows systems
##' @param mustWork logical. Passed to normalizePath on windows. Ignored otherwise.
##' @return The normalized path.
##' @export
normalizePath2 = function(path, follow.symlinks=FALSE, winslash = "\\", mustWork = NA)
{
if(follow.symlinks || Sys.info()["sysname"]=="Windows")
return(normalizePath(path, winslash = winslash, mustWork = mustWork))
else {
if(substr(path, 1, 1) == "~")
path = path.expand(path)
##paths starting with / for example
else if(substr(path, 1, 1) == .Platform$file.sep)
path = path
else if (substr(path, 1, 2) == "..") {
tmppath = getwd()
while(substr(path, 1, 2) == "..") {
tmppath = dirname(tmppath)
path = substr(path, 3, nchar(path))
if(substr(path, 1, 1) == .Platform$file.sep)
path = substr(path, 2, nchar(path))
}
path = file.path(tmppath, path)
} else if(grepl("^\\.*[[:alnum:]]", path))
path = file.path(getwd(), path)
else if (substr(path, 1,1) == ".")
path = file.path(getwd(), substr(path,2, nchar(path)))
path = gsub(paste(rep(.Platform$file.sep, 2), collapse=""), .Platform$file.sep, path, fixed=TRUE)
path
}
}
##source an initialization script (e.g. .bashrc) if specified
## in sh_init_script(repo)
##' system_w_init
##'
##' Run a system command with an optional intialization script (e.g. a .bashrc
##' sourced first).
##' @param cmd The text of the command. Must be length 1.
##' @param dir The directory that the command should be executed in. The working directory will be temporarily changed to this dir,
##' but will be changed back upon exit of system_w_init.
##' @param init (optional) a character value indicating the
##' location of an initialization shell script.
##' @param \dots additional parameters passed directly to \code{\link{system}}.
##' @param param A SwitchrParam object. The shell initialization
##' script associated with this object is used when \code{init} is
##' not specified (length 0).
##' @return Depends, see \code{\link{system}} for details.
##' @export
system_w_init = function(cmd, dir,
init = character(), ..., param = SwitchrParam())
{
pause = shell_timing(param) > 0
if(!(pause||isWindows()) && length(cmd) > 1)
cmd = paste(cmd, collapse=" ; ")
if(!length(init) && !is.null(param))
init = sh_init_script(param)
if(length(init) && nchar(init))
cmd = paste(paste("source", init, ";"), cmd)
if(!missing(dir)) {
oldwd = getwd()
setwd(dir)
on.exit(setwd(oldwd))
}
if(length(cmd) > 1) {
res = sapply(cmd, function(x, ...) {
res = system(x, ...)
Sys.sleep(shell_timing(param))
res
}, ...)
tail(res, 1)
} else {
system(cmd, ...)
}
}
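## Usage sketch (hypothetical paths, not part of the original source): run git
## commands inside a checkout, sourcing ~/.bashrc first so any PATH changes in
## it apply to the spawned shell.
# system_w_init(c("git fetch", "git status"),
#               dir   = "~/checkouts/mypkg",
#               init  = "~/.bashrc",
#               param = SwitchrParam())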
highestVs = c(9, 14, 2)
decrBiocVersion = function(biocVers) {
vals = strsplit(biocVers, ".", fixed=TRUE)[[1]]
vals = as.numeric(vals)
if(identical(vals, c(1,0))) {
NULL
} else if (vals[2] == 0) {
vals[1] = vals[1] - 1 #decrement major version
vals[2] = highestVs[ vals[1] ] #set to highest minor version for that major
} else {
vals[2] = vals[2] - 1
}
paste(vals, collapse=".")
}
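## Walk-through of the decrement logic (values follow highestVs above):
##   decrBiocVersion("2.10") # -> "2.9"   minor version drops by one
##   decrBiocVersion("3.0")  # -> "2.14"  rolls back to the last 2.x release
##   decrBiocVersion("1.0")  # -> NULL    cannot go below Bioc 1.0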
decrBiocRepo = function(repos, vers = biocVersFromRepo(repos)) {
if(!is.character(vers))
vers = as.character(vers)
pieces = strsplit(repos, vers, fixed=TRUE)
newvers = decrBiocVersion(vers)
if(is.null(newvers)) {
warning("Cannot decrement bioc repo version below 1.0")
return(NULL)
}
sapply(pieces, function(x) paste0(x, collapse = newvers))
}
biocVersFromRepo = function(repos) gsub(".*/([0-9][^/]*)/.*", "\\1", repos[1])
biocReposFromVers = function(vers = develVers) {
if(!requireNamespace2("BiocInstaller"))
stop("Unable to manipulate bioc versions without BiocInstaller installed")
repos = head(BiocInstaller::biocinstallRepos(), -1)
bef= gsub("(.*/)[0-9][^/]*/.*", "\\1", repos)
af = gsub(".*/[0-9][^/]*(/.*)", "\\1", repos)
paste0(bef, vers, af)
}
highestBiocVers = function(repos){
if(!requireNamespace2("BiocInstaller"))
stop("Unable to determine bioc versions without BiocInstaller installed")
else if(missing(repos))
## head -1 removes the last element
repos =head(BiocInstaller::biocinstallRepos(), -1)
majvers = length(highestVs)
vers = paste(majvers, highestVs[majvers], sep=".")
biocReposFromVers(vers = vers)
}
#system(..., intern=TRUE) throws an error if the command fails,
#and has attr(out, "status") > 0 if the called program returns non-zero status.
##' Identify error states from R or external programs
##' @param out An R object representing output
##' @return TRUE if out is an error object, or has an attribute called "status" which is > 0
##' @export
errorOrNonZero = function(out)
{
if(is(out, "error") ||
(!is.null(attr(out, "status")) && attr(out, "status") > 0))
TRUE
else
FALSE
}
## NB: this will give different behavior, with respect to the search path, in
## R versions that provide requireNamespace and those that do not.
## Not ideal, but otherwise switchr will fail to install at all.
requireNamespace2 = function(...) {
if(exists("requireNamespace"))
requireNamespace(...)
else
require(...)
}
## this will give identical behavior but will be less efficient
## when paste0 doesn't exist.
if(!exists("paste0"))
paste0 = function(...) paste(..., sep="")
sourceFromManifest = function(pkg, manifest, scm_auths = list(bioconductor=c("readonly", "readonly")), ...) {
mandf = manifest_df(manifest)
manrow = mandf[mandf$name == pkg, ]
##https://github.com/gmbecker/ProteinVis/archive/IndelsOverlay.zip
## for IndelsOverlay branch
src = makeSource(name = pkg,
type = manrow$type,
url = manrow$url, branch = manrow$branch,
subdir = manrow$subdir,
scm_auth = scm_auths,...)
src
}
isWindows = function() {
Sys.info()["sysname"] == "Windows"
}
haveGit = function() {
res = tryCatch(system2("git", args = "--version"), error = function(e) e)
st = attr(res, "status")
if(is(res, "error") || (!is.null(st) && st > 0))
FALSE
else
TRUE
}
|
# ####################
#
# Compare the performance of
# i) standard Monte Carlo simulation
# ii) antithetic variable technique
# iii) stratification sampling
# iv) control variates
#
# HW4 q3.r
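# The quantity estimated below is theta = E[4*U^3] with U ~ Uniform(0,1),
# i.e. the integral of 4*u^3 over [0,1], whose exact value is 1; the "Error"
# column computed at the end is therefore 1 - estimate. A quick numerical
# check of the true value (not in the original script):
# integrate(function(u) 4*u^3, lower = 0, upper = 1)   # = 1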
######## Standard Monte Carlo ########
# n is the number of simulations; returns c(estimate of theta, sample variance)
MonteCarlo <- function(n) {
u <- runif(n,0,1)
s <- 4*u^3
  # s holds the simulated values of 4*U^3; their mean estimates theta
mu <- mean(s)
ans <- c(mu,var(s))
return(ans)
}
######## Antithetic variable ########
# n is the number of simulations; n/2 antithetic pairs are used,
# and the function returns c(estimate of theta, sample variance)
Antithetic <- function(n) {
k <- n/2
u <- runif(k,0,1)
x <- 4*(u)^3
y <- 4*(1-u)^3
  # s combines the two antithetic samples; their mean estimates theta
s <- c(x,y)
mu <- mean(s)
varH <- var(s)
ans <- c(mu,varH)
return(ans)
}
######## Stratified Sampling ########
Stratified <- function(n) {
B <- 1000
NB <- n/B
ST <- rep(0,n)
for (i in 0:(B-1)) {
u <- runif(NB)
v <- (u+i)/B
for (j in 1:NB) {
      ST[j+i*NB] <- 4*(v[j])^3
}
}
mu <- mean(ST)
ans <- c(mu,var(ST))
return(ans)
}
######## Control variate ########
CV <- function(n) {
u <- runif(n,0,1)
x <- 4*u^3
y <- runif(n,0,1) # y ~ U(0,1) as control variate
c <- -cov(x,y)/var(y)
s <- x + c*(y - 0.5)
mu <- mean(s)
varH <- var(x)+c*c*var(y)+2*c*cov(x,y)
ans <- c(mu,varH)
return(ans)
}
######## Run simulation, 10000 times #######
n <- 10000
result <- rbind(MonteCarlo(n),Antithetic(n),
Stratified(n),CV(n))
result <- cbind(result,c(1-result[1,1],1-result[2,1],1-result[3,1],1-result[4,1]))
colnames(result) <- c("Estimate","Variance","Error")
rownames(result) <- c("Monte Carlo simulation","Antithetic variable",
"Stratified sampling","Control variates")
print(result)
|
/HW4 q3.r
|
no_license
|
iyeung144/rmsc5102
|
R
| false | false | 1,961 |
r
|
# Violin plots
# Before we finish discussing box plots, it is worth looking at a variation of
# them called the violin plot. It is a combination of a box plot and a kernel
# density plot. The chart can be created with the vioplot() function from the
# vioplot package; remember to install that package before the first use.
# The calling format of vioplot() is:
# vioplot(x1, x2, ... , names=, col=)
# where x1, x2, ... are one or more numeric vectors to be plotted (a separate
# violin plot is drawn for each vector). The names argument is a character
# vector of labels for the plots, and col is a vector with the colour of each
# plot. Note that to use vioplot() the groups of observations must first be
# split into separate variables. An example is given in the code below.
library(vioplot)
x1 <- mtcars$mpg[mtcars$cyl==4]
x2 <- mtcars$mpg[mtcars$cyl==6]
x3 <- mtcars$mpg[mtcars$cyl==8]
vioplot(x1, x2, x3, names=c("4 cyl", "6 cyl", "8 cyl"), col="gold")
title("Violin Plots of Miles Per Gallon")
# Violin plots are mirrored kernel density plots overlaid on box plots. Here
# the white dot is the median, the black box is the interquartile range, and
# the thin black lines are the whiskers. The outer outline of the figure is the
# kernel density plot. Violin plots have not yet caught on, which again may be
# due to the lack of readily available software. Time will tell.
|
/Diagrams/Boxplot/ViolinPlot.R
|
no_license
|
daoleen/Learn-R
|
R
| false | false | 2,574 |
r
|
\name{getDensityKernelMode}
\alias{getDensityKernelMode}
\title{
Mode of the Density Kernel
}
\description{
Calculate the mode of the density kernel function
}
\usage{
getDensityKernelMode(v)
}
\arguments{
\item{v}{
numeric vector of values, typically gene or oligo expression abundance
}
}
\details{
This function is used in RMA normalization to estimate the amount
of non-specific hybridization on a microarray.
}
\value{
a numeric value on length 1, representing the most frequent value
in the distribution of \code{v}.
}
\note{
Uses the 'epanechnikov' kernel. See \code{\link{density}}.
}
\seealso{
\code{\link{duffyRMA}}, \code{\link{DKMshift}}
}
\examples{
getDensityKernelMode( rnorm( 100, mean=10, sd=2))
}
|
/man/getDensityKernelMode.Rd
|
no_license
|
robertdouglasmorrison/DuffyTools
|
R
| false | false | 760 |
rd
|
# install.packages("dplyr")
# install.packages("ggplot2")
# install.packages("mgcv")
# install.packages("REdaS")
# install.packages("gridExtra")
# library(dplyr)
# library(ggplot2)
# library(mgcv)
# library(REdaS)
# library(gridExtra)
## Download Relevant Data
fastballs <- read.csv("data/fastballs.csv")
Pitchers <- read.csv("data/BPC_pitchers.csv")
Pitchers$Pitch.Type <- gsub("[[:punct:]]", "", Pitchers$Pitch.Type)
AllPitchData <- read.csv("data/AllPitchData.csv")
##Prep the pitcher you want to evaluate
pitch_arsenal <- function(pitcher_name)
{
pitcher <- filter(Pitchers, Pitcher == pitcher_name)
pitcher <- filter(pitcher, BU != 0)
pitcher$Pitch.Type[pitcher$Pitch.Type == "2 Seam fastball"] <- "Fastball"
pitcher_stats <- pitcher %>%
select(Pitch.Type,Velo, Total.spin,H..BREAK, V_BREAK) %>%
group_by(Pitch.Type) %>%
summarise(Velo = mean(Velo), TS = mean(Total.spin), HB = -mean(H..BREAK), VB = mean(V_BREAK))
pitcher_stats$BU <- pitcher_stats$TS/pitcher_stats$Velo
pitcher_stats$Move_Angle <- rad2deg(atan2(pitcher_stats$VB,pitcher_stats$HB))
pitcher_stats %>% mutate_if(is.numeric, ~round(., 1))
}
whiff_chart <- function(pitcher_aresenal, pitcher_name)
{
# define the strike zone
topKzone <- 3.5
botKzone <- 1.6
inKzone <- -0.95
outKzone <- 0.95
kZone <- data.frame(
x=c(inKzone, inKzone, outKzone, outKzone, inKzone),
y=c(botKzone, topKzone, topKzone, botKzone, botKzone)
)
pitcher_stats <- pitcher_aresenal
pitcher <- filter(Pitchers, Pitcher == pitcher_name)
# define the 1/0 response variable
fastballs <- mutate(fastballs, Whiff=ifelse(description %in% c("ball", "blocked_ball","bunt_foul_tip", "called_strike", "foul", "foul_bunt", "foul_tip", "hit_by_pitch", "hit_into_play","hit_into_play_no_out", "hit_into_play_score", "missed_bunt", "pitchout"),
0, 1))
fastballs <- mutate(fastballs, Swing=ifelse(description %in% c("ball", "blocked_ball","bunt_foul_tip", "called_strike","foul_bunt", "hit_by_pitch", "missed_bunt", "pitchout"),
0, 1))
fastballs$release_spin_rate <- as.numeric(as.character(fastballs$release_spin_rate))
fastballs$BU <- fastballs$release_spin_rate/fastballs$release_speed
fastballs$Move_Angle <- rad2deg(atan2(fastballs$pfx_z,fastballs$pfx_x))
  ## Develop the same-handed plot
pdata <- filter(fastballs, Swing == 1)
pdata <- filter(pdata, between(BU,pitcher_stats$BU[pitcher_stats$Pitch.Type == "Fastball"]-.75,pitcher_stats$BU[pitcher_stats$Pitch.Type == "Fastball"]+0.75))
pdata <- filter(pdata, between(Move_Angle, pitcher_stats$Move_Angle[pitcher_stats$Pitch.Type == "Fastball"]-10,pitcher_stats$Move_Angle[pitcher_stats$Pitch.Type == "Fastball"]+10))
if (pitcher$Handedness[1] == "R") {
pdata <- filter(pdata, p_throws == "R")
pdata <- filter(pdata, h_bats == "R")
} else {
pdata <- filter(pdata, p_throws == "L")
pdata <- filter(pdata, h_bats == "L")
}
fit <- gam(Whiff ~ s(plate_x,plate_z), family=binomial, data=pdata)
  # find predicted probabilities over a 100 x 100 grid
plate_x <- seq(-1.5, 1.5, length.out=100)
plate_z <- seq(1.4, 3.75, length.out=100)
data.predict <- data.frame(plate_x = c(outer(plate_x, plate_z * 0 + 1)),
plate_z = c(outer(plate_x * 0 + 1, plate_z)))
lp <- predict(fit, data.predict)
data.predict$Probability <- exp(lp) / (1 + exp(lp))
# construct the plot V Same
Same <- ggplot(kZone, aes(x, y)) +
geom_tile(data=data.predict,
aes(x=plate_x, y=plate_z, fill= Probability)) +
scale_fill_distiller(palette = "Spectral") +
geom_path(lwd=1.5, col="black") +
coord_fixed()+labs(title="Whiff Rates Similar Fastballs V Same")
  ## Develop the opposite-handed plot
pdata <- filter(fastballs, Swing == 1)
pdata <- filter(pdata, between(BU,pitcher_stats$BU[pitcher_stats$Pitch.Type == "Fastball"]-.75,pitcher_stats$BU[pitcher_stats$Pitch.Type == "Fastball"]+0.75))
pdata <- filter(pdata, between(Move_Angle, pitcher_stats$Move_Angle[pitcher_stats$Pitch.Type == "Fastball"]-10,pitcher_stats$Move_Angle[pitcher_stats$Pitch.Type == "Fastball"]+10))
if (pitcher$Handedness[1] == "R") {
pdata <- filter(pdata, p_throws == "R")
pdata <- filter(pdata, h_bats == "L" | h_bats == "S")
} else {
pdata <- filter(pdata, p_throws == "L")
pdata <- filter(pdata, h_bats == "R" | h_bats == "S")
}
fit <- gam(Whiff ~ s(plate_x,plate_z), family=binomial, data=pdata)
  # find predicted probabilities over a 100 x 100 grid
plate_x <- seq(-1.5, 1.5, length.out=100)
plate_z <- seq(1.4, 3.75, length.out=100)
data.predict <- data.frame(plate_x = c(outer(plate_x, plate_z * 0 + 1)),
plate_z = c(outer(plate_x * 0 + 1, plate_z)))
lp <- predict(fit, data.predict)
data.predict$Probability <- exp(lp) / (1 + exp(lp))
# construct the plot V Oppo
Oppo <- ggplot(kZone, aes(x, y)) +
geom_tile(data=data.predict,
aes(x=plate_x, y=plate_z, fill= Probability)) +
scale_fill_distiller(palette = "Spectral") +
geom_path(lwd=1.5, col="black") +
coord_fixed()+labs(title="Whiff Rates Similar Fastballs V Oppo")
## Generate Plots
grid.arrange(Same, Oppo, ncol=2)
}
pitch_comps <- function(pitcher_name, pitch_type)
{
#AllPitchData[,c("X4SeamH.Move",
# "X2SeamH.Move",
# "CutterH.Move",
# "SliderH.Move",
# "Change.UpH.Move",
# "CurveballH.Move")] <- dplyr::select(AllPitchData, ends_with("H.Move")) * -1
AllPitchData$X4SeamMove_Angle <- rad2deg(atan2(AllPitchData$X4SeamV.Move,AllPitchData$X4SeamH.Move))
AllPitchData$X2SeamMove_Angle <- rad2deg(atan2(AllPitchData$X2SeamV.Move,AllPitchData$X2SeamH.Move))
AllPitchData$CutterMove_Angle <- rad2deg(atan2(AllPitchData$CutterV.Move,AllPitchData$CutterH.Move))
AllPitchData$SliderMove_Angle <- rad2deg(atan2(AllPitchData$SliderV.Move,AllPitchData$SliderH.Move))
AllPitchData$Change.UpMove_Angle <- rad2deg(atan2(AllPitchData$Change.UpV.Move,AllPitchData$Change.UpH.Move))
AllPitchData$CurveballMove_Angle <- rad2deg(atan2(AllPitchData$CurveballV.Move,AllPitchData$CurveballH.Move))
AllPitchData <- AllPitchData[,c("ID",
"FG.ID",
"Pitcher",
"Handedness",
"X4SeamVelo",
"X4SeamSpin",
"X4SeamBU",
"X4SeamWhiff.Swing",
"X4SeamwOBA",
"X4SeamH.Move",
"X4SeamV.Move",
"X4SeamMove_Angle",
"X2SeamVelo",
"X2SeamSpin",
"X2SeamBU",
"X2SeamWhiff.Swing",
"X2SeamwOBA",
"X2SeamH.Move",
"X2SeamV.Move",
"X2SeamMove_Angle",
"CutterVelo",
"CutterSpin",
"CutterBU",
"CutterWhiff.Swing",
"CutterwOBA",
"CutterH.Move",
"CutterV.Move",
"CutterMove_Angle",
"SliderVelo",
"SliderSpin",
"SliderBU",
"SliderWhiff.Swing",
"SliderwOBA",
"SliderH.Move",
"SliderV.Move",
"SliderMove_Angle",
"Change.UpVelo",
"Change.UpSpin",
"Change.UpBU",
"Change.UpWhiff.Swing",
"Change.UpwOBA",
"Change.UpH.Move",
"Change.UpV.Move",
"Change.UpMove_Angle",
"CurveballVelo",
"CurveballSpin",
"CurveballBU",
"CurveballWhiff.Swing",
"CurveballwOBA",
"CurveballH.Move",
"CurveballV.Move",
"CurveballMove_Angle")]
test_pitcher <- pitch_arsenal(pitcher_name)
test_pitcher <- filter(test_pitcher, Pitch.Type == "Fastball")
AllPitchData$Dist <- sqrt((AllPitchData$X4SeamBU - test_pitcher$BU)^2+(AllPitchData$X4SeamMove_Angle - test_pitcher$Move_Angle)^2)
Pitcher_Comps <- filter(AllPitchData, !is.na(Dist))
Pitcher_Comps <- Pitcher_Comps[order(Pitcher_Comps$Dist),]
Pitcher_Comps$Change.UpDiff <- Pitcher_Comps$X4SeamVelo - Pitcher_Comps$Change.UpVelo
Pitcher_Comps <- Pitcher_Comps[c("ID",
"FG.ID",
"Pitcher",
"Handedness",
"X4SeamBU",
"X4SeamMove_Angle",
"X4SeamWhiff.Swing",
"X4SeamwOBA",
"X2SeamBU",
"X2SeamMove_Angle",
"X2SeamWhiff.Swing",
"X2SeamwOBA",
"CutterBU",
"CutterMove_Angle",
"CutterWhiff.Swing",
"CutterwOBA",
"SliderBU",
"SliderMove_Angle",
"SliderWhiff.Swing",
"SliderwOBA",
"Change.UpBU",
"Change.UpMove_Angle",
"Change.UpDiff",
"Change.UpWhiff.Swing",
"Change.UpwOBA",
"CurveballBU",
"CurveballMove_Angle",
"CurveballWhiff.Swing",
"CurveballwOBA")]
Pitcher_Comps <- Pitcher_Comps[1:50,]
#Create 2 Seam Profile Matches
Two_Seam <- Pitcher_Comps[,c("ID",
"FG.ID",
"Pitcher",
"Handedness",
"X2SeamBU",
"X2SeamMove_Angle",
"X2SeamWhiff.Swing",
"X2SeamwOBA")]
Two_Seam <- Two_Seam[complete.cases(Two_Seam), ]
Two_Seam$X2SeamMove_Angle <- round(Two_Seam$X2SeamMove_Angle,1)
Two_Seam$X2SeamWhiff.Swing <- round(Two_Seam$X2SeamWhiff.Swing,2)
Two_Seam$X2SeamwOBA <- round(Two_Seam$X2SeamwOBA,3)
#Create Cutter Profile Matches
Cutter <- Pitcher_Comps[,c("ID",
"FG.ID",
"Pitcher",
"Handedness",
"CutterBU",
"CutterMove_Angle",
"CutterWhiff.Swing",
"CutterwOBA")]
Cutter <- Cutter[complete.cases(Cutter), ]
Cutter$CutterMove_Angle <- round(Cutter$CutterMove_Angle,1)
Cutter$CutterWhiff.Swing <- round(Cutter$CutterWhiff.Swing,2)
Cutter$CutterwOBA <- round(Cutter$CutterwOBA,3)
#Create Slider Profile Matches
Slider <- Pitcher_Comps[,c("ID",
"FG.ID",
"Pitcher",
"Handedness",
"SliderBU",
"SliderMove_Angle",
"SliderWhiff.Swing",
"SliderwOBA")]
Slider <- Slider[complete.cases(Slider), ]
Slider$SliderMove_Angle <- round(Slider$SliderMove_Angle,1)
Slider$SliderWhiff.Swing <- round(Slider$SliderWhiff.Swing,2)
Slider$SliderwOBA <- round(Slider$SliderwOBA,3)
#Create Change Up Profile Matches
Change.Up <- Pitcher_Comps[,c("ID",
"FG.ID",
"Pitcher",
"Handedness",
"Change.UpBU",
"Change.UpMove_Angle",
"Change.UpDiff",
"Change.UpWhiff.Swing",
"Change.UpwOBA")]
Change.Up <- Change.Up[complete.cases(Change.Up), ]
Change.Up$Change.UpMove_Angle <- round(Change.Up$Change.UpMove_Angle,1)
Change.Up$Change.UpWhiff.Swing <- round(Change.Up$Change.UpWhiff.Swing,2)
Change.Up$Change.UpDiff <- round(Change.Up$Change.UpDiff,1)
Change.Up$Change.UpwOBA <- round(Change.Up$Change.UpwOBA,3)
#Create Curveball Profile Matches
Curveball <- Pitcher_Comps[,c("ID",
"FG.ID",
"Pitcher",
"Handedness",
"CurveballBU",
"CurveballMove_Angle",
"CurveballWhiff.Swing",
"CurveballwOBA")]
Curveball <- Curveball[complete.cases(Curveball), ]
Curveball$CurveballMove_Angle <- round(Curveball$CurveballMove_Angle,1)
Curveball$CurveballWhiff.Swing <- round(Curveball$CurveballWhiff.Swing,2)
Curveball$CurveballwOBA <- round(Curveball$CurveballwOBA,3)
if(pitch_type == "Two Seam")
{
data = Two_Seam
}
else if (pitch_type == "Cutter")
{
data = Cutter
}
else if (pitch_type == "Slider")
{
data = Slider
}
else if (pitch_type == "Change Up")
{
data = Change.Up
}
else if (pitch_type == "Curveball")
{
data = Curveball
}
else
{
data = "Please Select A Pitch Type"
}
}
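## Usage sketch (hypothetical pitcher name; it must appear in BPC_pitchers.csv):
# ars   <- pitch_arsenal("John Smith")          # per-pitch-type summary
# whiff_chart(ars, "John Smith")                # side-by-side whiff heat maps
# comps <- pitch_comps("John Smith", "Slider")  # comparable pitchers' sliders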
|
/BPCPitchers/data/Charts.R
|
no_license
|
pmammino/BPC
|
R
| false | false | 14,546 |
r
|
library(ggplot2)
library(reshape2)
library(grid)
func1 <- function(x) {x}
func2 <- function(x) {x^2}
func3 <- function(x) {2*x-x^2}
lb1 <- data.frame(x = 1, y = 1, label = as.character(expression(cu)))
ggplot(data.frame(x=c(0,1)), aes(x)) +
theme_bw() +
stat_function(fun=func1, geom="line", aes(colour="f1"), size=1.1) +
geom_text(aes(0.5, 0.58, label="c[u]"), parse=TRUE, size=7.5) +
stat_function(fun=func2, geom="line", aes(colour="f2"), size=1) +
geom_text(aes(0.3, 0.75, label="2*c[u]-c[u]^2"), parse=TRUE, size=7.5) +
stat_function(fun=func3, geom="line", aes(colour="f3"), size=1) +
geom_text(aes(0.7, 0.4, label="c[u]^2"), parse=TRUE, size=7.5) +
xlab(expression(c[u])) +
ylab(expression(p[u](c[u]))) +
theme(
legend.position="none",
legend.text=element_text(size=14),
axis.text=element_text(size=15),
axis.title=element_text(size=18),
plot.title=element_text(size=18)
) +
expand_limits(x = 0, y = 0) +
scale_x_continuous(expand = c(0, 0)) +
scale_y_continuous(expand = c(0, 0))
# scale_colour_manual("Function", value=c("blue","red"), breaks=c("square","exp"))
|
/plot_utils/discount_func.R
|
permissive
|
Braylon1002/CIM
|
R
| false | false | 1,124 |
r
|
library("knitr")
library("rgl")
#knit("emamectin.Rmd")
#markdownToHTML('emamectin.md', 'emamectin.html', options=c("use_xhml"))
#system("pandoc -s emamectin.html -o emamectin.pdf")
knit2html('emamectin.Rmd')
|
/FDA_Pesticide_Glossary/emamectin.R
|
permissive
|
andrewdefries/andrewdefries.github.io
|
R
| false | false | 210 |
r
|
#Read DataSet
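# readDataSet() is not defined in this file; the definition below is an assumed
# sketch based on the standard UCI "household_power_consumption.txt" layout
# (";"-separated, "?" for NA), restricted to 1-2 Feb 2007, with Date and Time
# combined into a single POSIXct Time column used as the x-axis below.
readDataSet <- function(file = "household_power_consumption.txt") {
    dat <- read.table(file, header = TRUE, sep = ";", na.strings = "?",
                      stringsAsFactors = FALSE)
    dat <- dat[dat$Date %in% c("1/2/2007", "2/2/2007"), ]
    dat$Time <- as.POSIXct(strptime(paste(dat$Date, dat$Time),
                                    "%d/%m/%Y %H:%M:%S"))
    dat
}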
hpower <- readDataSet()
# setup png file
png(file="plot3.png", width = 480, height = 480)
# plot master graph
par(mfrow=c(1,1))
plot(hpower$Time, hpower$Sub_metering_1, type="l", col="black",
xlab="", ylab="Energy sub metering")
# overlay second graph
lines(hpower$Time, hpower$Sub_metering_2, col="red")
# overlay 3rd graph
lines(hpower$Time, hpower$Sub_metering_3, col="blue")
# anchor legend at topright corner
legend('topright',lty=c(1,1,1),col=c("black","red","blue"),
cex=.7, adj=c(0,.5), box.lwd=1, text.font = .1, y.intersp=.9,
text.width = strwidth("10,000,000"),
legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
# shutoff png device. back to default display
dev.off()
|
/plot3.R
|
no_license
|
btaha10/ExploratoryDataAnalysis
|
R
| false | false | 736 |
r
|
# Lab 3 Stock Market Data
library(ISLR)
?Smarket
fix(Smarket)
dim(Smarket)
summary(Smarket)
pairs(Smarket)
cor(Smarket)
cor(Smarket[,-9])
attach(Smarket)
plot(Volume)
# Lab 3 Logistic Regression
# fit a generalized linear model (logistic regression) with a logit link; the response distribution is binomial
glm.fit=glm(Direction~Lag1+Lag2+Lag3+Lag4+Lag5+Volume,data=Smarket,family=binomial)
summary(glm.fit)
glm.probs=predict(glm.fit,type="response")
glm.probs[1:10]
contrasts(Direction)
glm.pred=rep("Down",1250)
# For predicted probabilities greater than 0.5, assign Y to be "Up"; otherwise assign Y to be "Down"
glm.pred[glm.probs>.5]="Up"
# Confusion matrix
table(glm.pred,Direction)
(507+145)/1250
mean(glm.pred==Direction)
# Generate training (before year 2005) and testing data sets (on year 2005)
# ID for training data
train=(Year<2005)
# Create testing data set
Smarket.2005=Smarket[!train,]
# Create testing data for Y
Direction.2005=Direction[!train]
# logistic regression on the training data set
glm.fit=glm(Direction~Lag1+Lag2+Lag3+Lag4+Lag5+Volume,data=Smarket,family=binomial,subset=train)
# Predicted probabilities for the testing data set
glm.probs=predict(glm.fit,Smarket.2005,type="response")
# Sample size for the testing data
dim(Smarket.2005)
# For predicted probabilities greater than 0.5, assign Y to be "Up"; otherwise assign Y to be "Down"
glm.pred=rep("Down",252)
glm.pred[glm.probs>.5]="Up"
# Confusion matrix
table(glm.pred,Direction.2005)
# Proportion of correct classifications
mean(glm.pred==Direction.2005)
# Misclassification error rate
# glm.pred is the predicted Y for testing data and Direction.2005 is the true Y for testing data
mean(glm.pred!=Direction.2005)
# Redo the logistic regression using only two predictors, 1st lag and 2nd lag
glm.fit=glm(Direction~Lag1+Lag2,data=Smarket,family=binomial,subset=train)
glm.probs=predict(glm.fit,Smarket.2005,type="response")
glm.pred=rep("Down",252)
glm.pred[glm.probs>.5]="Up"
# Confusion matrix
table(glm.pred,Direction.2005)
# Misclassification error rate
mean(glm.pred!=Direction.2005)
# Lab 3 Linear Discriminant Analysis
library(MASS)
# Perform LDA on the traning data set using only two predictors, 1st lag and 2nd lag
lda.fit=lda(Direction~Lag1+Lag2,data=Smarket,subset=train)
lda.fit
names(predict(lda.fit,Smarket.2005))
predict(lda.fit,Smarket.2005)$posterior
lda.pred=predict(lda.fit,Smarket.2005)$class
lda.pred
# Confusion matrix
table(lda.pred,Direction.2005)
# Misclassification error rate
mean(lda.pred!=Direction.2005)
mean(lda.pred==Direction.2005)
# Lab 3 Quadratic Discriminant Analysis
# Perform QDA on the traning data set using only two predictors, 1st lag and 2nd lag
qda.fit=qda(Direction~Lag1+Lag2,data=Smarket,subset=train)
qda.fit
qda.pred=predict(qda.fit,Smarket.2005)$class
# Confusion matrix
table(qda.pred,Direction.2005)
# Misclassification error rate
mean(qda.pred!=Direction.2005)
# Lab 3 k Nearest Neighbors
# Perform K-nearest neighbours on the traning data set
library(class)
# Create training data for X
train.X=cbind(Lag1,Lag2)[train,]
# Create testing data for X
X.2005=cbind(Lag1,Lag2)[!train,]
# Create training data for Y
train.Direction=Direction[train]
# The larger k is, the less flexible the model becomes.
# To find the optimal k, plot the error against a range of k values; the curve
# is typically U-shaped, and the k at the bottom of the U is the most suitable
# choice (bias-variance tradeoff).
# Set k=1
knn.pred=knn(train.X,X.2005,train.Direction,k=1)
table(knn.pred,Direction.2005)
mean(knn.pred!=Direction.2005)
# Set k=3
knn.pred=knn(train.X,X.2005,train.Direction,k=3)
table(knn.pred,Direction.2005)
mean(knn.pred!=Direction.2005)
len = 499
Grid = 1:len
Err = rep(1,len)
for (i in 1:len)
{
knn.pred=knn(train.X,X.2005,train.Direction,k=i)
table(knn.pred,Direction.2005)
Err[i]=mean(knn.pred!=Direction.2005)
}
plot(Grid,Err,xlab="Number of k",ylab="Misclassification Rate")
|
/Lab3.R
|
no_license
|
vorapras/Machine-Learning
|
R
| false | false | 3,956 |
r
|
# Lab 3 Stock Market Data
library(ISLR)
?Smarket
fix(Smarket)
dim(Smarket)
summary(Smarket)
pairs(Smarket)
cor(Smarket)
cor(Smarket[,-9])
attach(Smarket)
plot(Volume)
# Lab 3 Logistic Regression
# fit a gernalized linear regression using a logit link function, set distribution of the response variable to be binomial
glm.fit=glm(Direction~Lag1+Lag2+Lag3+Lag4+Lag5+Volume,data=Smarket,family=binomial)
summary(glm.fit)
glm.probs=predict(glm.fit,type="response")
glm.probs[1:10]
contrasts(Direction)
glm.pred=rep("Down",1250)
# For predicted probabilities greater than 0.5, assign Y to be "Up"; otherwise assign Y to be "Down"
glm.pred[glm.probs>.5]="Up"
# Confusion matrix
table(glm.pred,Direction)
(507+145)/1250
mean(glm.pred==Direction)
# Generate training (before year 2005) and testing data sets (on year 2005)
# ID for training data
train=(Year<2005)
# Create testing data set
Smarket.2005=Smarket[!train,]
# Create testing data for Y
Direction.2005=Direction[!train]
# logistic regression on the training data set
glm.fit=glm(Direction~Lag1+Lag2+Lag3+Lag4+Lag5+Volume,data=Smarket,family=binomial,subset=train)
# Predicted probabilities for the testing data set
glm.probs=predict(glm.fit,Smarket.2005,type="response")
# Sample size for the testing data
dim(Smarket.2005)
# For predicted probabilities greater than 0.5, assign Y to be "Up"; otherwise assign Y to be "Down"
glm.pred=rep("Down",252)
glm.pred[glm.probs>.5]="Up"
# Confusion matrix
table(glm.pred,Direction.2005)
# Proportation of make correct classification
mean(glm.pred==Direction.2005)
# Misclassfication error rate
# glm.pred is the predicted Y for testing data and Direction.2005 is the true Y for testing data
mean(glm.pred!=Direction.2005)
# Redo the logistic regression using only two predictors, 1st lag and 2nd lag
glm.fit=glm(Direction~Lag1+Lag2,data=Smarket,family=binomial,subset=train)
glm.probs=predict(glm.fit,Smarket.2005,type="response")
glm.pred=rep("Down",252)
glm.pred[glm.probs>.5]="Up"
# Proportation of make correct classification
table(glm.pred,Direction.2005)
# Misclassfication error rate
mean(glm.pred!=Direction.2005)
# Lab 3 Linear Discriminant Analysis
library(MASS)
# Perform LDA on the traning data set using only two predictors, 1st lag and 2nd lag
lda.fit=lda(Direction~Lag1+Lag2,data=Smarket,subset=train)
lda.fit
names(predict(lda.fit,Smarket.2005))
predict(lda.fit,Smarket.2005)$posterior
lda.pred=predict(lda.fit,Smarket.2005)$class
lda.pred
# Confusion matrix
table(lda.pred,Direction.2005)
# Misclassfication error rate
mean(lda.pred!=Direction.2005)
mean(lda.pred==Direction.2005)
# Lab 3 Quadratic Discriminant Analysis}}
# Perform QDA on the traning data set using only two predictors, 1st lag and 2nd lag
qda.fit=qda(Direction~Lag1+Lag2,data=Smarket,subset=train)
qda.fit
qda.pred=predict(qda.fit,Smarket.2005)$class
# Confusion matrix
table(qda.pred,Direction.2005)
# Misclassfication error rate
mean(qda.pred!=Direction.2005)
# Lab 3 k Nearest Neighbors
# Perform K-nearest neighbours on the traning data set
library(class)
# Create training data for X
train.X=cbind(Lag1,Lag2)[train,]
# Create testing data for X
X.2005=cbind(Lag1,Lag2)[!train,]
# Create training data for Y
train.Direction=Direction[train]
# The more number of k, the model become less flexible
# To find the optimal k, plot different k and get U shape (Select the bottom one)
# before deciding which k is the most suitable.(Bias-Variance Tradeoff)
# Set k=1
knn.pred=knn(train.X,X.2005,train.Direction,k=1)
table(knn.pred,Direction.2005)
mean(knn.pred!=Direction.2005)
# Set k=3
knn.pred=knn(train.X,X.2005,train.Direction,k=3)
table(knn.pred,Direction.2005)
mean(knn.pred!=Direction.2005)
len = 499
Grid = 1:len
Err = rep(1,len)
for (i in 1:len)
{
knn.pred=knn(train.X,X.2005,train.Direction,k=i)
table(knn.pred,Direction.2005)
Err[i]=mean(knn.pred!=Direction.2005)
}
plot(Grid,Err,xlab="Number of k",ylab="Misclassification Rate")
|
rm(list = ls())
gc();
gc();
library(data.table)
library(e1071)
'%nin%' = Negate('%in%')
train = fread("D:/data_science/data/house-prices-advanced-regression-techniques/train.csv",sep = ",",
colClasses = "character",na.strings = c(NA,'NULL','null','N/A','',' '))
names(train)
fact_vars = c("MSSubClass", "MSZoning", "Street",
"Alley", "LotShape", "LandContour", "Utilities", "LotConfig",
"LandSlope", "Neighborhood", "Condition1", "Condition2", "BldgType",
"HouseStyle", "OverallQual", "OverallCond",
"RoofStyle", "RoofMatl", "Exterior1st", "Exterior2nd", "MasVnrType",
"ExterQual", "ExterCond", "Foundation", "BsmtQual",
"BsmtCond", "BsmtExposure", "BsmtFinType1", "BsmtFinType2",
"Heating", "HeatingQC", "Fireplaces",
"CentralAir", "Electrical", "BsmtFullBath", "BsmtHalfBath", "FullBath", "HalfBath",
"BedroomAbvGr", "KitchenAbvGr", "KitchenQual",
"Functional", "FireplaceQu", "GarageType",
"GarageFinish","GarageQual", "GarageCond",
"PavedDrive", "PoolQC", "Fence", "MiscFeature",
"SaleType", "SaleCondition","MoSold")
date_field = c( "YearBuilt", "YearRemodAdd","GarageYrBlt", "YrSold")
num_vars = c("LotFrontage","LotArea","MasVnrArea", "BsmtFinSF1","BsmtFinSF2","BsmtUnfSF",
"TotalBsmtSF","1stFlrSF", "2ndFlrSF", "LowQualFinSF",
"GrLivArea","TotRmsAbvGrd","GarageCars","GarageArea",
"WoodDeckSF", "OpenPorchSF", "EnclosedPorch", "3SsnPorch",
"ScreenPorch", "PoolArea","MiscVal")
target = "SalePrice"
train = train[, (num_vars) := lapply(.SD, as.numeric), .SDcols = num_vars]
skew_dt = as.data.frame(as.numeric(sapply(train[,.SD,.SDcols = num_vars], skewness)))
names(skew_dt) = c('skewness')
skew_dt$num_vars = num_vars
skew_dt = as.data.table(skew_dt)
skew_dt = skew_dt[,.(num_vars,skewness)]
skew_dt$skewness[1] = skewness(train$LotFrontage,na.rm = T)
skew_dt$skewness[3] = skewness(train$MasVnrArea,na.rm = T)
mean_dt = as.data.frame(sapply(train[,.SD,.SDcols = num_vars], mean))
names(mean_dt) = c('mean')
mean_dt$num_vars = num_vars
mean_dt = as.data.table(mean_dt)
mean_dt = mean_dt[,.(num_vars,mean)]
mean_dt$mean[1] = mean(train$LotFrontage,na.rm =T)
mean_dt$mean[3] = mean(train$MasVnrArea,na.rm =T)
median_dt = as.data.frame(sapply(train[,.SD,.SDcols = num_vars], median))
names(median_dt) = c('median')
median_dt$num_vars = num_vars
median_dt = as.data.table(median_dt)
median_dt = median_dt[,.(num_vars,median)]
median_dt$median[1] = median(train$LotFrontage,na.rm =T)
median_dt$median[3] = median(train$MasVnrArea,na.rm =T)
sd_dt = as.data.frame(sapply(train[,.SD,.SDcols = num_vars], sd))
names(sd_dt) = c('sd')
sd_dt$num_vars = num_vars
sd_dt = as.data.table(sd_dt)
sd_dt = sd_dt[,.(num_vars,sd)]
sd_dt$sd[1] = sd(train$LotFrontage,na.rm =T)
sd_dt$sd[3] = sd(train$MasVnrArea,na.rm =T)
na_count_dt = as.data.frame(sapply(train[,.SD,.SDcols = c(fact_vars,num_vars,date_field)], function(x) sum(is.na(x))))
names(na_count_dt) = c('na_count')
na_count_dt$vars = c(fact_vars,num_vars,date_field)
na_count_dt = as.data.table(na_count_dt)
na_count_dt = na_count_dt[,.(vars,na_count)]
na_count_dt = na_count_dt[,na_perc := na_count/dim(train)[1]*100]
na_count_dt = na_count_dt[,to_drop := ifelse((na_count/dim(train)[1]*100>=30),1,0)]
class_desc = c(rep('factor',length(fact_vars)),rep('numeric',length(num_vars)),rep('date',length(date_field)))
na_count_dt = na_count_dt[,class_desc:= class_desc]
all_var_one_view = na_count_dt
setkey(all_var_one_view,vars)
setkey(mean_dt,num_vars)
all_var_one_view = merge(all_var_one_view,mean_dt,all.x = T,by.x = "vars",by.y = "num_vars")
setkey(median_dt,num_vars)
all_var_one_view = merge(all_var_one_view,median_dt,all.x = T,by.x = "vars",by.y = "num_vars")
setkey(sd_dt,num_vars)
all_var_one_view = merge(all_var_one_view,sd_dt,all.x = T,by.x = "vars",by.y = "num_vars")
setkey(skew_dt,num_vars)
all_var_one_view = merge(all_var_one_view,skew_dt,all.x = T,by.x = "vars",by.y = "num_vars")
to_drop = na_count_dt[to_drop==1,.(vars)]
col_to_drop = c()
for(i in 1:dim(to_drop)[1])
{
col_to_drop[i] = as.character(to_drop[i])
}
for(i in 1:dim(all_var_one_view)[1])
{
if(all_var_one_view$class_desc[i]=="factor" & (all_var_one_view$na_perc[i]<=30) && (all_var_one_view$na_perc[i]>0))
{
print("cleaning factor variables")
print(all_var_one_view$vars[i])
req_col = as.vector(train[,.SD,.SDcols = all_var_one_view$vars[i]])
ind = which(is.na(req_col))
req_col[ind] = "MISSING"
train = train[,all_var_one_view$vars[i] := req_col]
}
}
for(i in 1:dim(all_var_one_view)[1])
{
if(all_var_one_view$class_desc[i]=="numeric" & (all_var_one_view$na_perc[i]<=30) && (all_var_one_view$na_perc[i]>0))
{
print("cleaning numeric variables")
print(all_var_one_view$vars[i])
req_col1 = as.vector(train[,.SD,.SDcols = all_var_one_view$vars[i]])
ind = which(is.na(req_col1))
median_val = all_var_one_view$median[i]
req_col1[ind] = median_val
train = train[,all_var_one_view$vars[i] := req_col1]
}
}
all_var_one_view = all_var_one_view[,entropy_value := NA]
for(i in 1:dim(all_var_one_view)[1])
{
if(all_var_one_view$class_desc[i] %in% c("factor","date"))
{
print("cleaning factor variables")
print(all_var_one_view$vars[i])
req_col = as.vector(train[,.SD,.SDcols = all_var_one_view$vars[i]])
ent1 = as.vector(prop.table(table(req_col)))
ent2 = (-1)*log(ent1)
ent3 = ent1*ent2
ent4 = sum(ent3)
all_var_one_view$entropy_value[i] = ent4
}
}
train = train[,LotFrontage_c := ifelse(LotFrontage <=59,'<=59', ifelse(LotFrontage<=69,'<=69',ifelse(LotFrontage<=80,'<=80','>80')))]
train = train[,LotArea_c := ifelse(LotArea <=7553.5,'<=7553.5', ifelse(LotArea<=9478.5,'<=9478.5',ifelse(LotArea<=11601.5,'<=11601.5','>11601.5')))]
train = train[,MasVnrArea_c := ifelse(MasVnrArea<=166,'<=166','>166')]
train = train[,BsmtFinSF1_c := ifelse(BsmtFinSF1<=383.5,'<=383.5',ifelse(BsmtFinSF1<=712.25,'<=712.25','>712.25'))]
train = train[,BsmtUnfSF_c := ifelse(BsmtUnfSF <=223,'<=223', ifelse(BsmtUnfSF<=477.5,'<=477.5',ifelse(BsmtUnfSF<=808,'<=808','>808')))]
train = train[,TotalBsmtSF_c := ifelse(TotalBsmtSF <=795.75,'<=795.75', ifelse(TotalBsmtSF<=991.5,'<=991.5',ifelse(TotalBsmtSF<=1298.25,'<=1298.25','>1298.25')))]
train = train[,FirstFlrSF_c := ifelse(`1stFlrSF` <=882,'<=882', ifelse(`1stFlrSF`<=1087,'<=1087',ifelse(`1stFlrSF`<=1391.25,'<=1391.25','>1391.25')))]
train = train[,GrLivArea_c := ifelse(GrLivArea <=1129.5,'<=1129.5', ifelse(GrLivArea<=1464,'<=1464',ifelse(GrLivArea<=1776.75,'<=1776.75','>1776.75')))]
train = train[,TotRmsAbvGrd_c := ifelse(TotRmsAbvGrd <=5,'<=5', ifelse(TotRmsAbvGrd<=6,'<=6',ifelse(TotRmsAbvGrd<=7,'<=7','>7')))]
train = train[,GarageCars_c := ifelse(GarageCars <=1,'<=1', ifelse(GarageCars<=2,'<=2',ifelse(GarageCars<=2,'<=2','>2')))]
train = train[,GarageArea_c := ifelse(GarageArea <=334.5,'<=334.5', ifelse(GarageArea<=480,'<=480',ifelse(GarageArea<=576,'<=576','>576')))]
train = train[,WoodDeckSF_c := ifelse(WoodDeckSF<=168,'<=168','>168')]
train = train[,OpenPorchSF_c := ifelse(OpenPorchSF<=25,'<=25',ifelse(OpenPorchSF<=68,'<=68','>68'))]
for(i in 1:dim(all_var_one_view)[1])
{
if(all_var_one_view$class_desc[i]=="numeric" & all_var_one_view$vars[i] %nin% c('1stFlrSF','2ndFlrSF','3SsnPorch',
'BsmtFinSF2','EnclosedPorch','LowQualFinSF',
'MiscVal','PoolArea','ScreenPorch'))
{
print("cleaning numeric variables")
print(all_var_one_view$vars[i])
req_col_name = paste0(all_var_one_view$vars[i],"_c")
req_col = as.vector(train[,.SD,.SDcols = req_col_name])
ent1 = as.vector(prop.table(table(req_col)))
ent2 = (-1)*log(ent1)
ent3 = ent1*ent2
ent4 = sum(ent3)
all_var_one_view$entropy_value[i] = ent4
}
}
train = train[,house_vintage := as.numeric(YrSold)- as.numeric(YearBuilt)]
train = train[,garage_vintage := as.numeric(YrSold)- as.numeric(GarageYrBlt)]
train = train[,time_since_remodelling := as.numeric(YrSold) - as.numeric(YearRemodAdd)]
train = train[, SalePrice := as.numeric(SalePrice)]
train = train[, SalePrice_log := log(SalePrice)]
mean_target = mean(train$SalePrice)
sd_target = sd(train$SalePrice)
train = train[,SalePrice_norm := (SalePrice-mean_target)/sd_target]
# train$TotalBsmtSF || train$`1stFlrSF`
# train$GrLivArea || train$1`1stFlrSF`
# train$`2ndFlrSF` || train$GrLivArea
# train$
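# A quick check of the redundancy noted above (illustrative only): pairwise
# correlations among the main floor/living-area fields.
# cor(train[, .(TotalBsmtSF, `1stFlrSF`, `2ndFlrSF`, GrLivArea)])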
for(i in 1:dim(all_var_one_view)[1])
{
if(all_var_one_view$class_desc[i]=="numeric")
{
print("analysing the outliers")
print(all_var_one_view$vars[i])
req_col1 = as.vector(train[,.SD,.SDcols = all_var_one_view$vars[i]])
ind = which(((req_col1- all_var_one_view$mean[i])/all_var_one_view$sd[i])>=3.5 | ((req_col1- all_var_one_view$mean[i])/all_var_one_view$sd[i])<= -3.5)
median_val = all_var_one_view$median[i]
req_col1[ind] = median_val
train = train[,all_var_one_view$vars[i] := req_col1]
}
}
cols_to_keep = names(train)[names(train) %nin% col_to_drop]
train = train[,.SD,.SDcols = cols_to_keep]
|
/data_cleaning.R
|
no_license
|
zshnhaque/Data_Science_R
|
R
| false | false | 9,561 |
r
|
rm(list = ls())
gc();
gc();
library(data.table)
library(e1071)
'%nin%' = Negate('%in%')
train = fread("D:/data_science/data/house-prices-advanced-regression-techniques/train.csv",sep = ",",
colClasses = "character",na.strings = c(NA,'NULL','null','N/A','',' '))
names(train)
fact_vars = c("MSSubClass", "MSZoning", "Street",
"Alley", "LotShape", "LandContour", "Utilities", "LotConfig",
"LandSlope", "Neighborhood", "Condition1", "Condition2", "BldgType",
"HouseStyle", "OverallQual", "OverallCond",
"RoofStyle", "RoofMatl", "Exterior1st", "Exterior2nd", "MasVnrType",
"ExterQual", "ExterCond", "Foundation", "BsmtQual",
"BsmtCond", "BsmtExposure", "BsmtFinType1", "BsmtFinType2",
"Heating", "HeatingQC", "Fireplaces",
"CentralAir", "Electrical", "BsmtFullBath", "BsmtHalfBath", "FullBath", "HalfBath",
"BedroomAbvGr", "KitchenAbvGr", "KitchenQual",
"Functional", "FireplaceQu", "GarageType",
"GarageFinish","GarageQual", "GarageCond",
"PavedDrive", "PoolQC", "Fence", "MiscFeature",
"SaleType", "SaleCondition","MoSold")
date_field = c( "YearBuilt", "YearRemodAdd","GarageYrBlt", "YrSold")
num_vars = c("LotFrontage","LotArea","MasVnrArea", "BsmtFinSF1","BsmtFinSF2","BsmtUnfSF",
"TotalBsmtSF","1stFlrSF", "2ndFlrSF", "LowQualFinSF",
"GrLivArea","TotRmsAbvGrd","GarageCars","GarageArea",
"WoodDeckSF", "OpenPorchSF", "EnclosedPorch", "3SsnPorch",
"ScreenPorch", "PoolArea","MiscVal")
target = "SalePrice"
train = train[, (num_vars) := lapply(.SD, as.numeric), .SDcols = num_vars]
skew_dt = as.data.frame(as.numeric(sapply(train[,.SD,.SDcols = num_vars], skewness)))
names(skew_dt) = c('skewness')
skew_dt$num_vars = num_vars
skew_dt = as.data.table(skew_dt)
skew_dt = skew_dt[,.(num_vars,skewness)]
skew_dt$skewness[1] = skewness(train$LotFrontage,na.rm = T)
skew_dt$skewness[3] = skewness(train$MasVnrArea,na.rm = T)
mean_dt = as.data.frame(sapply(train[,.SD,.SDcols = num_vars], mean))
names(mean_dt) = c('mean')
mean_dt$num_vars = num_vars
mean_dt = as.data.table(mean_dt)
mean_dt = mean_dt[,.(num_vars,mean)]
mean_dt$mean[1] = mean(train$LotFrontage,na.rm =T)
mean_dt$mean[3] = mean(train$MasVnrArea,na.rm =T)
median_dt = as.data.frame(sapply(train[,.SD,.SDcols = num_vars], median))
names(median_dt) = c('median')
median_dt$num_vars = num_vars
median_dt = as.data.table(median_dt)
median_dt = median_dt[,.(num_vars,median)]
median_dt$median[1] = median(train$LotFrontage,na.rm =T)
median_dt$median[3] = median(train$MasVnrArea,na.rm =T)
sd_dt = as.data.frame(sapply(train[,.SD,.SDcols = num_vars], sd))
names(sd_dt) = c('sd')
sd_dt$num_vars = num_vars
sd_dt = as.data.table(sd_dt)
sd_dt = sd_dt[,.(num_vars,sd)]
sd_dt$sd[1] = sd(train$LotFrontage,na.rm =T)
sd_dt$sd[3] = sd(train$MasVnrArea,na.rm =T)
na_count_dt = as.data.frame(sapply(train[,.SD,.SDcols = c(fact_vars,num_vars,date_field)], function(x) sum(is.na(x))))
names(na_count_dt) = c('na_count')
na_count_dt$vars = c(fact_vars,num_vars,date_field)
na_count_dt = as.data.table(na_count_dt)
na_count_dt = na_count_dt[,.(vars,na_count)]
na_count_dt = na_count_dt[,na_perc := na_count/dim(train)[1]*100]
na_count_dt = na_count_dt[,to_drop := ifelse((na_count/dim(train)[1]*100>=30),1,0)]
class_desc = c(rep('factor',length(fact_vars)),rep('numeric',length(num_vars)),rep('date',length(date_field)))
na_count_dt = na_count_dt[,class_desc:= class_desc]
all_var_one_view = na_count_dt
setkey(all_var_one_view,vars)
setkey(mean_dt,num_vars)
all_var_one_view = merge(all_var_one_view,mean_dt,all.x = T,by.x = "vars",by.y = "num_vars")
setkey(median_dt,num_vars)
all_var_one_view = merge(all_var_one_view,median_dt,all.x = T,by.x = "vars",by.y = "num_vars")
setkey(sd_dt,num_vars)
all_var_one_view = merge(all_var_one_view,sd_dt,all.x = T,by.x = "vars",by.y = "num_vars")
setkey(skew_dt,num_vars)
all_var_one_view = merge(all_var_one_view,skew_dt,all.x = T,by.x = "vars",by.y = "num_vars")
to_drop = na_count_dt[to_drop==1,.(vars)]
col_to_drop = c()
for(i in 1:dim(to_drop)[1])
{
col_to_drop[i] = as.character(to_drop[i])
}
for(i in 1:dim(all_var_one_view)[1])
{
if(all_var_one_view$class_desc[i]=="factor" & (all_var_one_view$na_perc[i]<=30) && (all_var_one_view$na_perc[i]>0))
{
print("cleaning factor variables")
print(all_var_one_view$vars[i])
    req_col = train[[all_var_one_view$vars[i]]]  # extract the column as a plain vector (not a one-column data.table)
ind = which(is.na(req_col))
req_col[ind] = "MISSING"
train = train[,all_var_one_view$vars[i] := req_col]
}
}
for(i in 1:dim(all_var_one_view)[1])
{
if(all_var_one_view$class_desc[i]=="numeric" & (all_var_one_view$na_perc[i]<=30) && (all_var_one_view$na_perc[i]>0))
{
print("cleaning numeric variables")
print(all_var_one_view$vars[i])
    req_col1 = train[[all_var_one_view$vars[i]]]  # extract the column as a plain vector
ind = which(is.na(req_col1))
median_val = all_var_one_view$median[i]
req_col1[ind] = median_val
train = train[,all_var_one_view$vars[i] := req_col1]
}
}
all_var_one_view = all_var_one_view[,entropy_value := NA]
for(i in 1:dim(all_var_one_view)[1])
{
if(all_var_one_view$class_desc[i] %in% c("factor","date"))
{
print("cleaning factor variables")
print(all_var_one_view$vars[i])
req_col = as.vector(train[,.SD,.SDcols = all_var_one_view$vars[i]])
ent1 = as.vector(prop.table(table(req_col)))
ent2 = (-1)*log(ent1)
ent3 = ent1*ent2
ent4 = sum(ent3)
all_var_one_view$entropy_value[i] = ent4
}
}
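## Illustrative sketch of the Shannon entropy computed in the loop above, on a
## hypothetical toy vector (natural log, matching the code). Wrapped in `if (FALSE)`
## so the script's behaviour is unchanged when run end-to-end.
if (FALSE)
{
  x = c("a", "a", "b", "c")
  p = as.vector(prop.table(table(x)))   # 0.50 0.25 0.25
  sum(p * (-1) * log(p))                # ~1.04
}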
train = train[,LotFrontage_c := ifelse(LotFrontage <=59,'<=59', ifelse(LotFrontage<=69,'<=69',ifelse(LotFrontage<=80,'<=80','>80')))]
train = train[,LotArea_c := ifelse(LotArea <=7553.5,'<=7553.5', ifelse(LotArea<=9478.5,'<=9478.5',ifelse(LotArea<=11601.5,'<=11601.5','>11601.5')))]
train = train[,MasVnrArea_c := ifelse(MasVnrArea<=166,'<=166','>166')]
train = train[,BsmtFinSF1_c := ifelse(BsmtFinSF1<=383.5,'<=383.5',ifelse(BsmtFinSF1<=712.25,'<=712.25','>712.25'))]
train = train[,BsmtUnfSF_c := ifelse(BsmtUnfSF <=223,'<=223', ifelse(BsmtUnfSF<=477.5,'<=477.5',ifelse(BsmtUnfSF<=808,'<=808','>808')))]
train = train[,TotalBsmtSF_c := ifelse(TotalBsmtSF <=795.75,'<=795.75', ifelse(TotalBsmtSF<=991.5,'<=991.5',ifelse(TotalBsmtSF<=1298.25,'<=1298.25','>1298.25')))]
train = train[,FirstFlrSF_c := ifelse(`1stFlrSF` <=882,'<=882', ifelse(`1stFlrSF`<=1087,'<=1087',ifelse(`1stFlrSF`<=1391.25,'<=1391.25','>1391.25')))]
train = train[,GrLivArea_c := ifelse(GrLivArea <=1129.5,'<=1129.5', ifelse(GrLivArea<=1464,'<=1464',ifelse(GrLivArea<=1776.75,'<=1776.75','>1776.75')))]
train = train[,TotRmsAbvGrd_c := ifelse(TotRmsAbvGrd <=5,'<=5', ifelse(TotRmsAbvGrd<=6,'<=6',ifelse(TotRmsAbvGrd<=7,'<=7','>7')))]
train = train[,GarageCars_c := ifelse(GarageCars <=1,'<=1', ifelse(GarageCars<=2,'<=2','>2'))]
train = train[,GarageArea_c := ifelse(GarageArea <=334.5,'<=334.5', ifelse(GarageArea<=480,'<=480',ifelse(GarageArea<=576,'<=576','>576')))]
train = train[,WoodDeckSF_c := ifelse(WoodDeckSF<=168,'<=168','>168')]
train = train[,OpenPorchSF_c := ifelse(OpenPorchSF<=25,'<=25',ifelse(OpenPorchSF<=68,'<=68','>68'))]
for(i in 1:dim(all_var_one_view)[1])
{
if(all_var_one_view$class_desc[i]=="numeric" & all_var_one_view$vars[i] %nin% c('1stFlrSF','2ndFlrSF','3SsnPorch',
'BsmtFinSF2','EnclosedPorch','LowQualFinSF',
'MiscVal','PoolArea','ScreenPorch'))
{
print("cleaning numeric variables")
print(all_var_one_view$vars[i])
req_col_name = paste0(all_var_one_view$vars[i],"_c")
req_col = as.vector(train[,.SD,.SDcols = req_col_name])
ent1 = as.vector(prop.table(table(req_col)))
ent2 = (-1)*log(ent1)
ent3 = ent1*ent2
ent4 = sum(ent3)
all_var_one_view$entropy_value[i] = ent4
}
}
train = train[,house_vintage := as.numeric(YrSold)- as.numeric(YearBuilt)]
train = train[,garage_vintage := as.numeric(YrSold)- as.numeric(GarageYrBlt)]
train = train[,time_since_remodelling := as.numeric(YrSold) - as.numeric(YearRemodAdd)]
train = train[, SalePrice := as.numeric(SalePrice)]
train = train[, SalePrice_log := log(SalePrice)]
mean_target = mean(train$SalePrice)
sd_target = sd(train$SalePrice)
train = train[,SalePrice_norm := (SalePrice-mean_target)/sd_target]
# train$TotalBsmtSF || train$`1stFlrSF`
# train$GrLivArea || train$1`1stFlrSF`
# train$`2ndFlrSF` || train$GrLivArea
# train$
for(i in 1:dim(all_var_one_view)[1])
{
if(all_var_one_view$class_desc[i]=="numeric")
{
print("analysing the outliers")
print(all_var_one_view$vars[i])
    req_col1 = train[[all_var_one_view$vars[i]]]  # extract the column as a plain vector
ind = which(((req_col1- all_var_one_view$mean[i])/all_var_one_view$sd[i])>=3.5 | ((req_col1- all_var_one_view$mean[i])/all_var_one_view$sd[i])<= -3.5)
median_val = all_var_one_view$median[i]
req_col1[ind] = median_val
train = train[,all_var_one_view$vars[i] := req_col1]
}
}
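## Illustrative sketch of the outlier rule applied above, on a hypothetical vector:
## values further than 3.5 standard deviations from the mean are replaced by the
## median. Wrapped in `if (FALSE)` so the script's behaviour is unchanged.
if (FALSE)
{
  v = c(rep(10, 50), 500)              # 500 is a clear outlier
  z = (v - mean(v)) / sd(v)            # z for 500 is ~7, well above 3.5
  v[z >= 3.5 | z <= -3.5] = median(v)  # replaced by the median (10)
}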
cols_to_keep = names(train)[names(train) %nin% col_to_drop]
train = train[,.SD,.SDcols = cols_to_keep]
|
library(wiqid)
### Name: occ2sps
### Title: Single-season two-species occupancy estimation
### Aliases: occ2sps
### ** Examples
data(railSims)
# Extract the two detection histories
DHA <- railSims[, 1:3]
DHB <- railSims[, 4:6]
# Default model (no interaction)
occ2sps(DHA, DHB)
## No test:
# Add a submodel for psiBA, so that psiBA and psiBa are separated:
occ2sps(DHA, DHB, model = psiBA ~ 1)
# Add covariates for psiA and psiBA; only display beta coefficients:
occ2sps(DHA, DHB, model = list(psiA ~ logArea, psiBA ~ reeds), data=railSims)$beta
# Model corresponding to the data generation model
occ2sps(DHA, DHB, list(psiA ~ logArea, psiBA ~ reeds, rBA ~ 1), data=railSims)$beta
## End(No test)
|
/data/genthat_extracted_code/wiqid/examples/occ2sps.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 712 |
r
|
\name{p.eotaxin}
\alias{p.eotaxin}
\docType{data}
\title{
5PL parameters from an Eotaxin dataset.
}
\description{
5PL parameters from an Eotaxin dataset.
}
\format{
A matrix with 6 rows and 12 columns: c, d, loge, logmb, logf, b, e, f, g, h, logh, and logtao. Each row represents one curve.
}
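\examples{
## Minimal sketch (assumes the nCal package is installed, so the dataset can be
## loaded with data()):
data(p.eotaxin)
dim(p.eotaxin)
}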
|
/man/p.eotaxin.Rd
|
no_license
|
cran/nCal
|
R
| false | false | 311 |
rd
|
server = function(input, output, session) {
output$example1.1 <- renderUI({ nestRmd('background.Rmd') })
output$lzbearing <- DT::renderDataTable({ DT::datatable(SMRD::lzbearing,
options = list(pageLength = 10)) })
output$plotfig1 <- renderPlot({
par(oma = c(0,0,0,0), mar = c(4,4,2,2))
input$evalfig1
return(isolate(eval(parse(text=input$fig1plot))))
})
output$plotfig2 <- renderPlot({
par(oma = c(0,0,0,0), mar = c(4,4,2,2))
input$evalfig2
return(isolate(eval(parse(text=input$fig2plot))))
})
}
|
/SMRD_3.4.4/apps/example1_1/server.R
|
no_license
|
erhard1/stat687
|
R
| false | false | 595 |
r
|
## Generates a BINpack (bins and pre-computed GC tracks) from a capture bed
BINpack.Maker <- function(bed.file = NULL, bin.size = 50, genome.pkg = "BSgenome.Hsapiens.UCSC.hg19", extend.multi = c(0, 50, 100, 200, 400, 800, 1600, 3200, 6400), blocksize = 1E+04, nthread = 1, out.dir = getwd(), return.data = FALSE) {
# setwd("/mnt/data_cigogne/job/PUBLI_EaCoN/TCGA/RESOURCES/test/")
# bed.file <- "SureSelect_ClinicalResearchExome.padded_GRCh37-lite_merged_sorted.bed"
# # bed.file = "test1.bed"
# bin.size = 50
# genome.pkg = "BSgenome.Hsapiens.1000genomes.hs37d5"
# extend.multi = c(0, 100, 500, 1000, 2000, 4000)
# blocksize = 1E+04
# nthread = 5
# out.dir = getwd()
# return.data = FALSE
# source("~/git_gustaveroussy/EaCoN/R/BED_functions.R")
# source("~/git_gustaveroussy/EaCoN/R/wes_process.R")
# source("~/git_gustaveroussy/EaCoN/R/mini_functions.R")
## Checks
if (is.null(bed.file)) stop("A BED file is required !", call. = FALSE)
if (!file.exists(bed.file)) stop("Could not find the BED file !", call. = FALSE)
if (is.null(out.dir)) message("NOTE : Checked / cleaned bed will be written in the same directory as source.") else {
if (!file.exists(out.dir)) stop("Could not find the output directory !", call. = FALSE)
if (!file.info(out.dir)$isdir) stop("out.dir is not a directory !", call. = FALSE)
}
if (is.null(genome.pkg)) stop(tmsg("A BSgenome package name is required !"), call. = FALSE)
if (!genome.pkg %in% BSgenome::installed.genomes()) {
if (genome.pkg %in% BSgenome::available.genomes()) {
stop(tmsg(paste0("BSgenome ", genome.pkg, " available but not installed. Please install it !")), call. = FALSE)
} else {
stop(tmsg(paste0("BSgenome ", genome.pkg, " not available in valid BSgenomes and not installed ... Please check your genome name or install your custom BSgenome !")), call. = FALSE)
}
}
bin.size <- as.integer(bin.size)
extend.multi <- as.integer(extend.multi)
### Loading genome
message(paste0("Loading ", genome.pkg, " ..."))
suppressPackageStartupMessages(require(genome.pkg, character.only = TRUE))
BSg.obj <- getExportedValue(genome.pkg, genome.pkg)
genome <- BSgenome::providerVersion(BSg.obj)
organism <- BSgenome::organism(BSg.obj)
## Cleaning BED
bed.clean <- BedCheck(bed.file = bed.file, genome.pkg = genome.pkg, out.dir = out.dir, return.data = TRUE)
## Binning
message(paste0("Performing binning (", bin.size, ") ..."))
bed.binned <- bedBinner(bed = bed.clean, bin.size = bin.size, nthread = nthread)
bed.binned <- data.frame(ProbeSetName = seq_len(nrow(bed.binned)), bed.binned, stringsAsFactors = TRUE)
message("Generating GC% tracks ...")
wes.gc <- loc.nt.gcc.hs.multi(loc.df = bed.binned, genome.pkg = genome.pkg, extend.multi = extend.multi, blocksize = blocksize, nthread = nthread)
rm(bed.binned)
wes.meta.key <- c("genome-species", "genome-version", "genome-package", "array_type", "track_type", "bin_size")
wes.meta.value <- c(organism, genome, genome.pkg, "WES", "GC", bin.size)
# renorm.data <- list(tracks = wes.gc, info = list("genome-version" = genome, "genome-package" = genome.pkg, bin.size = bin.size, track.type = "GC"), bed.clean = bed.clean, bed.binned = bed.binned)
renorm.data <- list(tracks = wes.gc, info = data.frame(key = wes.meta.key, value = wes.meta.value, stringsAsFactors = FALSE), bed.clean = bed.clean)
rm(wes.gc, wes.meta.value, wes.meta.key, bed.clean)
save("renorm.data", file = paste0(out.dir, "/", sub(pattern = "\\.bed$", replacement = paste0("_", genome, "_b", bin.size, ".GC.rda"), x = basename(bed.file), ignore.case = TRUE)), compress = "xz")
message("Done.")
if (return.data) return(renorm.data)
}
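## Hypothetical usage sketch (the BED path, genome package and thread count below are
## placeholders, not files shipped with this code). Wrapped in `if (FALSE)` so that
## sourcing this file does not trigger the call.
if (FALSE) {
  BINpack.Maker(bed.file = "capture_targets.bed",
                bin.size = 50,
                genome.pkg = "BSgenome.Hsapiens.UCSC.hg19",
                nthread = 4,
                out.dir = "BINpacks")
}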
WES.Bin <- function(testBAM = NULL, refBAM = NULL, BINpack = NULL, samplename = "SAMPLE", Q = 20, nsubthread = 1, cluster.type = "PSOCK", out.dir = getwd(), return.data = FALSE, write.data = TRUE, plot = TRUE, force = FALSE) {
# setwd("/home/job/WORKSPACE/EaCoN_tests/WES/AlexandreLefranc/Results")
# # setwd("/home/job/WORKSPACE/EaCoN_tests")
# testBAM <- "/home/job/WORKSPACE/EaCoN_tests/WES/AlexandreLefranc/DATA/Sample_PHEO_AG_HS_048_DNA.reord.sorted.dedup.recal.reheaded.bam"
# refBAM <- "/home/job/WORKSPACE/EaCoN_tests/WES/AlexandreLefranc/DATA/Sample_PHEO_AG_HS_048G_DNA.reord.sorted.dedup.recal.reheaded.bam"
# # # # BINpack <- "/mnt/data_cigogne/job/PUBLI_EaCoN/MATCHR/RESOURCES/SureSelect_ClinicalResearchExome.padded_hg19_b50.rda"
# # # BINpack <- "/mnt/data_cigogne/job/PUBLI_EaCoN/TCGA/RESOURCES/SureSelect_ClinicalResearchExome.padded_hs37d5_b50.GC.rda"
# BINpack <- "/home/job/WORKSPACE/EaCoN_tests/WES/AlexandreLefranc/Results/V4-UTRs.hg38.fragment_targets_minimal_sorted_longChr_hg38_b50.GC.rda"
# samplename <- "ALEX1"
# Q <- 20
# out.dir = getwd()
# nsubthread = 4
# cluster.type = "PSOCK"
# return.data = FALSE
# write.data = TRUE
# plot = TRUE
# source("/home/job/git_gustaveroussy/EaCoN/R/mini_functions.R")
# source("/home/job/git_gustaveroussy/EaCoN/R/BED_functions.R")
# source("/home/job/git_gustaveroussy/EaCoN/R/wes_process.R")
# suppressPackageStartupMessages(require(foreach))
# require(magrittr)
## CHECKS (files/parameters)
if (is.null(BINpack)) stop(tmsg("A BINpack file is required !"), call. = FALSE)
if (!file.exists(BINpack)) stop(tmsg("Could not find the BINpack file !"), call. = FALSE)
if (is.null(refBAM)) stop(tmsg("A reference BAM file is required !"), call. = FALSE)
if (!file.exists(refBAM)) stop(tmsg("Could not find the refBAM file !"), call. = FALSE)
if (is.null(testBAM)) stop(tmsg("A test BAM file is required !"), call. = FALSE)
if (!file.exists(testBAM)) stop(tmsg("Could not find the testBAM file !"), call. = FALSE)
if (!is.numeric(Q)) stop(tmsg("Q must be numeric !"), call. = FALSE)
if (is.null(out.dir)) stop(tmsg("An output directory is required !"), call. = FALSE)
if (!file.exists(out.dir)) stop(tmsg("Could not find the output directory !"), call. = FALSE)
if (!file.info(out.dir)$isdir) stop(tmsg("out.dir is not a directory"), call. = FALSE)
if (!return.data & !write.data) stop(tmsg("Data should be returned and/or written on disk, but not none !"), call. = FALSE)
if (Q < 0) stop(tmsg("Q should be positive !"), call. = FALSE)
## WARNINGS
if (return.data) tmsg("Data will be returned.")
if (write.data) tmsg("Data will be written on disk.")
if (!plot) tmsg("No plot will get drawn.")
## Loading binpack
tmsg("Loading BINpack ...")
load(BINpack)
## CHECKS (genome)
# genome.pkg <- GC.data$info$genome.pkg
genome.pkg <- renorm.data$info$value[renorm.data$info$key == "genome-package"]
if (!genome.pkg %in% BSgenome::installed.genomes()) {
if (genome.pkg %in% BSgenome::available.genomes()) {
stop(tmsg(paste0("BSgenome ", genome.pkg, " available but not installed. Please install it !")), call. = FALSE)
} else {
stop(tmsg(paste0("BSgenome ", genome.pkg, " not available in valid BSgenomes and not installed ... Please check your genome name or install your custom BSgenome !")), call. = FALSE)
}
}
if (dir.exists(samplename)) { if (!force) stop(tmsg(paste0("A [", samplename, '] dir already exists !')), call. = FALSE) else unlink(samplename, recursive = TRUE, force = FALSE) }
### Loading genome
tmsg(paste0("Loading ", genome.pkg, " ..."))
suppressPackageStartupMessages(require(genome.pkg, character.only = TRUE))
BSg.obj <- getExportedValue(genome.pkg, genome.pkg)
genome <- BSgenome::providerVersion(BSg.obj)
## Files controls
tmsg("Checking BINpack and BAMs compatibility ...")
## Inspecting BAM headers
refBAM.h <- Rsamtools::scanBamHeader(refBAM)
testBAM.h <- Rsamtools::scanBamHeader(testBAM)
bed.data <- renorm.data$tracks[,1:4]
renorm.data$tracks <- NULL
gc()
if (!all(names(refBAM.h[[1]]$targets) %in% names(testBAM.h[[1]]$targets))) stop(tmsg("Reference BAM and Test BAM are not compatible (different chr names) !"), call. = FALSE)
if (!all(bed.data$chr %in% names(refBAM.h[[1]]$targets))) stop(tmsg("Reference BAM and BED are not compatible (different chr names) !"), call. = FALSE)
if (!all(unique(bed.data$chr) %in% names(testBAM.h[[1]]$targets))) stop(tmsg("Test BAM and BED are not compatible (different chr names) !"), call. = FALSE)
if (!all(unique(bed.data$chr) %in% BSgenome::seqnames(BSg.obj))) stop(tmsg("BED and BSgenome are not compatible (different chr names) !"), call. = FALSE)
## Identifying the platform
testBAM.h.unl <- unlist(testBAM.h)
manuf.hentry <- grep(pattern = "^PL:", testBAM.h.unl)[1]
manufacturer <- if(!is.na(manuf.hentry)) sub(pattern = "^PL:", replacement = "", testBAM.h.unl[[manuf.hentry]]) else "NA"
meta.b <- list(
samplename = samplename,
source = "WES",
source.file = list(refBAM = refBAM, testBAM = testBAM, BINpack = BINpack),
type = "WES",
manufacturer = manufacturer,
species = GenomeInfoDb::organism(BSg.obj),
genome = genome,
genome.pkg = genome.pkg,
predicted.gender = "NA"
)
meta.w <- list(
testBAM.header = paste0(testBAM.h, collapse = " "),
refBAM.header = paste0(refBAM.h, collapse = " "),
samtools.Q = Q,
# bin.size = GC.data$info$bin.size
bin.size = as.numeric(renorm.data$info$value[renorm.data$info$key == "bin_size"])
)
rm(testBAM.h, refBAM.h)
### Common BAM flags
param.FLAG <- Rsamtools::scanBamFlag(isSecondaryAlignment=FALSE, isNotPassingQualityControls=FALSE, isDuplicate=FALSE)
param.PILEUP <- Rsamtools::PileupParam(distinguish_strands = FALSE, max_depth = 5E+04, min_base_quality = Q, min_nucleotide_depth = 0, distinguish_nucleotides = TRUE)
bedbam2pup <- function(BamFile = NULL, bed.data = NULL, scanBamFlag = NULL, pileupParam = NULL) {
### Making pileup
tmsg(" Getting pileup ...")
param.BAM <- Rsamtools::ScanBamParam(which = GenomicRanges::makeGRangesFromDataFrame(bed.data, seqnames.field = "chr"), flag = scanBamFlag)
pres <- dplyr::as.tbl(Rsamtools::pileup(BamFile, scanBamParam = param.BAM, pileupParam = pileupParam))
colnames(pres)[c(1,5)] <- c("chr", "bin")
levels(pres$bin) <- bed.data$ProbeSetName
pres$bin <- as.integer(as.character(pres$bin))
# levels(pres$bin) <- rownames(bed.data)
gc()
if("=" %in% unique(pres$nucleotide)) tmsg(" Bam contains the '=' sign !")
### Building genomic sequence of reference block
tmsg(" Getting reference genome sequence ...")
refblock <- pres[!duplicated(pres$pos), c(1,2)]
pres <- dplyr::group_by(pres, pos)
refblock$tot_count <- dplyr::summarize(pres, tot_count = sum(count))$tot_count
pres <- dplyr::ungroup(pres)
refblock$nucleotide <- as.factor(BSgenome::getSeq(BSg.obj, names = GenomicRanges::makeGRangesFromDataFrame(refblock, start.field = "pos", end.field = "pos"), as.character = TRUE))
### Merging blocks
tmsg(" Computing alt counts ...")
# refblock <- dplyr::group_by(refblock, pos, nucleotide)
# pres <- dplyr::group_by(pres, pos, nucleotide)
merged <- suppressWarnings(dplyr::left_join(refblock, pres, by = c("chr", "pos", "nucleotide")))
rm(pres, refblock)
gc()
bed.based <- dplyr::as.tbl(data.frame(chr = unique(bed.data$chr), pos = as.integer(unlist(seq.int2(from = bed.data$start, to = bed.data$end, by = 1))), bin = rep(bed.data$ProbeSetName, times = (bed.data$end - bed.data$start +1))))
bed.joint <- suppressWarnings(dplyr::left_join(bed.based, merged, c("chr", "pos", "bin"))) ### yeah !
rm(bed.based)
gc()
### Cleaning and formatting
bed.joint$nucleotide <- NULL
bed.joint$chr <- as.factor(bed.joint$chr)
# bed.joint$which_label <- as.factor(bed.joint$which_label)
bed.joint$count <- bed.joint$tot_count - bed.joint$count
colnames(bed.joint) <- c("chr", "pos", "bin", "tot_count", "alt_count")
bed.joint$tot_count[is.na(bed.joint$tot_count)] <- 0L
bed.joint$alt_count[is.na(bed.joint$alt_count)] <- 0L
# bed.joint$nt[is.na(bed.joint$nt)] <- bed.based$nt[is.na(bed.joint$nt)]
return(bed.joint)
}
pileup.go <- function(testBAM = NULL, refBAM = NULL, bed.data = NULL, scanBamFlag = NULL, pileupParam = NULL, nsubthread = 1, cluster.type = "PSOCK") {
# BamFile <- testBAM
# scanBamFlag <- param.FLAG
# pileupParam <- param.PILEUP
# nsubthread <- 1
# cluster.type = "PSOCK"
#### Indexing BAM if needed
if (!(file.exists(paste0(testBAM, ".bai")) || file.exists(sub(pattern = "\\.bam", replacement = ".bai", x = testBAM, ignore.case = TRUE)))) {
tmsg("Indexing Test BAM ...")
Rsamtools::indexBam(testBAM)
} else tmsg("Test BAM is already indexed.")
if (!(file.exists(paste0(refBAM, ".bai")) || file.exists(sub(pattern = "\\.bam", replacement = ".bai", x = refBAM, ignore.case = TRUE)))) {
tmsg("Indexing Ref BAM ...")
Rsamtools::indexBam(refBAM)
} else tmsg("Ref BAM is already indexed.")
#### Launching cluster
if (length(unique(bed.data$chr)) < nsubthread) nsubthread <- length(unique(bed.data$chr))
cl <- parallel::makeCluster(spec = nsubthread, type = cluster.type, outfile = "")
doParallel::registerDoParallel(cl)
k <- 0
BAMcounts <- foreach::foreach(k = unique(as.character(bed.data$chr)), .inorder = TRUE, .export = c("tmsg", "BSg.obj", "bedbam2pup", "seq.int2")) %dopar% {
tmsg(paste0(" Sequence : ", k))
### Computing counts for both BAMs
bed.data.k <- bed.data[bed.data$chr == k,]
## Opening BAM connections
openBAM <- Rsamtools::BamFileList(c(testBAM, refBAM))
tmsg(" Computing TEST counts ...")
testPUP <- bedbam2pup(BamFile = openBAM[[1]], bed.data = bed.data.k, scanBamFlag = scanBamFlag, pileupParam = pileupParam)
tmsg(" Computing REF counts ...")
refPUP <- bedbam2pup(BamFile = openBAM[[2]], bed.data = bed.data.k, scanBamFlag = scanBamFlag, pileupParam = pileupParam)
### Merge counts
tmsg(" Merging TEST and REF ...")
mPUP <- dplyr::inner_join(testPUP, refPUP, c("chr", "pos", "bin"))
rm(testPUP, refPUP)
gc()
mPUP <- dplyr::group_by(mPUP, bin)
### Adding missing nucleotides
# mPUP$nt <- as.factor(unlist(strsplit(as.character(BSgenome::getSeq(BSg.obj, names = GenomicRanges::makeGRangesFromDataFrame(mPUP, seqnames.field = "chr", start.field = "pos", end.field = "pos"))), split = "")))
### Binning
tmsg(" Binning ...")
# CN.table <- dplyr::summarize(mPUP, chr = unique(chr), start = as.integer(min(pos)), end = as.integer(max(pos)), tot_count.test = as.integer(round(mean(tot_count.x))), tot_count.ref = as.integer(round(mean(tot_count.y))), GCPC = as.integer(round(seqinr::GC(as.character(nt))*100)))
CN.table <- dplyr::summarize(mPUP, chr = unique(chr), start = as.integer(min(pos)), end = as.integer(max(pos)), tot_count.test = as.integer(round(mean(tot_count.x))), tot_count.ref = as.integer(round(mean(tot_count.y))))
mPUP <- dplyr::ungroup(mPUP)
SNP.table <- mPUP[mPUP$alt_count.x > 0 | mPUP$alt_count.y > 0,]
### Cleaning
rm(mPUP)
gc()
CN.table <- dplyr::arrange(CN.table, chr, start, end)
# CN.table <- dplyr::select(CN.table, chr, start, end, bin, tot_count.test, tot_count.ref, GCPC)
CN.table <- dplyr::select(CN.table, chr, start, end, bin, tot_count.test, tot_count.ref)
colnames(SNP.table) <- c("chr", "pos", "bin", "tot_count.test", "alt_count.test", "tot_count.ref", "alt_count.ref")
gc()
return(list(CN = CN.table, SNP = SNP.table))
}
stopCluster(cl)
return(BAMcounts)
}
COUNTS.all <- pileup.go(testBAM = testBAM, refBAM = refBAM, bed.data = bed.data, scanBamFlag = param.FLAG, pileupParam = param.PILEUP, nsubthread = nsubthread, cluster.type = cluster.type)
CN.all <- foreach(k = seq_along(COUNTS.all), .combine = "rbind") %do% {
k.tmp <- COUNTS.all[[k]]$CN
COUNTS.all[[k]]$CN <- NULL
gc()
return(k.tmp)
}
SNP.all <- foreach(k = seq_along(COUNTS.all), .combine = "rbind") %do% {
k.tmp <- COUNTS.all[[k]]$SNP
COUNTS.all[[k]]$SNP <- NULL
gc()
return(k.tmp)
}
rm(COUNTS.all)
gc()
## Building WESobj
CN.all$bin <- as.integer(CN.all$bin)
SNP.all$bin <- as.integer(SNP.all$bin)
### summaries (recoded function as R internal summary uses too much RAM !)
my.summary <- function(myv = NULL) {
vsum <- c(min(myv, na.rm = TRUE), quantile(myv, .25, na.rm = TRUE), median(myv, na.rm = TRUE), mean(myv, na.rm = TRUE), quantile(myv, .75, na.rm = TRUE), max(myv, na.rm = TRUE))
names(vsum) <- c("min", "q25", "median", "mean", "q75", "max")
return(vsum)
}
meta.w$BIN.tot.count.test.mean.summary <- my.summary(CN.all$tot_count.test[!is.na(CN.all$tot_count.test)])
meta.w$BIN.tot.count.ref.mean.summary <- my.summary(CN.all$tot_count.ref[!is.na(CN.all$tot_count.ref)])
meta.w$SNP.tot.count.test.summary <- my.summary(SNP.all$tot_count.test[!is.na(SNP.all$tot_count.test)])
meta.w$SNP.tot.count.ref.summary <- my.summary(SNP.all$tot_count.ref[!is.na(SNP.all$tot_count.ref)])
gc()
WESobj <- list(RD = CN.all, SNP = SNP.all, meta = list(basic = meta.b, WES = meta.w))
rm(CN.all, SNP.all)
gc()
## QC : Computing coverages
tmsg("Computing coverages ...")
gw.rd <- sum(WESobj$RD$end - WESobj$RD$start +1)
gw.snp <- nrow(WESobj$SNP)
rd.cov <- data.frame(cuts = c(1, 5, 10, 20, 30, 40, 50, 75, 100, 150, 200), stringsAsFactors = FALSE)
rd.cov <- cbind(rd.cov, t(foreach::foreach(x = rd.cov$cuts, .combine = "cbind") %do% {
test.rd.in <- WESobj$RD$tot_count.test >= x
ref.rd.in <- WESobj$RD$tot_count.ref >= x
test.snprd.in <- WESobj$SNP$tot_count.test >= x
ref.snprd.in <- WESobj$SNP$tot_count.ref >= x
test.cut.cov <- if(!any(test.rd.in)) NA else (sum(WESobj$RD$end[test.rd.in] - WESobj$RD$start[test.rd.in] +1)/gw.rd)
ref.cut.cov <- if(!any(ref.rd.in)) NA else (sum(WESobj$RD$end[ref.rd.in] - WESobj$RD$start[ref.rd.in] +1)/gw.rd)
test.snpcut.cov <- if(!any(test.snprd.in)) NA else (length(which(test.snprd.in))/gw.snp)
ref.snpcut.cov <- if(!any(ref.snprd.in)) NA else (length(which(ref.snprd.in))/gw.snp)
return(c(test.cut.cov, ref.cut.cov, test.snpcut.cov, ref.snpcut.cov))
}))
colnames(rd.cov) <- c("MinDepth", "TestBINCoverage", "RefBINCoverage", "TestBAFCoverage", "RefBAFCoverage")
rm(test.snprd.in, ref.snprd.in, test.rd.in, ref.rd.in)
if (write.data || plot) dir.create(paste0(out.dir, "/", samplename))
if (write.data) write.table(rd.cov, file = paste0(out.dir, "/", samplename, "/", samplename, '_WES_', genome, "_b", meta.w$bin.size, "_coverage.txt"), sep = "\t", quote = FALSE, row.names = FALSE)
## QC : Plotting coverages
if (plot) {
### Coverage plot
png(paste0(out.dir, "/", samplename, "/", samplename, "_WES_", genome, "_b", meta.w$bin.size, "_coverage.png"), 800, 640)
plot(rd.cov$MinDepth, rd.cov$TestBAFCoverage, type = "b", col = 2, lty = 3, pch = 20, main = paste0(WESobj$meta$basic$samplename, "\nCoverage Plot"), xlab = "Minimum depth", ylab = "Coverage", ylim = c(0,1), xaxp = c(0,200,10))
abline(v = rd.cov$MinDepth, lty = 2, col = "grey75")
abline(h = seq(0,1,.1), lty = 2, col = "grey75")
lines(rd.cov$MinDepth, rd.cov$RefBAFCoverage, type = "b", col = 1, lty = 3, pch = 20)
lines(rd.cov$MinDepth, rd.cov$TestBINCoverage, type = "b", col = 2)
lines(rd.cov$MinDepth, rd.cov$RefBINCoverage, type = "b", col = 1)
abline(h = .5, lty = 2)
legend("topright", legend = c("Test BAF", "Ref BAF", "Test CN", "Ref CN"), inset = .02, col = c(2,1,2,1), lty = c(3,3,1,1), pch = c(20,20,1,1))
dev.off()
### RD plots
png(paste0(out.dir, "/", samplename, "/", samplename, "_WES_", genome, "_b", meta.w$bin.size, "_rawdepth.png"), 1600, 1050)
par(mfrow = c(2,1))
test.l10 <- log10(WESobj$RD$tot_count.test +1)
plot(test.l10, pch = ".", xaxs = "i", xlab = "Index", ylab = "log10(RD+1)", main = paste0(samplename, " TEST (", round(10^median(test.l10, na.rm = TRUE)), ")"))
abline(h = median(test.l10, na.rm = TRUE), lty = 2, col = "cyan")
lines(runmed(test.l10, 9999), col = 2)
ref.l10 <- log10(WESobj$RD$tot_count.ref +1)
plot(ref.l10, pch = ".", xaxs = "i", xlab = "Index", ylab = "log10(RD+1)", main = paste0(samplename, " REF (", round(10^median(ref.l10, na.rm = TRUE)), ")"))
abline(h = median(ref.l10, na.rm = TRUE), lty = 2, col = "cyan")
lines(runmed(ref.l10, 9999), col = 2)
dev.off()
}
rm(rd.cov)
## Saving
if (write.data) {
tmsg("Saving counts data ...")
saveRDS(WESobj, file = paste0(out.dir, "/", samplename, "/", samplename, "_", genome, "_b", meta.w$bin.size, "_binned.RDS"), compress = "bzip2")
}
if (return.data) return(WESobj)
}
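## Hypothetical usage sketch (BAM and BINpack paths are placeholders). Wrapped in
## `if (FALSE)` so that sourcing this file does not trigger the call.
if (FALSE) {
  WES.Bin(testBAM = "tumor.bam",
          refBAM = "normal.bam",
          BINpack = "capture_targets_hg19_b50.GC.rda",
          samplename = "SAMPLE01",
          Q = 20,
          nsubthread = 4,
          out.dir = "results")
}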
## Performs the binning of BAMs using a BINpack, batch mode
WES.Bin.Batch <- function(BAM.list.file = NULL, BINpack = NULL, nthread = 1, cluster.type = "PSOCK", ...) {
if (!file.exists(BAM.list.file)) stop("Could not find BAM.list.file !", call. = FALSE)
message("Reading and checking BAM.list.file ...")
myBAMs <- read.table(file = BAM.list.file, header = TRUE, sep="\t", check.names = FALSE, as.is = TRUE)
head.ok <- c("testBAM", "refBAM", "SampleName")
  head.chk <- all(colnames(myBAMs) == head.ok)
if (!head.chk) {
message("Invalid header in BAM.list.file !")
message(paste0("EXPECTED : ", head.ok))
message(paste0("FOUND : ", colnames(myBAMs)))
stop("Invalid header.", call. = FALSE)
}
tb.chk <- file.exists(myBAMs$testBAM)
rb.chk <- file.exists(myBAMs$refBAM)
  if (!all(tb.chk) || !all(rb.chk)) {
message("Some BAM files from the BAM.list.file could not be found (wrong path or filename ?) !")
message("Missing testBAM file(s) :")
message(myBAMs$testBAM[which(!tb.chk)])
message("Missing refBAM file(s) :")
message(myBAMs$refBAM[which(!rb.chk)])
stop("Missing BAM file(s).", call. = FALSE)
}
sn.chk <- duplicated(myBAMs$SampleName)
if (any(sn.chk)) {
message("BAM.list.file contains duplicated samplenames !")
message(myBAMs$SampleName[which(duplicated(myBAMs$SampleName))])
stop("Duplicated samplenames.", call. = FALSE)
}
if(any(myBAMs$testBAM == myBAMs$refBAM)) {
message("Some testBAM and refBAM are identical for at least one sample !")
stop("Identical BAM files for Test and Ref.", call. = FALSE)
}
## Adjusting cores/threads
message("Adjusting number of cores if needed ...")
if (is.null(nthread)) nthread <- parallel::detectCores(logical = TRUE) -1
if (nrow(myBAMs) < nthread) nthread <- nrow(myBAMs)
message("Running EaCoN.WES.Bin() in batch mode ...")
message(paste0("Found ", nrow(myBAMs), " samples to process ..."))
current.bitmapType <- getOption("bitmapType")
`%dopar%` <- foreach::"%dopar%"
cl <- parallel::makeCluster(spec = nthread, type = cluster.type, outfile = "")
doParallel::registerDoParallel(cl)
eacon.batchres <- foreach::foreach(r = seq_len(nrow(myBAMs)), .inorder = TRUE, .errorhandling = "stop", .export = c("EaCoN.set.bitmapType", "WES.Bin", "tmsg")) %dopar% {
EaCoN.set.bitmapType(type = current.bitmapType)
WES.Bin(testBAM = myBAMs$testBAM[r], refBAM = myBAMs$refBAM[r], BINpack = BINpack, samplename = myBAMs$SampleName[r], cluster.type = cluster.type, ...)
}
parallel::stopCluster(cl)
}
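## Hypothetical usage sketch. BAM.list.file is expected to be a tab-separated table
## with exactly this header (paths below are placeholders):
##
##   testBAM             refBAM               SampleName
##   /path/tumor_A.bam   /path/normal_A.bam   SAMPLE_A
##   /path/tumor_B.bam   /path/normal_B.bam   SAMPLE_B
##
## Wrapped in `if (FALSE)` so that sourcing this file does not trigger the call.
if (FALSE) {
  WES.Bin.Batch(BAM.list.file = "bam_list.tsv",
                BINpack = "capture_targets_hg19_b50.GC.rda",
                nthread = 2, nsubthread = 2)
}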
## Performs the normalization of WES L2R and BAF signals
WES.Normalize <- function(data = NULL, BINpack = NULL, gc.renorm = TRUE, wave.renorm = FALSE, wave.rda = NULL, RD.tot.min = 20, RD.alt.min = 3, BAF.hetmin = .33, sex.chr = c("chrX", "chrY"), TumorBoost = FALSE, out.dir = getwd(), return.data = FALSE, write.data = TRUE, plot = TRUE) {
# setwd("/mnt/data_cigogne/job/PUBLI_EaCoN/TCGA/ANALYSES/EaCoN_0.3.0_beta2/WES/TCGA-A7-A0CE-01A_vs_10A")
# data <- readRDS("Sample_PHEO_AG_HS_048_DNA_hg38_b50_binned.RDS")
# BINpack <- "V4-UTRs.hg38.fragment_targets_minimal_sorted_longChr_hg38_b50.GC.rda"
# gc.renorm <- TRUE
# wave.renorm <- FALSE
# # wave.rda <- "/mnt/data_cigogne/job/PUBLI_EaCoN/TCGA/RESOURCES/SureSelect_ClinicalResearchExome.padded_GRCh37-lite_merged_sorted_hs37d5_b50.Wave.rda"
# RD.tot.min = 20
# RD.alt.min = 3
# TumorBoost = FALSE
# # sex.chr <- c("X", "Y")
# sex.chr <- c("chrX", "chrY")
# out.dir = getwd()
# return.data = FALSE
# write.data = TRUE
# plot = TRUE
# BAF.hetmin <- .33
# source("/home/job/git_gustaveroussy/EaCoN/R/mini_functions.R")
# source("/home/job/git_gustaveroussy/EaCoN/R/renorm_functions.R")
# require(foreach)
  ### TODO: add a check on the input RDS (so that a "_processed.RDS" file is not given as input by mistake!)
## CHECKS
if (!is.list(data)) stop(tmsg("data should be a list !"), call. = FALSE)
if (is.null(BINpack)) stop(tmsg("A BINpack file is required !"), call. = FALSE)
if (!file.exists(BINpack)) stop(tmsg("Could not find the BINpack file !"), call. = FALSE)
if (wave.renorm) { if (!is.null(wave.rda)) { if (!file.exists(wave.rda)) stop(tmsg(paste0("Could not find wave.rda file ", wave.rda)), call. = FALSE) } }
if (RD.tot.min < 0) stop(tmsg("RD.tot.min must be >= 0 !"), call. = FALSE)
if (RD.alt.min <= 0) stop(tmsg("RD.alt.min must be > 0 !"), call. = FALSE)
## TAGS
data$meta$WES$TumorBoost <- as.character(TumorBoost)
data$meta$WES$RD.tot.min <- RD.tot.min
data$meta$WES$GC.renorm <- as.character(gc.renorm)
data$meta$WES$Wave.renorm <- as.character(wave.renorm)
samplename <- data$meta$basic$samplename
## Loading BINpack
load(BINpack)
gc()
genome.pkg <- renorm.data$info$value[renorm.data$info$key == "genome-package"]
if (!genome.pkg %in% BSgenome::installed.genomes()) {
if (genome.pkg %in% BSgenome::available.genomes()) {
stop(tmsg(paste0("BSgenome ", genome.pkg, " available but not installed. Please install it !")), call. = FALSE)
} else {
stop(tmsg(paste0("BSgenome ", genome.pkg, " not available in valid BSgenomes and not installed ... Please check your genome name or install your custom BSgenome !")), call. = FALSE)
}
}
### Loading genome
tmsg(paste0("Loading ", genome.pkg, " ..."))
suppressPackageStartupMessages(require(genome.pkg, character.only = TRUE))
# requireNamespace(genome.pkg, quietly = TRUE)
BSg.obj <- getExportedValue(genome.pkg, genome.pkg)
genome <- BSgenome::providerVersion(BSg.obj)
cs <- chromobjector(BSg.obj)
## BAF HANDLING
tmsg("Processing BAF ...")
BAF.adder <- function(Bdata = NULL, Bvalues = NULL, newname = NULL, type = "test") {
Bdata[[paste0("BAF.", type)]] <- Bdata[[newname]] <- Bvalues
Bdata[[paste0("mBAF.", type)]] <- BAF2mBAF(Bdata[[paste0("BAF.", type)]])
return(Bdata)
}
### BAF : Filtering (low depth)
rd.ori <- nrow(data$SNP)
#### 1) Filtering for low ref or test depth
RDlow <- data$SNP$tot_count.test < RD.tot.min | data$SNP$tot_count.ref < RD.tot.min
  if (length(which(RDlow)) == nrow(data$SNP)) stop(tmsg("All SNP positions were discarded for their low read count ! You may consider lowering the RD.tot.min value."), call. = FALSE)
tmsg(paste0("Removed ", length(which(RDlow)), " (", round(length(which(RDlow)) / rd.ori * 100, digits = 2), "%) SNP positions with low depth (<", RD.tot.min, ")"))
data$SNP <- data$SNP[!RDlow,]
#### 2) Filtering for low alt test depth
BRDlow <- data$SNP$alt_count.test < RD.alt.min
if (length(which(BRDlow)) == nrow(data$SNP)) stop(tmsg("All SNP positions were discarded for their low alternative allele count ! You may consider lowering the RD.alt.min value."), call. = FALSE)
tmsg(paste0("Removed ", length(which(BRDlow)), " (", round(length(which(BRDlow)) / rd.ori * 100, digits = 2), "%) SNP positions with low alt RD (<", RD.alt.min, ")"))
data$SNP <- data$SNP[!BRDlow,]
gc()
### Computing BAF
data$SNP$BAF.test.ori <- data$SNP$alt_count.test / data$SNP$tot_count.test
data$SNP$BAF.ref.ori <- data$SNP$alt_count.ref / data$SNP$tot_count.ref
odd.idx <- which(data$SNP$pos %% 2 == 1)
data$SNP$BAF.test.ori[odd.idx] <- -data$SNP$BAF.test.ori[odd.idx] +1L
data$SNP$BAF.ref.ori[odd.idx] <- -data$SNP$BAF.ref.ori[odd.idx] +1L
data$SNP <- BAF.adder(Bdata = data$SNP, Bvalues = data$SNP$BAF.test.ori, newname = "BAF.test.ori")
data$SNP <- BAF.adder(Bdata = data$SNP, Bvalues = data$SNP$BAF.ref.ori, newname = "BAF.ref.ori", type = "ref")
###### Computing LOR (and variance)
rcmat <- round(cbind(data$SNP$BAF.test.ori*data$SNP$tot_count.test, (1-data$SNP$BAF.test.ori)*data$SNP$tot_count.test))
data$SNP$LOR <- log(rcmat[,1]+1/6) - log(rcmat[,2]+1/6)
data$SNP$LORvar <- 1/(rcmat[,1]+1/6) + 1/(rcmat[,2]+1/6)
rm(rcmat)
gc()
### BAF : TumorBoost
if (TumorBoost) {
message(tmsg("Applying TumorBoost BAF normalization ..."))
# data$SNP$BAF.test <- data$SNP$BAF.test.TB <- as.numeric(aroma.light::normalizeTumorBoost(data$SNP$BAF.test, data$SNP$BAF.ref, flavor = "v4", preserveScale = FALSE))
BTB <- as.numeric(aroma.light::normalizeTumorBoost(data$SNP$BAF.test, data$SNP$BAF.ref, flavor = "v4", preserveScale = FALSE))
data$SNP <- BAF.adder(Bdata = data$SNP, Bvalues = BTB, newname = "BAF.test.TB")
}
### Getting heterozygous probes from Ref
Ref.hetero <- data$SNP$mBAF.ref >= BAF.hetmin
if (!any(Ref.hetero)) stop(tmsg("All SNP positions were tagged as homozygous in Ref : there may be a problem with your reference BAM ploidy !"), call. = FALSE)
### Keeping hetero positions
data$SNP <- data$SNP[Ref.hetero,]
### Removing additional values per bin
data$SNP <- data$SNP[!duplicated(data$SNP$bin),]
gc()
### BAF filtering
# smoB <- round(nrow(data$SNP) / 3300)
# if(smoB%%2 == 0) smoB <- smoB+1
# mBAF.rm <- runmed(data$SNP$mBAF.test, smoB)
# mBAF.diff <- abs(data$SNP$mBAF.test - mBAF.rm)
# Bfiltered <- mBAF.diff < quantile(mBAF.diff, BAF.filter)
# data$SNP <- data$SNP[Bfiltered,]
### Adding LOR
data$SNP$LOR.test <- log(data$SNP$alt_count.test / (data$SNP$tot_count.test - data$SNP$alt_count.test))
## L2R
tmsg("Processing RD bins ...")
#### Computing L2R
data$RD$L2R <- data$RD$L2R.ori <- log2((data$RD$tot_count.test+1) / (data$RD$tot_count.ref+1))
### L2R : Filtering
#### 1) low depth
rd.ori <- nrow(data$RD)
RDlow <- data$RD$tot_count.test < RD.tot.min | data$RD$tot_count.ref < RD.tot.min
data$meta$WES$Imputed.lowdepth.bins <- length(which(RDlow))
  if (length(which(RDlow)) == nrow(data$RD)) stop(tmsg("All RD bins were flagged for their low read count ! You may consider lowering the RD.tot.min value."), call. = FALSE)
tmsg(paste0("Flagged ", length(which(RDlow)), " (", round(length(which(RDlow)) / rd.ori * 100, digits = 2), "%) RD bins with low depth (<", RD.tot.min, ")"))
#### 2) GC% outliers
GCOL <- renorm.data$tracks[,5] < 200 | renorm.data$tracks[,5] > 800
data$meta$WES$Imputed.GCoutlier.bins <- length(which(GCOL))
if (length(which(GCOL)) == nrow(data$RD)) stop(tmsg("All RD bins were flagged as GC% outliers ! There may be something wrong with your reference genome and/or capture BED."), call. = FALSE)
tmsg(paste0("Flagged ", length(which(GCOL)), " (", round(length(which(GCOL)) / rd.ori * 100, digits = 2), "%) RD bins as GC% outliers."))
### Pooling and imputing
FLAGS <- RDlow + GCOL > 0
if (any(FLAGS)) {
tmsg(paste0(" Imputed ", length(which(FLAGS)), " (", round(length(which(FLAGS))/rd.ori*100, digits = 2), "%) L2R bins."))
l2r.tmp <- data$RD$L2R
l2r.tmp[FLAGS] <- NA
data$RD$L2R <- data$RD$L2R.imp <- approxfun(seq_along(l2r.tmp), l2r.tmp, rule = 2)(seq_along(l2r.tmp))
} else data$RD$L2R.imp <- data$RD$L2R
gc()
## L2R : Normalization
smo <- round(nrow(data$RD) / 550)
if(smo%%2 == 0) smo <- smo+1
### Wave
if (wave.renorm) {
tmsg("Wave normalization ...")
l2r2norm <- data.frame(ProbeSetName = data$RD$bin, chr = as.character(data$RD$chr), pos = data$RD$start, L2R = data$RD$L2R)
# rownames(l2r2norm) <- seq_len(nrow(l2r2norm))
ren.res <- renorm.go(input.data = l2r2norm, renorm.rda = wave.rda, track.type = "Wave", smo = smo, arraytype = data$meta$basic$type, genome = genome)
fitted.l2r <- ren.res$renorm$l2r$l2r
GCF <- is.na(fitted.l2r)
if (any(GCF)) {
l2r.tmp <- fitted.l2r
l2r.tmp[GCF] <- NA
fitted.l2r <- approxfun(seq_along(l2r.tmp), l2r.tmp, rule = 2)(seq_along(l2r.tmp))
}
if(is.null(ren.res$renorm$pos)) {
# meta.b <- setmeta("gc.renorm", "None", meta.b)
data$meta$WES <- setmeta("wave.renorm", "None", data$meta$WES)
tmsg(" No positive fit.")
} else {
## Tweaking sex chromosomes
sex.idx <- data$RD$chr %in% sex.chr
auto.ori.med <- median(data$RD$L2R[!sex.idx], na.rm = TRUE)
auto.rn.med <- median(fitted.l2r[!sex.idx], na.rm = TRUE)
if (any(sex.idx)) {
for (k in sex.chr) {
k.idx <- data$RD$chr == k
if (any(k.idx)) {
k.ori.diffmed <- median(data$RD$L2R.ori[k.idx], na.rm = TRUE) - auto.ori.med
k.rn.diffmed <- median(fitted.l2r[k.idx], na.rm = TRUE) - auto.rn.med
fitted.l2r[k.idx] <- fitted.l2r[k.idx] - k.rn.diffmed + k.ori.diffmed
}
}
}
# meta.b <- setmeta("wave.renorm", paste0(ren.res$renorm$pos, collapse = ","), meta.b)
data$meta$WES <- setmeta("wave.renorm", paste0(ren.res$renorm$pos, collapse = ","), data$meta$WES)
}
rm(ren.res)
data$RD$L2R.WAVE <- data$RD$L2R <- fitted.l2r - median(fitted.l2r, na.rm = TRUE)
} else {
# meta.b <- setmeta("wave.renorm", "FALSE", meta.b)
data$meta$WES <- setmeta("wave.renorm", "FALSE", data$meta$WES)
}
#### GC%
# message("GC% normalization ...")
# data$RD$L2R.GC <- data$RD$L2R <- limma::loessFit(x = data$RD$GCPC, y = data$RD$L2R)$residuals
# if (any(is.na(data$RD$L2R))) {
# l2r.tmp <- data$RD$L2R
# l2r.tmp[is.na(data$RD$L2R)] <- NA
# data$RD$L2R <- data$RD$L2R.GC <- approxfun(seq_along(l2r.tmp), l2r.tmp, rule = 2)(seq_along(l2r.tmp))
# }
if (gc.renorm) {
tmsg("GC% normalization ...")
l2r2norm <- data.frame(ProbeSetName = data$RD$bin, chr = as.character(data$RD$chr), pos = data$RD$start, L2R = data$RD$L2R)
# rownames(l2r2norm) <- seq_len(nrow(l2r2norm))
ren.res <- renorm.go(input.data = l2r2norm, renorm.rda = BINpack, track.type = "GC", smo = smo, arraytype = data$meta$basic$type, genome = genome)
fitted.l2r <- ren.res$renorm$l2r$l2r
GCF <- is.na(fitted.l2r)
if (any(GCF)) {
l2r.tmp <- fitted.l2r
l2r.tmp[GCF] <- NA
fitted.l2r <- approxfun(seq_along(l2r.tmp), l2r.tmp, rule = 2)(seq_along(l2r.tmp))
}
if(is.null(ren.res$renorm$pos)) {
# meta.b <- setmeta("gc.renorm", "None", meta.b)
data$meta$eacon <- setmeta("gc.renorm", "None", data$meta$eacon)
tmsg(" No positive fit.")
} else {
## Tweaking sex chromosomes
sex.idx <- data$RD$chr %in% sex.chr
auto.ori.med <- median(data$RD$L2R.ori[!sex.idx], na.rm = TRUE)
auto.rn.med <- median(fitted.l2r[!sex.idx], na.rm = TRUE)
if (any(sex.idx)) {
for (k in sex.chr) {
k.idx <- data$RD$chr == k
if (any(k.idx)) {
# k.ori.diffmed <- median(data$RD$L2R.ori[k.idx], na.rm = TRUE) - auto.ori.med
k.ori.diffmed <- median(data$RD$L2R.ori[k.idx], na.rm = TRUE) - auto.ori.med
k.rn.diffmed <- median(fitted.l2r[k.idx], na.rm = TRUE) - auto.rn.med
fitted.l2r[k.idx] <- fitted.l2r[k.idx] - k.rn.diffmed + k.ori.diffmed
}
}
}
# meta.b <- setmeta("gc.renorm", paste0(ren.res$renorm$pos, collapse = ","), meta.b)
data$meta$eacon <- setmeta("gc.renorm", paste0(ren.res$renorm$pos, collapse = ","), data$meta$eacon)
}
rm(ren.res)
data$RD$L2R.GC <- data$RD$L2R <- fitted.l2r - median(fitted.l2r, na.rm = TRUE)
} else {
# meta.b <- setmeta("gc.renorm", "FALSE", meta.b)
data$meta$eacon <- setmeta("gc.renorm", "FALSE", data$meta$eacon)
}
## Merging
data$RD$BAF <- dplyr::left_join(data$RD[, c(1:4,9)], data$SNP[, c(1,3,which(colnames(data$SNP) == "BAF.test"))], by = c("chr", "bin"))$BAF.test
# data$RD$LOR <- dplyr::left_join(data$RD[, c(1:4,9)], data$SNP[, c(1,3,which(colnames(data$SNP) == "LOR.test"))], by = c("chr", "bin"))$LOR.test
data$RD$LOR <- dplyr::left_join(data$RD[, c(1:4,9)], data$SNP[, c(1,3,which(colnames(data$SNP) == "LOR"))], by = c("chr", "bin"))$LOR
data$RD$LORvar <- dplyr::left_join(data$RD[, c(1:4,9)], data$SNP[, c(1,3,which(colnames(data$SNP) == "LORvar"))], by = c("chr", "bin"))$LORvar
data$RD$RD.test <- dplyr::left_join(data$RD[, c(1:4,9)], data$SNP[, c(1,3,which(colnames(data$SNP) == "tot_count.test"))], by = c("chr", "bin"))$tot_count.test
data$RD$RD.ref <- dplyr::left_join(data$RD[, c(1:4,9)], data$SNP[, c(1,3,which(colnames(data$SNP) == "tot_count.ref"))], by = c("chr", "bin"))$tot_count.ref
## Building ASCAT object
tmsg("Building normalized object ...")
my.ch <- sapply(unique(data$RD$chr), function(x) { which(data$RD$chr == x) })
my.ascat.obj <- list(
data = list(
Tumor_LogR.ori = data.frame(sample = data$RD$L2R.ori, row.names = data$RD$bin),
Tumor_LogR = data.frame(sample = data$RD$L2R, row.names = data$RD$bin),
Tumor_BAF = data.frame(sample = data$RD$BAF, row.names = data$RD$bin),
Tumor_LogR_segmented = NULL,
Tumor_BAF_segmented = NULL,
Germline_LogR = NULL,
Germline_BAF = NULL,
SNPpos = data.frame(chrs = data$RD$chr, pos = round((data$RD$start + data$RD$end)/2)),
ch = my.ch,
chr = my.ch,
chrs = levels(data$RD$chr),
samples = samplename,
gender = "NA",
sexchromosomes = sex.chr,
failedarrays = NULL,
additional = data$RD[,colnames(data$RD) %in% c("RD.test", "RD.ref", "LOR", "LORvar")]
),
meta = data$meta,
germline = list(germlinegenotypes = matrix(is.na(data$RD$BAF), ncol = 1, dimnames = list(data$RD$bin, samplename)), failedarrays = NULL)
)
colnames(my.ascat.obj$data$Tumor_LogR) <- colnames(my.ascat.obj$data$Tumor_LogR.ori) <- colnames(my.ascat.obj$data$Tumor_BAF) <- samplename
# rm(my.ch, data)
gc()
# plot(ares$Tumor_LogR[,1], pch = ".", cex = 3, xaxs = "i", ylim = c(-2,2))
# points(ares$Tumor_LogR_segmented, pch = ".", cex = 3, col = 2)
# plot(ares$Tumor_BAF[!is.na(ares$Tumor_BAF),1], pch = ".", xaxs = "i", cex = 3)
# points(ares$Tumor_BAF_segmented[[1]], pch = ".", cex = 3, col = 2)
# points(1 - ares$Tumor_BAF_segmented[[1]], pch = ".", cex = 3, col = 2)
#
## Saving data
if (write.data) {
tmsg("Saving normalized data ...")
saveRDS(my.ascat.obj, paste0(out.dir, "/", samplename, "_", data$meta$basic$genome, "_b", data$meta$WES$bin.size, "_processed.RDS"), compress = "bzip2")
}
## Plot
tmsg("Plotting ...")
if (plot) {
l2r <- my.ascat.obj$data$Tumor_LogR[,1]
l2r.rm <- runmed(l2r, smo)
l2r.dif <- diff(l2r)
l2r.mad <- median(abs(l2r.dif[l2r.dif != 0]))
l2r.rm.dif <- diff(l2r.rm)
l2r.ssad <- sum(abs(l2r.rm.dif[l2r.rm.dif != 0]))
l2r.ori <- my.ascat.obj$data$Tumor_LogR.ori[,1]
l2r.ori.rm <- runmed(l2r.ori, smo)
l2r.ori.dif <- diff(l2r.ori)
l2r.ori.mad <- median(abs(l2r.ori.dif[l2r.ori.dif != 0]))
l2r.ori.rm.dif <- diff(l2r.ori.rm)
l2r.ori.ssad <- sum(abs(l2r.ori.rm.dif[l2r.ori.rm.dif != 0]))
l2r.genopos <- my.ascat.obj$data$SNPpos$pos + cs$chromosomes$chr.length.toadd[my.ascat.obj$data$SNPpos$chrs]
l2r <- l2r - median(l2r, na.rm = TRUE)
l2r.ori <- l2r.ori - median(l2r.ori, na.rm = TRUE)
    kend <- l2r.genopos[vapply(unique(my.ascat.obj$data$SNPpos$chrs), function(k) { max(which(my.ascat.obj$data$SNPpos$chrs == k))}, 1)]
png(paste0(out.dir, "/", samplename, "_WES_", data$meta$basic$genome, "_rawplot.png"), 1600, 1050)
par(mfrow = c(3,1))
plot(l2r.genopos, l2r.ori, pch = ".", cex = 3, col = "grey70", xaxs = "i", yaxs = "i", ylim = c(-2,2), main = paste0(samplename, " WES (", data$meta$basic$manufacturer, ") raw L2R profile (median-centered)\nMAD = ", round(l2r.ori.mad, digits = 2), " ; SSAD = ", round(l2r.ori.ssad, digits = 2)), xlab = "Genomic position", ylab = "L2R")
lines(l2r.genopos, l2r.ori.rm, col = 1)
abline(v = kend, col = 4, lty = 3, lwd = 2)
abline(h = 0, col = 2, lty = 2, lwd = 2)
plot(l2r.genopos, l2r, pch = ".", cex = 3, col = "grey70", xaxs = "i", yaxs = "i", ylim = c(-2,2), main = paste0(samplename, " WES (", data$meta$basic$manufacturer, ") normalized L2R profile (median-centered)\nMAD = ", round(l2r.mad, digits = 2), " ; SSAD = ", round(l2r.ssad, digits = 2)), xlab = "Genomic position", ylab = "L2R")
lines(l2r.genopos, l2r.rm, col = 1)
abline(v = kend, col = 4, lty = 3, lwd = 2)
abline(h = 0, col = 2, lty = 2, lwd = 2)
plot(l2r.genopos, my.ascat.obj$data$Tumor_BAF[,1], pch = ".", cex = 3, col = "grey75", xaxs = "i", yaxs = "i", ylim = c(0,1), main = paste0(samplename, " WES (", data$meta$basic$manufacturer, ")", if(TumorBoost) " TumorBoost-normalized", " BAF profile"), xlab = "Genomic position", ylab = "BAF")
abline(v = kend, col = 4, lty = 3, lwd = 2)
abline(h = .5, col = 2, lty = 2, lwd = 2)
dev.off()
}
tmsg("Done.")
if(return.data) return(my.ascat.obj)
}
## Runs WES.Normalize using a RDS filename
WES.Normalize.ff <- function(BIN.RDS.file = NULL, ...) {
## CHECKS
if (is.null(BIN.RDS.file)) stop(tmsg("An RDS file from EaCoN::EaCoN.WES.Bin is required !"), call. = FALSE)
if (!file.exists(BIN.RDS.file)) stop(tmsg(paste0("Could not find ", BIN.RDS.file, " .")), call. = FALSE)
tmsg("Loading binned WES data ...")
my.data <- readRDS(BIN.RDS.file)
WES.Normalize(data = my.data, out.dir = dirname(BIN.RDS.file), ...)
}
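## Hypothetical usage sketch (the RDS path is a placeholder for a *_binned.RDS file
## produced by WES.Bin; the BINpack is presumably the same one used for binning).
## Wrapped in `if (FALSE)` so that sourcing this file does not trigger the call.
if (FALSE) {
  WES.Normalize.ff(BIN.RDS.file = "SAMPLE01/SAMPLE01_hg19_b50_binned.RDS",
                   BINpack = "capture_targets_hg19_b50.GC.rda",
                   gc.renorm = TRUE,
                   wave.renorm = FALSE)
}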
## Runs WES.Normalize.ff, batch mode
WES.Normalize.ff.Batch <- function(BIN.RDS.files = list.files(path = getwd(), pattern = "_binned.RDS$", all.files = FALSE, full.names = TRUE, recursive = TRUE, ignore.case = FALSE, include.dirs = FALSE), nthread = 1, cluster.type = "PSOCK", ...) {
if (length(BIN.RDS.files) == 0) stop("No file found to process !", call. = FALSE)
message("Running EaCoN.WES.Normalize.ff() in batch mode ...")
message(paste0("Found ", length(BIN.RDS.files), " samples to process ..."))
current.bitmapType <- getOption("bitmapType")
`%dopar%` <- foreach::"%dopar%"
cl <- parallel::makeCluster(spec = nthread, type = cluster.type, outfile = "")
doParallel::registerDoParallel(cl)
eacon.batchres <- foreach::foreach(r = seq_along(BIN.RDS.files), .inorder = TRUE, .errorhandling = "stop") %dopar% {
EaCoN.set.bitmapType(type = current.bitmapType)
WES.Normalize.ff(BIN.RDS.file = BIN.RDS.files[r], ...)
}
parallel::stopCluster(cl)
}
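## Hypothetical usage sketch: normalize every *_binned.RDS found under the current
## directory with the same (placeholder) BINpack, two samples in parallel. Wrapped in
## `if (FALSE)` so that sourcing this file does not trigger the call.
if (FALSE) {
  WES.Normalize.ff.Batch(BINpack = "capture_targets_hg19_b50.GC.rda", nthread = 2)
}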
bedBinner <- function(bed = NULL, bin.size = 50, nthread = 1) {
bin.size <- as.integer(bin.size)
cl <- parallel::makeCluster(spec = nthread, type = "PSOCK", outfile = "")
suppressPackageStartupMessages(require(foreach))
doParallel::registerDoParallel(cl)
k <- 0
bed.binned <- foreach::foreach(k = unique(bed$chr), .combine = "rbind", .export = "tmsg") %dopar% {
tmsg(k)
bedk <- bed[bed$chr == k,]
b <- 0
bbk <- foreach::foreach(b = seq_len(nrow(bedk)), .combine = "rbind", .export = "bin.size") %do% {
### Smaller exon
exon.length <- (bedk$end[b] - bedk$start[b] + 1L)
if (exon.length <= bin.size) return(bedk[b,])
mod.rest <- exon.length %% bin.size
mod.count <- as.integer((exon.length - mod.rest) / bin.size)
bin.starts <- bedk$start[b] + ((seq_len(mod.count)-1L) * bin.size)
bin.ends <- bin.starts + bin.size - 1L
## Non-Round count
if (mod.rest > 0L) {
## Enough for a new bin
if (mod.rest >= (bin.size / 2L)) {
bin.starts <- c(bin.starts, bin.starts[mod.count]+bin.size)
bin.ends <- c(bin.ends, bedk$end[b])
mod.count <- mod.count+1L
} else { ## Dispatch to inside bins
if (mod.rest >= mod.count) {
mod.rest2 <- mod.rest %% mod.count
mod.count2 <- as.integer((mod.rest - mod.rest2) / mod.count)
bin.starts <- bedk$start[b] + ((seq_len(mod.count)-1L) * (bin.size + mod.count2))
bin.ends <- bin.starts + (bin.size + mod.count2) - 1L
bin.ends[mod.count] <- bin.ends[mod.count] + mod.rest2
} else {
# bin.ends[mod.count] <- bin.ends[mod.count] + mod.rest
bin.ends[mod.count] <- bedk$end[b]
}
}
}
# if(length(bin.starts) != length(bin.starts))
chrs = rep(bedk$chr[b], mod.count)
return(data.frame(chr = chrs, start = bin.starts, end = bin.ends, stringsAsFactors = FALSE))
}
return(bbk)
}
parallel::stopCluster(cl)
return(bed.binned)
}
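## Worked sketch of the binning rules above, for bin.size = 50 and hypothetical exon
## lengths (derived from the dispatch logic above, not from package documentation):
## * 101 bp -> bins of 50 + 51 (remainder 1 is < bin.size/2 and < the bin count, so it
##   is absorbed by the last bin);
## * 120 bp -> bins of 60 + 60 (remainder 20 is < bin.size/2 but >= the bin count, so
##   it is spread evenly over the inside bins);
## * 130 bp -> bins of 50 + 50 + 30 (remainder 30 is >= bin.size/2, so it becomes an
##   extra trailing bin).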
## Compute letter composition of nucleotidic sequences from a (chr, start, end) dataframe, with possible extension.
loc.nt.count.hs <- function(loc.df = NULL, genome.pkg = "BSgenome.Hsapiens.UCSC.hg19", extend = 0, blocksize = 1E+04, nthread = 5) {
if (is.null(loc.df)) stop("loc.df is required !", call. = FALSE)
if (extend < 0) stop("extend should be >= 0", call. = FALSE)
if (blocksize <= 0) stop("blocksize should be > 0", call. = FALSE)
if (!all(is.character(loc.df$chr) | is.factor(loc.df$chr))) stop("chr should be character !", call. = FALSE)
if (!all(is.numeric(loc.df$start))) stop("start should be numeric !", call. = FALSE)
if (!all(is.numeric(loc.df$end))) stop("end should be numeric !", call. = FALSE)
if (!genome.pkg %in% BSgenome::installed.genomes()) {
if (genome.pkg %in% BSgenome::available.genomes()) {
stop(tmsg(paste0("BSgenome ", genome.pkg, " available but not installed. Please install it !")), call. = FALSE)
} else {
stop(tmsg(paste0("BSgenome ", genome.pkg, " not available in valid BSgenomes and not installed ... Please check your genome name or install your custom BSgenome !")), call. = FALSE)
}
}
print(paste0("Loading ", genome.pkg, " sequence ..."))
# requireNamespace(genome.pkg, quietly = TRUE)
suppressPackageStartupMessages(require(genome.pkg, character.only = TRUE))
# require(genome.pkg, character.only = TRUE)
BSg.obj <- getExportedValue(genome.pkg, genome.pkg)
genome <- BSgenome::providerVersion(BSg.obj)
cs <- chromobjector(BSg.obj)
print("Removing replicated locations ...")
idz <- paste0(loc.df$chr, ":", loc.df$start, "-", loc.df$end)
loc.df <- loc.df[!duplicated(idz),]
print("Removing non-canonical sequences ...")
loc.df <- loc.df[loc.df$chr %in% seqnames(BSg.obj),]
print("Ordering data ...")
loc.df <- loc.df[order(unlist(cs$chrom2chr[loc.df$chr]), loc.df$start, loc.df$ProbeSetName),]
myGR.ex <- suppressPackageStartupMessages(GenomicRanges::makeGRangesFromDataFrame(loc.df, seqinfo = seqinfo(BSg.obj)))
if (extend > 0) myGR.ex <- GenomicRanges::trim(myGR.ex + extend)
instep <- as.numeric(cut(seq_along(myGR.ex), seq.int(0, length(myGR.ex) + blocksize, blocksize)))
print("Computing base composition ...")
print("Starting cluster ...")
if (length(unique(instep)) < nthread) nthread <- length(unique(instep))
cl <- parallel::makeCluster(spec = nthread, type = "PSOCK", outfile = "")
doParallel::registerDoParallel(cl)
requireNamespace("foreach", quietly = TRUE)
`%dopar%` <- foreach::"%dopar%"
xcounts <- foreach::foreach(x = unique(instep), .combine = "rbind", .packages = c("Biostrings", "BSgenome"), .export = "getSeq") %dopar% {
return(Biostrings::alphabetFrequency(BSgenome::getSeq(BSg.obj, myGR.ex[which(instep == x)]), baseOnly = TRUE))
}
print("Stopping cluster ...")
parallel::stopCluster(cl)
out.df <- cbind(loc.df, xcounts)
return(out.df)
}
loc.nt.gcc.hs <- function(loc.counts = NULL) {
gcc <- (loc.counts$C + loc.counts$G) / (loc.counts$A + loc.counts$C + loc.counts$G + loc.counts$T)
return(data.frame(loc.counts, GC = gcc, stringsAsFactors = FALSE))
}
## Compute GC on a (chr, start, end) dataframe using multiple extend values
loc.nt.gcc.hs.multi <- function(loc.df = NULL, extend.multi = c(50, 100, 200, 400, 800, 1600, 3200, 6400), ...) {
# require(foreach)
requireNamespace("foreach", quietly = TRUE)
`%do%` <- foreach::"%do%"
gc.list <- foreach::foreach(nt.add = extend.multi) %do% {
print(paste0("Computing GC +", nt.add, " ..."))
adb.counts <- loc.nt.count.hs(loc.df = loc.df, extend = nt.add, ...)
adb.gc <- loc.nt.gcc.hs(loc.counts = adb.counts)
return(adb.gc)
}
base.df <- gc.list[[1]][,1:4]
gc.df <- foreach::foreach(nt.add = seq_along(gc.list), .combine = "cbind") %do% { return(as.integer(round(gc.list[[nt.add]][["GC"]] * 1000))) }
colnames(gc.df) <- paste0("GC", extend.multi)
return(data.frame(base.df, gc.df, stringsAsFactors = FALSE))
}
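## Hypothetical usage sketch (toy targets; coordinates and genome package are
## placeholders). Wrapped in `if (FALSE)` so that sourcing this file does not trigger
## the call.
if (FALSE) {
  toy.loc <- data.frame(ProbeSetName = 1:2,
                        chr = c("chr1", "chr1"),
                        start = c(1000001, 2000001),
                        end = c(1000050, 2000050),
                        stringsAsFactors = FALSE)
  loc.nt.gcc.hs.multi(loc.df = toy.loc,
                      genome.pkg = "BSgenome.Hsapiens.UCSC.hg19",
                      extend.multi = c(0, 100, 500),
                      nthread = 2)
}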
genome.build.finder <- function(BAM.header = NULL, valid.genomes = NULL) {
BAM.header <- unlist(BAM.header)
query <- paste0("(", paste0(valid.genomes, collapse = "|"), ")")
stvh.grep <- grep(query, unlist(BAM.header))
if (length(stvh.grep) == 0) stop(tmsg("Could not automatically determine genome build ! Please specify it !"), call. = FALSE)
stvh.regexec <- unique(vapply(stvh.grep, function(x) {
rc.res <- regexec(query, BAM.header[x])[[1]]
return(as.character(substr(BAM.header[x], start = rc.res[1], stop = rc.res[1]+attr(rc.res, "match.length")[1]-1)))
}, "a"))
ok.genome <- unique(stvh.regexec[stvh.regexec %in% valid.genomes])
  if (length(ok.genome) == 0) stop(tmsg(paste0("Identified a putative genome build (", paste(stvh.regexec, collapse = ", "), "), but not a supported one !")), call. = FALSE)
if (length(ok.genome) >= 2) stop(tmsg(paste0("Identified more than one putative genome build (", ok.genome, ") !")), call. = FALSE)
return(ok.genome)
}
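## Minimal sketch (hypothetical header strings): the build is recovered by pattern-
## matching the flattened BAM header against the supplied whitelist.
if (FALSE) {
  genome.build.finder(BAM.header = list("@PG ... -R /ref/hg19/genome.fa ..."),
                      valid.genomes = c("hg19", "hg38"))   # would return "hg19"
}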
|
/R/wes_process.R
|
permissive
|
ChristopherEeles/EaCoN
|
R
| false | false | 49,794 |
r
|
## Generates a BINpack (bins and pre-computed GC tracks) from a capture bed
BINpack.Maker <- function(bed.file = NULL, bin.size = 50, genome.pkg = "BSgenome.Hsapiens.UCSC.hg19", extend.multi = c(0, 50, 100, 200, 400, 800, 1600, 3200, 6400), blocksize = 1E+04, nthread = 1, out.dir = getwd(), return.data = FALSE) {
# setwd("/mnt/data_cigogne/job/PUBLI_EaCoN/TCGA/RESOURCES/test/")
# bed.file <- "SureSelect_ClinicalResearchExome.padded_GRCh37-lite_merged_sorted.bed"
# # bed.file = "test1.bed"
# bin.size = 50
# genome.pkg = "BSgenome.Hsapiens.1000genomes.hs37d5"
# extend.multi = c(0, 100, 500, 1000, 2000, 4000)
# blocksize = 1E+04
# nthread = 5
# out.dir = getwd()
# return.data = FALSE
# source("~/git_gustaveroussy/EaCoN/R/BED_functions.R")
# source("~/git_gustaveroussy/EaCoN/R/wes_process.R")
# source("~/git_gustaveroussy/EaCoN/R/mini_functions.R")
## Checks
if (is.null(bed.file)) stop("A BED file is required !", call. = FALSE)
if (!file.exists(bed.file)) stop("Could not find the BED file !", call. = FALSE)
if (is.null(out.dir)) message("NOTE : Checked / cleaned bed will be written in the same directory as source.") else {
if (!file.exists(out.dir)) stop("Could not find the output directory !", call. = FALSE)
if (!file.info(out.dir)$isdir) stop("out.dir is not a directory !", call. = FALSE)
}
if (is.null(genome.pkg)) stop(tmsg("A BSgenome package name is required !"), call. = FALSE)
if (!genome.pkg %in% BSgenome::installed.genomes()) {
if (genome.pkg %in% BSgenome::available.genomes()) {
stop(tmsg(paste0("BSgenome ", genome.pkg, " available but not installed. Please install it !")), call. = FALSE)
} else {
stop(tmsg(paste0("BSgenome ", genome.pkg, " not available in valid BSgenomes and not installed ... Please check your genome name or install your custom BSgenome !")), call. = FALSE)
}
}
bin.size <- as.integer(bin.size)
extend.multi <- as.integer(extend.multi)
### Loading genome
message(paste0("Loading ", genome.pkg, " ..."))
suppressPackageStartupMessages(require(genome.pkg, character.only = TRUE))
BSg.obj <- getExportedValue(genome.pkg, genome.pkg)
genome <- BSgenome::providerVersion(BSg.obj)
organism <- BSgenome::organism(BSg.obj)
## Cleaning BED
bed.clean <- BedCheck(bed.file = bed.file, genome.pkg = genome.pkg, out.dir = out.dir, return.data = TRUE)
## Binning
message(paste0("Performing binning (", bin.size, ") ..."))
bed.binned <- bedBinner(bed = bed.clean, bin.size = bin.size, nthread = nthread)
bed.binned <- data.frame(ProbeSetName = seq_len(nrow(bed.binned)), bed.binned, stringsAsFactors = TRUE)
message("Generating GC% tracks ...")
wes.gc <- loc.nt.gcc.hs.multi(loc.df = bed.binned, genome.pkg = genome.pkg, extend.multi = extend.multi, blocksize = blocksize, nthread = nthread)
rm(bed.binned)
wes.meta.key <- c("genome-species", "genome-version", "genome-package", "array_type", "track_type", "bin_size")
wes.meta.value <- c(organism, genome, genome.pkg, "WES", "GC", bin.size)
# renorm.data <- list(tracks = wes.gc, info = list("genome-version" = genome, "genome-package" = genome.pkg, bin.size = bin.size, track.type = "GC"), bed.clean = bed.clean, bed.binned = bed.binned)
renorm.data <- list(tracks = wes.gc, info = data.frame(key = wes.meta.key, value = wes.meta.value, stringsAsFactors = FALSE), bed.clean = bed.clean)
rm(wes.gc, wes.meta.value, wes.meta.key, bed.clean)
save("renorm.data", file = paste0(out.dir, "/", sub(pattern = "\\.bed$", replacement = paste0("_", genome, "_b", bin.size, ".GC.rda"), x = basename(bed.file), ignore.case = TRUE)), compress = "xz")
message("Done.")
if (return.data) return(renorm.data)
}
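## Hedged usage sketch for BINpack.Maker() (not part of the original code) : the BED file
## name below is a hypothetical example. The call bins the capture BED into 50-bp windows,
## pre-computes GC% tracks for the listed extension windows and writes a *.GC.rda BINpack
## in out.dir. The if (FALSE) guard keeps the example from running when the file is sourced.
if (FALSE) {
  BINpack.Maker(
    bed.file = "my_capture_kit_padded.bed",              # hypothetical capture BED
    bin.size = 50,
    genome.pkg = "BSgenome.Hsapiens.UCSC.hg19",
    extend.multi = c(0, 50, 100, 200, 400, 800, 1600, 3200, 6400),
    nthread = 4,
    out.dir = getwd()
  )
}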
WES.Bin <- function(testBAM = NULL, refBAM = NULL, BINpack = NULL, samplename = "SAMPLE", Q = 20, nsubthread = 1, cluster.type = "PSOCK", out.dir = getwd(), return.data = FALSE, write.data = TRUE, plot = TRUE, force = FALSE) {
# setwd("/home/job/WORKSPACE/EaCoN_tests/WES/AlexandreLefranc/Results")
# # setwd("/home/job/WORKSPACE/EaCoN_tests")
# testBAM <- "/home/job/WORKSPACE/EaCoN_tests/WES/AlexandreLefranc/DATA/Sample_PHEO_AG_HS_048_DNA.reord.sorted.dedup.recal.reheaded.bam"
# refBAM <- "/home/job/WORKSPACE/EaCoN_tests/WES/AlexandreLefranc/DATA/Sample_PHEO_AG_HS_048G_DNA.reord.sorted.dedup.recal.reheaded.bam"
# # # # BINpack <- "/mnt/data_cigogne/job/PUBLI_EaCoN/MATCHR/RESOURCES/SureSelect_ClinicalResearchExome.padded_hg19_b50.rda"
# # # BINpack <- "/mnt/data_cigogne/job/PUBLI_EaCoN/TCGA/RESOURCES/SureSelect_ClinicalResearchExome.padded_hs37d5_b50.GC.rda"
# BINpack <- "/home/job/WORKSPACE/EaCoN_tests/WES/AlexandreLefranc/Results/V4-UTRs.hg38.fragment_targets_minimal_sorted_longChr_hg38_b50.GC.rda"
# samplename <- "ALEX1"
# Q <- 20
# out.dir = getwd()
# nsubthread = 4
# cluster.type = "PSOCK"
# return.data = FALSE
# write.data = TRUE
# plot = TRUE
# source("/home/job/git_gustaveroussy/EaCoN/R/mini_functions.R")
# source("/home/job/git_gustaveroussy/EaCoN/R/BED_functions.R")
# source("/home/job/git_gustaveroussy/EaCoN/R/wes_process.R")
# suppressPackageStartupMessages(require(foreach))
# require(magrittr)
## CHECKS (files/parameters)
if (is.null(BINpack)) stop(tmsg("A BINpack file is required !"), call. = FALSE)
if (!file.exists(BINpack)) stop(tmsg("Could not find the BINpack file !"), call. = FALSE)
if (is.null(refBAM)) stop(tmsg("A reference BAM file is required !"), call. = FALSE)
if (!file.exists(refBAM)) stop(tmsg("Could not find the refBAM file !"), call. = FALSE)
if (is.null(testBAM)) stop(tmsg("A test BAM file is required !"), call. = FALSE)
if (!file.exists(testBAM)) stop(tmsg("Could not find the testBAM file !"), call. = FALSE)
if (!is.numeric(Q)) stop(tmsg("Q must be numeric !"), call. = FALSE)
if (is.null(out.dir)) stop(tmsg("An output directory is required !"), call. = FALSE)
if (!file.exists(out.dir)) stop(tmsg("Could not find the output directory !"), call. = FALSE)
if (!file.info(out.dir)$isdir) stop(tmsg("out.dir is not a directory"), call. = FALSE)
  if (!return.data && !write.data) stop(tmsg("At least one of return.data or write.data must be TRUE !"), call. = FALSE)
if (Q < 0) stop(tmsg("Q should be positive !"), call. = FALSE)
## WARNINGS
if (return.data) tmsg("Data will be returned.")
if (write.data) tmsg("Data will be written on disk.")
if (!plot) tmsg("No plot will get drawn.")
## Loading binpack
tmsg("Loading BINpack ...")
load(BINpack)
## CHECKS (genome)
# genome.pkg <- GC.data$info$genome.pkg
genome.pkg <- renorm.data$info$value[renorm.data$info$key == "genome-package"]
if (!genome.pkg %in% BSgenome::installed.genomes()) {
if (genome.pkg %in% BSgenome::available.genomes()) {
stop(tmsg(paste0("BSgenome ", genome.pkg, " available but not installed. Please install it !")), call. = FALSE)
} else {
stop(tmsg(paste0("BSgenome ", genome.pkg, " not available in valid BSgenomes and not installed ... Please check your genome name or install your custom BSgenome !")), call. = FALSE)
}
}
if (dir.exists(samplename)) { if (!force) stop(tmsg(paste0("A [", samplename, '] dir already exists !')), call. = FALSE) else unlink(samplename, recursive = TRUE, force = FALSE) }
### Loading genome
tmsg(paste0("Loading ", genome.pkg, " ..."))
suppressPackageStartupMessages(require(genome.pkg, character.only = TRUE))
BSg.obj <- getExportedValue(genome.pkg, genome.pkg)
genome <- BSgenome::providerVersion(BSg.obj)
## Files controls
tmsg("Checking BINpack and BAMs compatibility ...")
## Inspecting BAM headers
refBAM.h <- Rsamtools::scanBamHeader(refBAM)
testBAM.h <- Rsamtools::scanBamHeader(testBAM)
bed.data <- renorm.data$tracks[,1:4]
renorm.data$tracks <- NULL
gc()
if (!all(names(refBAM.h[[1]]$targets) %in% names(testBAM.h[[1]]$targets))) stop(tmsg("Reference BAM and Test BAM are not compatible (different chr names) !"), call. = FALSE)
if (!all(bed.data$chr %in% names(refBAM.h[[1]]$targets))) stop(tmsg("Reference BAM and BED are not compatible (different chr names) !"), call. = FALSE)
if (!all(unique(bed.data$chr) %in% names(testBAM.h[[1]]$targets))) stop(tmsg("Test BAM and BED are not compatible (different chr names) !"), call. = FALSE)
if (!all(unique(bed.data$chr) %in% BSgenome::seqnames(BSg.obj))) stop(tmsg("BED and BSgenome are not compatible (different chr names) !"), call. = FALSE)
## Identifying the platform
testBAM.h.unl <- unlist(testBAM.h)
manuf.hentry <- grep(pattern = "^PL:", testBAM.h.unl)[1]
manufacturer <- if(!is.na(manuf.hentry)) sub(pattern = "^PL:", replacement = "", testBAM.h.unl[[manuf.hentry]]) else "NA"
meta.b <- list(
samplename = samplename,
source = "WES",
source.file = list(refBAM = refBAM, testBAM = testBAM, BINpack = BINpack),
type = "WES",
manufacturer = manufacturer,
species = GenomeInfoDb::organism(BSg.obj),
genome = genome,
genome.pkg = genome.pkg,
predicted.gender = "NA"
)
meta.w <- list(
testBAM.header = paste0(testBAM.h, collapse = " "),
refBAM.header = paste0(refBAM.h, collapse = " "),
samtools.Q = Q,
# bin.size = GC.data$info$bin.size
bin.size = as.numeric(renorm.data$info$value[renorm.data$info$key == "bin_size"])
)
rm(testBAM.h, refBAM.h)
### Common BAM flags
param.FLAG <- Rsamtools::scanBamFlag(isSecondaryAlignment=FALSE, isNotPassingQualityControls=FALSE, isDuplicate=FALSE)
param.PILEUP <- Rsamtools::PileupParam(distinguish_strands = FALSE, max_depth = 5E+04, min_base_quality = Q, min_nucleotide_depth = 0, distinguish_nucleotides = TRUE)
bedbam2pup <- function(BamFile = NULL, bed.data = NULL, scanBamFlag = NULL, pileupParam = NULL) {
### Making pileup
tmsg(" Getting pileup ...")
param.BAM <- Rsamtools::ScanBamParam(which = GenomicRanges::makeGRangesFromDataFrame(bed.data, seqnames.field = "chr"), flag = scanBamFlag)
pres <- dplyr::as.tbl(Rsamtools::pileup(BamFile, scanBamParam = param.BAM, pileupParam = pileupParam))
colnames(pres)[c(1,5)] <- c("chr", "bin")
levels(pres$bin) <- bed.data$ProbeSetName
pres$bin <- as.integer(as.character(pres$bin))
# levels(pres$bin) <- rownames(bed.data)
gc()
if("=" %in% unique(pres$nucleotide)) tmsg(" Bam contains the '=' sign !")
### Building genomic sequence of reference block
tmsg(" Getting reference genome sequence ...")
refblock <- pres[!duplicated(pres$pos), c(1,2)]
pres <- dplyr::group_by(pres, pos)
refblock$tot_count <- dplyr::summarize(pres, tot_count = sum(count))$tot_count
pres <- dplyr::ungroup(pres)
refblock$nucleotide <- as.factor(BSgenome::getSeq(BSg.obj, names = GenomicRanges::makeGRangesFromDataFrame(refblock, start.field = "pos", end.field = "pos"), as.character = TRUE))
### Merging blocks
tmsg(" Computing alt counts ...")
# refblock <- dplyr::group_by(refblock, pos, nucleotide)
# pres <- dplyr::group_by(pres, pos, nucleotide)
merged <- suppressWarnings(dplyr::left_join(refblock, pres, by = c("chr", "pos", "nucleotide")))
rm(pres, refblock)
gc()
bed.based <- dplyr::as.tbl(data.frame(chr = unique(bed.data$chr), pos = as.integer(unlist(seq.int2(from = bed.data$start, to = bed.data$end, by = 1))), bin = rep(bed.data$ProbeSetName, times = (bed.data$end - bed.data$start +1))))
bed.joint <- suppressWarnings(dplyr::left_join(bed.based, merged, c("chr", "pos", "bin"))) ### yeah !
rm(bed.based)
gc()
### Cleaning and formatting
bed.joint$nucleotide <- NULL
bed.joint$chr <- as.factor(bed.joint$chr)
# bed.joint$which_label <- as.factor(bed.joint$which_label)
bed.joint$count <- bed.joint$tot_count - bed.joint$count
colnames(bed.joint) <- c("chr", "pos", "bin", "tot_count", "alt_count")
bed.joint$tot_count[is.na(bed.joint$tot_count)] <- 0L
bed.joint$alt_count[is.na(bed.joint$alt_count)] <- 0L
# bed.joint$nt[is.na(bed.joint$nt)] <- bed.based$nt[is.na(bed.joint$nt)]
return(bed.joint)
}
pileup.go <- function(testBAM = NULL, refBAM = NULL, bed.data = NULL, scanBamFlag = NULL, pileupParam = NULL, nsubthread = 1, cluster.type = "PSOCK") {
# BamFile <- testBAM
# scanBamFlag <- param.FLAG
# pileupParam <- param.PILEUP
# nsubthread <- 1
# cluster.type = "PSOCK"
#### Indexing BAM if needed
if (!(file.exists(paste0(testBAM, ".bai")) || file.exists(sub(pattern = "\\.bam", replacement = ".bai", x = testBAM, ignore.case = TRUE)))) {
tmsg("Indexing Test BAM ...")
Rsamtools::indexBam(testBAM)
} else tmsg("Test BAM is already indexed.")
if (!(file.exists(paste0(refBAM, ".bai")) || file.exists(sub(pattern = "\\.bam", replacement = ".bai", x = refBAM, ignore.case = TRUE)))) {
tmsg("Indexing Ref BAM ...")
Rsamtools::indexBam(refBAM)
} else tmsg("Ref BAM is already indexed.")
#### Launching cluster
if (length(unique(bed.data$chr)) < nsubthread) nsubthread <- length(unique(bed.data$chr))
cl <- parallel::makeCluster(spec = nsubthread, type = cluster.type, outfile = "")
doParallel::registerDoParallel(cl)
    ## foreach operators are used below without attaching the package, so import %dopar% explicitly
    `%dopar%` <- foreach::`%dopar%`
    k <- 0
    BAMcounts <- foreach::foreach(k = unique(as.character(bed.data$chr)), .inorder = TRUE, .export = c("tmsg", "BSg.obj", "bedbam2pup", "seq.int2")) %dopar% {
tmsg(paste0(" Sequence : ", k))
### Computing counts for both BAMs
bed.data.k <- bed.data[bed.data$chr == k,]
## Opening BAM connections
openBAM <- Rsamtools::BamFileList(c(testBAM, refBAM))
tmsg(" Computing TEST counts ...")
testPUP <- bedbam2pup(BamFile = openBAM[[1]], bed.data = bed.data.k, scanBamFlag = scanBamFlag, pileupParam = pileupParam)
tmsg(" Computing REF counts ...")
refPUP <- bedbam2pup(BamFile = openBAM[[2]], bed.data = bed.data.k, scanBamFlag = scanBamFlag, pileupParam = pileupParam)
### Merge counts
tmsg(" Merging TEST and REF ...")
mPUP <- dplyr::inner_join(testPUP, refPUP, c("chr", "pos", "bin"))
rm(testPUP, refPUP)
gc()
mPUP <- dplyr::group_by(mPUP, bin)
### Adding missing nucleotides
# mPUP$nt <- as.factor(unlist(strsplit(as.character(BSgenome::getSeq(BSg.obj, names = GenomicRanges::makeGRangesFromDataFrame(mPUP, seqnames.field = "chr", start.field = "pos", end.field = "pos"))), split = "")))
### Binning
tmsg(" Binning ...")
# CN.table <- dplyr::summarize(mPUP, chr = unique(chr), start = as.integer(min(pos)), end = as.integer(max(pos)), tot_count.test = as.integer(round(mean(tot_count.x))), tot_count.ref = as.integer(round(mean(tot_count.y))), GCPC = as.integer(round(seqinr::GC(as.character(nt))*100)))
CN.table <- dplyr::summarize(mPUP, chr = unique(chr), start = as.integer(min(pos)), end = as.integer(max(pos)), tot_count.test = as.integer(round(mean(tot_count.x))), tot_count.ref = as.integer(round(mean(tot_count.y))))
mPUP <- dplyr::ungroup(mPUP)
SNP.table <- mPUP[mPUP$alt_count.x > 0 | mPUP$alt_count.y > 0,]
### Cleaning
rm(mPUP)
gc()
CN.table <- dplyr::arrange(CN.table, chr, start, end)
# CN.table <- dplyr::select(CN.table, chr, start, end, bin, tot_count.test, tot_count.ref, GCPC)
CN.table <- dplyr::select(CN.table, chr, start, end, bin, tot_count.test, tot_count.ref)
colnames(SNP.table) <- c("chr", "pos", "bin", "tot_count.test", "alt_count.test", "tot_count.ref", "alt_count.ref")
gc()
return(list(CN = CN.table, SNP = SNP.table))
}
    parallel::stopCluster(cl)
return(BAMcounts)
}
  COUNTS.all <- pileup.go(testBAM = testBAM, refBAM = refBAM, bed.data = bed.data, scanBamFlag = param.FLAG, pileupParam = param.PILEUP, nsubthread = nsubthread, cluster.type = cluster.type)
  ## foreach is not attached here, so import the %do% operator explicitly before using it
  `%do%` <- foreach::`%do%`
  CN.all <- foreach::foreach(k = seq_along(COUNTS.all), .combine = "rbind") %do% {
k.tmp <- COUNTS.all[[k]]$CN
COUNTS.all[[k]]$CN <- NULL
gc()
return(k.tmp)
}
  SNP.all <- foreach::foreach(k = seq_along(COUNTS.all), .combine = "rbind") %do% {
k.tmp <- COUNTS.all[[k]]$SNP
COUNTS.all[[k]]$SNP <- NULL
gc()
return(k.tmp)
}
rm(COUNTS.all)
gc()
## Building WESobj
CN.all$bin <- as.integer(CN.all$bin)
SNP.all$bin <- as.integer(SNP.all$bin)
### summaries (recoded function as R internal summary uses too much RAM !)
my.summary <- function(myv = NULL) {
vsum <- c(min(myv, na.rm = TRUE), quantile(myv, .25, na.rm = TRUE), median(myv, na.rm = TRUE), mean(myv, na.rm = TRUE), quantile(myv, .75, na.rm = TRUE), max(myv, na.rm = TRUE))
names(vsum) <- c("min", "q25", "median", "mean", "q75", "max")
return(vsum)
}
meta.w$BIN.tot.count.test.mean.summary <- my.summary(CN.all$tot_count.test[!is.na(CN.all$tot_count.test)])
meta.w$BIN.tot.count.ref.mean.summary <- my.summary(CN.all$tot_count.ref[!is.na(CN.all$tot_count.ref)])
meta.w$SNP.tot.count.test.summary <- my.summary(SNP.all$tot_count.test[!is.na(SNP.all$tot_count.test)])
meta.w$SNP.tot.count.ref.summary <- my.summary(SNP.all$tot_count.ref[!is.na(SNP.all$tot_count.ref)])
gc()
WESobj <- list(RD = CN.all, SNP = SNP.all, meta = list(basic = meta.b, WES = meta.w))
rm(CN.all, SNP.all)
gc()
## QC : Computing coverages
tmsg("Computing coverages ...")
gw.rd <- sum(WESobj$RD$end - WESobj$RD$start +1)
gw.snp <- nrow(WESobj$SNP)
rd.cov <- data.frame(cuts = c(1, 5, 10, 20, 30, 40, 50, 75, 100, 150, 200), stringsAsFactors = FALSE)
rd.cov <- cbind(rd.cov, t(foreach::foreach(x = rd.cov$cuts, .combine = "cbind") %do% {
test.rd.in <- WESobj$RD$tot_count.test >= x
ref.rd.in <- WESobj$RD$tot_count.ref >= x
test.snprd.in <- WESobj$SNP$tot_count.test >= x
ref.snprd.in <- WESobj$SNP$tot_count.ref >= x
test.cut.cov <- if(!any(test.rd.in)) NA else (sum(WESobj$RD$end[test.rd.in] - WESobj$RD$start[test.rd.in] +1)/gw.rd)
ref.cut.cov <- if(!any(ref.rd.in)) NA else (sum(WESobj$RD$end[ref.rd.in] - WESobj$RD$start[ref.rd.in] +1)/gw.rd)
test.snpcut.cov <- if(!any(test.snprd.in)) NA else (length(which(test.snprd.in))/gw.snp)
ref.snpcut.cov <- if(!any(ref.snprd.in)) NA else (length(which(ref.snprd.in))/gw.snp)
return(c(test.cut.cov, ref.cut.cov, test.snpcut.cov, ref.snpcut.cov))
}))
colnames(rd.cov) <- c("MinDepth", "TestBINCoverage", "RefBINCoverage", "TestBAFCoverage", "RefBAFCoverage")
rm(test.snprd.in, ref.snprd.in, test.rd.in, ref.rd.in)
if (write.data || plot) dir.create(paste0(out.dir, "/", samplename))
if (write.data) write.table(rd.cov, file = paste0(out.dir, "/", samplename, "/", samplename, '_WES_', genome, "_b", meta.w$bin.size, "_coverage.txt"), sep = "\t", quote = FALSE, row.names = FALSE)
## QC : Plotting coverages
if (plot) {
### Coverage plot
png(paste0(out.dir, "/", samplename, "/", samplename, "_WES_", genome, "_b", meta.w$bin.size, "_coverage.png"), 800, 640)
plot(rd.cov$MinDepth, rd.cov$TestBAFCoverage, type = "b", col = 2, lty = 3, pch = 20, main = paste0(WESobj$meta$basic$samplename, "\nCoverage Plot"), xlab = "Minimum depth", ylab = "Coverage", ylim = c(0,1), xaxp = c(0,200,10))
abline(v = rd.cov$MinDepth, lty = 2, col = "grey75")
abline(h = seq(0,1,.1), lty = 2, col = "grey75")
lines(rd.cov$MinDepth, rd.cov$RefBAFCoverage, type = "b", col = 1, lty = 3, pch = 20)
lines(rd.cov$MinDepth, rd.cov$TestBINCoverage, type = "b", col = 2)
lines(rd.cov$MinDepth, rd.cov$RefBINCoverage, type = "b", col = 1)
abline(h = .5, lty = 2)
legend("topright", legend = c("Test BAF", "Ref BAF", "Test CN", "Ref CN"), inset = .02, col = c(2,1,2,1), lty = c(3,3,1,1), pch = c(20,20,1,1))
dev.off()
### RD plots
png(paste0(out.dir, "/", samplename, "/", samplename, "_WES_", genome, "_b", meta.w$bin.size, "_rawdepth.png"), 1600, 1050)
par(mfrow = c(2,1))
test.l10 <- log10(WESobj$RD$tot_count.test +1)
plot(test.l10, pch = ".", xaxs = "i", xlab = "Index", ylab = "log10(RD+1)", main = paste0(samplename, " TEST (", round(10^median(test.l10, na.rm = TRUE)), ")"))
abline(h = median(test.l10, na.rm = TRUE), lty = 2, col = "cyan")
lines(runmed(test.l10, 9999), col = 2)
ref.l10 <- log10(WESobj$RD$tot_count.ref +1)
plot(ref.l10, pch = ".", xaxs = "i", xlab = "Index", ylab = "log10(RD+1)", main = paste0(samplename, " REF (", round(10^median(ref.l10, na.rm = TRUE)), ")"))
abline(h = median(ref.l10, na.rm = TRUE), lty = 2, col = "cyan")
lines(runmed(ref.l10, 9999), col = 2)
dev.off()
}
rm(rd.cov)
## Saving
if (write.data) {
tmsg("Saving counts data ...")
saveRDS(WESobj, file = paste0(out.dir, "/", samplename, "/", samplename, "_", genome, "_b", meta.w$bin.size, "_binned.RDS"), compress = "bzip2")
}
if (return.data) return(WESobj)
}
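## Hedged usage sketch for WES.Bin() (not part of the original code) : BAM and BINpack paths
## are hypothetical. The call pileups the test and reference BAMs over the binned capture,
## then writes <samplename>_<genome>_b<bin.size>_binned.RDS plus coverage and raw-depth QC
## plots under out.dir/<samplename>. Guarded so it never runs on source.
if (FALSE) {
  WES.Bin(
    testBAM = "tumor_sample.bam",                        # hypothetical tumor BAM
    refBAM = "normal_sample.bam",                        # hypothetical matched normal BAM
    BINpack = "my_capture_kit_padded_hg19_b50.GC.rda",   # BINpack built by BINpack.Maker()
    samplename = "SAMPLE1",
    Q = 20,
    nsubthread = 4,
    out.dir = getwd()
  )
}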
## Performs the binning of BAMs using a BINpack, batch mode
WES.Bin.Batch <- function(BAM.list.file = NULL, BINpack = NULL, nthread = 1, cluster.type = "PSOCK", ...) {
  if (is.null(BAM.list.file) || !file.exists(BAM.list.file)) stop("Could not find BAM.list.file !", call. = FALSE)
message("Reading and checking BAM.list.file ...")
myBAMs <- read.table(file = BAM.list.file, header = TRUE, sep="\t", check.names = FALSE, as.is = TRUE)
head.ok <- c("testBAM", "refBAM", "SampleName")
  head.chk <- identical(colnames(myBAMs), head.ok)
if (!head.chk) {
message("Invalid header in BAM.list.file !")
message(paste0("EXPECTED : ", head.ok))
message(paste0("FOUND : ", colnames(myBAMs)))
stop("Invalid header.", call. = FALSE)
}
tb.chk <- file.exists(myBAMs$testBAM)
rb.chk <- file.exists(myBAMs$refBAM)
  if (!all(tb.chk) || !all(rb.chk)) {
message("Some BAM files from the BAM.list.file could not be found (wrong path or filename ?) !")
message("Missing testBAM file(s) :")
message(myBAMs$testBAM[which(!tb.chk)])
message("Missing refBAM file(s) :")
message(myBAMs$refBAM[which(!rb.chk)])
stop("Missing BAM file(s).", call. = FALSE)
}
sn.chk <- duplicated(myBAMs$SampleName)
if (any(sn.chk)) {
message("BAM.list.file contains duplicated samplenames !")
message(myBAMs$SampleName[which(duplicated(myBAMs$SampleName))])
stop("Duplicated samplenames.", call. = FALSE)
}
if(any(myBAMs$testBAM == myBAMs$refBAM)) {
message("Some testBAM and refBAM are identical for at least one sample !")
stop("Identical BAM files for Test and Ref.", call. = FALSE)
}
## Adjusting cores/threads
message("Adjusting number of cores if needed ...")
if (is.null(nthread)) nthread <- parallel::detectCores(logical = TRUE) -1
if (nrow(myBAMs) < nthread) nthread <- nrow(myBAMs)
message("Running EaCoN.WES.Bin() in batch mode ...")
message(paste0("Found ", nrow(myBAMs), " samples to process ..."))
current.bitmapType <- getOption("bitmapType")
`%dopar%` <- foreach::"%dopar%"
cl <- parallel::makeCluster(spec = nthread, type = cluster.type, outfile = "")
doParallel::registerDoParallel(cl)
eacon.batchres <- foreach::foreach(r = seq_len(nrow(myBAMs)), .inorder = TRUE, .errorhandling = "stop", .export = c("EaCoN.set.bitmapType", "WES.Bin", "tmsg")) %dopar% {
EaCoN.set.bitmapType(type = current.bitmapType)
WES.Bin(testBAM = myBAMs$testBAM[r], refBAM = myBAMs$refBAM[r], BINpack = BINpack, samplename = myBAMs$SampleName[r], cluster.type = cluster.type, ...)
}
parallel::stopCluster(cl)
}
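## Hedged usage sketch for WES.Bin.Batch() (not part of the original code) : the
## tab-separated BAM.list.file is expected to carry the header checked above
## (testBAM / refBAM / SampleName) ; all paths below are hypothetical examples.
##
##   testBAM              refBAM               SampleName
##   tumor_sample1.bam    normal_sample1.bam   SAMPLE1
##   tumor_sample2.bam    normal_sample2.bam   SAMPLE2
if (FALSE) {
  WES.Bin.Batch(
    BAM.list.file = "bam_list.tsv",                      # hypothetical sample sheet
    BINpack = "my_capture_kit_padded_hg19_b50.GC.rda",
    nthread = 2
  )
}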
## Performs the normalization of WES L2R and BAF signals
WES.Normalize <- function(data = NULL, BINpack = NULL, gc.renorm = TRUE, wave.renorm = FALSE, wave.rda = NULL, RD.tot.min = 20, RD.alt.min = 3, BAF.hetmin = .33, sex.chr = c("chrX", "chrY"), TumorBoost = FALSE, out.dir = getwd(), return.data = FALSE, write.data = TRUE, plot = TRUE) {
# setwd("/mnt/data_cigogne/job/PUBLI_EaCoN/TCGA/ANALYSES/EaCoN_0.3.0_beta2/WES/TCGA-A7-A0CE-01A_vs_10A")
# data <- readRDS("Sample_PHEO_AG_HS_048_DNA_hg38_b50_binned.RDS")
# BINpack <- "V4-UTRs.hg38.fragment_targets_minimal_sorted_longChr_hg38_b50.GC.rda"
# gc.renorm <- TRUE
# wave.renorm <- FALSE
# # wave.rda <- "/mnt/data_cigogne/job/PUBLI_EaCoN/TCGA/RESOURCES/SureSelect_ClinicalResearchExome.padded_GRCh37-lite_merged_sorted_hs37d5_b50.Wave.rda"
# RD.tot.min = 20
# RD.alt.min = 3
# TumorBoost = FALSE
# # sex.chr <- c("X", "Y")
# sex.chr <- c("chrX", "chrY")
# out.dir = getwd()
# return.data = FALSE
# write.data = TRUE
# plot = TRUE
# BAF.hetmin <- .33
# source("/home/job/git_gustaveroussy/EaCoN/R/mini_functions.R")
# source("/home/job/git_gustaveroussy/EaCoN/R/renorm_functions.R")
# require(foreach)
  ### TODO: add a check on the input RDS (to make sure a _processed.RDS file is not given as input!)
## CHECKS
if (!is.list(data)) stop(tmsg("data should be a list !"), call. = FALSE)
if (is.null(BINpack)) stop(tmsg("A BINpack file is required !"), call. = FALSE)
if (!file.exists(BINpack)) stop(tmsg("Could not find the BINpack file !"), call. = FALSE)
if (wave.renorm) { if (!is.null(wave.rda)) { if (!file.exists(wave.rda)) stop(tmsg(paste0("Could not find wave.rda file ", wave.rda)), call. = FALSE) } }
if (RD.tot.min < 0) stop(tmsg("RD.tot.min must be >= 0 !"), call. = FALSE)
if (RD.alt.min <= 0) stop(tmsg("RD.alt.min must be > 0 !"), call. = FALSE)
## TAGS
data$meta$WES$TumorBoost <- as.character(TumorBoost)
data$meta$WES$RD.tot.min <- RD.tot.min
data$meta$WES$GC.renorm <- as.character(gc.renorm)
data$meta$WES$Wave.renorm <- as.character(wave.renorm)
samplename <- data$meta$basic$samplename
## Loading BINpack
load(BINpack)
gc()
genome.pkg <- renorm.data$info$value[renorm.data$info$key == "genome-package"]
if (!genome.pkg %in% BSgenome::installed.genomes()) {
if (genome.pkg %in% BSgenome::available.genomes()) {
stop(tmsg(paste0("BSgenome ", genome.pkg, " available but not installed. Please install it !")), call. = FALSE)
} else {
stop(tmsg(paste0("BSgenome ", genome.pkg, " not available in valid BSgenomes and not installed ... Please check your genome name or install your custom BSgenome !")), call. = FALSE)
}
}
### Loading genome
tmsg(paste0("Loading ", genome.pkg, " ..."))
suppressPackageStartupMessages(require(genome.pkg, character.only = TRUE))
# requireNamespace(genome.pkg, quietly = TRUE)
BSg.obj <- getExportedValue(genome.pkg, genome.pkg)
genome <- BSgenome::providerVersion(BSg.obj)
cs <- chromobjector(BSg.obj)
## BAF HANDLING
tmsg("Processing BAF ...")
BAF.adder <- function(Bdata = NULL, Bvalues = NULL, newname = NULL, type = "test") {
Bdata[[paste0("BAF.", type)]] <- Bdata[[newname]] <- Bvalues
Bdata[[paste0("mBAF.", type)]] <- BAF2mBAF(Bdata[[paste0("BAF.", type)]])
return(Bdata)
}
### BAF : Filtering (low depth)
rd.ori <- nrow(data$SNP)
#### 1) Filtering for low ref or test depth
RDlow <- data$SNP$tot_count.test < RD.tot.min | data$SNP$tot_count.ref < RD.tot.min
  if (length(which(RDlow)) == nrow(data$SNP)) stop(tmsg("All SNP positions were discarded for their low read count ! You may consider lowering the RD.tot.min value."), call. = FALSE)
tmsg(paste0("Removed ", length(which(RDlow)), " (", round(length(which(RDlow)) / rd.ori * 100, digits = 2), "%) SNP positions with low depth (<", RD.tot.min, ")"))
data$SNP <- data$SNP[!RDlow,]
#### 2) Filtering for low alt test depth
BRDlow <- data$SNP$alt_count.test < RD.alt.min
if (length(which(BRDlow)) == nrow(data$SNP)) stop(tmsg("All SNP positions were discarded for their low alternative allele count ! You may consider lowering the RD.alt.min value."), call. = FALSE)
tmsg(paste0("Removed ", length(which(BRDlow)), " (", round(length(which(BRDlow)) / rd.ori * 100, digits = 2), "%) SNP positions with low alt RD (<", RD.alt.min, ")"))
data$SNP <- data$SNP[!BRDlow,]
gc()
### Computing BAF
data$SNP$BAF.test.ori <- data$SNP$alt_count.test / data$SNP$tot_count.test
data$SNP$BAF.ref.ori <- data$SNP$alt_count.ref / data$SNP$tot_count.ref
odd.idx <- which(data$SNP$pos %% 2 == 1)
data$SNP$BAF.test.ori[odd.idx] <- -data$SNP$BAF.test.ori[odd.idx] +1L
data$SNP$BAF.ref.ori[odd.idx] <- -data$SNP$BAF.ref.ori[odd.idx] +1L
data$SNP <- BAF.adder(Bdata = data$SNP, Bvalues = data$SNP$BAF.test.ori, newname = "BAF.test.ori")
data$SNP <- BAF.adder(Bdata = data$SNP, Bvalues = data$SNP$BAF.ref.ori, newname = "BAF.ref.ori", type = "ref")
###### Computing LOR (and variance)
rcmat <- round(cbind(data$SNP$BAF.test.ori*data$SNP$tot_count.test, (1-data$SNP$BAF.test.ori)*data$SNP$tot_count.test))
data$SNP$LOR <- log(rcmat[,1]+1/6) - log(rcmat[,2]+1/6)
data$SNP$LORvar <- 1/(rcmat[,1]+1/6) + 1/(rcmat[,2]+1/6)
rm(rcmat)
gc()
### BAF : TumorBoost
if (TumorBoost) {
message(tmsg("Applying TumorBoost BAF normalization ..."))
# data$SNP$BAF.test <- data$SNP$BAF.test.TB <- as.numeric(aroma.light::normalizeTumorBoost(data$SNP$BAF.test, data$SNP$BAF.ref, flavor = "v4", preserveScale = FALSE))
BTB <- as.numeric(aroma.light::normalizeTumorBoost(data$SNP$BAF.test, data$SNP$BAF.ref, flavor = "v4", preserveScale = FALSE))
data$SNP <- BAF.adder(Bdata = data$SNP, Bvalues = BTB, newname = "BAF.test.TB")
}
### Getting heterozygous probes from Ref
Ref.hetero <- data$SNP$mBAF.ref >= BAF.hetmin
if (!any(Ref.hetero)) stop(tmsg("All SNP positions were tagged as homozygous in Ref : there may be a problem with your reference BAM ploidy !"), call. = FALSE)
### Keeping hetero positions
data$SNP <- data$SNP[Ref.hetero,]
### Removing additional values per bin
data$SNP <- data$SNP[!duplicated(data$SNP$bin),]
gc()
### BAF filtering
# smoB <- round(nrow(data$SNP) / 3300)
# if(smoB%%2 == 0) smoB <- smoB+1
# mBAF.rm <- runmed(data$SNP$mBAF.test, smoB)
# mBAF.diff <- abs(data$SNP$mBAF.test - mBAF.rm)
# Bfiltered <- mBAF.diff < quantile(mBAF.diff, BAF.filter)
# data$SNP <- data$SNP[Bfiltered,]
### Adding LOR
data$SNP$LOR.test <- log(data$SNP$alt_count.test / (data$SNP$tot_count.test - data$SNP$alt_count.test))
## L2R
tmsg("Processing RD bins ...")
#### Computing L2R
data$RD$L2R <- data$RD$L2R.ori <- log2((data$RD$tot_count.test+1) / (data$RD$tot_count.ref+1))
### L2R : Filtering
#### 1) low depth
rd.ori <- nrow(data$RD)
RDlow <- data$RD$tot_count.test < RD.tot.min | data$RD$tot_count.ref < RD.tot.min
data$meta$WES$Imputed.lowdepth.bins <- length(which(RDlow))
  if (length(which(RDlow)) == nrow(data$RD)) stop(tmsg("All RD bins were flagged for their low read count ! You may consider lowering the RD.tot.min value."), call. = FALSE)
tmsg(paste0("Flagged ", length(which(RDlow)), " (", round(length(which(RDlow)) / rd.ori * 100, digits = 2), "%) RD bins with low depth (<", RD.tot.min, ")"))
#### 2) GC% outliers
GCOL <- renorm.data$tracks[,5] < 200 | renorm.data$tracks[,5] > 800
data$meta$WES$Imputed.GCoutlier.bins <- length(which(GCOL))
if (length(which(GCOL)) == nrow(data$RD)) stop(tmsg("All RD bins were flagged as GC% outliers ! There may be something wrong with your reference genome and/or capture BED."), call. = FALSE)
tmsg(paste0("Flagged ", length(which(GCOL)), " (", round(length(which(GCOL)) / rd.ori * 100, digits = 2), "%) RD bins as GC% outliers."))
### Pooling and imputing
FLAGS <- RDlow + GCOL > 0
if (any(FLAGS)) {
tmsg(paste0(" Imputed ", length(which(FLAGS)), " (", round(length(which(FLAGS))/rd.ori*100, digits = 2), "%) L2R bins."))
l2r.tmp <- data$RD$L2R
l2r.tmp[FLAGS] <- NA
data$RD$L2R <- data$RD$L2R.imp <- approxfun(seq_along(l2r.tmp), l2r.tmp, rule = 2)(seq_along(l2r.tmp))
} else data$RD$L2R.imp <- data$RD$L2R
gc()
## L2R : Normalization
smo <- round(nrow(data$RD) / 550)
if(smo%%2 == 0) smo <- smo+1
### Wave
if (wave.renorm) {
tmsg("Wave normalization ...")
l2r2norm <- data.frame(ProbeSetName = data$RD$bin, chr = as.character(data$RD$chr), pos = data$RD$start, L2R = data$RD$L2R)
# rownames(l2r2norm) <- seq_len(nrow(l2r2norm))
ren.res <- renorm.go(input.data = l2r2norm, renorm.rda = wave.rda, track.type = "Wave", smo = smo, arraytype = data$meta$basic$type, genome = genome)
fitted.l2r <- ren.res$renorm$l2r$l2r
GCF <- is.na(fitted.l2r)
if (any(GCF)) {
l2r.tmp <- fitted.l2r
l2r.tmp[GCF] <- NA
fitted.l2r <- approxfun(seq_along(l2r.tmp), l2r.tmp, rule = 2)(seq_along(l2r.tmp))
}
if(is.null(ren.res$renorm$pos)) {
# meta.b <- setmeta("gc.renorm", "None", meta.b)
data$meta$WES <- setmeta("wave.renorm", "None", data$meta$WES)
tmsg(" No positive fit.")
} else {
## Tweaking sex chromosomes
sex.idx <- data$RD$chr %in% sex.chr
auto.ori.med <- median(data$RD$L2R[!sex.idx], na.rm = TRUE)
auto.rn.med <- median(fitted.l2r[!sex.idx], na.rm = TRUE)
if (any(sex.idx)) {
for (k in sex.chr) {
k.idx <- data$RD$chr == k
if (any(k.idx)) {
k.ori.diffmed <- median(data$RD$L2R.ori[k.idx], na.rm = TRUE) - auto.ori.med
k.rn.diffmed <- median(fitted.l2r[k.idx], na.rm = TRUE) - auto.rn.med
fitted.l2r[k.idx] <- fitted.l2r[k.idx] - k.rn.diffmed + k.ori.diffmed
}
}
}
# meta.b <- setmeta("wave.renorm", paste0(ren.res$renorm$pos, collapse = ","), meta.b)
data$meta$WES <- setmeta("wave.renorm", paste0(ren.res$renorm$pos, collapse = ","), data$meta$WES)
}
rm(ren.res)
data$RD$L2R.WAVE <- data$RD$L2R <- fitted.l2r - median(fitted.l2r, na.rm = TRUE)
} else {
# meta.b <- setmeta("wave.renorm", "FALSE", meta.b)
data$meta$WES <- setmeta("wave.renorm", "FALSE", data$meta$WES)
}
#### GC%
# message("GC% normalization ...")
# data$RD$L2R.GC <- data$RD$L2R <- limma::loessFit(x = data$RD$GCPC, y = data$RD$L2R)$residuals
# if (any(is.na(data$RD$L2R))) {
# l2r.tmp <- data$RD$L2R
# l2r.tmp[is.na(data$RD$L2R)] <- NA
# data$RD$L2R <- data$RD$L2R.GC <- approxfun(seq_along(l2r.tmp), l2r.tmp, rule = 2)(seq_along(l2r.tmp))
# }
if (gc.renorm) {
tmsg("GC% normalization ...")
l2r2norm <- data.frame(ProbeSetName = data$RD$bin, chr = as.character(data$RD$chr), pos = data$RD$start, L2R = data$RD$L2R)
# rownames(l2r2norm) <- seq_len(nrow(l2r2norm))
ren.res <- renorm.go(input.data = l2r2norm, renorm.rda = BINpack, track.type = "GC", smo = smo, arraytype = data$meta$basic$type, genome = genome)
fitted.l2r <- ren.res$renorm$l2r$l2r
GCF <- is.na(fitted.l2r)
if (any(GCF)) {
l2r.tmp <- fitted.l2r
l2r.tmp[GCF] <- NA
fitted.l2r <- approxfun(seq_along(l2r.tmp), l2r.tmp, rule = 2)(seq_along(l2r.tmp))
}
if(is.null(ren.res$renorm$pos)) {
# meta.b <- setmeta("gc.renorm", "None", meta.b)
data$meta$eacon <- setmeta("gc.renorm", "None", data$meta$eacon)
tmsg(" No positive fit.")
} else {
## Tweaking sex chromosomes
sex.idx <- data$RD$chr %in% sex.chr
auto.ori.med <- median(data$RD$L2R.ori[!sex.idx], na.rm = TRUE)
auto.rn.med <- median(fitted.l2r[!sex.idx], na.rm = TRUE)
if (any(sex.idx)) {
for (k in sex.chr) {
k.idx <- data$RD$chr == k
if (any(k.idx)) {
# k.ori.diffmed <- median(data$RD$L2R.ori[k.idx], na.rm = TRUE) - auto.ori.med
k.ori.diffmed <- median(data$RD$L2R.ori[k.idx], na.rm = TRUE) - auto.ori.med
k.rn.diffmed <- median(fitted.l2r[k.idx], na.rm = TRUE) - auto.rn.med
fitted.l2r[k.idx] <- fitted.l2r[k.idx] - k.rn.diffmed + k.ori.diffmed
}
}
}
# meta.b <- setmeta("gc.renorm", paste0(ren.res$renorm$pos, collapse = ","), meta.b)
data$meta$eacon <- setmeta("gc.renorm", paste0(ren.res$renorm$pos, collapse = ","), data$meta$eacon)
}
rm(ren.res)
data$RD$L2R.GC <- data$RD$L2R <- fitted.l2r - median(fitted.l2r, na.rm = TRUE)
} else {
# meta.b <- setmeta("gc.renorm", "FALSE", meta.b)
data$meta$eacon <- setmeta("gc.renorm", "FALSE", data$meta$eacon)
}
## Merging
data$RD$BAF <- dplyr::left_join(data$RD[, c(1:4,9)], data$SNP[, c(1,3,which(colnames(data$SNP) == "BAF.test"))], by = c("chr", "bin"))$BAF.test
# data$RD$LOR <- dplyr::left_join(data$RD[, c(1:4,9)], data$SNP[, c(1,3,which(colnames(data$SNP) == "LOR.test"))], by = c("chr", "bin"))$LOR.test
data$RD$LOR <- dplyr::left_join(data$RD[, c(1:4,9)], data$SNP[, c(1,3,which(colnames(data$SNP) == "LOR"))], by = c("chr", "bin"))$LOR
data$RD$LORvar <- dplyr::left_join(data$RD[, c(1:4,9)], data$SNP[, c(1,3,which(colnames(data$SNP) == "LORvar"))], by = c("chr", "bin"))$LORvar
data$RD$RD.test <- dplyr::left_join(data$RD[, c(1:4,9)], data$SNP[, c(1,3,which(colnames(data$SNP) == "tot_count.test"))], by = c("chr", "bin"))$tot_count.test
data$RD$RD.ref <- dplyr::left_join(data$RD[, c(1:4,9)], data$SNP[, c(1,3,which(colnames(data$SNP) == "tot_count.ref"))], by = c("chr", "bin"))$tot_count.ref
## Building ASCAT object
tmsg("Building normalized object ...")
my.ch <- sapply(unique(data$RD$chr), function(x) { which(data$RD$chr == x) })
my.ascat.obj <- list(
data = list(
Tumor_LogR.ori = data.frame(sample = data$RD$L2R.ori, row.names = data$RD$bin),
Tumor_LogR = data.frame(sample = data$RD$L2R, row.names = data$RD$bin),
Tumor_BAF = data.frame(sample = data$RD$BAF, row.names = data$RD$bin),
Tumor_LogR_segmented = NULL,
Tumor_BAF_segmented = NULL,
Germline_LogR = NULL,
Germline_BAF = NULL,
SNPpos = data.frame(chrs = data$RD$chr, pos = round((data$RD$start + data$RD$end)/2)),
ch = my.ch,
chr = my.ch,
chrs = levels(data$RD$chr),
samples = samplename,
gender = "NA",
sexchromosomes = sex.chr,
failedarrays = NULL,
additional = data$RD[,colnames(data$RD) %in% c("RD.test", "RD.ref", "LOR", "LORvar")]
),
meta = data$meta,
germline = list(germlinegenotypes = matrix(is.na(data$RD$BAF), ncol = 1, dimnames = list(data$RD$bin, samplename)), failedarrays = NULL)
)
colnames(my.ascat.obj$data$Tumor_LogR) <- colnames(my.ascat.obj$data$Tumor_LogR.ori) <- colnames(my.ascat.obj$data$Tumor_BAF) <- samplename
# rm(my.ch, data)
gc()
# plot(ares$Tumor_LogR[,1], pch = ".", cex = 3, xaxs = "i", ylim = c(-2,2))
# points(ares$Tumor_LogR_segmented, pch = ".", cex = 3, col = 2)
# plot(ares$Tumor_BAF[!is.na(ares$Tumor_BAF),1], pch = ".", xaxs = "i", cex = 3)
# points(ares$Tumor_BAF_segmented[[1]], pch = ".", cex = 3, col = 2)
# points(1 - ares$Tumor_BAF_segmented[[1]], pch = ".", cex = 3, col = 2)
#
## Saving data
if (write.data) {
tmsg("Saving normalized data ...")
saveRDS(my.ascat.obj, paste0(out.dir, "/", samplename, "_", data$meta$basic$genome, "_b", data$meta$WES$bin.size, "_processed.RDS"), compress = "bzip2")
}
## Plot
tmsg("Plotting ...")
if (plot) {
l2r <- my.ascat.obj$data$Tumor_LogR[,1]
l2r.rm <- runmed(l2r, smo)
l2r.dif <- diff(l2r)
l2r.mad <- median(abs(l2r.dif[l2r.dif != 0]))
l2r.rm.dif <- diff(l2r.rm)
l2r.ssad <- sum(abs(l2r.rm.dif[l2r.rm.dif != 0]))
l2r.ori <- my.ascat.obj$data$Tumor_LogR.ori[,1]
l2r.ori.rm <- runmed(l2r.ori, smo)
l2r.ori.dif <- diff(l2r.ori)
l2r.ori.mad <- median(abs(l2r.ori.dif[l2r.ori.dif != 0]))
l2r.ori.rm.dif <- diff(l2r.ori.rm)
l2r.ori.ssad <- sum(abs(l2r.ori.rm.dif[l2r.ori.rm.dif != 0]))
l2r.genopos <- my.ascat.obj$data$SNPpos$pos + cs$chromosomes$chr.length.toadd[my.ascat.obj$data$SNPpos$chrs]
l2r <- l2r - median(l2r, na.rm = TRUE)
l2r.ori <- l2r.ori - median(l2r.ori, na.rm = TRUE)
    kend <- l2r.genopos[vapply(unique(my.ascat.obj$data$SNPpos$chrs), function(k) { max(which(my.ascat.obj$data$SNPpos$chrs == k))}, 1)]
png(paste0(out.dir, "/", samplename, "_WES_", data$meta$basic$genome, "_rawplot.png"), 1600, 1050)
par(mfrow = c(3,1))
plot(l2r.genopos, l2r.ori, pch = ".", cex = 3, col = "grey70", xaxs = "i", yaxs = "i", ylim = c(-2,2), main = paste0(samplename, " WES (", data$meta$basic$manufacturer, ") raw L2R profile (median-centered)\nMAD = ", round(l2r.ori.mad, digits = 2), " ; SSAD = ", round(l2r.ori.ssad, digits = 2)), xlab = "Genomic position", ylab = "L2R")
lines(l2r.genopos, l2r.ori.rm, col = 1)
abline(v = kend, col = 4, lty = 3, lwd = 2)
abline(h = 0, col = 2, lty = 2, lwd = 2)
plot(l2r.genopos, l2r, pch = ".", cex = 3, col = "grey70", xaxs = "i", yaxs = "i", ylim = c(-2,2), main = paste0(samplename, " WES (", data$meta$basic$manufacturer, ") normalized L2R profile (median-centered)\nMAD = ", round(l2r.mad, digits = 2), " ; SSAD = ", round(l2r.ssad, digits = 2)), xlab = "Genomic position", ylab = "L2R")
lines(l2r.genopos, l2r.rm, col = 1)
abline(v = kend, col = 4, lty = 3, lwd = 2)
abline(h = 0, col = 2, lty = 2, lwd = 2)
plot(l2r.genopos, my.ascat.obj$data$Tumor_BAF[,1], pch = ".", cex = 3, col = "grey75", xaxs = "i", yaxs = "i", ylim = c(0,1), main = paste0(samplename, " WES (", data$meta$basic$manufacturer, ")", if(TumorBoost) " TumorBoost-normalized", " BAF profile"), xlab = "Genomic position", ylab = "BAF")
abline(v = kend, col = 4, lty = 3, lwd = 2)
abline(h = .5, col = 2, lty = 2, lwd = 2)
dev.off()
}
tmsg("Done.")
if(return.data) return(my.ascat.obj)
}
## Runs WES.Normalize using a RDS filename
WES.Normalize.ff <- function(BIN.RDS.file = NULL, ...) {
## CHECKS
if (is.null(BIN.RDS.file)) stop(tmsg("An RDS file from EaCoN::EaCoN.WES.Bin is required !"), call. = FALSE)
if (!file.exists(BIN.RDS.file)) stop(tmsg(paste0("Could not find ", BIN.RDS.file, " .")), call. = FALSE)
tmsg("Loading binned WES data ...")
my.data <- readRDS(BIN.RDS.file)
WES.Normalize(data = my.data, out.dir = dirname(BIN.RDS.file), ...)
}
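## Hedged usage sketch for WES.Normalize.ff() (not part of the original code) : paths are
## hypothetical. The _binned.RDS comes from WES.Bin(), and the BINpack must be the same one
## used for binning so that the GC renormalization tracks match the bins.
if (FALSE) {
  WES.Normalize.ff(
    BIN.RDS.file = "SAMPLE1/SAMPLE1_hg19_b50_binned.RDS", # hypothetical WES.Bin() output
    BINpack = "my_capture_kit_padded_hg19_b50.GC.rda",
    gc.renorm = TRUE,
    wave.renorm = FALSE
  )
}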
## Runs WES.Normalize.ff, batch mode
WES.Normalize.ff.Batch <- function(BIN.RDS.files = list.files(path = getwd(), pattern = "_binned.RDS$", all.files = FALSE, full.names = TRUE, recursive = TRUE, ignore.case = FALSE, include.dirs = FALSE), nthread = 1, cluster.type = "PSOCK", ...) {
if (length(BIN.RDS.files) == 0) stop("No file found to process !", call. = FALSE)
message("Running EaCoN.WES.Normalize.ff() in batch mode ...")
message(paste0("Found ", length(BIN.RDS.files), " samples to process ..."))
current.bitmapType <- getOption("bitmapType")
`%dopar%` <- foreach::"%dopar%"
cl <- parallel::makeCluster(spec = nthread, type = cluster.type, outfile = "")
doParallel::registerDoParallel(cl)
eacon.batchres <- foreach::foreach(r = seq_along(BIN.RDS.files), .inorder = TRUE, .errorhandling = "stop") %dopar% {
EaCoN.set.bitmapType(type = current.bitmapType)
WES.Normalize.ff(BIN.RDS.file = BIN.RDS.files[r], ...)
}
parallel::stopCluster(cl)
}
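## Hedged usage sketch for WES.Normalize.ff.Batch() (not part of the original code) : relies
## on the default "_binned.RDS" pattern search under the working directory ; the BINpack path
## is a hypothetical example and is forwarded to WES.Normalize() through "...".
if (FALSE) {
  WES.Normalize.ff.Batch(
    BINpack = "my_capture_kit_padded_hg19_b50.GC.rda",
    nthread = 2,
    gc.renorm = TRUE
  )
}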
bedBinner <- function(bed = NULL, bin.size = 50, nthread = 1) {
bin.size <- as.integer(bin.size)
cl <- parallel::makeCluster(spec = nthread, type = "PSOCK", outfile = "")
suppressPackageStartupMessages(require(foreach))
doParallel::registerDoParallel(cl)
k <- 0
bed.binned <- foreach::foreach(k = unique(bed$chr), .combine = "rbind", .export = "tmsg") %dopar% {
tmsg(k)
bedk <- bed[bed$chr == k,]
b <- 0
bbk <- foreach::foreach(b = seq_len(nrow(bedk)), .combine = "rbind", .export = "bin.size") %do% {
### Smaller exon
exon.length <- (bedk$end[b] - bedk$start[b] + 1L)
if (exon.length <= bin.size) return(bedk[b,])
mod.rest <- exon.length %% bin.size
mod.count <- as.integer((exon.length - mod.rest) / bin.size)
bin.starts <- bedk$start[b] + ((seq_len(mod.count)-1L) * bin.size)
bin.ends <- bin.starts + bin.size - 1L
## Non-Round count
if (mod.rest > 0L) {
## Enough for a new bin
if (mod.rest >= (bin.size / 2L)) {
bin.starts <- c(bin.starts, bin.starts[mod.count]+bin.size)
bin.ends <- c(bin.ends, bedk$end[b])
mod.count <- mod.count+1L
} else { ## Dispatch to inside bins
if (mod.rest >= mod.count) {
mod.rest2 <- mod.rest %% mod.count
mod.count2 <- as.integer((mod.rest - mod.rest2) / mod.count)
bin.starts <- bedk$start[b] + ((seq_len(mod.count)-1L) * (bin.size + mod.count2))
bin.ends <- bin.starts + (bin.size + mod.count2) - 1L
bin.ends[mod.count] <- bin.ends[mod.count] + mod.rest2
} else {
# bin.ends[mod.count] <- bin.ends[mod.count] + mod.rest
bin.ends[mod.count] <- bedk$end[b]
}
}
}
# if(length(bin.starts) != length(bin.starts))
chrs = rep(bedk$chr[b], mod.count)
return(data.frame(chr = chrs, start = bin.starts, end = bin.ends, stringsAsFactors = FALSE))
}
return(bbk)
}
parallel::stopCluster(cl)
return(bed.binned)
}
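## Minimal worked example for bedBinner() (not part of the original code), assuming a toy
## data.frame with the same chr/start/end layout as the cleaned BED. A 125-bp exon with
## bin.size = 50 leaves a 25-bp remainder (>= bin.size/2), so a third 25-bp bin is created ;
## a 120-bp exon leaves a 20-bp remainder (< bin.size/2) that is redistributed over the two
## bins, giving two 60-bp bins.
if (FALSE) {
  toy.bed <- data.frame(chr = c("chr1", "chr1"), start = c(101L, 1001L), end = c(225L, 1120L), stringsAsFactors = FALSE)
  bedBinner(bed = toy.bed, bin.size = 50, nthread = 1)
}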
## Compute letter composition of nucleotidic sequences from a (chr, start, end) dataframe, with possible extension.
loc.nt.count.hs <- function(loc.df = NULL, genome.pkg = "BSgenome.Hsapiens.UCSC.hg19", extend = 0, blocksize = 1E+04, nthread = 5) {
if (is.null(loc.df)) stop("loc.df is required !", call. = FALSE)
if (extend < 0) stop("extend should be >= 0", call. = FALSE)
if (blocksize <= 0) stop("blocksize should be > 0", call. = FALSE)
if (!all(is.character(loc.df$chr) | is.factor(loc.df$chr))) stop("chr should be character !", call. = FALSE)
if (!all(is.numeric(loc.df$start))) stop("start should be numeric !", call. = FALSE)
if (!all(is.numeric(loc.df$end))) stop("end should be numeric !", call. = FALSE)
if (!genome.pkg %in% BSgenome::installed.genomes()) {
if (genome.pkg %in% BSgenome::available.genomes()) {
stop(tmsg(paste0("BSgenome ", genome.pkg, " available but not installed. Please install it !")), call. = FALSE)
} else {
stop(tmsg(paste0("BSgenome ", genome.pkg, " not available in valid BSgenomes and not installed ... Please check your genome name or install your custom BSgenome !")), call. = FALSE)
}
}
print(paste0("Loading ", genome.pkg, " sequence ..."))
# requireNamespace(genome.pkg, quietly = TRUE)
suppressPackageStartupMessages(require(genome.pkg, character.only = TRUE))
# require(genome.pkg, character.only = TRUE)
BSg.obj <- getExportedValue(genome.pkg, genome.pkg)
genome <- BSgenome::providerVersion(BSg.obj)
cs <- chromobjector(BSg.obj)
print("Removing replicated locations ...")
idz <- paste0(loc.df$chr, ":", loc.df$start, "-", loc.df$end)
loc.df <- loc.df[!duplicated(idz),]
print("Removing non-canonical sequences ...")
loc.df <- loc.df[loc.df$chr %in% seqnames(BSg.obj),]
print("Ordering data ...")
loc.df <- loc.df[order(unlist(cs$chrom2chr[loc.df$chr]), loc.df$start, loc.df$ProbeSetName),]
myGR.ex <- suppressPackageStartupMessages(GenomicRanges::makeGRangesFromDataFrame(loc.df, seqinfo = seqinfo(BSg.obj)))
if (extend > 0) myGR.ex <- GenomicRanges::trim(myGR.ex + extend)
instep <- as.numeric(cut(seq_along(myGR.ex), seq.int(0, length(myGR.ex) + blocksize, blocksize)))
print("Computing base composition ...")
print("Starting cluster ...")
if (length(unique(instep)) < nthread) nthread <- length(unique(instep))
cl <- parallel::makeCluster(spec = nthread, type = "PSOCK", outfile = "")
doParallel::registerDoParallel(cl)
requireNamespace("foreach", quietly = TRUE)
`%dopar%` <- foreach::"%dopar%"
xcounts <- foreach::foreach(x = unique(instep), .combine = "rbind", .packages = c("Biostrings", "BSgenome"), .export = "getSeq") %dopar% {
return(Biostrings::alphabetFrequency(BSgenome::getSeq(BSg.obj, myGR.ex[which(instep == x)]), baseOnly = TRUE))
}
print("Stopping cluster ...")
parallel::stopCluster(cl)
out.df <- cbind(loc.df, xcounts)
return(out.df)
}
loc.nt.gcc.hs <- function(loc.counts = NULL) {
gcc <- (loc.counts$C + loc.counts$G) / (loc.counts$A + loc.counts$C + loc.counts$G + loc.counts$T)
return(data.frame(loc.counts, GC = gcc, stringsAsFactors = FALSE))
}
## Compute GC on a (chr, start, end) dataframe using multiple extend values
loc.nt.gcc.hs.multi <- function(loc.df = NULL, extend.multi = c(50, 100, 200, 400, 800, 1600, 3200, 6400), ...) {
# require(foreach)
requireNamespace("foreach", quietly = TRUE)
`%do%` <- foreach::"%do%"
gc.list <- foreach::foreach(nt.add = extend.multi) %do% {
print(paste0("Computing GC +", nt.add, " ..."))
adb.counts <- loc.nt.count.hs(loc.df = loc.df, extend = nt.add, ...)
adb.gc <- loc.nt.gcc.hs(loc.counts = adb.counts)
return(adb.gc)
}
base.df <- gc.list[[1]][,1:4]
gc.df <- foreach::foreach(nt.add = seq_along(gc.list), .combine = "cbind") %do% { return(as.integer(round(gc.list[[nt.add]][["GC"]] * 1000))) }
colnames(gc.df) <- paste0("GC", extend.multi)
return(data.frame(base.df, gc.df, stringsAsFactors = FALSE))
}
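## Hedged usage sketch for loc.nt.gcc.hs.multi() (not part of the original code) : computes
## GC% (scaled x1000) around each region for several symmetric extensions. The input mirrors
## the binned BED layout (ProbeSetName / chr / start / end) ; the regions below are arbitrary
## examples and genome.pkg / nthread are passed through "..." to loc.nt.count.hs().
if (FALSE) {
  toy.loc <- data.frame(ProbeSetName = 1:2, chr = c("chr1", "chr2"),
                        start = c(1000000L, 2000000L), end = c(1000050L, 2000050L),
                        stringsAsFactors = FALSE)
  loc.nt.gcc.hs.multi(loc.df = toy.loc, extend.multi = c(0, 100, 500),
                      genome.pkg = "BSgenome.Hsapiens.UCSC.hg19", nthread = 1)
}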
genome.build.finder <- function(BAM.header = NULL, valid.genomes = NULL) {
BAM.header <- unlist(BAM.header)
query <- paste0("(", paste0(valid.genomes, collapse = "|"), ")")
stvh.grep <- grep(query, unlist(BAM.header))
if (length(stvh.grep) == 0) stop(tmsg("Could not automatically determine genome build ! Please specify it !"), call. = FALSE)
stvh.regexec <- unique(vapply(stvh.grep, function(x) {
rc.res <- regexec(query, BAM.header[x])[[1]]
return(as.character(substr(BAM.header[x], start = rc.res[1], stop = rc.res[1]+attr(rc.res, "match.length")[1]-1)))
}, "a"))
ok.genome <- unique(stvh.regexec[stvh.regexec %in% valid.genomes])
  if (length(ok.genome) == 0) stop(tmsg(paste0("Identified putative genome build(s) (", paste(stvh.regexec, collapse = ", "), "), but none is a supported one !")), call. = FALSE)
  if (length(ok.genome) >= 2) stop(tmsg(paste0("Identified more than one putative genome build (", paste(ok.genome, collapse = ", "), ") !")), call. = FALSE)
return(ok.genome)
}
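## Hedged usage sketch for genome.build.finder() (not part of the original code) : the header
## content below is made up for illustration. In real use BAM.header comes from
## Rsamtools::scanBamHeader() on the BAM, and valid.genomes lists the supported build names ;
## here the "hg19" token embedded in the fake @PG command line is what gets detected.
if (FALSE) {
  fake.header <- list(list(text = list("@PG" = c("ID:bwa", "CL:bwa mem -t 8 hg19.fa sample.fastq"))))
  genome.build.finder(BAM.header = fake.header, valid.genomes = c("hg19", "hg38"))
}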
|