| column | dtype | stats |
|---|---|---|
| content | large_string | lengths 0-6.46M |
| path | large_string | lengths 3-331 |
| license_type | large_string | 2 classes |
| repo_name | large_string | lengths 5-125 |
| language | large_string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4-6.46M |
| extension | large_string | 75 classes |
| text | string | lengths 0-6.46M |
dir.create("reproducible research")
setwd(paste(getwd(),"/reproducible research",sep=""))
download.file("https://d396qusza40orc.cloudfront.net/repdata%2Fdata%2Factivity.zip", "./repdata1.zip")
unzip("./repdata1.zip")
list.files()
projData<-read.csv("activity.csv")
day <- gl(61,288)  # factor labelling each of the 61 days (288 five-minute intervals per day)
projData <- cbind(projData, day)
byDay <- aggregate(projData["steps"],by=list(projData$day),FUN=sum, na.rm=FALSE)  # total steps per day
names(byDay)[1]<-"Day"
mean(byDay$steps,na.rm=TRUE)
median(byDay$steps,na.rm=TRUE)
projData$interval <- as.factor(projData$interval)
byInterval <- aggregate(projData["steps"],by=list(projData$interval),FUN=mean, na.rm=TRUE)
names(byInterval)[1] <- "interval"
byInterval <- cbind(interval_index=1:288, byInterval)
with(byInterval, plot(interval_index, steps, type='l',main="Average steps in 5 min intervals", xlab="Interval Index (Total=288)", ylab="Average Steps"))
which(byInterval$steps==max(byInterval$steps))
table(is.na(projData$steps))
naidx <- which(is.na(projData$steps)==TRUE)
nadays<- projData$day[naidx]
nameans <- byDay$steps[nadays]
byDay$steps[is.na(byDay$steps)==TRUE]=0
projData$week <- as.factor(ifelse(weekdays(as.Date(projData$date)) %in% c("Saturday","Sunday"), "Weekend", "Weekday"))
projData$steps[naidx]=nameans
mean(byDay$steps,na.rm=TRUE)
median(byDay$steps,na.rm=TRUE)
byInterval_wd <- aggregate(projData["steps"],by=list(projData$interval,projData$week),FUN=mean, na.rm=TRUE)
names(byInterval_wd)[1:2]<-c("interval","week")
byInterval_we<-byInterval_wd[byInterval_wd$week=="Weekend",]
byInterval_wd<-byInterval_wd[byInterval_wd$week=="Weekday",]
par(mfrow=c(1,2))
with(byInterval_wd, plot(1:288, steps, type='l',main="Weekdays", xlab="Interval Index (Total=288)", ylab="Average Steps"))
with(byInterval_we, plot(1:288, steps, type='l',main="Weekends", xlab="Interval Index (Total=288)", ylab="Average Steps"))
|
/repproj1.R
|
no_license
|
saadkhalid90/represearch
|
R
| false | false | 1,843 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nba_stats_player.R
\name{playercareerstats}
\alias{playercareerstats}
\alias{nba_playercareerstats}
\title{\strong{Get NBA Stats API Player Career Stats}}
\usage{
nba_playercareerstats(
league_id = "00",
per_mode = "Totals",
player_id = "2544"
)
}
\arguments{
\item{league_id}{League - default: '00'. Other options include '01','02','03'}
\item{per_mode}{Per Mode - PerGame, Totals}
\item{player_id}{Player ID}
}
\description{
\strong{Get NBA Stats API Player Career Stats}
\strong{Get NBA Stats API Player Career Stats}
}
\author{
Saiem Gilani
}
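A minimal call sketch, using only the defaults documented in the usage block above (league_id "00", per_mode "Totals", player_id "2544"); shown purely for illustration:

nba_playercareerstats(player_id = "2544")  # career stats for the default player_id from \usage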
|
/man/playercareerstats.Rd
|
permissive
|
mrcaseb/hoopR
|
R
| false | true | 634 |
rd
|
\name{panel.stratify}
\alias{panel.stratify}
\title{Handle Each Level of a Stripplot Separately}
\description{
Just as \code{panel.superpose} handles each group of data separately, \code{panel.stratify}
handles each \sQuote{level} of data separately. Typically, levels are the unique
values of \code{y} (\code{horizontal==TRUE}) that result from a call to \code{stripplot} or \code{bwplot}.
The default panel functions treat all levels simultaneously. Plotting some
transformation of the data (e.g. density polygons for each level) is much easier if
the levels are presented individually.
}
\usage{
panel.stratify(
x,
y,
type = 'p',
groups = NULL,
pch = if (is.null(groups))
plot.symbol$pch else superpose.symbol$pch,
col,
col.line = if (is.null(groups))
plot.line$col else superpose.line$col,
col.symbol = if (is.null(groups)) plot.symbol$col else superpose.symbol$col,
font = if (is.null(groups))
plot.symbol$font else superpose.symbol$font,
fontfamily = if (is.null(groups)) plot.symbol$fontfamily else superpose.symbol$fontfamily,
fontface = if (is.null(groups)) plot.symbol$fontface else superpose.symbol$fontface,
lty = if (is.null(groups))
plot.line$lty else superpose.line$lty,
cex = if (is.null(groups)) plot.symbol$cex else superpose.symbol$cex,
fill = if (is.null(groups)) plot.symbol$fill else superpose.symbol$fill,
lwd = if (is.null(groups)) plot.line$lwd else superpose.line$lwd,
horizontal = FALSE,
panel.levels = 'panel.xyplot',
...,
jitter.x = FALSE,
jitter.y = FALSE,
factor = 0.5,
amount = NULL
)
}
\arguments{
\item{x}{See \code{panel.xyplot}}
\item{y}{See \code{panel.xyplot}}
\item{type}{See \code{panel.xyplot}}
\item{groups}{See \code{panel.xyplot}}
\item{pch}{See \code{panel.xyplot}}
\item{col}{See \code{panel.xyplot}}
\item{col.line}{See \code{panel.xyplot}}
\item{col.symbol}{See \code{panel.xyplot}}
\item{font}{See \code{panel.xyplot}}
\item{fontfamily}{See \code{panel.xyplot}}
\item{fontface}{See \code{panel.xyplot}}
\item{lty}{See \code{panel.xyplot}}
\item{cex}{See \code{panel.xyplot}}
\item{fill}{See \code{panel.xyplot}}
\item{lwd}{See \code{panel.xyplot}}
\item{horizontal}{See \code{panel.xyplot}}
\item{panel.levels}{a function to handle each unique level of the data}
\item{\dots}{See \code{panel.xyplot}}
\item{jitter.x}{See \code{panel.xyplot}}
\item{jitter.y}{See \code{panel.xyplot}}
\item{factor}{See \code{panel.xyplot}}
\item{amount}{See \code{panel.xyplot}}
}
\details{
\code{panel.stratify} is defined almost identically to \code{panel.xyplot}. \code{panel.levels}
is analogous to \code{panel.groups}. \code{panel.levels} may want to handle special cases
of \code{col}, which may be missing if \code{groups} is \code{NULL} and may be \code{NA} if \code{groups} is
not \code{NULL} (set to \code{NA} by \code{panel.superpose}).
\code{x} and \code{y} are split into subsets by whichever of them represents levels (\code{y} if
\code{horizontal} is \code{TRUE}, \code{x} otherwise). Corresponding subsets of \code{x} and \code{y} are
forwarded one at a time, along with the other arguments, to \code{panel.levels}.
Additionally, the current value of \code{level} as well as the complete vector of
\code{levels} are available to \code{panel.levels}.
}
\value{used for side effects}
\references{\url{http://mifuns.googlecode.com}}
\author{Tim Bergsma}
\seealso{
\itemize{
\item \code{\link{panel.covplot}}
\item \code{\link{panel.densitystrip}}
\item \code{\link{panel.hist}}
\item \code{\link{panel.xyplot}}
}
}
\keyword{manip}
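An illustrative usage sketch, with assumptions: the lattice singer data set and passing panel.xyplot as panel.levels are illustrative choices, not taken from this Rd file.

library(lattice)
library(MIfuns)
# illustrative data: lattice's singer; each voice part (a level of y) is handed
# to panel.levels separately instead of all levels at once
stripplot(voice.part ~ height, data = singer,
          panel = panel.stratify, panel.levels = panel.xyplot)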
|
/man/panel.stratify.Rd
|
no_license
|
cran/MIfuns
|
R
| false | false | 3,611 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prep_nhdplus.R
\name{prepare_nhdplus}
\alias{prepare_nhdplus}
\title{Prep NHDPlus Data}
\usage{
prepare_nhdplus(
flines,
min_network_size,
min_path_length,
min_path_size = 0,
purge_non_dendritic = TRUE,
warn = TRUE,
error = TRUE,
skip_toCOMID = FALSE
)
}
\arguments{
\item{flines}{data.frame NHDPlus flowlines including:
COMID, LENGTHKM, FTYPE (or FCODE), TerminalFl, FromNode, ToNode, TotDASqKM,
StartFlag, StreamOrde, StreamCalc, TerminalPa, Pathlength,
and Divergence variables.}
\item{min_network_size}{numeric Minimum size (sqkm) of drainage network
to include in output.}
\item{min_path_length}{numeric Minimum length (km) of terminal level
path of a network.}
\item{min_path_size}{numeric Minimum size (sqkm) of outlet level
path of a drainage basin. Drainage basins with an outlet drainage area
smaller than this will be removed.}
\item{purge_non_dendritic}{boolean Should non-dendritic paths be removed
or not.}
\item{warn}{boolean controls whether warning and status messages are printed}
\item{error}{boolean controls whether to return potentially invalid data with a warning rather than an error}
\item{skip_toCOMID}{boolean if TRUE, toCOMID will not be added to output.}
}
\value{
data.frame ready to be used with the refactor_flowlines function.
}
\description{
Function to prep NHDPlus data for use by nhdplusTools functions
}
\examples{
source(system.file("extdata", "sample_flines.R", package = "nhdplusTools"))
prepare_nhdplus(sample_flines,
min_network_size = 10,
min_path_length = 1,
warn = FALSE)
}
\concept{refactor functions}
|
/man/prepare_nhdplus.Rd
|
permissive
|
Otoliths/nhdplusTools
|
R
| false | true | 1,700 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/epigrowthfit-data.R
\name{epigrowthfit-data}
\alias{epigrowthfit-data}
\title{Data in the \pkg{epigrowthfit} package}
\description{
Epidemic time series to which growth rates can be fit,
and a few other data sets.
}
\details{
Below is a list of available data sets with links to their
documentation:
\describe{
\item{\code{\link{canadacovid}}}{
Daily confirmations of COVID-19 in Canadian provinces
and territories, from first confirmation to May 8, 2021.
}
\item{\code{\link{covid_generation_interval}}}{
Gamma distribution of the COVID-19 generation interval
fit to data from a cluster of 45 cases in Tianjin, China.
}
\item{\code{\link{husting}}}{
Counts of wills probated in the Court of Husting
during four plague epidemics in 14th century London.
}
\item{\code{\link{canterbury}}}{
Counts of wills probated in the Prerogative Court of Canterbury
during 24 plague epidemics in 16th and 17th century London.
}
\item{\code{\link{londonparishes}}}{
Weekly counts of burials listed in extant parish registers
during 24 plague epidemics in 16th and 17th century London.
}
\item{\code{\link{londonbills}}}{
Weekly counts of plague deaths recorded in the London Bills of
Mortality during 24 plague epidemics in 16th and 17th century
London.
}
\item{\code{\link{plague_latent_period}}}{
Empirical distribution of the latent period of pneumonic plague.
}
\item{\code{\link{plague_infectious_period}}}{
Empirical distribution of the infectious period of pneumonic plague.
}
}
}
\keyword{internal}
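A minimal access sketch, assuming the listed data sets are lazy-loaded with the package (an assumption, not stated in this Rd file):

library(epigrowthfit)
head(canadacovid)  # daily COVID-19 confirmations by Canadian province and territory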
|
/man/epigrowthfit-data.Rd
|
no_license
|
davidearn/epigrowthfit
|
R
| false | true | 1,600 |
rd
|
set.seed(12450)
y <- rnorm(100)
n <- length(y)
plot(0,1,xlim = c(1,n), ylim = c(-10 , 10),type = "n" )
plot(1:n,y,xlim = c(1,n),ylim = c(-10,10))
for (i in 1:200) {
abline()
}
plot(0, 1 ,xlim = c(0, 200), ylim = c(3.5,6.5), type = "n")
m <- data.frame(x=c(1:n),y)
library(ggplot2)
library(animation)
windows(7,7)
oopt <- ani.options(interval=0.1)
m <- data.frame(x=c(1:length(y)),y=y)
p <- ggplot(data = m ,mapping = aes(x=x,y=y))
for (i in 1:100) {
m1 <- data.frame(x=c(1:i),y=y[1:i])
print(p + geom_point(data = m1))  # draw the first i points of the series
ani.pause()
}
ani.options(oopt)
boot.iid(x = runif(20), statistic = mean)  # bootstrap animation of the sample mean; other arguments keep their defaults
brownian.motion(n = 10, xlim = c(-20, 20), ylim = c(-20, 20))
buffon.needle(l = 0.8, d = 1, redraw = TRUE, mat = matrix(c(1, 3, 2, 3), 2),
heights = c(3, 2), col = c("lightgray", "red", "gray", "red", "blue", "black",
"red"), expand = 0.4, type = "l")
ani.options("C:/Software/LyX/etc/ImageMagick/convert.exe")
saveGIF({ brownian.motion(pch = 21, cex = 5, col = "red", bg = "yellow") }, movie.name = "brownian_motion.gif", interval = 0.1, nmax = 30, ani.width = 600, ani.height = 600)
des = c("Random walk of 10 points on the 2D plane:", "for each point (x, y),",
"x = x + rnorm(1) and y = y + rnorm(1).")
saveHTML({
par(mar = c(3, 3, 1, 0.5), mgp = c(2, 0.5, 0), tcl = -0.3, cex.axis = 0.8,
cex.lab = 0.8, cex.main = 1)
ani.options(interval = 0.05, nmax = ifelse(interactive(), 150,
2))
buffon.needle(l = 0.8, d = 1, redraw = TRUE, mat = matrix(c(1, 3, 2, 3), 2),
heights = c(3, 2), col = c("lightgray", "red", "gray", "red", "blue", "black",
"red"), expand = 0.4, type = "l")
}, img.name = "buffon.needle", htmlfile = "buffon.needle.html")
|
/R/R-class/script/ani.R
|
no_license
|
hunterlinsq/R-in-SOE
|
R
| false | false | 2,119 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-reexports.R
\name{\%>\%}
\alias{\%>\%}
\title{Pipe operator}
\usage{
lhs \%>\% rhs
}
\description{
Pipe operator
}
\keyword{internal}
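A one-line sketch of the documented lhs %>% rhs form, for illustration only:

c(1, 4, 9) %>% sqrt() %>% sum()  # returns 6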
|
/man/pipe.Rd
|
permissive
|
paithiov909/rjavacmecab
|
R
| false | true | 218 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PrometheeS4.R
\name{PrometheeIIPlot}
\alias{PrometheeIIPlot}
\alias{RPrometheeIIPlot}
\alias{PrometheeIIPlot,RPrometheeII-method}
\title{PrometheeIIPlot}
\usage{
PrometheeIIPlot(RPrometheeII)
}
\arguments{
\item{RPrometheeII}{An object resulting from RPrometheeII method.}
}
\description{
Plots the net Phi, resulting from RPrometheeII method.
}
\references{
\itemize{
\item
J. P. Brans, Ph. Vincke\cr
\emph{A Preference Ranking Organisation Method: (The PROMETHEE Method
for Multiple Criteria Decision-Making)}\cr
Management science, v. 31, n. 6, p. 647-656, 1985.\cr
\url{https://pdfs.semanticscholar.org/edd6/f5ae9c1bfb2fdd5c9a5d66e56bdb22770460.pdf}
\item
J. P. Brans, B. Mareschal \cr
\emph{PROMETHEE methods. In: Figueira J, Greco S, Ehrgott M (eds)
Multiple criteria decision analysis: state of the art surveys.}\cr
Springer Science, Business Media Inc., Boston pp 163--195.\cr
\url{http://www.springer.com/la/book/9780387230818}
}
}
\seealso{
Other RPromethee methods: \code{\link{PrometheeIIIPlot}},
\code{\link{PrometheeIPlot}},
\code{\link{PrometheeIVPlot}},
\code{\link{RPrometheeConstructor}},
\code{\link{RPrometheeIII}}, \code{\link{RPrometheeII}},
\code{\link{RPrometheeIVKernel}},
\code{\link{RPrometheeIV}}, \code{\link{RPrometheeI}},
\code{\link{RPrometheeV}},
\code{\link{SensitivityAnalysis}},
\code{\link{UpdateRPrometheeAlternatives}},
\code{\link{UpdateRPrometheeArguments}},
\code{\link{WalkingWeightsPlot}},
\code{\link{plot,RPrometheeI-method}}
}
\author{
Pedro Henrique Melo Albuquerque, \email{pedroa@unb.br}
Gustavo Monteiro Pereira, \email{monteirogustavop@gmail.com}
}
\keyword{decision-analysis}
\keyword{decision-method}
\keyword{mcda}
\keyword{promethee}
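A call sketch matching the documented signature; promII stands for a hypothetical object previously returned by the RPrometheeII method:

PrometheeIIPlot(promII)  # promII: hypothetical RPrometheeII result; plots the net Phi flows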
|
/man/PrometheeIIPlot.Rd
|
no_license
|
lamfo-unb/RMCriteria
|
R
| false | true | 1,871 |
rd
|
## makeCacheMatrix takes a square matrix as an argument.
## The inverse is stored when cacheSolve invokes setinverse.
## Returns a list containing the functions: 1. set  2. get  3. setinverse  4. getinverse
makeCacheMatrix <- function(x = matrix()) {
i<-NULL
set<-function(n)
{
x<<-n; ## the superassignment operator assigns to the variable in the enclosing (parent) environment
i<<-NULL;
}
get<-function()
x
setinverse<-function(inverse)
{
i<<-inverse
}
getinverse<-function()
i
list(set=set,get=get,setinverse=setinverse,getinverse=getinverse)
}
## cacheSolve first calls getinverse to check whether the inverse has already been calculated;
## if not, it gets the matrix, computes the inverse with solve(), caches it via setinverse, and returns it.
cacheSolve <- function(x, ...) {
i=x$getinverse()
if(!is.null(i))
{
message("got cached data")
return(i)
}
data=x$get()
inv=solve(data)
x$setinverse(inv)
inv
## Return a matrix that is the inverse of 'x'
}
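A minimal usage sketch with a hypothetical 2x2 matrix; the second cacheSolve() call returns the stored inverse instead of recomputing it:

cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(cm)  # computes the inverse and caches it
cacheSolve(cm)  # prints "got cached data" and returns the cached inverse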
|
/cachematrix.R
|
no_license
|
hkallam/ProgrammingAssignment2
|
R
| false | false | 1,092 |
r
|
85c1f6de30a83ce523dd9b545401a109 trivial_query48_1344.qdimacs 1589 5919
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/trivial_query48_1344/trivial_query48_1344.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false | false | 71 |
r
|
library(tidyverse)
# Regression analysis with easy-payment spending as the target variable (users only)
data<-read.csv("C:/Users/user/Desktop/2019-1/DATA/EMBRAIN/payments_ppdb_app_g_cp949.csv", encoding="CP949")
colnames(data)[1222]
old<-data%>%filter(data$age>=50) # keep only respondents aged 50 and over
user<-old%>%filter(price_sum_by_by_approval_type_LT01>0) # keep users only (positive easy-payment spending)
# remove outliers (drop the top 3 rows by easy-payment spending)
sorted <- user[c(order(user$price_sum_by_by_approval_type_LT01, decreasing = TRUE)),]
user<- sorted[-c(1,2,3), ] # drop the top 3 outliers
# the x variables contain many zeros, so apply log(x+1)
category<-user%>%dplyr::select(starts_with("category"))
category<-log(category+1)
company<-user%>%dplyr::select(starts_with("company"))
company<-log(company+1)
usagetime<-user%>%dplyr::select(ends_with("usagetime"))
usagetime<-log(usagetime+1)
price_sum<-user%>%dplyr::select(starts_with("price_sum"))
transformed_data<-data.frame(user$age, category, company, usagetime, price_sum)
# variable selection via forward selection
lm.null<-lm(price_sum_by_by_approval_type_LT01~1, data=transformed_data)
lm.full<-lm(price_sum_by_by_approval_type_LT01~., data=transformed_data)
# the step() call below is slow, so it is only written out (left commented)
#forward.selection<- step(lm.null, direction = "forward", scope=list(lower=lm.null, upper=lm.full))
# the final selected model is entered explicitly below (again, to save run time)
lm.forward<-lm(formula = price_sum_by_by_approval_type_LT01 ~ company_code_PA00004_count +
company_code_PA00011_count + price_sum_by_by_approval_type_LA +
X.U.C0AC..U.C9C4...U.C778..U.D654._usagetime + category_code_6_count +
X.U.D544..U.D130...U.CE74..U.BA54..U.B77C._usagetime + X.U.B0A0..U.C528._usagetime +
X.U.AE30..U.C5C5...U.D559..U.D68C._usagetime + X.U.CE74..U.BA54..U.B77C...U.CEE8..U.D2B8..U.B864..U.B7EC._usagetime +
X.U.AC8C..U.C784...U.CEE4..U.BBA4..U.B2C8..U.D2F0._usagetime +
company_code_PA00010_count + X.U.C911..U.ACE0..U.AC70..U.B798._usagetime +
X.U.C790..U.C0B0..U.D1B5..U.D569..U.AD00..U.B9AC._usagetime +
price_sum_by_by_approval_type_LW + CCTV.U.BDF0..U.C5B4._usagetime +
X.U.D504..U.B9B0..U.D130...U.D329..U.C2A4._usagetime + X.U.B3C8..U.BC84..U.B294...U.B9AC..U.C6CC..U.B4DC._usagetime +
AR.VR_usagetime + X.U.BC31..U.D654..U.C810...U.C628..U.B77C..U.C778..U.BAB0._usagetime +
price_sum_by_by_approval_type_FC + X.U.C131..U.D615..U.C815..U.BCF4._usagetime +
X.U.C219..U.BC15._usagetime + X.U.C74C..U.C131...U.C601..U.C0C1..U.D1B5..U.D654._usagetime +
price_sum_by_by_approval_type_LD + X.U.D2F0..U.CF13...U.C608..U.B9E4._usagetime +
X.U.BB38..U.C11C._usagetime + X.U.C571..U.C124..U.CE58..U.D504..U.B85C..U.ADF8..U.B7A8._usagetime +
X.U.B300..U.D559..U.AD50._usagetime + X.U.D30C..U.C77C...U.B2E4..U.C6B4..U.B85C..U.B354._usagetime +
X.U.D30C..U.C77C..U.AD00..U.B9AC._usagetime + price_sum_by_by_approval_type_FA +
price_sum_by_by_approval_type_LC + QR.U.C2A4..U.CE90..U.B108._usagetime +
X.U.AC80..U.C0C9..U.D3EC..U.D138._usagetime + X.U.C601..U.D654._usagetime +
X.U.B3C4..U.C11C..U.AD00._usagetime + category_code_12_count +
X.U.B300..U.CD9C._usagetime + X.U.AD50..U.D1B5..U.CE74..U.B4DC._usagetime +
X.U.C74C..U.C545...U.C2A4..U.D2B8..U.B9AC..U.BC0D._usagetime +
X.U.B179..U.C74C..U.AE30._usagetime + X.U.BB34..U.C74C..U.CE74..U.BA54..U.B77C._usagetime +
company_code_PA00012_count + X.U.C5EC..U.D589...U.C815..U.BCF4._usagetime +
X.U.C190..U.C804..U.B4F1._usagetime + X.U.C2E0..U.C6A9..U.C870..U.D68C._usagetime +
X.U.C1FC..U.D551._usagetime + X.U.D648..U.C6D0..U.ACA9..U.AD00..U.B9AC._usagetime +
X.U.B2EC..U.B825...U.C77C..U.C815..U.AD00..U.B9AC._usagetime +
X.U.C8FC..U.C720..U.C18C._usagetime + X.U.B3D9..U.D638..U.D68C...U.C18C..U.BAA8..U.C784._usagetime +
X.U.BC18..U.B824..U.B3D9..U.BB3C._usagetime + X.U.C18C..U.C15C...U.C5F0..U.ACC4._usagetime +
X.U.CF58..U.D150..U.CE20._usagetime + X.U.B80C..U.D130..U.CE74._usagetime,
data = transformed_data)
# check the lm.forward model for multicollinearity
library(car)
which(car::vif(lm.forward)>4) # domestic deposits and withdrawals are flagged as collinear; since the focus is on spending, keep only the withdrawal variable
lm.after.vif<-lm(formula = price_sum_by_by_approval_type_LT01 ~ company_code_PA00004_count +
company_code_PA00011_count + price_sum_by_by_approval_type_LA +
X.U.C0AC..U.C9C4...U.C778..U.D654._usagetime + category_code_6_count +
X.U.D544..U.D130...U.CE74..U.BA54..U.B77C._usagetime + X.U.B0A0..U.C528._usagetime +
X.U.AE30..U.C5C5...U.D559..U.D68C._usagetime + X.U.CE74..U.BA54..U.B77C...U.CEE8..U.D2B8..U.B864..U.B7EC._usagetime +
X.U.AC8C..U.C784...U.CEE4..U.BBA4..U.B2C8..U.D2F0._usagetime +
company_code_PA00010_count + X.U.C911..U.ACE0..U.AC70..U.B798._usagetime +
X.U.C790..U.C0B0..U.D1B5..U.D569..U.AD00..U.B9AC._usagetime +
price_sum_by_by_approval_type_LW + CCTV.U.BDF0..U.C5B4._usagetime +
X.U.D504..U.B9B0..U.D130...U.D329..U.C2A4._usagetime + X.U.B3C8..U.BC84..U.B294...U.B9AC..U.C6CC..U.B4DC._usagetime +
AR.VR_usagetime + X.U.BC31..U.D654..U.C810...U.C628..U.B77C..U.C778..U.BAB0._usagetime +
price_sum_by_by_approval_type_FC + X.U.C131..U.D615..U.C815..U.BCF4._usagetime +
X.U.C219..U.BC15._usagetime + X.U.C74C..U.C131...U.C601..U.C0C1..U.D1B5..U.D654._usagetime +
X.U.D2F0..U.CF13...U.C608..U.B9E4._usagetime +
X.U.BB38..U.C11C._usagetime + X.U.C571..U.C124..U.CE58..U.D504..U.B85C..U.ADF8..U.B7A8._usagetime +
X.U.B300..U.D559..U.AD50._usagetime + X.U.D30C..U.C77C...U.B2E4..U.C6B4..U.B85C..U.B354._usagetime +
X.U.D30C..U.C77C..U.AD00..U.B9AC._usagetime + price_sum_by_by_approval_type_FA +
price_sum_by_by_approval_type_LC + QR.U.C2A4..U.CE90..U.B108._usagetime +
X.U.AC80..U.C0C9..U.D3EC..U.D138._usagetime + X.U.C601..U.D654._usagetime +
X.U.B3C4..U.C11C..U.AD00._usagetime + category_code_12_count +
X.U.B300..U.CD9C._usagetime + X.U.AD50..U.D1B5..U.CE74..U.B4DC._usagetime +
X.U.C74C..U.C545...U.C2A4..U.D2B8..U.B9AC..U.BC0D._usagetime +
X.U.B179..U.C74C..U.AE30._usagetime + X.U.BB34..U.C74C..U.CE74..U.BA54..U.B77C._usagetime +
company_code_PA00012_count + X.U.C5EC..U.D589...U.C815..U.BCF4._usagetime +
X.U.C190..U.C804..U.B4F1._usagetime + X.U.C2E0..U.C6A9..U.C870..U.D68C._usagetime +
X.U.C1FC..U.D551._usagetime + X.U.D648..U.C6D0..U.ACA9..U.AD00..U.B9AC._usagetime +
X.U.B2EC..U.B825...U.C77C..U.C815..U.AD00..U.B9AC._usagetime +
X.U.C8FC..U.C720..U.C18C._usagetime + X.U.B3D9..U.D638..U.D68C...U.C18C..U.BAA8..U.C784._usagetime +
X.U.BC18..U.B824..U.B3D9..U.BB3C._usagetime + X.U.C18C..U.C15C...U.C5F0..U.ACC4._usagetime +
X.U.CF58..U.D150..U.CE20._usagetime + X.U.B80C..U.D130..U.CE74._usagetime,
data = transformed_data)
# the R-squared is fairly high (about 0.6)
summary(lm.after.vif)
# the diagnostic plots suggest a transformation is needed
plot(lm.after.vif)
#boxcox transformation
library(MASS)
bc_norm <- MASS::boxcox(lm.after.vif)
lambda <- bc_norm$x[which.max(bc_norm$y)]
lm.boxcox<-lm(formula = price_sum_by_by_approval_type_LT01^lambda ~ company_code_PA00004_count +
company_code_PA00011_count + price_sum_by_by_approval_type_LA +
X.U.C0AC..U.C9C4...U.C778..U.D654._usagetime + category_code_6_count +
X.U.D544..U.D130...U.CE74..U.BA54..U.B77C._usagetime + X.U.B0A0..U.C528._usagetime +
X.U.AE30..U.C5C5...U.D559..U.D68C._usagetime + X.U.CE74..U.BA54..U.B77C...U.CEE8..U.D2B8..U.B864..U.B7EC._usagetime +
X.U.AC8C..U.C784...U.CEE4..U.BBA4..U.B2C8..U.D2F0._usagetime +
company_code_PA00010_count + X.U.C911..U.ACE0..U.AC70..U.B798._usagetime +
X.U.C790..U.C0B0..U.D1B5..U.D569..U.AD00..U.B9AC._usagetime +
price_sum_by_by_approval_type_LW + CCTV.U.BDF0..U.C5B4._usagetime +
X.U.D504..U.B9B0..U.D130...U.D329..U.C2A4._usagetime + X.U.B3C8..U.BC84..U.B294...U.B9AC..U.C6CC..U.B4DC._usagetime +
AR.VR_usagetime + X.U.BC31..U.D654..U.C810...U.C628..U.B77C..U.C778..U.BAB0._usagetime +
price_sum_by_by_approval_type_FC + X.U.C131..U.D615..U.C815..U.BCF4._usagetime +
X.U.C219..U.BC15._usagetime + X.U.C74C..U.C131...U.C601..U.C0C1..U.D1B5..U.D654._usagetime +
X.U.D2F0..U.CF13...U.C608..U.B9E4._usagetime +
X.U.BB38..U.C11C._usagetime + X.U.C571..U.C124..U.CE58..U.D504..U.B85C..U.ADF8..U.B7A8._usagetime +
X.U.B300..U.D559..U.AD50._usagetime + X.U.D30C..U.C77C...U.B2E4..U.C6B4..U.B85C..U.B354._usagetime +
X.U.D30C..U.C77C..U.AD00..U.B9AC._usagetime + price_sum_by_by_approval_type_FA +
price_sum_by_by_approval_type_LC + QR.U.C2A4..U.CE90..U.B108._usagetime +
X.U.AC80..U.C0C9..U.D3EC..U.D138._usagetime + X.U.C601..U.D654._usagetime +
X.U.B3C4..U.C11C..U.AD00._usagetime + category_code_12_count +
X.U.B300..U.CD9C._usagetime + X.U.AD50..U.D1B5..U.CE74..U.B4DC._usagetime +
X.U.C74C..U.C545...U.C2A4..U.D2B8..U.B9AC..U.BC0D._usagetime +
X.U.B179..U.C74C..U.AE30._usagetime + X.U.BB34..U.C74C..U.CE74..U.BA54..U.B77C._usagetime +
company_code_PA00012_count + X.U.C5EC..U.D589...U.C815..U.BCF4._usagetime +
X.U.C190..U.C804..U.B4F1._usagetime + X.U.C2E0..U.C6A9..U.C870..U.D68C._usagetime +
X.U.C1FC..U.D551._usagetime + X.U.D648..U.C6D0..U.ACA9..U.AD00..U.B9AC._usagetime +
X.U.B2EC..U.B825...U.C77C..U.C815..U.AD00..U.B9AC._usagetime +
X.U.C8FC..U.C720..U.C18C._usagetime + X.U.B3D9..U.D638..U.D68C...U.C18C..U.BAA8..U.C784._usagetime +
X.U.BC18..U.B824..U.B3D9..U.BB3C._usagetime + X.U.C18C..U.C15C...U.C5F0..U.ACC4._usagetime +
X.U.CF58..U.D150..U.CE20._usagetime + X.U.B80C..U.D130..U.CE74._usagetime,
data = transformed_data)
# the residual plots improved a lot, but the R-squared dropped
summary(lm.boxcox)
plot(lm.boxcox)
# put the deposit variable back in and try Box-Cox again
bc_norm <- MASS::boxcox(lm.after.vif)
lambda <- bc_norm$x[which.max(bc_norm$y)]
lm.boxcox_1<-lm(formula = price_sum_by_by_approval_type_LT01^lambda ~ company_code_PA00004_count +
company_code_PA00011_count + price_sum_by_by_approval_type_LA +
X.U.C0AC..U.C9C4...U.C778..U.D654._usagetime + category_code_6_count +
X.U.D544..U.D130...U.CE74..U.BA54..U.B77C._usagetime + X.U.B0A0..U.C528._usagetime +
X.U.AE30..U.C5C5...U.D559..U.D68C._usagetime + X.U.CE74..U.BA54..U.B77C...U.CEE8..U.D2B8..U.B864..U.B7EC._usagetime +
X.U.AC8C..U.C784...U.CEE4..U.BBA4..U.B2C8..U.D2F0._usagetime +
company_code_PA00010_count + X.U.C911..U.ACE0..U.AC70..U.B798._usagetime +
X.U.C790..U.C0B0..U.D1B5..U.D569..U.AD00..U.B9AC._usagetime +
price_sum_by_by_approval_type_LW + CCTV.U.BDF0..U.C5B4._usagetime +
X.U.D504..U.B9B0..U.D130...U.D329..U.C2A4._usagetime + X.U.B3C8..U.BC84..U.B294...U.B9AC..U.C6CC..U.B4DC._usagetime +
AR.VR_usagetime + X.U.BC31..U.D654..U.C810...U.C628..U.B77C..U.C778..U.BAB0._usagetime +
price_sum_by_by_approval_type_FC + X.U.C131..U.D615..U.C815..U.BCF4._usagetime +
X.U.C219..U.BC15._usagetime + X.U.C74C..U.C131...U.C601..U.C0C1..U.D1B5..U.D654._usagetime +
price_sum_by_by_approval_type_LD + X.U.D2F0..U.CF13...U.C608..U.B9E4._usagetime +
X.U.BB38..U.C11C._usagetime + X.U.C571..U.C124..U.CE58..U.D504..U.B85C..U.ADF8..U.B7A8._usagetime +
X.U.B300..U.D559..U.AD50._usagetime + X.U.D30C..U.C77C...U.B2E4..U.C6B4..U.B85C..U.B354._usagetime +
X.U.D30C..U.C77C..U.AD00..U.B9AC._usagetime + price_sum_by_by_approval_type_FA +
price_sum_by_by_approval_type_LC + QR.U.C2A4..U.CE90..U.B108._usagetime +
X.U.AC80..U.C0C9..U.D3EC..U.D138._usagetime + X.U.C601..U.D654._usagetime +
X.U.B3C4..U.C11C..U.AD00._usagetime + category_code_12_count +
X.U.B300..U.CD9C._usagetime + X.U.AD50..U.D1B5..U.CE74..U.B4DC._usagetime +
X.U.C74C..U.C545...U.C2A4..U.D2B8..U.B9AC..U.BC0D._usagetime +
X.U.B179..U.C74C..U.AE30._usagetime + X.U.BB34..U.C74C..U.CE74..U.BA54..U.B77C._usagetime +
company_code_PA00012_count + X.U.C5EC..U.D589...U.C815..U.BCF4._usagetime +
X.U.C190..U.C804..U.B4F1._usagetime + X.U.C2E0..U.C6A9..U.C870..U.D68C._usagetime +
X.U.C1FC..U.D551._usagetime + X.U.D648..U.C6D0..U.ACA9..U.AD00..U.B9AC._usagetime +
X.U.B2EC..U.B825...U.C77C..U.C815..U.AD00..U.B9AC._usagetime +
X.U.C8FC..U.C720..U.C18C._usagetime + X.U.B3D9..U.D638..U.D68C...U.C18C..U.BAA8..U.C784._usagetime +
X.U.BC18..U.B824..U.B3D9..U.BB3C._usagetime + X.U.C18C..U.C15C...U.C5F0..U.ACC4._usagetime +
X.U.CF58..U.D150..U.CE20._usagetime + X.U.B80C..U.D130..U.CE74._usagetime,
data = transformed_data)
summary(lm.boxcox_1)
plot(lm.boxcox_1)
# Box-Cox lowers the R-squared here
# this time keep deposits and drop the withdrawal variable instead
lm_again<-lm(formula = price_sum_by_by_approval_type_LT01 ~ company_code_PA00004_count +
company_code_PA00011_count + price_sum_by_by_approval_type_LA +
X.U.C0AC..U.C9C4...U.C778..U.D654._usagetime + category_code_6_count +
X.U.D544..U.D130...U.CE74..U.BA54..U.B77C._usagetime + X.U.B0A0..U.C528._usagetime +
X.U.AE30..U.C5C5...U.D559..U.D68C._usagetime + X.U.CE74..U.BA54..U.B77C...U.CEE8..U.D2B8..U.B864..U.B7EC._usagetime +
X.U.AC8C..U.C784...U.CEE4..U.BBA4..U.B2C8..U.D2F0._usagetime +
company_code_PA00010_count + X.U.C911..U.ACE0..U.AC70..U.B798._usagetime +
X.U.C790..U.C0B0..U.D1B5..U.D569..U.AD00..U.B9AC._usagetime +
CCTV.U.BDF0..U.C5B4._usagetime +
X.U.D504..U.B9B0..U.D130...U.D329..U.C2A4._usagetime + X.U.B3C8..U.BC84..U.B294...U.B9AC..U.C6CC..U.B4DC._usagetime +
AR.VR_usagetime + X.U.BC31..U.D654..U.C810...U.C628..U.B77C..U.C778..U.BAB0._usagetime +
price_sum_by_by_approval_type_FC + X.U.C131..U.D615..U.C815..U.BCF4._usagetime +
X.U.C219..U.BC15._usagetime + X.U.C74C..U.C131...U.C601..U.C0C1..U.D1B5..U.D654._usagetime +
price_sum_by_by_approval_type_LD + X.U.D2F0..U.CF13...U.C608..U.B9E4._usagetime +
X.U.BB38..U.C11C._usagetime + X.U.C571..U.C124..U.CE58..U.D504..U.B85C..U.ADF8..U.B7A8._usagetime +
X.U.B300..U.D559..U.AD50._usagetime + X.U.D30C..U.C77C...U.B2E4..U.C6B4..U.B85C..U.B354._usagetime +
X.U.D30C..U.C77C..U.AD00..U.B9AC._usagetime + price_sum_by_by_approval_type_FA +
price_sum_by_by_approval_type_LC + QR.U.C2A4..U.CE90..U.B108._usagetime +
X.U.AC80..U.C0C9..U.D3EC..U.D138._usagetime + X.U.C601..U.D654._usagetime +
X.U.B3C4..U.C11C..U.AD00._usagetime + category_code_12_count +
X.U.B300..U.CD9C._usagetime + X.U.AD50..U.D1B5..U.CE74..U.B4DC._usagetime +
X.U.C74C..U.C545...U.C2A4..U.D2B8..U.B9AC..U.BC0D._usagetime +
X.U.B179..U.C74C..U.AE30._usagetime + X.U.BB34..U.C74C..U.CE74..U.BA54..U.B77C._usagetime +
company_code_PA00012_count + X.U.C5EC..U.D589...U.C815..U.BCF4._usagetime +
X.U.C190..U.C804..U.B4F1._usagetime + X.U.C2E0..U.C6A9..U.C870..U.D68C._usagetime +
X.U.C1FC..U.D551._usagetime + X.U.D648..U.C6D0..U.ACA9..U.AD00..U.B9AC._usagetime +
X.U.B2EC..U.B825...U.C77C..U.C815..U.AD00..U.B9AC._usagetime +
X.U.C8FC..U.C720..U.C18C._usagetime + X.U.B3D9..U.D638..U.D68C...U.C18C..U.BAA8..U.C784._usagetime +
X.U.BC18..U.B824..U.B3D9..U.BB3C._usagetime + X.U.C18C..U.C15C...U.C5F0..U.ACC4._usagetime +
X.U.CF58..U.D150..U.CE20._usagetime + X.U.B80C..U.D130..U.CE74._usagetime,
data = transformed_data)
summary(lm_again)
plot(lm_again)
# a nonlinear model may be needed
|
/analysis/analysis_by_ar/regression_0525_checkvif_removeoutlier.R
|
permissive
|
desaip2468/deguri
|
R
| false | false | 17,570 |
r
|
library(tidyverse)
#๊ฐํธ ๊ฒฐ์ ์ด์ฉ์ก์ ํ๊ฒ ๋ณ์๋ก ํ ํ๊ท ๋ถ์(user ๋์)
data<-read.csv("C:/Users/user/Desktop/2019-1/DATA/EMBRAIN/payments_ppdb_app_g_cp949.csv", encoding="CP949")
colnames(data)[1222]
old<-data%>%filter(data$age>=50) #50๋ ์ด์๋ง ์ถ์ถ
user<-old%>%filter(price_sum_by_by_approval_type_LT01>0) #user๋ง ์ถ์ถ
#outlier ์ ๊ฑฐ (๊ฐํธ๊ฒฐ์ ์ด์ฉ์ก ๊ธฐ์ค ์์ 3๊ฐ ํ ์ ๊ฑฐ)
sorted <- user[c(order(user$price_sum_by_by_approval_type_LT01, decreasing = TRUE)),]
user<- sorted[-c(1,2,3), ] #์์ 3๊ฐ ์์๋ผ์ด์ด ์ ๊ฑฐ
#x ๋ณ์์ 0์ด ๋ง์ผ๋ฏ๋ก log(x+1)์ ์ทจํด์ค.
category<-user%>%dplyr::select(starts_with("category"))
category<-log(category+1)
company<-user%>%dplyr::select(starts_with("company"))
company<-log(company+1)
usagetime<-user%>%dplyr::select(ends_with("usagetime"))
usagetime<-log(usagetime+1)
price_sum<-user%>%dplyr::select(starts_with("price_sum"))
transformed_data<-data.frame(user$age, category, company, usagetime, price_sum)
#forward selection ๋ฐฉ๋ฒ์ผ๋ก ๋ณ์ ์ ํ
lm.null<-lm(price_sum_by_by_approval_type_LT01~1, data=transformed_data)
lm.full<-lm(price_sum_by_by_approval_type_LT01~., data=transformed_data)
#๋ก๋ฉ์๋๋ฅผ ์ํด ์ฝ๋๋ง ์ ์ด๋๊ฒ ์ต๋๋ค
#forward.selection<- step(lm.null, direction = "forward", scope=list(lower=lm.null, upper=lm.full))
#์ต์ข
๋ชจ๋ธ ํ๋ฒ๋ ์
๋ ฅํด์ฃผ๊ฒ ์ต๋๋ค (๋ก๋ฉ์๋๋ฅผ ์ํด....)
lm.forward<-lm(formula = price_sum_by_by_approval_type_LT01 ~ company_code_PA00004_count +
company_code_PA00011_count + price_sum_by_by_approval_type_LA +
X.U.C0AC..U.C9C4...U.C778..U.D654._usagetime + category_code_6_count +
X.U.D544..U.D130...U.CE74..U.BA54..U.B77C._usagetime + X.U.B0A0..U.C528._usagetime +
X.U.AE30..U.C5C5...U.D559..U.D68C._usagetime + X.U.CE74..U.BA54..U.B77C...U.CEE8..U.D2B8..U.B864..U.B7EC._usagetime +
X.U.AC8C..U.C784...U.CEE4..U.BBA4..U.B2C8..U.D2F0._usagetime +
company_code_PA00010_count + X.U.C911..U.ACE0..U.AC70..U.B798._usagetime +
X.U.C790..U.C0B0..U.D1B5..U.D569..U.AD00..U.B9AC._usagetime +
price_sum_by_by_approval_type_LW + CCTV.U.BDF0..U.C5B4._usagetime +
X.U.D504..U.B9B0..U.D130...U.D329..U.C2A4._usagetime + X.U.B3C8..U.BC84..U.B294...U.B9AC..U.C6CC..U.B4DC._usagetime +
AR.VR_usagetime + X.U.BC31..U.D654..U.C810...U.C628..U.B77C..U.C778..U.BAB0._usagetime +
price_sum_by_by_approval_type_FC + X.U.C131..U.D615..U.C815..U.BCF4._usagetime +
X.U.C219..U.BC15._usagetime + X.U.C74C..U.C131...U.C601..U.C0C1..U.D1B5..U.D654._usagetime +
price_sum_by_by_approval_type_LD + X.U.D2F0..U.CF13...U.C608..U.B9E4._usagetime +
X.U.BB38..U.C11C._usagetime + X.U.C571..U.C124..U.CE58..U.D504..U.B85C..U.ADF8..U.B7A8._usagetime +
X.U.B300..U.D559..U.AD50._usagetime + X.U.D30C..U.C77C...U.B2E4..U.C6B4..U.B85C..U.B354._usagetime +
X.U.D30C..U.C77C..U.AD00..U.B9AC._usagetime + price_sum_by_by_approval_type_FA +
price_sum_by_by_approval_type_LC + QR.U.C2A4..U.CE90..U.B108._usagetime +
X.U.AC80..U.C0C9..U.D3EC..U.D138._usagetime + X.U.C601..U.D654._usagetime +
X.U.B3C4..U.C11C..U.AD00._usagetime + category_code_12_count +
X.U.B300..U.CD9C._usagetime + X.U.AD50..U.D1B5..U.CE74..U.B4DC._usagetime +
X.U.C74C..U.C545...U.C2A4..U.D2B8..U.B9AC..U.BC0D._usagetime +
X.U.B179..U.C74C..U.AE30._usagetime + X.U.BB34..U.C74C..U.CE74..U.BA54..U.B77C._usagetime +
company_code_PA00012_count + X.U.C5EC..U.D589...U.C815..U.BCF4._usagetime +
X.U.C190..U.C804..U.B4F1._usagetime + X.U.C2E0..U.C6A9..U.C870..U.D68C._usagetime +
X.U.C1FC..U.D551._usagetime + X.U.D648..U.C6D0..U.ACA9..U.AD00..U.B9AC._usagetime +
X.U.B2EC..U.B825...U.C77C..U.C815..U.AD00..U.B9AC._usagetime +
X.U.C8FC..U.C720..U.C18C._usagetime + X.U.B3D9..U.D638..U.D68C...U.C18C..U.BAA8..U.C784._usagetime +
X.U.BC18..U.B824..U.B3D9..U.BB3C._usagetime + X.U.C18C..U.C15C...U.C5F0..U.ACC4._usagetime +
X.U.CF58..U.D150..U.CE20._usagetime + X.U.B80C..U.D130..U.CE74._usagetime,
data = transformed_data)
#lm.forwad ๋ชจ๋ธ์ ๋ํด ๋ค์ค๊ณต์ ์ฑ ์ฒดํฌ๋ฅผ ํด๋ณด๊ฒ ์ต๋๋ค
library(car)
which(car::vif(lm.forward)>4) #๊ตญ๋ด ์
๊ธ๊ณผ ์ถ๊ธ์ด ๋ค์ค ๊ณต์ ์ฑ์ด ์๋ค๊ณ ๋์ค๋ค์ผ. ์ฐ๋ฆฌ๋ '์๋น'์ ์ด์ ์ ๋ง์ถ์์ผ๋ฏ๋ก ์ถ๊ธ๋ด์ญ๋ง ์ ํํ๊ฒ ์ต๋๋ค
lm.after.vif<-lm(formula = price_sum_by_by_approval_type_LT01 ~ company_code_PA00004_count +
company_code_PA00011_count + price_sum_by_by_approval_type_LA +
X.U.C0AC..U.C9C4...U.C778..U.D654._usagetime + category_code_6_count +
X.U.D544..U.D130...U.CE74..U.BA54..U.B77C._usagetime + X.U.B0A0..U.C528._usagetime +
X.U.AE30..U.C5C5...U.D559..U.D68C._usagetime + X.U.CE74..U.BA54..U.B77C...U.CEE8..U.D2B8..U.B864..U.B7EC._usagetime +
X.U.AC8C..U.C784...U.CEE4..U.BBA4..U.B2C8..U.D2F0._usagetime +
company_code_PA00010_count + X.U.C911..U.ACE0..U.AC70..U.B798._usagetime +
X.U.C790..U.C0B0..U.D1B5..U.D569..U.AD00..U.B9AC._usagetime +
price_sum_by_by_approval_type_LW + CCTV.U.BDF0..U.C5B4._usagetime +
X.U.D504..U.B9B0..U.D130...U.D329..U.C2A4._usagetime + X.U.B3C8..U.BC84..U.B294...U.B9AC..U.C6CC..U.B4DC._usagetime +
AR.VR_usagetime + X.U.BC31..U.D654..U.C810...U.C628..U.B77C..U.C778..U.BAB0._usagetime +
price_sum_by_by_approval_type_FC + X.U.C131..U.D615..U.C815..U.BCF4._usagetime +
X.U.C219..U.BC15._usagetime + X.U.C74C..U.C131...U.C601..U.C0C1..U.D1B5..U.D654._usagetime +
X.U.D2F0..U.CF13...U.C608..U.B9E4._usagetime +
X.U.BB38..U.C11C._usagetime + X.U.C571..U.C124..U.CE58..U.D504..U.B85C..U.ADF8..U.B7A8._usagetime +
X.U.B300..U.D559..U.AD50._usagetime + X.U.D30C..U.C77C...U.B2E4..U.C6B4..U.B85C..U.B354._usagetime +
X.U.D30C..U.C77C..U.AD00..U.B9AC._usagetime + price_sum_by_by_approval_type_FA +
price_sum_by_by_approval_type_LC + QR.U.C2A4..U.CE90..U.B108._usagetime +
X.U.AC80..U.C0C9..U.D3EC..U.D138._usagetime + X.U.C601..U.D654._usagetime +
X.U.B3C4..U.C11C..U.AD00._usagetime + category_code_12_count +
X.U.B300..U.CD9C._usagetime + X.U.AD50..U.D1B5..U.CE74..U.B4DC._usagetime +
X.U.C74C..U.C545...U.C2A4..U.D2B8..U.B9AC..U.BC0D._usagetime +
X.U.B179..U.C74C..U.AE30._usagetime + X.U.BB34..U.C74C..U.CE74..U.BA54..U.B77C._usagetime +
company_code_PA00012_count + X.U.C5EC..U.D589...U.C815..U.BCF4._usagetime +
X.U.C190..U.C804..U.B4F1._usagetime + X.U.C2E0..U.C6A9..U.C870..U.D68C._usagetime +
X.U.C1FC..U.D551._usagetime + X.U.D648..U.C6D0..U.ACA9..U.AD00..U.B9AC._usagetime +
X.U.B2EC..U.B825...U.C77C..U.C815..U.AD00..U.B9AC._usagetime +
X.U.C8FC..U.C720..U.C18C._usagetime + X.U.B3D9..U.D638..U.D68C...U.C18C..U.BAA8..U.C784._usagetime +
X.U.BC18..U.B824..U.B3D9..U.BB3C._usagetime + X.U.C18C..U.C15C...U.C5F0..U.ACC4._usagetime +
X.U.CF58..U.D150..U.CE20._usagetime + X.U.B80C..U.D130..U.CE74._usagetime,
data = transformed_data)
#r-squred ๊ฐ์ด ๊ฝค ๋๋ค์ (0.6)
summary(lm.after.vif)
# ๊ฒ์ ๊ฒฐ๊ณผ ๋ณํ์ด ํ์ํด๋ณด์
๋๋ค
plot(lm.after.vif)
#boxcox transformation
library(MASS)
bc_norm <- MASS::boxcox(lm.after.vif)
lambda <- bc_norm$x[which.max(bc_norm$y)]
lm.boxcox<-lm(formula = price_sum_by_by_approval_type_LT01^lambda ~ company_code_PA00004_count +
company_code_PA00011_count + price_sum_by_by_approval_type_LA +
X.U.C0AC..U.C9C4...U.C778..U.D654._usagetime + category_code_6_count +
X.U.D544..U.D130...U.CE74..U.BA54..U.B77C._usagetime + X.U.B0A0..U.C528._usagetime +
X.U.AE30..U.C5C5...U.D559..U.D68C._usagetime + X.U.CE74..U.BA54..U.B77C...U.CEE8..U.D2B8..U.B864..U.B7EC._usagetime +
X.U.AC8C..U.C784...U.CEE4..U.BBA4..U.B2C8..U.D2F0._usagetime +
company_code_PA00010_count + X.U.C911..U.ACE0..U.AC70..U.B798._usagetime +
X.U.C790..U.C0B0..U.D1B5..U.D569..U.AD00..U.B9AC._usagetime +
price_sum_by_by_approval_type_LW + CCTV.U.BDF0..U.C5B4._usagetime +
X.U.D504..U.B9B0..U.D130...U.D329..U.C2A4._usagetime + X.U.B3C8..U.BC84..U.B294...U.B9AC..U.C6CC..U.B4DC._usagetime +
AR.VR_usagetime + X.U.BC31..U.D654..U.C810...U.C628..U.B77C..U.C778..U.BAB0._usagetime +
price_sum_by_by_approval_type_FC + X.U.C131..U.D615..U.C815..U.BCF4._usagetime +
X.U.C219..U.BC15._usagetime + X.U.C74C..U.C131...U.C601..U.C0C1..U.D1B5..U.D654._usagetime +
X.U.D2F0..U.CF13...U.C608..U.B9E4._usagetime +
X.U.BB38..U.C11C._usagetime + X.U.C571..U.C124..U.CE58..U.D504..U.B85C..U.ADF8..U.B7A8._usagetime +
X.U.B300..U.D559..U.AD50._usagetime + X.U.D30C..U.C77C...U.B2E4..U.C6B4..U.B85C..U.B354._usagetime +
X.U.D30C..U.C77C..U.AD00..U.B9AC._usagetime + price_sum_by_by_approval_type_FA +
price_sum_by_by_approval_type_LC + QR.U.C2A4..U.CE90..U.B108._usagetime +
X.U.AC80..U.C0C9..U.D3EC..U.D138._usagetime + X.U.C601..U.D654._usagetime +
X.U.B3C4..U.C11C..U.AD00._usagetime + category_code_12_count +
X.U.B300..U.CD9C._usagetime + X.U.AD50..U.D1B5..U.CE74..U.B4DC._usagetime +
X.U.C74C..U.C545...U.C2A4..U.D2B8..U.B9AC..U.BC0D._usagetime +
X.U.B179..U.C74C..U.AE30._usagetime + X.U.BB34..U.C74C..U.CE74..U.BA54..U.B77C._usagetime +
company_code_PA00012_count + X.U.C5EC..U.D589...U.C815..U.BCF4._usagetime +
X.U.C190..U.C804..U.B4F1._usagetime + X.U.C2E0..U.C6A9..U.C870..U.D68C._usagetime +
X.U.C1FC..U.D551._usagetime + X.U.D648..U.C6D0..U.ACA9..U.AD00..U.B9AC._usagetime +
X.U.B2EC..U.B825...U.C77C..U.C815..U.AD00..U.B9AC._usagetime +
X.U.C8FC..U.C720..U.C18C._usagetime + X.U.B3D9..U.D638..U.D68C...U.C18C..U.BAA8..U.C784._usagetime +
X.U.BC18..U.B824..U.B3D9..U.BB3C._usagetime + X.U.C18C..U.C15C...U.C5F0..U.ACC4._usagetime +
X.U.CF58..U.D150..U.CE20._usagetime + X.U.B80C..U.D130..U.CE74._usagetime,
data = transformed_data)
# ์์ฐจ๊ทธ๋ฆผ์ ๋ง์ด ์ข์์ก์ผ๋ ๊ฒฐ์ ๊ณ์๊ฐ ์ ๋ ๋จ์ด์ก์๊น์ฌ.. ํํ
summary(lm.boxcox)
plot(lm.boxcox)
#์
๊ธ ๋ด์ญ์ ๋ค์ ๋ฃ๊ณ boxcox ํด๋ณผ๊ฒ์ฌ..
bc_norm <- MASS::boxcox(lm.after.vif)
lambda <- bc_norm$x[which.max(bc_norm$y)]
lm.boxcox_1<-lm(formula = price_sum_by_by_approval_type_LT01^lambda ~ company_code_PA00004_count +
company_code_PA00011_count + price_sum_by_by_approval_type_LA +
X.U.C0AC..U.C9C4...U.C778..U.D654._usagetime + category_code_6_count +
X.U.D544..U.D130...U.CE74..U.BA54..U.B77C._usagetime + X.U.B0A0..U.C528._usagetime +
X.U.AE30..U.C5C5...U.D559..U.D68C._usagetime + X.U.CE74..U.BA54..U.B77C...U.CEE8..U.D2B8..U.B864..U.B7EC._usagetime +
X.U.AC8C..U.C784...U.CEE4..U.BBA4..U.B2C8..U.D2F0._usagetime +
company_code_PA00010_count + X.U.C911..U.ACE0..U.AC70..U.B798._usagetime +
X.U.C790..U.C0B0..U.D1B5..U.D569..U.AD00..U.B9AC._usagetime +
price_sum_by_by_approval_type_LW + CCTV.U.BDF0..U.C5B4._usagetime +
X.U.D504..U.B9B0..U.D130...U.D329..U.C2A4._usagetime + X.U.B3C8..U.BC84..U.B294...U.B9AC..U.C6CC..U.B4DC._usagetime +
AR.VR_usagetime + X.U.BC31..U.D654..U.C810...U.C628..U.B77C..U.C778..U.BAB0._usagetime +
price_sum_by_by_approval_type_FC + X.U.C131..U.D615..U.C815..U.BCF4._usagetime +
X.U.C219..U.BC15._usagetime + X.U.C74C..U.C131...U.C601..U.C0C1..U.D1B5..U.D654._usagetime +
price_sum_by_by_approval_type_LD + X.U.D2F0..U.CF13...U.C608..U.B9E4._usagetime +
X.U.BB38..U.C11C._usagetime + X.U.C571..U.C124..U.CE58..U.D504..U.B85C..U.ADF8..U.B7A8._usagetime +
X.U.B300..U.D559..U.AD50._usagetime + X.U.D30C..U.C77C...U.B2E4..U.C6B4..U.B85C..U.B354._usagetime +
X.U.D30C..U.C77C..U.AD00..U.B9AC._usagetime + price_sum_by_by_approval_type_FA +
price_sum_by_by_approval_type_LC + QR.U.C2A4..U.CE90..U.B108._usagetime +
X.U.AC80..U.C0C9..U.D3EC..U.D138._usagetime + X.U.C601..U.D654._usagetime +
X.U.B3C4..U.C11C..U.AD00._usagetime + category_code_12_count +
X.U.B300..U.CD9C._usagetime + X.U.AD50..U.D1B5..U.CE74..U.B4DC._usagetime +
X.U.C74C..U.C545...U.C2A4..U.D2B8..U.B9AC..U.BC0D._usagetime +
X.U.B179..U.C74C..U.AE30._usagetime + X.U.BB34..U.C74C..U.CE74..U.BA54..U.B77C._usagetime +
company_code_PA00012_count + X.U.C5EC..U.D589...U.C815..U.BCF4._usagetime +
X.U.C190..U.C804..U.B4F1._usagetime + X.U.C2E0..U.C6A9..U.C870..U.D68C._usagetime +
X.U.C1FC..U.D551._usagetime + X.U.D648..U.C6D0..U.ACA9..U.AD00..U.B9AC._usagetime +
X.U.B2EC..U.B825...U.C77C..U.C815..U.AD00..U.B9AC._usagetime +
X.U.C8FC..U.C720..U.C18C._usagetime + X.U.B3D9..U.D638..U.D68C...U.C18C..U.BAA8..U.C784._usagetime +
X.U.BC18..U.B824..U.B3D9..U.BB3C._usagetime + X.U.C18C..U.C15C...U.C5F0..U.ACC4._usagetime +
X.U.CF58..U.D150..U.CE20._usagetime + X.U.B80C..U.D130..U.CE74._usagetime,
data = transformed_data)
summary(lm.boxcox_1)
plot(lm.boxcox_1)
#boxcox๋ฅผ ํ๋ฉด ๊ฒฐ์ ๊ณ์๊ฐ ๋จ์ด์ง๋ค์ฉ..
#์
๊ธ ๋ง๊ณ ์ถ๊ธ์ ๋นผ๋ณผ๊ฒ์ ^^
lm_again<-lm(formula = price_sum_by_by_approval_type_LT01 ~ company_code_PA00004_count +
company_code_PA00011_count + price_sum_by_by_approval_type_LA +
X.U.C0AC..U.C9C4...U.C778..U.D654._usagetime + category_code_6_count +
X.U.D544..U.D130...U.CE74..U.BA54..U.B77C._usagetime + X.U.B0A0..U.C528._usagetime +
X.U.AE30..U.C5C5...U.D559..U.D68C._usagetime + X.U.CE74..U.BA54..U.B77C...U.CEE8..U.D2B8..U.B864..U.B7EC._usagetime +
X.U.AC8C..U.C784...U.CEE4..U.BBA4..U.B2C8..U.D2F0._usagetime +
company_code_PA00010_count + X.U.C911..U.ACE0..U.AC70..U.B798._usagetime +
X.U.C790..U.C0B0..U.D1B5..U.D569..U.AD00..U.B9AC._usagetime +
CCTV.U.BDF0..U.C5B4._usagetime +
X.U.D504..U.B9B0..U.D130...U.D329..U.C2A4._usagetime + X.U.B3C8..U.BC84..U.B294...U.B9AC..U.C6CC..U.B4DC._usagetime +
AR.VR_usagetime + X.U.BC31..U.D654..U.C810...U.C628..U.B77C..U.C778..U.BAB0._usagetime +
price_sum_by_by_approval_type_FC + X.U.C131..U.D615..U.C815..U.BCF4._usagetime +
X.U.C219..U.BC15._usagetime + X.U.C74C..U.C131...U.C601..U.C0C1..U.D1B5..U.D654._usagetime +
price_sum_by_by_approval_type_LD + X.U.D2F0..U.CF13...U.C608..U.B9E4._usagetime +
X.U.BB38..U.C11C._usagetime + X.U.C571..U.C124..U.CE58..U.D504..U.B85C..U.ADF8..U.B7A8._usagetime +
X.U.B300..U.D559..U.AD50._usagetime + X.U.D30C..U.C77C...U.B2E4..U.C6B4..U.B85C..U.B354._usagetime +
X.U.D30C..U.C77C..U.AD00..U.B9AC._usagetime + price_sum_by_by_approval_type_FA +
price_sum_by_by_approval_type_LC + QR.U.C2A4..U.CE90..U.B108._usagetime +
X.U.AC80..U.C0C9..U.D3EC..U.D138._usagetime + X.U.C601..U.D654._usagetime +
X.U.B3C4..U.C11C..U.AD00._usagetime + category_code_12_count +
X.U.B300..U.CD9C._usagetime + X.U.AD50..U.D1B5..U.CE74..U.B4DC._usagetime +
X.U.C74C..U.C545...U.C2A4..U.D2B8..U.B9AC..U.BC0D._usagetime +
X.U.B179..U.C74C..U.AE30._usagetime + X.U.BB34..U.C74C..U.CE74..U.BA54..U.B77C._usagetime +
company_code_PA00012_count + X.U.C5EC..U.D589...U.C815..U.BCF4._usagetime +
X.U.C190..U.C804..U.B4F1._usagetime + X.U.C2E0..U.C6A9..U.C870..U.D68C._usagetime +
X.U.C1FC..U.D551._usagetime + X.U.D648..U.C6D0..U.ACA9..U.AD00..U.B9AC._usagetime +
X.U.B2EC..U.B825...U.C77C..U.C815..U.AD00..U.B9AC._usagetime +
X.U.C8FC..U.C720..U.C18C._usagetime + X.U.B3D9..U.D638..U.D68C...U.C18C..U.BAA8..U.C784._usagetime +
X.U.BC18..U.B824..U.B3D9..U.BB3C._usagetime + X.U.C18C..U.C15C...U.C5F0..U.ACC4._usagetime +
X.U.CF58..U.D150..U.CE20._usagetime + X.U.B80C..U.D130..U.CE74._usagetime,
data = transformed_data)
summary(lm_again)
plot(lm_again)
#Looks like a non-linear model is needed..
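# --- Hedged sketch (added; not part of the original analysis) -----------------
# The comment above suggests a non-linear model. The sketch below assumes the
# same `transformed_data` data frame and picks just two of the predictors from
# the formulas above for illustration; the choice of mgcv::gam() with smooth
# terms is an assumption, not the author's method, and the smooths require the
# predictors to have enough distinct values.
library(mgcv)
gam_sketch <- gam(price_sum_by_by_approval_type_LT01 ~
                    s(price_sum_by_by_approval_type_LA) +
                    s(X.U.C1FC..U.D551._usagetime),
                  data = transformed_data)
summary(gam_sketch)  # deviance explained plays the role of R-squared here
plot(gam_sketch)     # visualise the fitted smooth terms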
|
#' Odd_Even
#'
#' Adds two numbers together and says whether the sum is odd or even
#'
#' @param num1 The first number
#' @param num2 The second number
#'
#' @return Prints "Odd" or "Even" depending on whether the sum is odd or even (the printed string is returned invisibly)
#' @export
#'
#' @examples odd_even(10, 53)
odd_even <- function(num1, num2) {
sum <- num1 + num2
if ((sum %% 2) == 1) {
print("Odd")
} else {
print("Even")
}
}
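# Quick illustrative check: 10 + 53 = 63 and 63 %% 2 == 1, so "Odd";
# 2 + 4 = 6 and 6 %% 2 == 0, so "Even".
odd_even(10, 53)  # prints "Odd"
odd_even(2, 4)    # prints "Even"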
|
/R/odd_even.R
|
no_license
|
wyliehampson/lasagnashark
|
R
| false | false | 343 |
r
|
#' @title Convert Spatial HydroData Objects to Simple Features
#' @description A function to convert all Spatial* HydroData objects to simple feature geometries.
#' Non-Spatial objects (e.g. raster and list components) will be skipped over.
#' @param hydro.data.object a HydroData object with Spatial components
#' @return a list with the same length as the input
#' @examples
#' \dontrun{
#' AOI = getAOI(clip = 'UCSB') %>% findNED %>% findNHD %>% findNWIS %>% to_sf
#' }
#' @export
#' @author Mike Johnson
to_sf = function(hydro.data.object = NULL){
`%+%` = crayon::`%+%`
b = vector()
for(i in seq_along(hydro.data.object)){ b = append(b, grepl("Spatial", class(hydro.data.object[[i]]))) }
sf = c(hydro.data.object[!b], lapply(hydro.data.object[b], sf::st_as_sf))
cat(crayon::white('\nConverted to simple features: ') %+% crayon::cyan(paste(names(hydro.data.object[b]), collapse = ", ")), "\n")
return(sf)
}
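# --- Hedged usage sketch (added; the toy objects below are assumptions) -------
# to_sf() takes a named list, converts the Spatial* elements with sf::st_as_sf()
# and passes everything else through untouched (non-spatial elements end up
# first in the returned list).
library(sp)
library(sf)
pts <- SpatialPointsDataFrame(coords = cbind(c(0, 1), c(0, 1)),
                              data = data.frame(id = 1:2))
demo <- to_sf(list(points = pts, note = "non-spatial element, left as-is"))
class(demo$points)  # "sf" "data.frame"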
|
/R/to_sf.R
|
permissive
|
mikejohnson51/HydroData
|
R
| false | false | 927 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_meta.R
\name{add_readme}
\alias{add_readme}
\title{Add a README.md file to the project directory}
\usage{
add_readme(path = ".", package = FALSE, rmd = FALSE)
}
\arguments{
\item{path}{Directory path (default \code{"."})}
\item{package}{Is this a package or a regular project? (Default \code{FALSE})}
\item{rmd}{Should rmarkdown file be used (Default \code{FALSE})}
}
\description{
Add a README.md file to the project directory
}
\seealso{
\code{\link{add_contributing}}, \code{\link{add_license}}, \code{\link{add_license_header}}
}
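% Illustrative usage (not generated by roxygen2); the call below simply
% exercises the arguments documented above and assumes the documented defaults.
\examples{
\dontrun{
add_readme(path = ".", rmd = TRUE)
}
}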
|
/man/add_readme.Rd
|
permissive
|
theodoreback/bcgovr
|
R
| false | true | 618 |
rd
|
library(dplyr)
library(caTools)
dataset = read.csv("home_data.csv",stringsAsFactors = F, sep=",")
str(dataset)
head(dataset)
dataset$date = as.Date(substring(dataset$date,1,8),"%Y%m%d")
dataset$date
## Ans 1
agg = aggregate(dataset$price, list(dataset$zipcode), FUN = mean)
agg[agg$x==max(agg$x),]
(s=filter(agg,agg$x==max(agg$x)))
#Ans 2
(f_sqft = filter(dataset, dataset$sqft_living>2000 & dataset$sqft_living<4000))
(nrow(f_sqft)/nrow(dataset))*100
#Ans 3
?subset
# keep price in the data frame so the split and the training/test models below can use it
basic = data.frame(price = dataset$price, bedrooms = dataset$bedrooms, bathrooms = dataset$bathrooms, sqft_living = dataset$sqft_living, sqft_lot = dataset$sqft_lot, floors = dataset$floors, zipcode = dataset$zipcode)
str(basic)
basic
split = sample.split(basic$price, SplitRatio = 0.80)
training_set = subset(basic, split==TRUE)
training_set
test_set = subset(basic, split==FALSE)
test_set
fit1 = lm(price~bedrooms+bathrooms+sqft_living+sqft_lot+floors+zipcode, data=dataset)
summary(fit1)
fit2 = lm(price~bedrooms+bathrooms+sqft_living+sqft_lot+zipcode, data=dataset)
summary(fit2)
fit3 = lm(price~bedrooms+sqft_living+sqft_lot+zipcode, data=dataset)
summary(fit3)
regressor = lm(formula = price~., data=training_set)
regressor
y_predict = predict(regressor, newdata = test_set)
test_set$dataset_pred = y_predict
test_diff = test_set$price - test_set$dataset_pred
result_test = data.frame(test_set$price,y_predict, round(test_diff,2))
result_test
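# Optional check (illustrative; assumes the corrected `basic` above keeps the price column):
# root-mean-squared error of the hold-out predictions.
rmse = sqrt(mean((test_set$price - y_predict)^2))
rmse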
|
/Assignment.R
|
no_license
|
Vijayoswalia/SAR
|
R
| false | false | 1,419 |
r
|
# Method 3: age-adjusted cross-section (survivors + incident cohorts + decedents) (P.4)
setwd("C:/Users/janet/Documents/Codes/DM Project")
library(Hmisc); library(dplyr); library(data.table)
# For year t=baseline (2007-2008), calculate the following: ---------
# (1) For those who survive the year (to the end of year t), calculate expected remaining life-years for each individual, based on risk prediction model using biomarkers measured in year t. The predicted remaining life expectancy (LE) is calculated in the same way as for the survivor panel, method 1, but for each individual use their own actual age and duration of diagnosis to predict risk of death and, based on that, their remaining LE.
mylist <- readRDS("pt_clinicalvalues.rds")
load("8a MI_clinicalvalues_0711.Rdata")
rm(imp_bl, imp_fn)
baseline_imputed <- as.data.table(baseline_imputed)
final_imputed <- as.data.table(final_imputed)
load("6 participant status.Rdata")
# using model equation for HKU-SG model eq
HKU_SG_mortality <- function(input) {
100*(1-0.912^exp(2.727159
+0.02659452*as.numeric(input$age)
+4.075628e-5*(as.numeric(input$age) - 41)^3
-0.0001070358*(as.numeric(input$age) - 58)^3
+7.311264e-5*(as.numeric(input$age) - 70)^3
-6.833147e-6*(as.numeric(input$age) - 85)^3
+0.2650322*as.numeric(input$duration)
-0.01608406*(as.numeric(input$duration) - 0.04654346)^3
+0.01883374*(as.numeric(input$duration) - 0.9609856)^3
-0.00277583*(as.numeric(input$duration) - 6.466804)^3
+2.614735e-5*(as.numeric(input$duration) - 22.96235)^3
-0.1983312*(as.numeric(input$female==1))
-0.3118533*(as.numeric(input$smoking==1)) # ex-smoker
-0.6109742*(as.numeric(input$smoking==2)) # non-smoker
+0.5252391*(as.numeric(input$af==1))
+1.077321*(as.numeric(input$ckd==1))
+0.4913603*(as.numeric(input$stroke==1))
+0.2324324*(as.numeric(input$chd==1))
-0.3320009*as.numeric(input$hba1c)
+0.06135776*(as.numeric(input$hba1c) - 5.6)^3
-0.1198288*(as.numeric(input$hba1c) - 6.6)^3
+0.05774934*(as.numeric(input$hba1c) - 7.6)^3
+0.0007216831*(as.numeric(input$hba1c) - 11.6)^3
-0.006923551*as.numeric(input$sbp)
+3.548158e-6*(as.numeric(input$sbp) - 108)^3
-8.185037e-6*(as.numeric(input$sbp) - 130)^3
+4.343557e-6*(as.numeric(input$sbp) - 145)^3
+2.93321e-7*(as.numeric(input$sbp) - 174)^3
-0.00510383*as.numeric(input$dbp)
+8.585339e-6*(as.numeric(input$dbp) - 58)^3
-1.604159e-5*(as.numeric(input$dbp) - 71)^3
+4.674797e-6*(as.numeric(input$dbp) - 80)^3
+2.781449e-6*(as.numeric(input$dbp) - 96)^3
-0.1802774*as.numeric(input$ldl)
+0.03426755*(as.numeric(input$ldl) - 1.62)^3
-0.06139979*(as.numeric(input$ldl) - 2.6606)^3
+0.01499461*(as.numeric(input$ldl) - 3.3636)^3
+0.01213762*(as.numeric(input$ldl) - 4.73)^3
-0.0506029*as.numeric(input$bmi)
+0.0003252084*(as.numeric(input$bmi) - 19.7)^3
-0.0004954199*(as.numeric(input$bmi) - 23.95)^3
+2.750309e-5*(as.numeric(input$bmi) - 26.83)^3
+0.0001427083*(as.numeric(input$bmi) - 33.08)^3))
}
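# Illustrative check of the risk equation on a single made-up patient profile;
# every field referenced in the formula must be present. The returned value is
# the predicted 5-year mortality risk in percent (all values below are assumptions).
example_patient <- data.frame(age = 65, duration = 8, female = 1, smoking = 2,
                              af = 0, ckd = 0, stroke = 0, chd = 0,
                              hba1c = 7.2, sbp = 135, dbp = 78, ldl = 2.8, bmi = 25)
HKU_SG_mortality(example_patient)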
# select survivors + decedents who enter within the baseline period (2007-08) and are still alive by the end of baseline period (before 2009-01-01)
bl <- baseline_imputed[serial_no %in% survivor_no | serial_no %in% decedent_no]
bl <- bl[!(death.date < "2009-01-01") | is.na(death.date)] #369600 left
# for each individual, use their own actual age and duration of DM
# for baseline, calculate age at entry year
bl$entry.year <- as.numeric(substr(bl$entry.date,1,4))
bl$age <- bl$entry.year - bl$dob
# predict risk
r <- data.frame(serial_no = bl$serial_no, bl_risk = HKU_SG_mortality(bl))
# calculate remaining life years
# lifetable at 2006
male_lifetable <- data.frame(age = seq(1:100), le=c(78.53, 77.56, 76.58, 75.60, 74.61, 73.63, 72.64, 71.65, 70.65, 69.66, 68.67, 67.68, 66.68, 65.69, 64.70, 63.71, 62.72, 61.73, 60.74, 59.76, 58.78, 57.80, 56.82, 55.85, 54.87, 53.90, 52.94, 51.97, 51.00, 50.04, 49.07, 48.10, 47.14, 46.17, 45.20, 44.24, 43.28, 42.32, 41.36, 40.41, 39.46, 38.51, 37.56, 36.61, 35.67, 34.73, 33.79, 32.86, 31.93, 31.01, 30.10, 29.20, 28.30, 27.41, 26.52, 25.65, 24.78, 23.92, 23.07, 22.23, 21.40, 20.59, 19.79, 18.99, 18.21, 17.44, 16.67, 15.92, 15.19, 14.47, 13.77, 13.09, 12.42, 11.78, 11.15, 10.55, 9.96, 9.40, 8.86, 8.34, 7.84, 7.37, 6.92, 6.49, 6.09, 5.70, 5.33, 4.98, 4.65, 4.34, 4.05, 3.77, 3.51, 3.27, 3.04, 2.83, 2.63, 2.44, 2.27, 2.11))
female_lifetable <- data.frame(seq =seq(1:100), le = c(84.69, 83.71, 82.72, 81.73, 80.74, 79.75, 78.76, 77.77, 76.77, 75.78, 74.78, 73.79, 72.80, 71.81, 70.81, 69.82, 68.83, 67.84, 66.85, 65.86, 64.86, 63.87, 62.88, 61.89, 60.90, 59.91, 58.93, 57.94, 56.96, 55.97, 54.99, 54.01, 53.03, 52.05, 51.07, 50.09, 49.12, 48.14, 47.17, 46.20, 45.23, 44.26, 43.29, 42.33, 41.37, 40.40, 39.45, 38.49, 37.54, 36.60, 35.66, 34.72, 33.79, 32.86, 31.93, 31.01, 30.09, 29.17, 28.25, 27.35, 26.44, 25.55, 24.65, 23.77, 22.89, 22.01, 21.14, 20.28, 19.44, 18.60, 17.78, 16.98, 16.20, 15.43, 14.68, 13.94, 13.23, 12.54, 11.87, 11.23, 10.60, 10.01, 9.43, 8.88, 8.35, 7.84, 7.35, 6.88, 6.44, 6.01, 5.61, 5.22, 4.86, 4.52, 4.19, 3.89, 3.60, 3.33, 3.08, 2.85))
le <- mylist[[1]][c("serial_no", "female", "age")]
le$le <- ifelse(le$female==TRUE, female_lifetable$le[le$age], male_lifetable$le[le$age])
# (3) Multiply remaining LYs by the value of a LY to get the value of remaining life, V.
# we approximate the predicted probability of death in the next 5 years by giving one fifth of the predicted probability(0.2 Pjt) to each of the first 5 years, and assume that all patients surviving beyond year 5 (with probability 1- Pjt)have the same age- and sex-specific remaining life expectancy as a general individual from the lifetable of that population.
# Below adapted from Brian's Stata Codes (09_lifetable_netvalue)
df <- merge(le, r, by="serial_no", all.y=T)
df$bl_risk <- df$bl_risk/100
df <- df[complete.cases(df)==TRUE,] #15 participants with age >100 eliminated
df <- as.data.table(df)
rate <- 0.03 # 3%
df$le_int <- df$le - df$le %% 1
df$le_rem <- df$le %% 1
for (i in c(50, 100, 200)){
assign(paste0("valyear", i, "k1"), i*1000*((1-(1/(1+rate)^1))/rate))
assign(paste0("valyear", i, "k2"), i*1000*((1-(1/(1+rate)^2))/rate))
assign(paste0("valyear", i, "k3"), i*1000*((1-(1/(1+rate)^3))/rate))
assign(paste0("valyear", i, "k4"), i*1000*((1-(1/(1+rate)^4))/rate))
assign(paste0("valyear", i, "k5"), i*1000*((1-(1/(1+rate)^5))/rate))
}
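# Quick sanity check of the discounting constants (illustrative):
# the 1-year annuity factor at 3% is (1 - 1/1.03)/0.03 = 0.9709, so
# valyear50k1 should be roughly 50000 * 0.9709 = 48544.
round(valyear50k1)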
df$le_50k_bl <-
((df$bl_risk/5)*(valyear50k1 + valyear50k2 + valyear50k3 + valyear50k4 + valyear50k5)
+ (1-df$bl_risk)*
(50000*((1-(1/(1+rate)^df$le_int))/rate)
+ 50000*df$le_rem/(1+rate)^(df$le_int+1)))
df$le_100k_bl <-
((df$bl_risk/5)*(valyear100k1 + valyear100k2 + valyear100k3 + valyear100k4 + valyear100k5)
+ (1-df$bl_risk)*
(100000*((1-(1/(1+rate)^df$le_int))/rate)
+ 100000*df$le_rem/(1+rate)^(df$le_int+1)))
df$le_200k_bl <-
((df$bl_risk/5)*(valyear200k1 + valyear200k2 + valyear200k3 + valyear200k4 + valyear200k5)
+ (1-df$bl_risk)*
(200000*((1-(1/(1+rate)^df$le_int))/rate)
+ 200000*df$le_rem/(1+rate)^(df$le_int+1)))
df <- df[, -c("le", "bl_risk", "le_int", "le_rem")]
# (2) For those who die during year t, give them 0.5 as their remaining life-years. In other words, decedents received 0.5LY for the year.
# only include decedents who died before 2009-01-01
decedent <- as.data.table(mylist[[1]])
decedent <- decedent[serial_no %in% decedent_no]
decedent <- decedent[death.date < "2009-01-01", c("serial_no", "female", "dob", "entry.date", "death.date")] #27218 obs
decedent$entry.yr <- as.numeric(substr(decedent$entry.date, 1, 4))
decedent$death.yr <- as.numeric(substr(decedent$death.date, 1, 4))
decedent <- decedent[!death.yr == 2006] # remove those who died in 2006 (before entry year)
decedent$le <- decedent$death.yr - decedent$entry.yr + 0.5 # for those who die during year t, give them 0.5 remaining life-years
decedent$age <- decedent$entry.yr - decedent$dob
decedent <- decedent[, c("serial_no", "female", "age", "le")]
decedent$le_int <- decedent$le - decedent$le %% 1
decedent$le_rem <- decedent$le %% 1
decedent$le_50k_bl <- 50000*((1-(1/(1+rate)^decedent$le_int))/rate) + 50000*decedent$le_rem/(1+rate)^(decedent$le_int+1)
decedent$le_100k_bl <- 100000*((1-(1/(1+rate)^decedent$le_int))/rate) + 100000*decedent$le_rem/(1+rate)^(decedent$le_int+1)
decedent$le_200k_bl <- 200000*((1-(1/(1+rate)^decedent$le_int))/rate) + 200000*decedent$le_rem/(1+rate)^(decedent$le_int+1)
decedent <- decedent[, -c("le", "le_int", "le_rem")]
# Combine decedents and survivors dataframes
df <- rbind(df, decedent)
# (4) For each individual in year t (including decedents), subtract c, their total medical spending in year t, from V, the value of remaining life. We call this Vt - ct.
spending <- readRDS("4c total adjusted spending.rds")
spending$spend.adj <- spending$spend.adj / 7.84975 # Convert from HKD to USD
s <- reshape(spending, direction = "wide", timevar = "yr", idvar = "serial_no")
s <- data.table(serial_no = s$serial_no, spending = s$spend.adj.2007 + s$spend.adj.2008)
df <- merge(df, s, by = "serial_no", all.x = TRUE)
# crude results
crude_baseline <- c((mean(df$le_50k_bl) - mean(df$spending)), (mean(df$le_100k_bl) - mean(df$spending)), (mean(df$le_200k_bl) - mean(df$spending)))
# (5) For each age- and sex- group (e.g. males age 40-45), obtain average Vt - ct
# 15-19, 20-24 etc. until >85
df <- mutate(df, group = cut(age, breaks=c(seq(from = 14, to = 84, by = 5), Inf), labels=c("15-19", "20-24", "25-29", "30-34", "35-39", "40-44", "45-49", "50-54", "55-59", "60-64", "65-69", "70-74", "75-79", "80-84", "85+")))
df <- as.data.table(df)
df <- df[, .(spending = mean(spending), le50k = mean(le_50k_bl), le100k = mean(le_100k_bl), le200k = mean(le_200k_bl)), keyby = .(group, female)]
df$nv50k <- df$le50k - df$spending
df$nv100k <- df$le100k - df$spending
df$nv200k <- df$le200k - df$spending
df <- df[, -c("le50k", "le100k", "le200k", "spending")]
# (6) Create a weighted average Vt - ct based on the weights of the reference population,by multiplying the average for each age-sex group by the weight assigned to that age-sex group in the reference population.
# WHO reference population from age 15+ to 85+
df$who <- rep(c(0.057318597, 0.055626862, 0.053664342, 0.051498732, 0.048386049, 0.0445964, 0.040874449, 0.03634014, 0.03079106, 0.025174285, 0.020031384, 0.014955503, 0.010286478, 0.006158347, 0.004297372), each = 2)
df$hkdm <- c(0.001340, 0.000997, 0.001315, 0.001293, 0.001794, 0.002307, 0.003605, 0.004583, 0.007690, 0.008642, 0.016029, 0.014628, 0.029435, 0.023546, 0.059159, 0.046097, 0.082705, 0.068599, 0.090264, 0.081155, 0.090036, 0.084927, 0.070938, 0.066088, 0.067006, 0.075821, NA, NA, NA, NA)
who_baseline <- c(sum(df$nv50k * df$who), sum(df$nv100k * df$who), sum(df$nv200k * df$who))
df2 <- df[complete.cases(df)]
hkdm_baseline <- c(sum(df2$nv50k * df2$hkdm), sum(df2$nv100k * df2$hkdm), sum(df2$nv200k * df2$hkdm))
results <- cbind(crude_baseline, who_baseline, hkdm_baseline)
row.names(results) <- c("Net value per life-year 50k", "Net value per life-year 100k", "Net value per life-year 200k")
#######################
# For year t=final (2013-2014), calculate the following: ---------
# (1) For those who survive the year (to the end of year t), calculate expected remaining life-years for each individual, based on risk prediction model using biomarkers measured in year t. The predicted remaining life expectancy (LE) is calculated in the same way as for the survivor panel, method (1), but for each individual use their own actual age and duration of diagnosis to predict risk of death and, based on that, their remaining LE.
# select survivors
fn <- final_imputed[is.na(death.date)]
# for each individual, use their own actual age and duration of DM
# for the final period, calculate age at 2014 and duration up to the end of 2014
fn$age <- 2014 - fn$dob
fn$duration <- as.Date("2014-12-31") - fn$dm.date
fn$duration <- as.numeric(fn$duration/365.25) # change format from "difftime" to "numeric", unit from "day" to "year"
# predict risk
r <- data.frame(serial_no = fn$serial_no, fn_risk = HKU_SG_mortality(fn))
# calculate remaining life years
le <- mylist[[1]][c("serial_no","female","duration","age")]
le$le <- ifelse(le$female==TRUE, female_lifetable$le[le$age], male_lifetable$le[le$age])
df <- merge(le, r, by="serial_no", all.y=T)
df$fn_risk <- df$fn_risk/100
df <- df[complete.cases(df)==TRUE,] #35 participants with age >100 eliminated
df <- as.data.table(df)
df$le_int <- df$le - df$le %% 1
df$le_rem <- df$le %% 1
df$le_50k_fn <-
((df$fn_risk/5)*(valyear50k1 + valyear50k2 + valyear50k3 + valyear50k4 + valyear50k5)
+ (1-df$fn_risk)*
(50000*((1-(1/(1+rate)^df$le_int))/rate)
+ 50000*df$le_rem/(1+rate)^(df$le_int+1)))
df$le_100k_fn <-
((df$fn_risk/5)*(valyear100k1 + valyear100k2 + valyear100k3 + valyear100k4 + valyear100k5)
+ (1-df$fn_risk)*
(100000*((1-(1/(1+rate)^df$le_int))/rate)
+ 100000*df$le_rem/(1+rate)^(df$le_int+1)))
df$le_200k_fn <-
((df$fn_risk/5)*(valyear200k1 + valyear200k2 + valyear200k3 + valyear200k4 + valyear200k5)
+ (1-df$fn_risk)*
(200000*((1-(1/(1+rate)^df$le_int))/rate)
+ 200000*df$le_rem/(1+rate)^(df$le_int+1)))
df <- df[, -c("le", "fn_risk", "le_int", "le_rem", "duration")]
# (2) For those who die during year t, give them 0.5 as their remaining life-years. In other words, decedents received 0.5LY for the year.
# only include participants who died in 2013 and 2014
decedent <- as.data.table(mylist[[1]])
decedent <- decedent[death.date > "2012-12-31" & death.date < "2015-01-01", c("serial_no", "female", "dob", "entry.date", "death.date")]
decedent$entry.yr <- as.numeric(substr(decedent$entry.date, 1, 4))
decedent$death.yr <- as.numeric(substr(decedent$death.date, 1, 4))
decedent$entry.yr <- ifelse(decedent$entry.yr == 2014, 2014, 2013) # only consider spending / life value during final period, so only the life-year of 2013-14 is considered, even if participants entered before 2013
decedent$le <- decedent$death.yr - decedent$entry.yr + 0.5
decedent$age <- as.numeric(2014 - decedent$dob)
decedent <- decedent[, c("serial_no", "female", "age", "le")]
# (3) Multiply remaining LYs by the value of a LY to get the value of remaining life, V, assuming value of life-year = $50 000, $100,000 and $200 000
decedent$le_int <- decedent$le - decedent$le %% 1
decedent$le_rem <- decedent$le %% 1
decedent$le_50k_fn <- 50000*((1-(1/(1+rate)^decedent$le_int))/rate) + 50000*decedent$le_rem/(1+rate)^(decedent$le_int+1)
decedent$le_100k_fn <- 100000*((1-(1/(1+rate)^decedent$le_int))/rate) + 100000*decedent$le_rem/(1+rate)^(decedent$le_int+1)
decedent$le_200k_fn <- 200000*((1-(1/(1+rate)^decedent$le_int))/rate) + 200000*decedent$le_rem/(1+rate)^(decedent$le_int+1)
decedent <- decedent[, -c("le", "le_int", "le_rem")]
# Combine decedents and survivors dataframes
df <- rbind(df, decedent)
# (4) For each individual in year t (including decedents), subtract c, their total medical spending in year t, from V, the value of remaining life. We call this Vt - ct.
s <- reshape(spending, direction = "wide", timevar = "yr", idvar = "serial_no")
s <- data.table(serial_no = s$serial_no, spending = s$spend.adj.2013 + s$spend.adj.2014)
df <- merge(df, s, by = "serial_no", all.x = TRUE)
# crude results
crude_final <- c((mean(df$le_50k_fn) - mean(df$spending)), (mean(df$le_100k_fn) - mean(df$spending)), (mean(df$le_200k_fn) - mean(df$spending)))
# (5) For each age- and sex- group (e.g. males age 40-45), obtain average Vt - ct
# 15-19, 20-24 etc. until >85
df <- mutate(df, group = cut(age, breaks=c(seq(from = 14, to = 84, by = 5), Inf), labels=c("15-19", "20-24", "25-29", "30-34", "35-39", "40-44", "45-49", "50-54", "55-59", "60-64", "65-69", "70-74", "75-79", "80-84", "85+")))
df <- as.data.table(df)
df <- df[, .(spending = mean(spending), le50k = mean(le_50k_fn), le100k = mean(le_100k_fn), le200k = mean(le_200k_fn)), keyby = .(group, female)]
df$nv50k <- df$le50k - df$spending
df$nv100k <- df$le100k - df$spending
df$nv200k <- df$le200k - df$spending
df <- df[, -c("le50k", "le100k", "le200k", "spending")]
# (6) Create a weighted average Vt - ct based on the weights of the reference population,by multiplying the average for each age-sex group by the weight assigned to that age-sex group in the reference population.
# WHO reference population from age 15+ to 85+
df$who <- rep(c(0.057318597, 0.055626862, 0.053664342, 0.051498732, 0.048386049, 0.0445964, 0.040874449, 0.03634014, 0.03079106, 0.025174285, 0.020031384, 0.014955503, 0.010286478, 0.006158347, 0.004297372), each = 2)
df$hkdm <- c(0.001340, 0.000997, 0.001315, 0.001293, 0.001794, 0.002307, 0.003605, 0.004583, 0.007690, 0.008642, 0.016029, 0.014628, 0.029435, 0.023546, 0.059159, 0.046097, 0.082705, 0.068599, 0.090264, 0.081155, 0.090036, 0.084927, 0.070938, 0.066088, 0.067006, 0.075821, NA, NA, NA, NA)
who_final <- c(sum(df$nv50k * df$who), sum(df$nv100k * df$who), sum(df$nv200k * df$who))
df2 <- df[complete.cases(df)]
hkdm_final <- c(sum(df2$nv50k * df2$hkdm), sum(df2$nv100k * df2$hkdm), sum(df2$nv200k * df2$hkdm))
results2 <- cbind(crude_final, who_final, hkdm_final)
row.names(results2) <- c("Net value per life-year 50k", "Net value per life-year 100k", "Net value per life-year 200k")
net_value <- cbind(results, results2)
net_value[, 1:4] <- as.numeric(net_value[, 1:4])
net_value <- as.data.frame(net_value)
net_value$crude_diff <- net_value$crude_final - net_value$crude_baseline
net_value$who_diff <- net_value$who_final - net_value$who_baseline
net_value$hkdm_diff <- net_value$hkdm_final - net_value$hkdm_baseline
net_value
|
/12 Method 3 (actual spending).R
|
no_license
|
janetltk/dm-net-value
|
R
| false | false | 18,715 |
r
|
#' Set random seed for future assignment
#'
#' @usage fassignment \%seed\% seed
#'
#' @param fassignment The future assignment, e.g.
#' \code{x \%<-\% \{ expr \}}.
#' @inheritParams multiprocess
#'
#' @export
`%seed%` <- function(fassignment, seed) {
fassignment <- substitute(fassignment)
envir <- parent.frame(1)
## Temporarily set 'seed' argument
args <- getOption("future.disposable", list())
args["seed"] <- list(seed)
options(future.disposable = args)
eval(fassignment, envir=envir)
}
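# Illustrative usage sketch (assumes the 'future' package is attached; the
# sequential plan below is just an assumption for the demo). %seed% makes the
# random draw inside the future assignment reproducible across runs.
library(future)
plan(sequential)
x %<-% { rnorm(2) } %seed% TRUE
x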
|
/R/seed_OP.R
|
no_license
|
tdhock/future
|
R
| false | false | 514 |
r
|
# readFrameFromFITS() comes from the FITSio package and as_tibble() from tibble
library(FITSio)
library(tibble)
# Download FITS file containing the 3FHL Catalog from the Fermi Science Support Center (FSSC) website
if (!file.exists("data-raw/gll_psch_v13.fit")) {
FHL3Url <- "http://fermi.gsfc.nasa.gov/ssc/data/access/lat/3FHL/gll_psch_v13.fit"
download.file(FHL3Url, destfile = "data-raw/gll_psch_v13.fit", method="curl")
}
# Read the 3FHL FITS file into a data frame
FHL3 <- as_tibble(readFrameFromFITS("data-raw/gll_psch_v13.fit", hdu = 1))
save(FHL3, file = "data/FHL3.rdata", compress = "xz")
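# Quick look at what was read (illustrative; the column set depends on the FITS table)
dim(FHL3)
head(names(FHL3))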
|
/data-raw/FHL3.R
|
no_license
|
niallcoffey/fermicatsR2
|
R
| false | false | 491 |
r
|
\name{ilpBinaryT2}
\alias{ilpBinaryT2}
\title{
ILP method used to optimise a model
}
\description{
This function applies the ILP method to optimise a model by fitting it to data for time point 2; it should follow an optimisation based on time point 1.
}
\usage{
ilpBinaryT2(cnolist,
model,
sizeFac = 0.0001,
mipGap = 0,
relGap = 0,
timelimit = 3600,
cplexPath,
method = "quadratic",
numSolutions = 100,
limitPop = 500,
poolIntensity = 0,
poolReplace = 2)
}
\arguments{
\item{cnolist}{
a CNOlist on which the score is based (based on valueSignals[[2]], i.e. data at
time 1)
}
\item{model}{
a model structure, as created by \code{readSIF}, normally pre-processed but that
is not a requirement of this function
}
\item{sizeFac}{
the scaling factor for the size term in the objective function, default to 0.0001
}
\item{mipGap}{
the absolute tolerance on the gap between the best integer objective and the objective of the best node remaining. When this difference falls below the value of this parameter, the linear integer optimization is stopped. Default set to 0
}
\item{relGap}{
the relative tolerance on the objective value for the solutions in the solution pool. Solutions that are worse (either greater in the case of a minimization, or less in the case of a maximization) than the incumbent solution by this measure are not kept in the solution pool. Default set to 0
}
\item{timelimit}{
the maximum optimisation time in seconds, default set to 3600
}
\item{cplexPath}{
the path where the cplex solver is stored. Default set to "~/Documents/cplex"
}
\item{method}{
the method of writing the objective function (quadratic/linear). Default set to "quadratic"
}
\item{numSolutions}{
the number of solutions to save
}
\item{limitPop}{
the number of solutions to be generated. Default set to 500
}
\item{poolIntensity}{
the intensity of the solution-pool search. Default set to 0
}
\item{poolReplace}{
pool replacement strategy, consult CPLEX manual for details.
}
}
\value{
This function returns a list with elements:
\item{bitstringILPAll}{the list of all optimal bitstrings identified}
\item{bScore}{the best score for each set of bitstrings}
\item{time_cplex_only}{the time it took for cplex to solve the problem}
\item{total_time}{the total time for the pipeline to run (writing problem + solving problem + retrieving solutions)}
\item{stringsTolScores}{the scores of the above-mentioned strings}
}
\author{
E Gjerga, H Koch
}
\references{
Alexander Mitsos, Ioannis N. Melas, Paraskeuas Siminelakis, Aikaterini D. Chairakaki, Julio Saez-Rodriguez, and Leonidas G. Alexopoulos. Identifying Drug Effects via Pathway Alterations using an Integer Linear Programming Optimization Formulation on Phosphoproteomic Data. PLoS Comput Biol. 2009 Dec; 5(12): e1000591.
}
\examples{
# Toy Example
data("ToyModel", package="CellNOptR")
data("CNOlistToy", package="CellNOptR")
pknmodel = ToyModel
cnolist = CNOlist(CNOlistToy)
model = preprocessing(data = cnolist, model = pknmodel, compression = TRUE, expansion = TRUE)
plotModel(model = model, CNOlist = cnolist)
# Training to data - ILP
\dontrun{
resILP = ilpBinaryT2(cnolist = cnolist, model = model)
}
}
|
/man/ilpBinaryT2.Rd
|
no_license
|
saezlab/CellNOptR
|
R
| false | false | 3,450 |
rd
|
library(quanteda)
library(spacyr)
library(tidyverse)
# For this script we're going to learn how to use part-of-speech tagging in R.
# This is a little tricky as it required a Python installation,
# and an installation of the Python package spacy.
# The spacyr package is just a wrapper for Python.
# Once you have everything set up, you need to load the packages
# and initialize the spacy model.
# You can substitute other models including en_core_web_sm and
# en_core_web_lg. For these consult:
# https://spacy.io/usage
# You may or may not need to include the python_executable argument.
spacy_initialize(model = "en")
# If it can't find a Python executable, you'll need to add the
# python_executable argument. And you'll need to point to
# the virtual environment in which you've installed spacy.
# On a Mac it will look something like:
# python_executable = "/Users/MyName/.env/bin/python"
# To find the path, you can run the following:
# reticulate::py_config()
#
# That will return an output with executable paths at the end.
# If you installed spacy in a virtual environment, one of those paths
# will end with .env/bin/python
# Otherwise, you can set the path to your specific installation.
spacy_initialize(model = "en", python_executable = ".env/bin/python")
# Load the helper functions
source("functions/helper_functions.R")
# And our keyness functions
source("functions/keyness_functions.R")
# We're going to load in our corpus using a different technique this time.
# First we'll generate a list of file names that are in the folder we want.
# Note that full.names is set to TRUE to retrieve the full paths.
# If we also wanted to retrieve paths from subfolders, we'd set recursive to TRUE.
micusp_paths <- list.files("data/text_data/micusp_mini", full.names = TRUE, pattern = "*.txt")
# To save on processing, we're first going to keep only the English
# and Biology papers using str_detect()
paths_sub <- micusp_paths %>% str_detect("ENG|BIO") %>% keep(micusp_paths, .)
# Read in our files from the paths.
sub_df <- readtext_lite(paths_sub)
# And create a corpus object.
sub_corpus <- corpus(sub_df)
# Here's where the process changes from previous ones.
# We're going to use spacy to parse the corpus, rather than quanteda
sub_prsd <- spacy_parse(sub_corpus, pos = TRUE, tag = TRUE)
# Now we'll convert that into a tokens object that quanteda understands.
# Note that we can choose the contatenator. Your choice may affect
# How you split the columns later.
sub_tokens <- as.tokens(sub_prsd, include_pos = "tag", concatenator = "_")
# We can see how many tokens we have.
ntoken(sub_tokens)
# And we can view them.
sub_tokens$BIO.G0.02.1.txt[1:20]
# To get rid of some of the tokens we don't want to count, we'll need to do
# some filtering. The first of these removes anything that doesn't
# have a letter A-Z after the underscore.
sub_tokens <- tokens_select(sub_tokens, "_[A-Z]", selection = "keep", valuetype = "regex", case_insensitive = T)
# This filters out any that don't have a word or digit character before the underscore.
sub_tokens <- tokens_select(sub_tokens, "\\W_", selection = "remove", valuetype = "regex")
# And lastly any tokens with a digit immediately before the underscore.
sub_tokens <- tokens_select(sub_tokens, "\\d_", selection = "remove", valuetype = "regex")
# Look at our new counts.
ntoken(sub_tokens)
# If we wanted to look at all nouns, we could do something like this.
tokens_select(sub_tokens, pattern = c("*_NN*"))
# Now let's make a dfm.
sub_dfm <- dfm(sub_tokens)
# To attach our metadata, we'll again use a different technique.
# Note that important metadata like the discipline is encoded into the file names.
# To see them, we can look at rownames() in our corpus object.
rownames(sub_corpus$documents)
# We can use regular expressions in the gsub() to retrieve the first three
# characters in the document name. See how the gsub() function works.
?gsub
# So in the pattern below we enclose \\w{3} in parentheses so we can return
# those with \\1.
gsub("(\\w{3})\\..*?$", "\\1", rownames(sub_corpus$documents))
# Then we just pass the results to docvars()
docvars(sub_dfm, "discipline_cat") <- gsub("(\\w{3})\\..*?$", "\\1", rownames(sub_corpus$documents))
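# Worked example of the pattern on a single file name (illustrative): the first
# three word characters are captured and everything from the first dot on is dropped.
gsub("(\\w{3})\\..*?$", "\\1", "BIO.G0.02.1.txt")  # returns "BIO"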
# Let's look at frequencies.
textstat_frequency(sub_dfm, n = 10)
# To generate a keyword list, we'll make an index.
bio_index <- docvars(sub_dfm, "discipline_cat") == "BIO"
# And finally generate a keyword list.
bio_keywords <- textstat_keyness(sub_dfm, bio_index, measure = "lr")
# Let's see what we have.
head(bio_keywords, 10)
# We can add an effect size column.
bio_keywords <- bio_keywords %>%
mutate(effect = log_ratio(n_target, n_reference))
# Let's see what we have.
View(bio_keywords)
# We may now want to split our token from our tag at the concatenator.
# This is why the choice of the concatenator can be important.
bio_keywords <- bio_keywords %>%
separate(col = feature, into = c("feature", "tag"), sep = "_")
# This is where using tagging can be powerful.
# Now we can filter our keywords by various criteria.
# Say we want all singular nouns with p < 0.01:
bio_select <- bio_keywords %>%
filter(tag == "nn" & p < 0.01)
# Or if we want ALL nouns we can use a logical grep argument:
bio_select <- bio_keywords %>%
filter(grepl("nn", tag) & p < 0.01)
# Or we may want to find only those with a positive G2:
bio_select <- bio_keywords %>%
filter(grepl("nn", tag) & p < 0.01 & G2 > 0)
# Or all parts of speech EXCEPT nouns:
bio_select <- bio_keywords %>%
filter(!grepl("nn", tag) & p < 0.01 & G2 > 0)
# To end our spacy session run spacy_finalize.
spacy_finalize()
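# --- Editor's note: the sourced functions/keyness_functions.R is not shown in
# this file, so the exact log_ratio() used above is unknown. The sketch below is
# only an assumed illustration of a common log-ratio effect size (normalised
# frequencies with a small constant to avoid log of zero), not the repo's code.
log_ratio_sketch <- function(n_target, n_reference,
                             target_total = sum(n_target),
                             reference_total = sum(n_reference)) {
  # normalise raw counts to relative frequencies per million tokens
  f_target <- (n_target / target_total) * 1e6
  f_reference <- (n_reference / reference_total) * 1e6
  # log2 ratio; the +0.5 keeps zero counts finite
  log2((f_target + 0.5) / (f_reference + 0.5))
}
# e.g. log_ratio_sketch(c(30, 2), c(5, 40))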
|
/R/05_keyness_pos.R
|
no_license
|
stelmanj/textstat_tools
|
R
| false | false | 5,628 |
r
|
library(quanteda)
library(spacyr)
library(tidyverse)
# For this script we're going to learn how to use part-of-speech tagging in R.
# This is a little tricky as it requires a Python installation,
# and an installation of the Python package spacy.
# The spacyr package is just a wrapper for Python.
# Once you have everything set up, you need to load the packages
# and initialize the spacy model.
# You can substitute other models including en_core_web_sm and
# en_core_web_lg. For these consult:
# https://spacy.io/usage
# You may or may not need to include the python_executable argument.
spacy_initialize(model = "en")
# If it can't find a Python executable, you'll need to add the
# python_executable argument. And you'll need to point to
# the virtual environment in which you've installed spacy.
# On a Mac it will look something like:
# python_executable = "/Users/MyName/.env/bin/python"
# To find the path, you can run the following:
# reticulate::py_config()
#
# That will return an output with executable paths at the end.
# If you installed spacy in a virtual environment, one of those paths
# will end with .env/bin/python
# Otherwise, you can set the path to your specific installation.
spacy_initialize(model = "en", python_executable = ".env/bin/python")
# Load the helper functions
source("functions/helper_functions.R")
# And our keyness functions
source("functions/keyness_functions.R")
# We're going to load in our corpus using a different technique this time.
# First we'll generate a list of file names that are in the folder we want.
# Note that full.names is set to TRUE to retrieve the full paths.
# If we also wanted to retrieve paths from subfolders, we'd set recursive to TRUE.
micusp_paths <- list.files("data/text_data/micusp_mini", full.names = TRUE, pattern = "*.txt")
# To save on processing, we're first going to keep only the English
# and Biology papers using str_detect()
paths_sub <- micusp_paths %>% str_detect("ENG|BIO") %>% keep(micusp_paths, .)
# Read in our files from the paths.
sub_df <- readtext_lite(paths_sub)
# And create a corpus object.
sub_corpus <- corpus(sub_df)
# Here's where the process changes from previous ones.
# We're going to use spacy to parse the corpus, rather than quanteda
sub_prsd <- spacy_parse(sub_corpus, pos = TRUE, tag = TRUE)
# Now we'll convert that into a tokens object that quanteda understands.
# Note that we can choose the concatenator. Your choice may affect
# how you split the columns later.
sub_tokens <- as.tokens(sub_prsd, include_pos = "tag", concatenator = "_")
# We can see how many tokens we have.
ntoken(sub_tokens)
# And we can view them.
sub_tokens$BIO.G0.02.1.txt[1:20]
# To get rid of some of the tokens we don't want to count, we'll need to do
# some filtering. The first of these removes anything that doesn't
# have a letter A-Z after the underscore.
sub_tokens <- tokens_select(sub_tokens, "_[A-Z]", selection = "keep", valuetype = "regex", case_insensitive = T)
# This filters out any that don't have a word or digit character before the underscore.
sub_tokens <- tokens_select(sub_tokens, "\\W_", selection = "remove", valuetype = "regex")
# And lastly any tokens with a digit immediately before the underscore.
sub_tokens <- tokens_select(sub_tokens, "\\d_", selection = "remove", valuetype = "regex")
# Look at our new counts.
ntoken(sub_tokens)
# If we wanted to look at all nouns, we could do something like this.
tokens_select(sub_tokens, pattern = c("*_NN*"))
# Now let's make a dfm.
sub_dfm <- dfm(sub_tokens)
# To attach our metadata, we'll again use a different technique.
# Note that important metadata like the discipline is encoded into the file names.
# To see them, we can look at rownames() in our corpus object.
rownames(sub_corpus$documents)
# We can use regular expressions in the gsub() to retrieve the first three
# characters in the document name. See how the gsub() function works.
?gsub
# So in the pattern below we enclose \\w{3} in parentheses so we can return
# those with \\1.
gsub("(\\w{3})\\..*?$", "\\1", rownames(sub_corpus$documents))
# Then we just pass the results to docvars()
docvars(sub_dfm, "discipline_cat") <- gsub("(\\w{3})\\..*?$", "\\1", rownames(sub_corpus$documents))
# Let's look at frequencies.
textstat_frequency(sub_dfm, n = 10)
# To generate a keyword list, we'll make an index.
bio_index <- docvars(sub_dfm, "discipline_cat") == "BIO"
# And finally generate a keyword list.
bio_keywords <- textstat_keyness(sub_dfm, bio_index, measure = "lr")
# Let's see what we have.
head(bio_keywords, 10)
# We can add an effect size column.
bio_keywords <- bio_keywords %>%
mutate(effect = log_ratio(n_target, n_reference))
# Let's see what we have.
View(bio_keywords)
# We may now want to split our token from our tag at the concatenator.
# This is why the choice of the concatenator can be important.
bio_keywords <- bio_keywords %>%
separate(col = feature, into = c("feature", "tag"), sep = "_")
# This is where using tagging can be powerful.
# Now we can filter our keywords by various criteria.
# Say we want all singular nouns with p < 0.01:
bio_select <- bio_keywords %>%
filter(tag == "nn" & p < 0.01)
# Or if we want ALL nouns we can use a logical grep argument:
bio_select <- bio_keywords %>%
filter(grepl("nn", tag) & p < 0.01)
# Or we may want to find only those with a positive G2:
bio_select <- bio_keywords %>%
filter(grepl("nn", tag) & p < 0.01 & G2 > 0)
# Or all parts of speech EXCEPT nouns:
bio_select <- bio_keywords %>%
filter(!grepl("nn", tag) & p < 0.01 & G2 > 0)
# To end our spacy session run spacy_finalize.
spacy_finalize()
|
source("preliminaries.R")
#prepare data
motorVehicles <- SCC[grepl("veh",SCC$Short.Name,ignore.case=TRUE),"SCC"]
plot5draw <- function() {
NEIbaltMV <- NEI[baltimore & NEI$SCC %in% motorVehicles,]
qplot(
year, Emissions,
data = NEIbaltMV,
geom=c("point","smooth"),method=lm
) + ggtitle("Emissions of Motor Vehicles in Baltimore")
}
print(plot5draw())
plot5 <- function() {
png("plot5.png")
print(plot5draw())
dev.off()
}
plot5()
|
/plot5.R
|
no_license
|
adrian0cg/exdata-project2
|
R
| false | false | 528 |
r
|
source("preliminaries.R")
#prepare data
motorVehicles <- SCC[grepl("veh",SCC$Short.Name,ignore.case=TRUE),"SCC"]
plot5draw <- function() {
NEIbaltMV <- NEI[baltimore & NEI$SCC %in% motorVehicles,]
qplot(
year, Emissions,
data = NEIbaltMV,
geom=c("point","smooth"),method=lm
) + ggtitle("Emissions of Motor Vehicles in Baltimore")
}
print(plot5draw())
plot5 <- function() {
png("plot5.png")
print(plot5draw())
dev.off()
}
plot5()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/addWaterYear.R
\name{calcWaterYear}
\alias{calcWaterYear}
\title{Extract WY from a date}
\usage{
calcWaterYear(dateVec)
}
\arguments{
\item{dateVec}{vector of dates as character ("YYYY-MM-DD"),
Date, or POSIXct. Numeric does not work.}
}
\value{
numeric vector indicating the water year
}
\description{
Determine the correct water year based on a calendar date.
}
\details{
This function calculates a water year based on the USGS
definition that a water year starts on October 1 of the year before,
and ends on September 30. For example, water year 2015 started on
2014-10-01 and ended on 2015-09-30.
}
\examples{
x <- seq(as.Date("2010-01-01"), as.Date("2010-12-31"), by = "month")
calcWaterYear(x)
y <- c("2010-01-01", "1994-02", "1980", "2009-11-01", NA)
calcWaterYear(y)
}
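% Editorial sketch (not part of the dataRetrieval package): the rule described
% in \details can be implemented directly, e.g.
%   water_year <- function(dateVec) {
%     d  <- as.Date(dateVec)
%     yr <- as.integer(format(d, "%Y"))
%     mo <- as.integer(format(d, "%m"))
%     ifelse(mo >= 10L, yr + 1L, yr)   # Oct-Dec belong to the next water year
%   }
%   water_year(c("2014-09-30", "2014-10-01"))  # 2014 2015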
|
/man/calcWaterYear.Rd
|
no_license
|
cran/dataRetrieval
|
R
| false | true | 887 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/addWaterYear.R
\name{calcWaterYear}
\alias{calcWaterYear}
\title{Extract WY from a date}
\usage{
calcWaterYear(dateVec)
}
\arguments{
\item{dateVec}{vector of dates as character ("YYYY-MM-DD"),
Date, or POSIXct. Numeric does not work.}
}
\value{
numeric vector indicating the water year
}
\description{
Determine the correct water year based on a calendar date.
}
\details{
This function calculates a water year based on the USGS
definition that a water year starts on October 1 of the year before,
and ends on September 30. For example, water year 2015 started on
2014-10-01 and ended on 2015-09-30.
}
\examples{
x <- seq(as.Date("2010-01-01"), as.Date("2010-12-31"), by = "month")
calcWaterYear(x)
y <- c("2010-01-01", "1994-02", "1980", "2009-11-01", NA)
calcWaterYear(y)
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/set-get-functions.R
\name{set_fontsize}
\alias{set_fontsize}
\title{Set font size}
\usage{
set_fontsize(size)
}
\arguments{
\item{size}{font size in pt}
}
\description{
Set font size
}
|
/man/set_fontsize.Rd
|
no_license
|
cperriard/pmreports2
|
R
| false | false | 272 |
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/set-get-functions.R
\name{set_fontsize}
\alias{set_fontsize}
\title{Set font size}
\usage{
set_fontsize(size)
}
\arguments{
\item{size}{font size in pt}
}
\description{
Set font size
}
|
library(xgboost)
library(Matrix)
set.seed(1234)
train <- read.csv("/Users/sophiasufas/Desktop/R/GitHub/DataScienceR/DataScienceR/kaggle/train.csv")
test <- read.csv("/Users/sophiasufas/Desktop/R/GitHub/DataScienceR/DataScienceR/kaggle/test.csv")
cat('Length: ', nrow(train))
##### Removing IDs
train$ID <- NULL
test.id <- test$ID
test$ID <- NULL
##### Extracting TARGET
train.y <- train$TARGET
train$TARGET <- NULL
##### 0 count per line
count0 <- function(x) {
return( sum(x == 0) )
}
train$n0 <- apply(train, 1, FUN=count0)
test$n0 <- apply(test, 1, FUN=count0)
##### Removing constant features
cat("\n## Removing the constants features.\n")
for (f in names(train)) {
if (length(unique(train[[f]])) == 1) {
# cat(f, "is constant in train. We delete it.\n")
train[[f]] <- NULL
test[[f]] <- NULL
}
}
##### Removing identical features
features_pair <- combn(names(train), 2, simplify = F)
toRemove <- c()
for(pair in features_pair) {
f1 <- pair[1]
f2 <- pair[2]
if (!(f1 %in% toRemove) & !(f2 %in% toRemove)) {
if (all(train[[f1]] == train[[f2]])) {
# cat(f1, "and", f2, "are equals.\n")
toRemove <- c(toRemove, f2)
}
}
}
feature.names <- setdiff(names(train), toRemove)
train$var38 <- log(train$var38)
test$var38 <- log(test$var38)
train <- train[, feature.names]
test <- test[, feature.names]
tc <- test
#---limit vars in test based on min and max vals of train
print('Setting min-max lims on test data')
for(f in colnames(train)){
lim <- min(train[,f])
test[test[,f]<lim,f] <- lim
lim <- max(train[,f])
test[test[,f]>lim,f] <- lim
}
#---
train$TARGET <- train.y
train <- sparse.model.matrix(TARGET ~ ., data = train)
dtrain <- xgb.DMatrix(data=train, label=train.y)
watchlist <- list(train=dtrain)
param <- list( objective = "binary:logistic",
booster = "gbtree",
eval_metric = "auc",
eta = 0.0202048,
max_depth = 5,
subsample = 0.6815,
colsample_bytree = 0.701
)
clf <- xgb.train( params = param,
data = dtrain,
nrounds = 560,
verbose = 1,
watchlist = watchlist,
maximize = FALSE
)
#######actual variables
feature.names
test$TARGET <- -1
test <- sparse.model.matrix(TARGET ~ ., data = test)
preds <- predict(clf, test)
pred <-predict(clf,train)
AUC<-function(actual,predicted)
{
library(pROC)
auc<-auc(as.numeric(actual),as.numeric(predicted))
auc
}
AUC(train.y,pred) ##AUC
nv = tc['num_var33']+tc['saldo_medio_var33_ult3']+tc['saldo_medio_var44_hace2']+tc['saldo_medio_var44_hace3']+
tc['saldo_medio_var33_ult1']+tc['saldo_medio_var44_ult1']
preds[nv > 0] = 0
preds[tc['var15'] < 23] = 0
preds[tc['saldo_medio_var5_hace2'] > 160000] = 0
preds[tc['saldo_var33'] > 0] = 0
preds[tc['var38'] > 3988596] = 0
preds[tc['var21'] > 7500] = 0
preds[tc['num_var30'] > 9] = 0
preds[tc['num_var13_0'] > 6] = 0
preds[tc['num_var33_0'] > 0] = 0
preds[tc['imp_ent_var16_ult1'] > 51003] = 0
preds[tc['imp_op_var39_comer_ult3'] > 13184] = 0
preds[tc['saldo_medio_var5_ult3'] > 108251] = 0
preds[tc['num_var37_0'] > 45] = 0
preds[tc['saldo_var5'] > 137615] = 0
preds[tc['saldo_var8'] > 60099] = 0
preds[(tc['var15']+tc['num_var45_hace3']+tc['num_var45_ult3']+tc['var36']) <= 24] = 0
preds[tc['saldo_var14'] > 19053.78] = 0
preds[tc['saldo_var17'] > 288188.97] = 0
preds[tc['saldo_var26'] > 10381.29] = 0
preds[tc['num_var13_largo_0'] > 3] = 0
preds[tc['imp_op_var40_comer_ult1'] > 3639.87] = 0
preds[tc['num_var5_0'] > 6] = 0
preds[tc['saldo_medio_var13_largo_ult1'] > 0] = 0
preds[tc['num_meses_var13_largo_ult3'] > 0] = 0
preds[tc['num_var20_0'] > 0] = 0
preds[tc['saldo_var13_largo'] > 150000] = 0
preds[tc['num_var17_0'] > 21] = 0
preds[tc['num_var24_0'] > 3] = 0
preds[tc['num_var26_0'] > 12] = 0
sum(preds==0)
A <- c('delta_imp_reemb_var33_1y3', 'ind_var18_0', 'num_trasp_var33_in_hace3', 'num_var6_0', 'ind_var13_medio', 'num_var34_0', 'num_var18_0', 'num_meses_var13_medio_ult3', 'num_var29', 'delta_num_reemb_var33_1y3', 'ind_var29', 'num_trasp_var33_out_ult1', 'ind_var13_medio_0', 'num_var6', 'delta_num_trasp_var17_out_1y3', 'delta_imp_amort_var18_1y3', 'imp_reemb_var33_ult1', 'delta_num_trasp_var33_out_1y3', 'num_reemb_var17_hace3', 'delta_imp_amort_var34_1y3', 'ind_var33_0', 'ind_var34_0', 'saldo_medio_var29_hace3', 'num_var13_medio', 'ind_var18', 'num_var18', 'num_reemb_var33_ult1', 'ind_var7_emit_ult1', 'ind_var6_0', 'ind_var34', 'num_var34', 'ind_var33', 'imp_reemb_var17_hace3', 'ind_var29_0', 'num_var29_0', 'num_trasp_var17_out_ult1', 'delta_imp_trasp_var33_out_1y3', 'ind_var6', 'delta_imp_trasp_var17_out_1y3')
for(i in length(A)){
preds[tc[A[i]] > 0] = 0
}
sum(preds==0)
sum(preds<0.001 & preds > 0 )
sum(preds<0.00075 & preds > 0)
sum(preds<0.0005 & preds > 0)
preds[preds<0.001] = 0
# BAD
# num_var35 = tc['num_var35']
# saldo_var30 = tc['saldo_var30']
# preds[tc['num_var39_0'] > 12] = 0
# preds[tc['num_var41_0'] > 12] = 0
# preds[tc['saldo_var12'] > 506414] = 0
# preds[tc['saldo_var13'] > 309000] = 0
# preds[tc['saldo_var24'] > 506413.14] = 0
# preds[tc['imp_op_var39_efect_ult3'] > 14010] = 0
# No improvement
# num_var1 = tc['num_var1']
# preds[tc['saldo_var18'] > 0] = 0
# preds[tc['imp_op_var41_comer_ult3'] > 13183.23] = 0
submission <- data.frame(ID=test.id, TARGET=preds)
cat("saving the submission file\n")
write.csv(submission, "submission.csv", row.names = F)
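# Editor's note: nrounds = 560 above is hard-coded. One way to pick it instead
# is cross-validation with early stopping on the same DMatrix and parameters.
# The commented sketch below is a suggestion, not part of the original pipeline:
# cv <- xgb.cv(params = param,
#              data = dtrain,
#              nrounds = 2000,
#              nfold = 5,
#              early_stopping_rounds = 50,
#              verbose = 0)
# cv$best_iteration  # data-driven choice for nrounds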
|
/kaggle/kaggle.R
|
no_license
|
iamkailan/2018_spring_CSX
|
R
| false | false | 5,646 |
r
|
library(xgboost)
library(Matrix)
set.seed(1234)
train <- read.csv("/Users/sophiasufas/Desktop/R/GitHub/DataScienceR/DataScienceR/kaggle/train.csv")
test <- read.csv("/Users/sophiasufas/Desktop/R/GitHub/DataScienceR/DataScienceR/kaggle/test.csv")
cat('Length: ', nrow(train))
##### Removing IDs
train$ID <- NULL
test.id <- test$ID
test$ID <- NULL
##### Extracting TARGET
train.y <- train$TARGET
train$TARGET <- NULL
##### 0 count per line
count0 <- function(x) {
return( sum(x == 0) )
}
train$n0 <- apply(train, 1, FUN=count0)
test$n0 <- apply(test, 1, FUN=count0)
##### Removing constant features
cat("\n## Removing the constants features.\n")
for (f in names(train)) {
if (length(unique(train[[f]])) == 1) {
# cat(f, "is constant in train. We delete it.\n")
train[[f]] <- NULL
test[[f]] <- NULL
}
}
##### Removing identical features
features_pair <- combn(names(train), 2, simplify = F)
toRemove <- c()
for(pair in features_pair) {
f1 <- pair[1]
f2 <- pair[2]
if (!(f1 %in% toRemove) & !(f2 %in% toRemove)) {
if (all(train[[f1]] == train[[f2]])) {
# cat(f1, "and", f2, "are equals.\n")
toRemove <- c(toRemove, f2)
}
}
}
feature.names <- setdiff(names(train), toRemove)
train$var38 <- log(train$var38)
test$var38 <- log(test$var38)
train <- train[, feature.names]
test <- test[, feature.names]
tc <- test
#---limit vars in test based on min and max vals of train
print('Setting min-max lims on test data')
for(f in colnames(train)){
lim <- min(train[,f])
test[test[,f]<lim,f] <- lim
lim <- max(train[,f])
test[test[,f]>lim,f] <- lim
}
#---
train$TARGET <- train.y
train <- sparse.model.matrix(TARGET ~ ., data = train)
dtrain <- xgb.DMatrix(data=train, label=train.y)
watchlist <- list(train=dtrain)
param <- list( objective = "binary:logistic",
booster = "gbtree",
eval_metric = "auc",
eta = 0.0202048,
max_depth = 5,
subsample = 0.6815,
colsample_bytree = 0.701
)
clf <- xgb.train( params = param,
data = dtrain,
nrounds = 560,
verbose = 1,
watchlist = watchlist,
maximize = FALSE
)
#######actual variables
feature.names
test$TARGET <- -1
test <- sparse.model.matrix(TARGET ~ ., data = test)
preds <- predict(clf, test)
pred <-predict(clf,train)
AUC<-function(actual,predicted)
{
library(pROC)
auc<-auc(as.numeric(actual),as.numeric(predicted))
auc
}
AUC(train.y,pred) ##AUC
nv = tc['num_var33']+tc['saldo_medio_var33_ult3']+tc['saldo_medio_var44_hace2']+tc['saldo_medio_var44_hace3']+
tc['saldo_medio_var33_ult1']+tc['saldo_medio_var44_ult1']
preds[nv > 0] = 0
preds[tc['var15'] < 23] = 0
preds[tc['saldo_medio_var5_hace2'] > 160000] = 0
preds[tc['saldo_var33'] > 0] = 0
preds[tc['var38'] > 3988596] = 0
preds[tc['var21'] > 7500] = 0
preds[tc['num_var30'] > 9] = 0
preds[tc['num_var13_0'] > 6] = 0
preds[tc['num_var33_0'] > 0] = 0
preds[tc['imp_ent_var16_ult1'] > 51003] = 0
preds[tc['imp_op_var39_comer_ult3'] > 13184] = 0
preds[tc['saldo_medio_var5_ult3'] > 108251] = 0
preds[tc['num_var37_0'] > 45] = 0
preds[tc['saldo_var5'] > 137615] = 0
preds[tc['saldo_var8'] > 60099] = 0
preds[(tc['var15']+tc['num_var45_hace3']+tc['num_var45_ult3']+tc['var36']) <= 24] = 0
preds[tc['saldo_var14'] > 19053.78] = 0
preds[tc['saldo_var17'] > 288188.97] = 0
preds[tc['saldo_var26'] > 10381.29] = 0
preds[tc['num_var13_largo_0'] > 3] = 0
preds[tc['imp_op_var40_comer_ult1'] > 3639.87] = 0
preds[tc['num_var5_0'] > 6] = 0
preds[tc['saldo_medio_var13_largo_ult1'] > 0] = 0
preds[tc['num_meses_var13_largo_ult3'] > 0] = 0
preds[tc['num_var20_0'] > 0] = 0
preds[tc['saldo_var13_largo'] > 150000] = 0
preds[tc['num_var17_0'] > 21] = 0
preds[tc['num_var24_0'] > 3] = 0
preds[tc['num_var26_0'] > 12] = 0
sum(preds==0)
A <- c('delta_imp_reemb_var33_1y3', 'ind_var18_0', 'num_trasp_var33_in_hace3', 'num_var6_0', 'ind_var13_medio', 'num_var34_0', 'num_var18_0', 'num_meses_var13_medio_ult3', 'num_var29', 'delta_num_reemb_var33_1y3', 'ind_var29', 'num_trasp_var33_out_ult1', 'ind_var13_medio_0', 'num_var6', 'delta_num_trasp_var17_out_1y3', 'delta_imp_amort_var18_1y3', 'imp_reemb_var33_ult1', 'delta_num_trasp_var33_out_1y3', 'num_reemb_var17_hace3', 'delta_imp_amort_var34_1y3', 'ind_var33_0', 'ind_var34_0', 'saldo_medio_var29_hace3', 'num_var13_medio', 'ind_var18', 'num_var18', 'num_reemb_var33_ult1', 'ind_var7_emit_ult1', 'ind_var6_0', 'ind_var34', 'num_var34', 'ind_var33', 'imp_reemb_var17_hace3', 'ind_var29_0', 'num_var29_0', 'num_trasp_var17_out_ult1', 'delta_imp_trasp_var33_out_1y3', 'ind_var6', 'delta_imp_trasp_var17_out_1y3')
for(i in length(A)){
preds[tc[A[i]] > 0] = 0
}
sum(preds==0)
sum(preds<0.001 & preds > 0 )
sum(preds<0.00075 & preds > 0)
sum(preds<0.0005 & preds > 0)
preds[preds<0.001] = 0
# BAD
# num_var35 = tc['num_var35']
# saldo_var30 = tc['saldo_var30']
# preds[tc['num_var39_0'] > 12] = 0
# preds[tc['num_var41_0'] > 12] = 0
# preds[tc['saldo_var12'] > 506414] = 0
# preds[tc['saldo_var13'] > 309000] = 0
# preds[tc['saldo_var24'] > 506413.14] = 0
# preds[tc['imp_op_var39_efect_ult3'] > 14010] = 0
# No improvement
# num_var1 = tc['num_var1']
# preds[tc['saldo_var18'] > 0] = 0
# preds[tc['imp_op_var41_comer_ult3'] > 13183.23] = 0
submission <- data.frame(ID=test.id, TARGET=preds)
cat("saving the submission file\n")
write.csv(submission, "submission.csv", row.names = F)
|
####create an answer key####
##setup
dat = read.csv("CVOE 10_25_21.csv")
pure = subset(dat,
dat$block_type == "oe" | dat$block_type == "cv")
switch = subset(dat,
dat$block_type == "alt" | dat$block_type == "shuf")
library(tidyr)
library(magicfor)
for (i in dat$STIM){
num.chars = nchar(i)
}
####switch trials####
string1 = as.character(switch$STIM)
starts = seq(1, num.chars, by = 2)
##make letter and number columns
switch$letter = substr(string1, 0, 1)
switch$number = substr(string1, 4, 4)
##subset based on cv or oe
switch.cv = subset(switch,
switch$CVOE == "CV")
switch.oe = subset(switch,
switch$CVOE == "OE")
##denote whether letter is consonant or vowel
list.vowel = c("A", "E", "I", "O", "U")
list.con = c("D", "J", "H", "P", "S")
##denote whether number is odd or even
list.even = c("0", "2", "4", "6", "8")
list.odd = c("1", "3", "5", "7", "9")
##make c or v column for switch.cv
magic_for(print, silent = TRUE)
for (value in switch.cv$letter) {
print(match(value, list.vowel))
}
cv = magic_result_as_vector()
switch.cv$c_or_v = cv
##make o or e column for switch.oe
for (value in switch.oe$number) {
print(match(value, list.even))
}
oe = magic_result_as_vector()
switch.oe$o_or_e = oe
oe = as.character(oe)
##get rid of the NAs (oe)
switch.oe["o_or_e"][is.na(switch.oe["o_or_e"])] = "o"
##make 1, 2, 3, 4, and 5 into e
switch.oe$o_or_e[switch.oe$o_or_e == "1"] = "e"
switch.oe$o_or_e[switch.oe$o_or_e == "2"] = "e"
switch.oe$o_or_e[switch.oe$o_or_e == "3"] = "e"
switch.oe$o_or_e[switch.oe$o_or_e == "4"] = "e"
switch.oe$o_or_e[switch.oe$o_or_e == "5"] = "e"
table(switch.oe$o_or_e)
##get rid of the NAs (cv)
switch.cv["c_or_v"][is.na(switch.cv["c_or_v"])] = "c"
##make 1, 2, 3, 4, and 5 into V
switch.cv$c_or_v[switch.cv$c_or_v == "1"] = "v"
switch.cv$c_or_v[switch.cv$c_or_v == "2"] = "v"
switch.cv$c_or_v[switch.cv$c_or_v == "3"] = "v"
switch.cv$c_or_v[switch.cv$c_or_v == "4"] = "v"
switch.cv$c_or_v[switch.cv$c_or_v == "5"] = "v"
table(switch.cv$c_or_v)
##convert o e column to o's and e's
switch.oe$response2 = factor(switch.oe$Response)
switch.oe$response2 = as.numeric(switch.oe$response2)
switch.oe$response2 = as.character(switch.oe$response2)
switch.oe$response2[switch.oe$response2 == "1"] = "e"
switch.oe$response2[switch.oe$response2 == "2"] = "o"
#switch.oe$response2[switch.oe$response2 == "1"] = NA
##make answer key columns for oe
switch.oe$match2 = switch.oe$response2 == switch.oe$o_or_e
switch.oe$score2 = as.numeric(switch.oe$match2)
##convert p q column to c's and v's
switch.cv$response2 = factor(switch.cv$Response)
switch.cv$response2 = as.numeric(switch.cv$response2)
switch.cv$response2 = as.character(switch.cv$response2)
switch.cv$response2[switch.cv$response2 == "1"] = "v"
switch.cv$response2[switch.cv$response2 == "2"] = "c"
##make answer key columns for cv
switch.cv$match2 = switch.cv$response2 == switch.cv$c_or_v
switch.cv$score2 = as.numeric(switch.cv$match2)
####pure trials####
##make letter and number columns
string2 = as.character(pure$STIM)
pure$letter = substr(string2, 0, 1)
pure$number = substr(string2, 4, 4)
##subset based on cv or oe
pure.cv = subset(pure,
pure$CVOE == "CV")
pure.oe = subset(pure,
pure$CVOE == "OE")
##make c or v column for pure.cv
magic_for(print, silent = TRUE)
for (value in pure.cv$letter) {
print(match(value, list.vowel))
}
cv2 = magic_result_as_vector()
pure.cv$c_or_v = cv2
##make o or e column for pure.oe
magic_for(print, silent = TRUE)
for (value in pure.oe$number) {
print(match(value, list.even))
}
oe2 = magic_result_as_vector()
pure.oe$o_or_e = oe2
##get rid of the NAs (oe)
pure.oe["o_or_e"][is.na(pure.oe["o_or_e"])] = "o"
##make 1, 2, 3, 4, and 5 into e
pure.oe$o_or_e[pure.oe$o_or_e == "1"] = "e"
pure.oe$o_or_e[pure.oe$o_or_e == "2"] = "e"
pure.oe$o_or_e[pure.oe$o_or_e == "3"] = "e"
pure.oe$o_or_e[pure.oe$o_or_e == "4"] = "e"
pure.oe$o_or_e[pure.oe$o_or_e == "5"] = "e"
table(pure.oe$o_or_e)
##get rid of the NAs (cv)
pure.cv["c_or_v"][is.na(pure.cv["c_or_v"])] = "c"
##make 1, 2, 3, 4, and 5 into v
pure.cv$c_or_v[pure.cv$c_or_v == "1"] = "v"
pure.cv$c_or_v[pure.cv$c_or_v == "2"] = "v"
pure.cv$c_or_v[pure.cv$c_or_v == "3"] = "v"
pure.cv$c_or_v[pure.cv$c_or_v == "4"] = "v"
pure.cv$c_or_v[pure.cv$c_or_v == "5"] = "v"
table(pure.cv$c_or_v)
##convert p q column to c's and v's
pure.cv$response2 = factor(pure.cv$Response)
pure.cv$response2 = as.numeric(pure.cv$response2)
pure.cv$response2 = as.character(pure.cv$response2)
pure.cv$response2[pure.cv$response2 == "1"] = "v"
pure.cv$response2[pure.cv$response2 == "2"] = "c"
##make answer key columns for cv
pure.cv$match2 = pure.cv$response2 == pure.cv$c_or_v
pure.cv$score2 = as.numeric(pure.cv$match2)
##convert p q column to o's and e's
pure.oe$response2 = factor(pure.oe$Response)
pure.oe$response2 = as.numeric(pure.oe$response2)
pure.oe$response2 = as.character(pure.oe$response2)
pure.oe$response2[pure.oe$response2 == "1"] = "e"
pure.oe$response2[pure.oe$response2 == "2"] = "o"
##make answer key columns for oe
pure.oe$match2 = pure.oe$response2 == pure.oe$o_or_e
pure.oe$score2 = as.numeric(pure.oe$match2)
####put everything back together####
##match column names
colnames(pure.cv)[16] = "key2"
colnames(pure.oe)[16] = "key2"
colnames(switch.cv)[16] = "key2"
colnames(switch.oe)[16] = "key2"
final = rbind(pure.cv, pure.oe, switch.cv, switch.oe)
#write.csv(final, file = "scored 10_25_21.csv", row.names = F)
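# Editor's note: a sketch of a simpler, vectorised alternative to the
# magicfor/match() loops above, using the list.vowel and list.even vectors
# already defined; shown for illustration only, not used in the scoring.
demo_letters = c("A", "D", "E", "J")
ifelse(demo_letters %in% list.vowel, "v", "c")  # "v" "c" "v" "c"
demo_numbers = c("1", "2", "7", "8")
ifelse(demo_numbers %in% list.even, "e", "o")   # "o" "e" "o" "e"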
|
/CVOE/1 Analysis/Scripts/rescore data.R
|
no_license
|
npm27/Spring-2019-Projects
|
R
| false | false | 5,575 |
r
|
####create an answer key####
##setup
dat = read.csv("CVOE 10_25_21.csv")
pure = subset(dat,
dat$block_type == "oe" | dat$block_type == "cv")
switch = subset(dat,
dat$block_type == "alt" | dat$block_type == "shuf")
library(tidyr)
library(magicfor)
for (i in dat$STIM){
num.chars = nchar(i)
}
####switch trials####
string1 = as.character(switch$STIM)
starts = seq(1, num.chars, by = 2)
##make letter and number columns
switch$letter = substr(string1, 0, 1)
switch$number = substr(string1, 4, 4)
##subset based on cv or oe
switch.cv = subset(switch,
switch$CVOE == "CV")
switch.oe = subset(switch,
switch$CVOE == "OE")
##denote whether letter is consonant or vowel
list.vowel = c("A", "E", "I", "O", "U")
list.con = c("D", "J", "H", "P", "S")
##denote whether number is odd or even
list.even = c("0", "2", "4", "6", "8")
list.odd = c("1", "3", "5", "7", "9")
##make c or v column for switch.cv
magic_for(print, silent = TRUE)
for (value in switch.cv$letter) {
print(match(value, list.vowel))
}
cv = magic_result_as_vector()
switch.cv$c_or_v = cv
##make o or e column for switch.oe
for (value in switch.oe$number) {
print(match(value, list.even))
}
oe = magic_result_as_vector()
switch.oe$o_or_e = oe
oe = as.character(oe)
##get rid of the NAs (oe)
switch.oe["o_or_e"][is.na(switch.oe["o_or_e"])] = "o"
##make 1, 2, 3, 4, and 5 into e
switch.oe$o_or_e[switch.oe$o_or_e == "1"] = "e"
switch.oe$o_or_e[switch.oe$o_or_e == "2"] = "e"
switch.oe$o_or_e[switch.oe$o_or_e == "3"] = "e"
switch.oe$o_or_e[switch.oe$o_or_e == "4"] = "e"
switch.oe$o_or_e[switch.oe$o_or_e == "5"] = "e"
table(switch.oe$o_or_e)
##get rid of the NAs (cv)
switch.cv["c_or_v"][is.na(switch.cv["c_or_v"])] = "c"
##make 1, 2, 3, 4, and 5 into V
switch.cv$c_or_v[switch.cv$c_or_v == "1"] = "v"
switch.cv$c_or_v[switch.cv$c_or_v == "2"] = "v"
switch.cv$c_or_v[switch.cv$c_or_v == "3"] = "v"
switch.cv$c_or_v[switch.cv$c_or_v == "4"] = "v"
switch.cv$c_or_v[switch.cv$c_or_v == "5"] = "v"
table(switch.cv$c_or_v)
##convert o e column to o's and e's
switch.oe$response2 = factor(switch.oe$Response)
switch.oe$response2 = as.numeric(switch.oe$response2)
switch.oe$response2 = as.character(switch.oe$response2)
switch.oe$response2[switch.oe$response2 == "1"] = "e"
switch.oe$response2[switch.oe$response2 == "2"] = "o"
#switch.oe$response2[switch.oe$response2 == "1"] = NA
##make answer key columns for oe
switch.oe$match2 = switch.oe$response2 == switch.oe$o_or_e
switch.oe$score2 = as.numeric(switch.oe$match2)
##convert p q column to c's and v's
switch.cv$response2 = factor(switch.cv$Response)
switch.cv$response2 = as.numeric(switch.cv$response2)
switch.cv$response2 = as.character(switch.cv$response2)
switch.cv$response2[switch.cv$response2 == "1"] = "v"
switch.cv$response2[switch.cv$response2 == "2"] = "c"
##make answer key columns for cv
switch.cv$match2 = switch.cv$response2 == switch.cv$c_or_v
switch.cv$score2 = as.numeric(switch.cv$match2)
####pure trials####
##make letter and number columns
string2 = as.character(pure$STIM)
pure$letter = substr(string2, 0, 1)
pure$number = substr(string2, 4, 4)
##subset based on cv or oe
pure.cv = subset(pure,
pure$CVOE == "CV")
pure.oe = subset(pure,
pure$CVOE == "OE")
##make c or v column for pure.cv
magic_for(print, silent = TRUE)
for (value in pure.cv$letter) {
print(match(value, list.vowel))
}
cv2 = magic_result_as_vector()
pure.cv$c_or_v = cv2
##make o or e column for pure.oe
magic_for(print, silent = TRUE)
for (value in pure.oe$number) {
print(match(value, list.even))
}
oe2 = magic_result_as_vector()
pure.oe$o_or_e = oe2
##get rid of the NAs (oe)
pure.oe["o_or_e"][is.na(pure.oe["o_or_e"])] = "o"
##make 1, 2, 3, 4, and 5 into e
pure.oe$o_or_e[pure.oe$o_or_e == "1"] = "e"
pure.oe$o_or_e[pure.oe$o_or_e == "2"] = "e"
pure.oe$o_or_e[pure.oe$o_or_e == "3"] = "e"
pure.oe$o_or_e[pure.oe$o_or_e == "4"] = "e"
pure.oe$o_or_e[pure.oe$o_or_e == "5"] = "e"
table(pure.oe$o_or_e)
##get rid of the NAs (cv)
pure.cv["c_or_v"][is.na(pure.cv["c_or_v"])] = "c"
##make 1, 2, 3, 4, and 5 into v
pure.cv$c_or_v[pure.cv$c_or_v == "1"] = "v"
pure.cv$c_or_v[pure.cv$c_or_v == "2"] = "v"
pure.cv$c_or_v[pure.cv$c_or_v == "3"] = "v"
pure.cv$c_or_v[pure.cv$c_or_v == "4"] = "v"
pure.cv$c_or_v[pure.cv$c_or_v == "5"] = "v"
table(pure.cv$c_or_v)
##convert p q column to c's and v's
pure.cv$response2 = factor(pure.cv$Response)
pure.cv$response2 = as.numeric(pure.cv$response2)
pure.cv$response2 = as.character(pure.cv$response2)
pure.cv$response2[pure.cv$response2 == "1"] = "v"
pure.cv$response2[pure.cv$response2 == "2"] = "c"
##make answer key columns for cv
pure.cv$match2 = pure.cv$response2 == pure.cv$c_or_v
pure.cv$score2 = as.numeric(pure.cv$match2)
##convert p q column to o's and e's
pure.oe$response2 = factor(pure.oe$Response)
pure.oe$response2 = as.numeric(pure.oe$response2)
pure.oe$response2 = as.character(pure.oe$response2)
pure.oe$response2[pure.oe$response2 == "1"] = "e"
pure.oe$response2[pure.oe$response2 == "2"] = "o"
##make answer key columns for oe
pure.oe$match2 = pure.oe$response2 == pure.oe$o_or_e
pure.oe$score2 = as.numeric(pure.oe$match2)
####put everything back together####
##match column names
colnames(pure.cv)[16] = "key2"
colnames(pure.oe)[16] = "key2"
colnames(switch.cv)[16] = "key2"
colnames(switch.oe)[16] = "key2"
final = rbind(pure.cv, pure.oe, switch.cv, switch.oe)
#write.csv(final, file = "scored 10_25_21.csv", row.names = F)
|
library(dygraphs)
library(shiny)
library(shinydashboard)
library(shinyWidgets)
library(readr)
library(xts)
library(TTR)
library(Hmisc)
library(archivist)
library(devtools)
library(archivist.github)
library(archivist)
library(bit64)
library(markdown)
# Load the ARS data
IngresoMedioARS <- read_csv("Data/ingreso_medio_pesos.csv")
IngresoMedioARS <- as.xts(IngresoMedioARS[,-1], as.Date(IngresoMedioARS$X1, "%d/%m/%Y"))
IngresoMedioARS <- na.locf(IngresoMedioARS)
IngresoMedioARS <- na.locf(IngresoMedioARS, fromLast = T)
# Load the USD data
IngresoMedioUSD <- read_csv("Data/ingreso_medio_dolares.csv")
IngresoMedioUSD <- as.xts(IngresoMedioUSD[,-1], as.Date(IngresoMedioUSD$X1, "%d/%m/%Y"))
IngresoMedioUSD <- na.locf(IngresoMedioUSD)
IngresoMedioUSD <- na.locf(IngresoMedioUSD, fromLast = T)
# Load the Norm data
IngresoMedioNorm <- read_csv("Data/ingreso_medio_base_100_2006.csv")
IngresoMedioNorm <- as.xts(IngresoMedioNorm[,-1], as.Date(IngresoMedioNorm$Date, "%d/%m/%Y"))
IngresoMedioNorm <- na.locf(IngresoMedioNorm)
IngresoMedioNorm <- na.locf(IngresoMedioNorm, fromLast = T)
############################# API
# Run the UI
ui <- dashboardPage(
dashboardHeader(title = "EPH - Ocupados"),
dashboardSidebar(sidebarMenu(
menuItem("Ingresos Medios", tabName = "tab", icon = icon("chart-line")),
# menuItem("Ingresos Medios - Escala Log", tabName = "tab1", icon = icon("chart-line")),
menuItem("Nota Metodolรณgica", tabName = "tab2", icon = icon("table")),
menuItem("Descargar Tabla", tabName = "tab4", icon = icon("download"))),
selectInput("variable", "Categoria:",
c("Pesos Corrientes" = "IM",
"Dรณlares Base 100 = Q3 2006" = "IMN",
"Dรณlares Corrientes" = "IMUSD")),
pickerInput(inputId = "Aglomerado",
label = "Selecciona el Aglomerado",
choices = c(colnames(IngresoMedioARS)),
selected = "BAHIA BLANCA - CERRI",options = list(`actions-box` = TRUE),multiple = T)),
dashboardBody(
tabItems(
tabItem(tabName = "tab",
fluidRow(
box(dygraphOutput("graph"), width=60),
box(textOutput("legendDivID"),
title = "Legend", collapsible = TRUE,
width=55)
)
),
# tabItem(tabName = "tab1",
# fluidRow(
# box(dygraphOutput("graph1"), width=55),
# box(textOutput("legendDivID1"),
# title = "Legend", collapsible = TRUE, width=55)
# )
# ),
tabItem(tabName = "tab2",
fluidRow(
column(12,
includeMarkdown("Data/readme.Rmd")
)
)
),
tabItem(tabName = "tab4",
fluidRow(
column(12,
downloadLink('downloadData', 'Presione Aqui')
)
)
)
)
)
)
# Run the server
server <- function(input, output) {
library(archivist)
library(dplyr)
output$graph <- renderDygraph({
if(input$variable == "IMUSD"){
IngresoMedio <- IngresoMedioUSD[,c(input$Aglomerado)]
TITLE = "Ingresos Medios en USD"}
else if(input$variable == "IMN"){
TITLE = "Ingresos Medios en USD Normalizados, Base 100 = Q3 2006"
IngresoMedio <- IngresoMedioNorm[,c(input$Aglomerado)]
} else {
TITLE = "Ingresos Medios en Pesos Corrientes"
IngresoMedio <- IngresoMedioARS[,c(input$Aglomerado)]
}
withProgress(message = "Loading...", {
dygraph(IngresoMedio, main = TITLE) %>%
dyRangeSelector() %>%
dyLegend(labelsDiv = "legendDivID")
})
})
# output$graph1 <- renderDygraph({
#
# if(input$variable == "IMUSD"){
# TITLE = "Ingresos Medios en Dolares"
# IngresoMedio <- IngresoMedio/133}
# else if(input$variable == "IMN"){
# TITLE = "Ingresos Medios Normalizados"
# IngresoMedio <- 100 * cumprod(1 + ROC(IngresoMedio, type = "discrete")[-1, ])
# } else {
# TITLE = "Ingresos Medios"
# IngresoMedio <- IngresoMedio
# }
#
# withProgress(message = "Loading...", {
#
# dygraph(log(IngresoMedio), main = TITLE, ylab = "Valor") %>%
# dyOptions(colors = RColorBrewer::brewer.pal(32, "Set2")) %>%
# dyRangeSelector() %>%
# dyLegend(labelsDiv = "legendDivID1")
# })
# })
output$table <- renderDataTable({(IngresoMedio)})
output$downloadData <- downloadHandler(
filename = function() {
if(input$variable == "IMUSD"){
TITLE = "Ingresos Medios en Dolares "}
else if(input$variable == "IMN"){
TITLE = "Ingresos Medios Normalizados "
} else {
TITLE = "Ingresos Medios "
}
paste(TITLE, Sys.Date(), '.csv', sep='')
},
content = function(con) {
if(input$variable == "IMUSD"){
IngresoMedio <- IngresoMedioUSD}
else if(input$variable == "IMN"){
IngresoMedio <- IngresoMedioNorm
} else {
IngresoMedio <- IngresoMedioARS
}
write.csv(as.data.frame(IngresoMedio), con)
}
)
}
# Run the API
shinyApp(ui, server)
# Compare the different provinces for a given date.
# Mean, std, Min, Max, Percentiles.
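# Editor's note: the forward-then-backward na.locf() pattern repeated for the
# three CSVs above could be wrapped in one small helper (assumes the xts/zoo
# na.locf already loaded); a sketch, not part of the original app:
fill_both_ways <- function(x) {
  x <- na.locf(x)              # carry the last observation forward
  na.locf(x, fromLast = TRUE)  # then fill any leading NAs backwards
}
# e.g. IngresoMedioARS <- fill_both_ways(IngresoMedioARS)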
|
/IngresoMedio/app.R
|
no_license
|
praies/EPA2020
|
R
| false | false | 5,428 |
r
|
library(dygraphs)
library(shiny)
library(shinydashboard)
library(shinyWidgets)
library(readr)
library(xts)
library(TTR)
library(Hmisc)
library(archivist)
library(devtools)
library(archivist.github)
library(archivist)
library(bit64)
library(markdown)
# Load the ARS data
IngresoMedioARS <- read_csv("Data/ingreso_medio_pesos.csv")
IngresoMedioARS <- as.xts(IngresoMedioARS[,-1], as.Date(IngresoMedioARS$X1, "%d/%m/%Y"))
IngresoMedioARS <- na.locf(IngresoMedioARS)
IngresoMedioARS <- na.locf(IngresoMedioARS, fromLast = T)
# Load the USD data
IngresoMedioUSD <- read_csv("Data/ingreso_medio_dolares.csv")
IngresoMedioUSD <- as.xts(IngresoMedioUSD[,-1], as.Date(IngresoMedioUSD$X1, "%d/%m/%Y"))
IngresoMedioUSD <- na.locf(IngresoMedioUSD)
IngresoMedioUSD <- na.locf(IngresoMedioUSD, fromLast = T)
# Load the Norm data
IngresoMedioNorm <- read_csv("Data/ingreso_medio_base_100_2006.csv")
IngresoMedioNorm <- as.xts(IngresoMedioNorm[,-1], as.Date(IngresoMedioNorm$Date, "%d/%m/%Y"))
IngresoMedioNorm <- na.locf(IngresoMedioNorm)
IngresoMedioNorm <- na.locf(IngresoMedioNorm, fromLast = T)
############################# API
# Run the UI
ui <- dashboardPage(
dashboardHeader(title = "EPH - Ocupados"),
dashboardSidebar(sidebarMenu(
menuItem("Ingresos Medios", tabName = "tab", icon = icon("chart-line")),
# menuItem("Ingresos Medios - Escala Log", tabName = "tab1", icon = icon("chart-line")),
menuItem("Nota Metodolรณgica", tabName = "tab2", icon = icon("table")),
menuItem("Descargar Tabla", tabName = "tab4", icon = icon("download"))),
selectInput("variable", "Categoria:",
c("Pesos Corrientes" = "IM",
"Dรณlares Base 100 = Q3 2006" = "IMN",
"Dรณlares Corrientes" = "IMUSD")),
pickerInput(inputId = "Aglomerado",
label = "Selecciona el Aglomerado",
choices = c(colnames(IngresoMedioARS)),
selected = "BAHIA BLANCA - CERRI",options = list(`actions-box` = TRUE),multiple = T)),
dashboardBody(
tabItems(
tabItem(tabName = "tab",
fluidRow(
box(dygraphOutput("graph"), width=60),
box(textOutput("legendDivID"),
title = "Legend", collapsible = TRUE,
width=55)
)
),
# tabItem(tabName = "tab1",
# fluidRow(
# box(dygraphOutput("graph1"), width=55),
# box(textOutput("legendDivID1"),
# title = "Legend", collapsible = TRUE, width=55)
# )
# ),
tabItem(tabName = "tab2",
fluidRow(
column(12,
includeMarkdown("Data/readme.Rmd")
)
)
),
tabItem(tabName = "tab4",
fluidRow(
column(12,
downloadLink('downloadData', 'Presione Aqui')
)
)
)
)
)
)
# Run the server
server <- function(input, output) {
library(archivist)
library(dplyr)
output$graph <- renderDygraph({
if(input$variable == "IMUSD"){
IngresoMedio <- IngresoMedioUSD[,c(input$Aglomerado)]
TITLE = "Ingresos Medios en USD"}
else if(input$variable == "IMN"){
TITLE = "Ingresos Medios en USD Normalizados, Base 100 = Q3 2006"
IngresoMedio <- IngresoMedioNorm[,c(input$Aglomerado)]
} else {
TITLE = "Ingresos Medios en Pesos Corrientes"
IngresoMedio <- IngresoMedioARS[,c(input$Aglomerado)]
}
withProgress(message = "Loading...", {
dygraph(IngresoMedio, main = TITLE) %>%
dyRangeSelector() %>%
dyLegend(labelsDiv = "legendDivID")
})
})
# output$graph1 <- renderDygraph({
#
# if(input$variable == "IMUSD"){
# TITLE = "Ingresos Medios en Dolares"
# IngresoMedio <- IngresoMedio/133}
# else if(input$variable == "IMN"){
# TITLE = "Ingresos Medios Normalizados"
# IngresoMedio <- 100 * cumprod(1 + ROC(IngresoMedio, type = "discrete")[-1, ])
# } else {
# TITLE = "Ingresos Medios"
# IngresoMedio <- IngresoMedio
# }
#
# withProgress(message = "Loading...", {
#
# dygraph(log(IngresoMedio), main = TITLE, ylab = "Valor") %>%
# dyOptions(colors = RColorBrewer::brewer.pal(32, "Set2")) %>%
# dyRangeSelector() %>%
# dyLegend(labelsDiv = "legendDivID1")
# })
# })
output$table <- renderDataTable({(IngresoMedio)})
output$downloadData <- downloadHandler(
filename = function() {
if(input$variable == "IMUSD"){
TITLE = "Ingresos Medios en Dolares "}
else if(input$variable == "IMN"){
TITLE = "Ingresos Medios Normalizados "
} else {
TITLE = "Ingresos Medios "
}
paste(TITLE, Sys.Date(), '.csv', sep='')
},
content = function(con) {
if(input$variable == "IMUSD"){
IngresoMedio <- IngresoMedioUSD}
else if(input$variable == "IMN"){
IngresoMedio <- IngresoMedioNorm
} else {
IngresoMedio <- IngresoMedioARS
}
write.csv(as.data.frame(IngresoMedio), con)
}
)
}
# Run the API
shinyApp(ui, server)
# Compare the different provinces for a given date.
# Mean, std, Min, Max, Percentiles.
|
## First check whether read-data.R is in the current dir.
if (!"read-data.R" %in% list.files()) {
setwd("/Users/admin/Downloads/DATA Science/exploratory track")
}
source("read-data.R")
# Set weekdays in English
Sys.setlocale("LC_TIME", "English")
png(filename = "plot2.png",
width = 480, height = 480,
units = "px")
plot(DateTime, as.numeric(as.character(Global_active_power)),
type = "l",
xlab = "",
ylab = "Global Active Power (kilowatts)")
dev.off()
|
/plot2.R
|
no_license
|
xmyyj001/ExData_Plotting1
|
R
| false | false | 500 |
r
|
## First check whether read-data.R is in the current dir.
if (!"read-data.R" %in% list.files()) {
setwd("/Users/admin/Downloads/DATA Science/exploratory track")
}
source("read-data.R")
# Set weekdays in English
Sys.setlocale("LC_TIME", "English")
png(filename = "plot2.png",
width = 480, height = 480,
units = "px")
plot(DateTime, as.numeric(as.character(Global_active_power)),
type = "l",
xlab = "",
ylab = "Global Active Power (kilowatts)")
dev.off()
|
library(scientoText)
### Name: g_index
### Title: g index
### Aliases: g_index
### ** Examples
g_index(c(1,2,5,0,3,11))
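# Editor's note: sketch of the usual g-index definition (largest g such that the
# g most-cited papers together have at least g^2 citations); scientoText's own
# implementation may differ in details.
g_index_sketch <- function(citations) {
  cs <- cumsum(sort(citations, decreasing = TRUE))
  ok <- which(cs >= seq_along(cs)^2)
  if (length(ok)) max(ok) else 0L
}
g_index_sketch(c(1, 2, 5, 0, 3, 11))  # 4 under this definition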
|
/data/genthat_extracted_code/scientoText/examples/g_index.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 127 |
r
|
library(scientoText)
### Name: g_index
### Title: g index
### Aliases: g_index
### ** Examples
g_index(c(1,2,5,0,3,11))
|
\name{achievements}
\docType{data}
\alias{achievements}
\title{Ignizio (1976) Example Data Sets}
\description{
The data set is a data frame that defines the achievement goals
\eqn{ g_1(n,p), g_2(n,p), ..., g_K(n,p) }. The columns depend on
the formulation of the goal programming problem. \cr
\cr
For a lexicographical goal programming problem, the data frame has
four named columns. The first column is called 'objective' and it contains
the index for a particular problem object. The second column is called 'priority'
and it is the level to which the row (i.e. objective) is assigned.
The third column is called 'p' and it contains the weight associated with
the positive deviation variable. The fourth column is called 'n' and
it contains the weight associated with the negative deviation variable.
An objective can appear in two rows if each deviation variable is to be
assigned to a different priority level.
For a weighted goal programming problem, the data frame has five named
columns. The first four columns are identical to the columns in the
data frame for a lexicographical goal programming problem. The fifth
column is called 'w' and it is the weight associated with the specified
priority level.
}
\format{
The data set is a data frame.
}
\references{
Ignizio, J. P. (1976). Goal Programming and Extensions, Lexington Books.
}
\author{ Frederick Novomestky \email{fnovomes@poly.edu} }
\seealso{
\code{\link{ignizio.datasets}}
}
\keyword{datasets}
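% Editorial sketch (not from the goalprog documentation): in R, a
% lexicographical achievements data frame with the four columns described
% above could be constructed as
%   achievements <- data.frame(objective = c(1, 2, 3),
%                              priority  = c(1, 2, 2),
%                              p         = c(0, 1, 1),
%                              n         = c(1, 0, 1))
% for a three-objective problem; weighted goal programming adds a fifth
% column 'w' with the weight attached to each priority level.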
|
/man/achievements.Rd
|
no_license
|
Bhanditz/goalprog
|
R
| false | false | 1,549 |
rd
|
\name{achievements}
\docType{data}
\alias{achievements}
\title{Ignizio (1976) Example Data Sets}
\description{
The data set is a data frame that defines the achievement goals
\eqn{ g_1(n,p), g_2(n,p), ..., g_K(n,p) }. The columns depend on
the formulation of the goal programming problem. \cr
\cr
For a lexicographical goal programming problem, the data frame has
four named columns. The first column is called 'objective' and it contains
the index for a particular problem object. The second column is called 'priority'
and it is the level to which the row (i.e. objective) is assigned.
The third column is called 'p' and it contains the weight associated with
the positive deviation variable. The fourth column is called 'n' and
it contains the weight associated with the negative deviation variable.
An objective can appear in two rows if each deviation variable is to be
assigned to a different priority level.
For a weighted goal programming problem, the data frame has five named
columns. The first four columns are identical to the columns in the
data frame for a lexicographical goal programming problem. The fifth
column is called 'w' and it is the weight associated with the specified
priority level.
}
\format{
The data set is a data frame.
}
\references{
Ignizio, J. P. (1976). Goal Programming and Extensions, Lexington Books.
}
\author{ Frederick Novomestky \email{fnovomes@poly.edu} }
\seealso{
\code{\link{ignizio.datasets}}
}
\keyword{datasets}
|
# Function that imports results from World Championship finals from 2005 onwards for the 100 m, 200 m
# and 400 m events for men and women
# from 1999, since that is when reaction-time measurement was introduced
uvozi.rezultati <- function(link, leto, disciplina, spol) {
stran <- html_session(link) %>% read_html()
tabela <- stran %>% html_nodes(xpath="//table[@class='records-table clickable']") %>%
.[[1]] %>% html_table(dec=".") %>% mutate(MARK=parse_number(as.character(MARK)), leto=leto, disciplina=disciplina, spol=spol)
for (j in 1:ncol(tabela)) {
if (is.character(tabela[[j]])) {
Encoding(tabela[[j]]) <- "UTF-8"
}
}
return(tabela)
}
# men's 100 m
link <- "https://www.iaaf.org/competitions/iaaf-world-championships/iaaf-world-championships-london-2017-5151/timetable/bydiscipline/men/100-metres"
povezave <- html_session(link) %>% html_nodes(xpath="//ul[@class='dropdown-menu']//a") %>% html_attr("href") %>%
strapplyc("^(.*)/timetable/bydiscipline") %>% unlist()
moski.100m <- lapply(2:11, function(i) uvozi.rezultati(paste0("https://www.iaaf.org", povezave[i],
"/results/men/100-metres/final/result"),
2021 - 2*i, "100 m", "Moski")) %>% bind_rows() %>% select(-BIB)
# women's 100 m
link <- "https://www.iaaf.org/competitions/iaaf-world-championships/iaaf-world-championships-london-2017-5151/timetable/bydiscipline/women/100-metres"
povezave <- html_session(link) %>% html_nodes(xpath="//ul[@class='dropdown-menu']//a") %>% html_attr("href") %>%
strapplyc("^(.*)/timetable/bydiscipline") %>% unlist()
zenske.100m <- lapply(2:11, function(i) uvozi.rezultati(paste0("https://www.iaaf.org", povezave[i],
"/results/women/100-metres/final/result"),
2021 - 2*i, "100 m", "Zenski")) %>% bind_rows() %>% select(c(-BIB, -6))
# men's 200 m
link <- "https://www.iaaf.org/competitions/iaaf-world-championships/iaaf-world-championships-london-2017-5151/timetable/bydiscipline/men/200-metres"
povezave <- html_session(link) %>% html_nodes(xpath="//ul[@class='dropdown-menu']//a") %>% html_attr("href") %>%
strapplyc("^(.*)/timetable/bydiscipline") %>% unlist()
moski.200m <- lapply(2:11, function(i) uvozi.rezultati(paste0("https://www.iaaf.org", povezave[i],
"/results/men/200-metres/final/result"),
2021 - 2*i, "200 m", "Moski")) %>% bind_rows() %>% select(c(-BIB, -6))
# women's 200 m
link <- "https://www.iaaf.org/competitions/iaaf-world-championships/iaaf-world-championships-london-2017-5151/timetable/bydiscipline/women/200-metres"
povezave <- html_session(link) %>% html_nodes(xpath="//ul[@class='dropdown-menu']//a") %>% html_attr("href") %>%
strapplyc("^(.*)/timetable/bydiscipline") %>% unlist()
zenske.200m <- lapply(2:11, function(i) uvozi.rezultati(paste0("https://www.iaaf.org", povezave[i],
"/results/women/200-metres/final/result"),
2021 - 2*i, "200 m", "Zenski")) %>% bind_rows() %>% select(-BIB)
# men's 400 m
link <- "https://www.iaaf.org/competitions/iaaf-world-championships/iaaf-world-championships-london-2017-5151/timetable/bydiscipline/men/400-metres"
povezave <- html_session(link) %>% html_nodes(xpath="//ul[@class='dropdown-menu']//a") %>% html_attr("href") %>%
strapplyc("^(.*)/timetable/bydiscipline") %>% unlist()
moski.400m <- lapply(2:11, function(i) uvozi.rezultati(paste0("https://www.iaaf.org", povezave[i],
"/results/men/400-metres/final/result"),
2021 - 2*i, "400 m", "Moski")) %>% bind_rows() %>% select(-BIB)
# women's 400 m
link <- "https://www.iaaf.org/competitions/iaaf-world-championships/iaaf-world-championships-london-2017-5151/timetable/bydiscipline/women/400-metres"
povezave <- html_session(link) %>% html_nodes(xpath="//ul[@class='dropdown-menu']//a") %>% html_attr("href") %>%
strapplyc("^(.*)/timetable/bydiscipline") %>% unlist()
zenske.400m <- lapply(2:11, function(i) uvozi.rezultati(paste0("https://www.iaaf.org", povezave[i],
"/results/women/400-metres/final/result"),
2021 - 2*i, "400 m", "Zenski")) %>% bind_rows() %>% select(-BIB)
# test
#tabela1 <- uvozi.rezultati("https://www.iaaf.org/competitions/iaaf-world-championships/13th-iaaf-world-championships-in-athletics-4147/results/women/100-metres/final/result", 2017, "100 m", "F")
#tabela2 <- uvozi.rezultati("https://www.iaaf.org/competitions/iaaf-world-championships/iaaf-world-championships-london-2017-5151/results/men/100-metres/final/result", 2017, "100 m", "M")
# merged tables for the disciplines
sprint <- bind_rows(zenske.100m, zenske.200m, zenske.400m, moski.100m, moski.200m, moski.400m)
# change the country codes so they match those on the map
sprint$COUNTRY <- gsub("NGR", "NGA", sprint$COUNTRY)
sprint$COUNTRY <- gsub("NED", "NLD", sprint$COUNTRY)
sprint$COUNTRY <- gsub("BUL", "BGR", sprint$COUNTRY)
sprint$COUNTRY <- gsub("BAH", "BHS", sprint$COUNTRY)
sprint$COUNTRY <- gsub("GRE", "GRC", sprint$COUNTRY)
sprint$COUNTRY <- gsub("CHA", "TCD", sprint$COUNTRY)
sprint$COUNTRY <- gsub("SKN", "KNA", sprint$COUNTRY)
sprint$COUNTRY <- gsub("ANT", "ATG", sprint$COUNTRY)
sprint$COUNTRY <- gsub("SRI", "LKA", sprint$COUNTRY)
sprint$COUNTRY <- gsub("SLO", "SVN", sprint$COUNTRY)
sprint$COUNTRY <- gsub("RSA", "ZAF", sprint$COUNTRY)
sprint$COUNTRY <- gsub("BOT", "BWA", sprint$COUNTRY)
sprint$COUNTRY <- gsub("MRI", "MUS", sprint$COUNTRY)
sprint$COUNTRY <- gsub("GRN", "GRD", sprint$COUNTRY)
sprint$COUNTRY <- gsub("GER", "DEU", sprint$COUNTRY)
sprint$COUNTRY <- gsub("KSA", "SAU", sprint$COUNTRY)
sprint$COUNTRY <- gsub("BAR", "BRB", sprint$COUNTRY)
sprint$COUNTRY <- gsub("AHO", "CUW", sprint$COUNTRY)
sprint$COUNTRY <- gsub("CAY", "CYM", sprint$COUNTRY)
sprint$COUNTRY <- gsub("POR", "PRT", sprint$COUNTRY)
sprint$COUNTRY <- gsub("ISV", "VIR", sprint$COUNTRY)
sprint$COUNTRY <- gsub("ZAM", "ZMB", sprint$COUNTRY)
|
/uvoz/uvoz_iaaf.R
|
permissive
|
larajagodnik/APPR-2018-19
|
R
| false | false | 6,446 |
r
|
# Function that imports results from World Championship finals from 2005 onwards for the 100 m, 200 m
# and 400 m events for men and women
# from 1999, since that is when reaction-time measurement was introduced
uvozi.rezultati <- function(link, leto, disciplina, spol) {
stran <- html_session(link) %>% read_html()
tabela <- stran %>% html_nodes(xpath="//table[@class='records-table clickable']") %>%
.[[1]] %>% html_table(dec=".") %>% mutate(MARK=parse_number(as.character(MARK)), leto=leto, disciplina=disciplina, spol=spol)
for (j in 1:ncol(tabela)) {
if (is.character(tabela[[j]])) {
Encoding(tabela[[j]]) <- "UTF-8"
}
}
return(tabela)
}
# men's 100 m
link <- "https://www.iaaf.org/competitions/iaaf-world-championships/iaaf-world-championships-london-2017-5151/timetable/bydiscipline/men/100-metres"
povezave <- html_session(link) %>% html_nodes(xpath="//ul[@class='dropdown-menu']//a") %>% html_attr("href") %>%
strapplyc("^(.*)/timetable/bydiscipline") %>% unlist()
moski.100m <- lapply(2:11, function(i) uvozi.rezultati(paste0("https://www.iaaf.org", povezave[i],
"/results/men/100-metres/final/result"),
2021 - 2*i, "100 m", "Moski")) %>% bind_rows() %>% select(-BIB)
# women's 100 m
link <- "https://www.iaaf.org/competitions/iaaf-world-championships/iaaf-world-championships-london-2017-5151/timetable/bydiscipline/women/100-metres"
povezave <- html_session(link) %>% html_nodes(xpath="//ul[@class='dropdown-menu']//a") %>% html_attr("href") %>%
strapplyc("^(.*)/timetable/bydiscipline") %>% unlist()
zenske.100m <- lapply(2:11, function(i) uvozi.rezultati(paste0("https://www.iaaf.org", povezave[i],
"/results/women/100-metres/final/result"),
2021 - 2*i, "100 m", "Zenski")) %>% bind_rows() %>% select(c(-BIB, -6))
# men's 200 m
link <- "https://www.iaaf.org/competitions/iaaf-world-championships/iaaf-world-championships-london-2017-5151/timetable/bydiscipline/men/200-metres"
povezave <- html_session(link) %>% html_nodes(xpath="//ul[@class='dropdown-menu']//a") %>% html_attr("href") %>%
strapplyc("^(.*)/timetable/bydiscipline") %>% unlist()
moski.200m <- lapply(2:11, function(i) uvozi.rezultati(paste0("https://www.iaaf.org", povezave[i],
"/results/men/200-metres/final/result"),
2021 - 2*i, "200 m", "Moski")) %>% bind_rows() %>% select(c(-BIB, -6))
# women's 200 m
link <- "https://www.iaaf.org/competitions/iaaf-world-championships/iaaf-world-championships-london-2017-5151/timetable/bydiscipline/women/200-metres"
povezave <- html_session(link) %>% html_nodes(xpath="//ul[@class='dropdown-menu']//a") %>% html_attr("href") %>%
strapplyc("^(.*)/timetable/bydiscipline") %>% unlist()
zenske.200m <- lapply(2:11, function(i) uvozi.rezultati(paste0("https://www.iaaf.org", povezave[i],
"/results/women/200-metres/final/result"),
2021 - 2*i, "200 m", "Zenski")) %>% bind_rows() %>% select(-BIB)
# men's 400 m
link <- "https://www.iaaf.org/competitions/iaaf-world-championships/iaaf-world-championships-london-2017-5151/timetable/bydiscipline/men/400-metres"
povezave <- html_session(link) %>% html_nodes(xpath="//ul[@class='dropdown-menu']//a") %>% html_attr("href") %>%
strapplyc("^(.*)/timetable/bydiscipline") %>% unlist()
moski.400m <- lapply(2:11, function(i) uvozi.rezultati(paste0("https://www.iaaf.org", povezave[i],
"/results/men/400-metres/final/result"),
2021 - 2*i, "400 m", "Moski")) %>% bind_rows() %>% select(-BIB)
# women's 400 m
link <- "https://www.iaaf.org/competitions/iaaf-world-championships/iaaf-world-championships-london-2017-5151/timetable/bydiscipline/women/400-metres"
povezave <- html_session(link) %>% html_nodes(xpath="//ul[@class='dropdown-menu']//a") %>% html_attr("href") %>%
strapplyc("^(.*)/timetable/bydiscipline") %>% unlist()
zenske.400m <- lapply(2:11, function(i) uvozi.rezultati(paste0("https://www.iaaf.org", povezave[i],
"/results/women/400-metres/final/result"),
2021 - 2*i, "400 m", "Zenski")) %>% bind_rows() %>% select(-BIB)
# test
#tabela1 <- uvozi.rezultati("https://www.iaaf.org/competitions/iaaf-world-championships/13th-iaaf-world-championships-in-athletics-4147/results/women/100-metres/final/result", 2017, "100 m", "F")
#tabela2 <- uvozi.rezultati("https://www.iaaf.org/competitions/iaaf-world-championships/iaaf-world-championships-london-2017-5151/results/men/100-metres/final/result", 2017, "100 m", "M")
# merged tables for the disciplines
sprint <- bind_rows(zenske.100m, zenske.200m, zenske.400m, moski.100m, moski.200m, moski.400m)
# change the country codes so they match those on the map
sprint$COUNTRY <- gsub("NGR", "NGA", sprint$COUNTRY)
sprint$COUNTRY <- gsub("NED", "NLD", sprint$COUNTRY)
sprint$COUNTRY <- gsub("BUL", "BGR", sprint$COUNTRY)
sprint$COUNTRY <- gsub("BAH", "BHS", sprint$COUNTRY)
sprint$COUNTRY <- gsub("GRE", "GRC", sprint$COUNTRY)
sprint$COUNTRY <- gsub("CHA", "TCD", sprint$COUNTRY)
sprint$COUNTRY <- gsub("SKN", "KNA", sprint$COUNTRY)
sprint$COUNTRY <- gsub("ANT", "ATG", sprint$COUNTRY)
sprint$COUNTRY <- gsub("SRI", "LKA", sprint$COUNTRY)
sprint$COUNTRY <- gsub("SLO", "SVN", sprint$COUNTRY)
sprint$COUNTRY <- gsub("RSA", "ZAF", sprint$COUNTRY)
sprint$COUNTRY <- gsub("BOT", "BWA", sprint$COUNTRY)
sprint$COUNTRY <- gsub("MRI", "MUS", sprint$COUNTRY)
sprint$COUNTRY <- gsub("GRN", "GRD", sprint$COUNTRY)
sprint$COUNTRY <- gsub("GER", "DEU", sprint$COUNTRY)
sprint$COUNTRY <- gsub("KSA", "SAU", sprint$COUNTRY)
sprint$COUNTRY <- gsub("BAR", "BRB", sprint$COUNTRY)
sprint$COUNTRY <- gsub("AHO", "CUW", sprint$COUNTRY)
sprint$COUNTRY <- gsub("CAY", "CYM", sprint$COUNTRY)
sprint$COUNTRY <- gsub("POR", "PRT", sprint$COUNTRY)
sprint$COUNTRY <- gsub("ISV", "VIR", sprint$COUNTRY)
sprint$COUNTRY <- gsub("ZAM", "ZMB", sprint$COUNTRY)
|
#' Calculate under-10 nlx, mx, and ax values
#'
#' Calculate under-10-specific nlx, mx, and ax values based on Human Life-Table Database k1 parameters
#' Original k1 parameters are supposed to come from Coale, A.J. and Demeny, P. 1983. Regional Model Life Tables and Stable Populations. Second Edition. Academic Press, N.Y.-L.
#'
#' @param dt data.table with variables: ihme_loc_id, sex, age, sim, age_length, qx, lx, dx
#' @param id_vars character vector of id variables (last one must be age)
#'
#' @return data.table with variables: ihme_loc_id, sex, age, sim, age_length, qx, lx, dx
#' @export
#'
#' @import data.table
#' @import assertable
recalc_u10_nlx_mx_ax <- function(dt, id_vars) {
## Prep datasets
if(tail(id_vars, 1) != "age") stop("numeric variable age must be the last var specified in id_vars")
key_ids <- id_vars[id_vars != "age"]
setorderv(dt, id_vars)
under_10 <- dt[age <= 5]
## Generate k1 parameter
## Human Life-Table Database -- Type 6. Recalculated abridged life tables (from Coale-Demeny 1983)
## http://www.lifetable.de/methodology.pdf pg. 7
under_1 <- dt[age == 0, .SD, .SDcols=c(key_ids, "qx")]
under_1[sex == "male" & qx > .01, k1 := 1.352]
under_1[sex == "male" & qx <=.01, k1 := 1.653 - 3.013 * qx]
under_1[sex == "female" & qx > .01, k1 := 1.361]
under_1[sex == "female" & qx <= .01, k1 := 1.524 - 1.627 * qx]
under_1[, qx := NULL]
assert_values(under_1, "k1", "not_na", quiet=T)
under_10 <- merge(under_10, under_1, by = key_ids)
## Recalculate nLx for 0-1, 1-5, 5-10, 10-15
## Age 0 recalculated using Human Life-Table Database
## http://www.lifetable.de/methodology.pdf pg. 6, equation 5
lx_1_5 <- under_10[age == 1, lx]
lx_5_10 <- under_10[age == 5, lx]
lx_10_15 <- dt[age == 10, lx]
if(length(lx_1_5) != length(lx_5_10) | length(lx_5_10) != length(lx_10_15)) stop("Lx lengths do not match")
## Apply k1 weights -- merge lx_1_merged on so that subsetted results (e.g. '& qx > .1') don't get mixed-up lx values
under_10[age == 0, lx_1_merged := lx_1_5]
under_10[age == 0, nLx := (0.05 + 3 * qx) + (0.95 - 3 * qx) * lx_1_merged]
under_10[age == 0 & qx > .1, nLx := .35 + .65 * lx_1_merged]
under_10[age == 1, nLx := k1 * lx_1_5 + (4 - k1) * lx_5_10]
under_10[age == 5, nLx := 2.5 * (lx_5_10 + lx_10_15)]
under_10[, c("lx_1_merged", "k1") := NULL]
## Generate mx
under_10[, mx := dx/nLx]
## Generate capped ax values
under_10[, ax := (qx + (age_length * mx * qx) - (age_length * mx)) / (mx * qx)]
  # ax can be negative if qx is very low compared to mx; recenter all values that occur this way
under_10[(ax <= 0 | ax >= 1) & age == 0 & sex == "male", ax := .2]
under_10[(ax <= 0 | ax >= 1) & age == 0 & sex == "female", ax := .15]
under_10[(ax <= 0 | ax >= 4) & age == 1 & sex == "male", ax := 1.35]
under_10[(ax <= 0 | ax >= 4) & age == 1 & sex == "female", ax := 1.36]
under_10[(ax <= 0 | ax >= 5) & age == 5 & sex == "male", ax := 2.5]
under_10[(ax <= 0 | ax >= 5) & age == 5 & sex == "female", ax := 2.5]
assert_values(under_10, c("lx", "qx", "nLx", "mx", "ax"), "not_na", quiet=T)
dt <- rbindlist(list(dt[age >= 10], under_10), use.names=T)
return(dt)
}
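## Standalone illustration of the k1 rule used above (hypothetical 1q0 values,
## not IHME estimates): males take k1 = 1.352 when 1q0 > 0.01 and
## k1 = 1.653 - 3.013 * 1q0 otherwise.
k1_male_example <- function(qx) ifelse(qx > 0.01, 1.352, 1.653 - 3.013 * qx)
k1_male_example(c(0.008, 0.02))  # 1.628896 1.352000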
|
/gbd_2019/mortality_code/model_life_tables/mltgeneration/R/recalc_u10_nlx_mx_ax.R
|
no_license
|
Nermin-Ghith/ihme-modeling
|
R
| false | false | 3,176 |
r
|
#' @title Two-component generalized extreme value distribution (GEV)
#' @description Density, distribution function, quantile function and
#' random generation for a two-component GEV distribution (product of two GEVs).
#' @param x vector of quantiles.
#' @param q vector of quantiles.
#' @param p vector of probabilities.
#' @param n number of observations.
#' @param param1 three-dimensional vector (loc, scale, shape)' of a GEV from season 1.
#' @param param2 three-dimensional vector (loc, scale, shape)' of a GEV from season 2.
#' @details These functions use the parametrization of the \link[evd]{gev}-functions from the package 'evd'.
#' The distribution \eqn{F} of a two-component GEV is:
#' \eqn{F=F_1 \cdot F_2}{F=F_1 * F_2}, where \eqn{F_1} and \eqn{F_2} are two
#' distribution functions of a GEV.
#' @examples
#' # density and distribution function of a two-component GEV:
#' par(mfrow=c(3,1))
#' curve(dGEVxGEV(x, c(2,1,0.2), c(3,2,0.4)), from=0, to=20, ylab="Density", n=1000)
#' curve(pGEVxGEV(x, c(2,1,0.2), c(3,2,0.4)), from=0, to=20, ylab="Probability", n=1000)
#'
#' # quantiles of a two-component GEV:
#' qGEVxGEV(p=c(0.9, 0.99), c(2,1,0.2), c(3,2,0.4))
#'
#' # random numbers of a two-component GEV:
#' set.seed(23764)
#' rn <- rGEVxGEV(1000, c(2,1,0.1), c(3,2,0))
#' hist(rn, breaks=40, freq=FALSE, main="")
#' curve(dGEVxGEV(x, c(2,1,0.1), c(3,2,0)), from=0, to=20,
#' ylab="density", n=1000, col="red", add=TRUE)
#' @rdname twocompGEV
#' @export
dGEVxGEV <- function(x, param1, param2){
fs <- evd::dgev(x, loc = param1[1], scale = param1[2], shape = param1[3])
Fs <- evd::pgev(x, loc = param1[1], scale = param1[2], shape = param1[3])
fw <- evd::dgev(x, loc = param2[1], scale = param2[2], shape = param2[3])
Fw <- evd::pgev(x, loc = param2[1], scale = param2[2], shape = param2[3])
fprodgev <- fs*Fw + fw*Fs
return(fprodgev)
}
#' @rdname twocompGEV
#' @export
pGEVxGEV <- function(q, param1, param2){
evd::pgev(q, loc = param1[1], scale = param1[2], shape = param1[3]) * evd::pgev(q, loc = param2[1], scale = param2[2], shape = param2[3])
}
#' @rdname twocompGEV
#' @export
qGEVxGEV <- function(p, param1, param2){
if(!all(p < 1 & p > 0)) stop("p must be a probability: p in (0,1)")
f <- function(q) evd::pgev(q, loc = param1[1], scale = param1[2], shape = param1[3]) * evd::pgev(q, loc = param2[1], scale = param2[2], shape = param2[3])
sapply(p, function(y) uniroot(function(x) f(x)-y, interval=c(0.01, 1e10))$root)
}
#' @rdname twocompGEV
#' @export
rGEVxGEV <- function(n, param1, param2){
u <- runif(n)
qGEVxGEV(u, param1, param2)
}
#' @title Block maxima distribution
#' @description Calculates quantiles of a block maxima distribution.
#' @param p vector of probabilities.
#' @param b block length (in general \code{b} \eqn{\ge 2}).
#' @param param three-dimensional vector with location (mu), scale (sigma)
#' and shape (xi) parameter.
#' @details Formula of the block maxima distribution function:
#' \deqn{F_j(x)=F_j^{(b)}(x)=\left[2\cdot T_{1/\xi}\left(\left\{1+\xi\frac{x-\mu_j}{\sigma_j}\right\}\cdot T_{1/\xi}^{-1}\left(1-\frac{1}{2b}\right)\right)-1\right]^b,}{F_j(x)=F_j^(b)(x)=[2 T_{1/xi}({1+xi (x-mu_j)/(sigma_j)} T_{1/xi}^{-1}(1-1/(2b)))-1]^b,}
#' where \eqn{T_{\nu}}{T_nu} denotes the t-distribution function with \eqn{\nu}{nu} degrees of freedom.
#' @return Quantile of a block maxima distribution.
#' @examples
#' qBM(p=c(0.75, 0.99), b=12, param=c(2,1,0.2))
#' @export
qBM <- function(p, b, param){
if(!all(p < 1 & p > 0)) stop("p must be a probability: p in (0,1)")
mu <- param[1]
sigma <- param[2]
xi <- param[3]
return(mu + sigma/xi*( qt((1+p^(1/b))/2, df=1/xi)/qt((2-1/b)/2, df=1/xi) - 1))
}
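# Quick consistency sketch (illustrative only; assumes the 'evd' package is installed):
# the quantile and distribution functions defined above should invert each other
# up to the uniroot tolerance.
p_check <- c(0.5, 0.9, 0.99)
q_check <- qGEVxGEV(p_check, c(2, 1, 0.2), c(3, 2, 0.4))
max(abs(pGEVxGEV(q_check, c(2, 1, 0.2), c(3, 2, 0.4)) - p_check))  # close to 0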
|
/R/product_gev.R
|
no_license
|
cran/flood
|
R
| false | false | 3,775 |
r
|
setwd("C:/Users/Giwrgos/Dropbox/Summer School/Recordings")
test = read.csv("sample_test.csv")
real = test$class
predsA = read.csv("bci_predictionsA.csv")
predsB = read.csv("bci_predictionsB.csv")
real = (real +1)/2
sum(real == predsA[,1])/length(real)
sum(real == predsB[,1])/length(real)
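# Small extension sketch (not in the original script): confusion matrices for both
# teams, built from the objects defined above.
table(actual = real, predicted = predsA[,1])
table(actual = real, predicted = predsB[,1])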
data = data.frame(real = real, V2 = predsA[,1], V3 = predsB[,1], x = seq_along(real))
par(mfrow=c(3,1))
plot(predsA[,1],col="blue",pch=19,xlab="",main="Team A")
plot(predsB[,1],col="red",pch=19,xlab="",main="Team B")
plot(real,col="black",pch=19,xlab="",main="Real")
library(ggplot2)
ggplot(data, aes(x)) +
  geom_point(aes(y = real, colour = "TRUE")) +
  geom_point(aes(y = V2, colour = "Team A")) +
  geom_point(aes(y = V3, colour = "Team B"))
qplot(data$x, predsA[,1])
qplot(data$x, predsB[,1])
x = read.csv("Sub1_Ses5_raw.csv")
x$P8=NULL
x$T8=NULL
write.csv(x,"C:/Users/Giwrgos/Desktop/Sub1_Ses5_raw.csv",row.names = F)
x = read.csv("Sub2_Ses5_raw.csv")
x$P8=NULL
x$T8=NULL
write.csv(x,"C:/Users/Giwrgos/Desktop/Sub2_Ses5_raw.csv",row.names = F)
x = read.csv("Sub3_Ses5_raw.csv")
x$P8=NULL
x$T8=NULL
write.csv(x,"C:/Users/Giwrgos/Desktop/Sub3_Ses5_raw.csv",row.names = F)
x = read.csv("Sub4_Ses5_raw.csv")
x$P8=NULL
x$T8=NULL
write.csv(x,"C:/Users/Giwrgos/Desktop/Sub4_Ses5_raw.csv",row.names = F)
x = read.csv("Sub5_Ses5_raw.csv")
x$P8=NULL
x$T8=NULL
write.csv(x,"C:/Users/Giwrgos/Desktop/Sub5_Ses5_raw.csv",row.names = F)
|
/Recordings/temp.R
|
no_license
|
IoannisParaskevopoulos/Demokritos-BCI-Summer-School-2016
|
R
| false | false | 1,415 |
r
|
######################################################################################
# Option.R #
# #
# Task: Set key simulation options: analysis type, parameter spaces #
# number of simulations and seeds, directories #
# #
# authors: thiery.masserey@swisstph.ch #
######################################################################################
# Download function in the environments
source("myRfunctions.R")
require(plyr)
require(dplyr)
require(xlsx)
# ------------------------------------------
# Set options for a locally called analysis.
# ------------------------------------------
set_options <-function(do_step = NA, sample_num = 0, quiet = FALSE) {
#----general information----
pm <- list()
  # Define the analysis name
pm$analysis_name <- "SMC"
# Define the user name
pm$user <- "masthi00"
# Iteration
pm$sample_num <- sample_num
# ---- Sampling options ----
opts <- list()
# Number of latin hypercube samples to generate (prior to GP)
opts$lhc_samples <- 1
# Number of different seeds to simulate
opts$n_seeds <- 50
# Flag for sampling EIR values
opts$do_resample <- TRUE
# ---- Post processing ----
# Define the type of analysis
  opts$Type_analysis <- "Trial" # options are: Prophylaxis or SMC or Efficacy or Trial
  # Define whether the same or different children receive SMC at the different rounds
  opts$Type_coverage <- "Fixed" # options are: Random/Fixed (NB: Random is only for SMC or Efficacy or Trial)
# Define the outcome
opts$om_outcome <- "Spread"
# ---- Gaussian process ----
# Select GP kernel function
opts$gp_kernel <- "Gaussian" # "Gaussian", "Matern5_2", or "Matern3_2"
# Proportion of data withheld from training set
opts$gp_test_split <- 0.2
# Maximum number of iteration of optimization algorithm
opts$gp_max_iter <- 10000
# ---- Adaptive sampling ----
# Maximum number of adaptive sampling attempts
opts$sampling_max_iter <- 10
# Number of points to re-simulate per arm
opts$n_adaptive_samples <- 100
# Quantitative threshold for accepting GP performance
opts$stop_criteria <- 0.99
# ---- Option for sensitivity analysis
# Number of sampling points
opts$sa_n <- 10000
# Create output directories
pm$opts <- opts
# Specify parameter range and which one are constrained
  pm <- constrained_parameter(pm, opts$Type_analysis) # see function below
  pm <- variable_parameter(pm, opts$Type_analysis) # see function below
# Define the directory
pm <- set_dirs(pm, pm$opts$Type_analysis) # check directories.R
# ---- Display key options and return ----
# Number of scenarios defined - just to inform the user
n_param <- 0
for (i in 1:length(pm$settings)) {
n_param[i] <- length(pm$settings[[i]])
}
# Define the number of setting
n_settings <- prod(n_param)
# Estimate the number of scenario
pm$opts$n_total_jobs <- pm$opts$lhc_samples * n_settings * pm$opts$n_seeds
# Only display if flag is on
if (quiet == FALSE) {
message(" - Total number of arms: ",
format(n_settings, big.mark = ","))
message(" - Total number of simulations: ",
format(pm$opts$n_total_jobs, big.mark = ","))
}
# Return the output
return(pm)
}
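# Illustrative arithmetic only (assumed counts, not a real run): with the "SMC"
# constrained settings defined below -- 2 seasonality profiles x 2 coverage
# reductions x 3 round schemes x 2 age groups x 1 EC50 pair -- there are 24 arms,
# so 1 LHC sample x 24 arms x 50 seeds = 1200 individual simulations.
example_n_settings <- 2 * 2 * 3 * 2 * 1
example_total_jobs <- 1 * example_n_settings * 50  # 1200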
# ------------------------------------------------------
# Define the parameter values of constrained parameters.
# ------------------------------------------------------
constrained_parameter <- function(pm, type_analysis) {
# Create a list with all the arm to simulate
settings <- list()
  # Define the different seasonality profiles via Fourier coefficients
sesonality1 <- c(0, 0, 0, 0)
sesonality2 <-c(-1.1688319842020671, -1.9682456652323406, -0.23417717218399048, 0.3833462397257487)
sesonality3 <-c(-1.0296205679575603, -1.7833550771077473, -0.7692119280497233, 1.332314173380534) # 0.07+10 test 10
# Setting when aimed to estimate the prophylactic period
if (type_analysis == "Propylaxis") {
# Define level of EIR
EIR <- c(5, 50, 100, 150, 500)
# Seasonality pattern
Seasonality <- c("sesonality1")
# Level of access to treatment
Access <- c(0)
# Diagnostic detection limit
Diagnostic <- c(20)
# Merge all the information
setting_data <-Reduce(merge,
list(
as.data.frame(EIR),
as.data.frame(Access),
as.data.frame(Diagnostic),
as.data.frame(Seasonality)))
# Give name to the columns
colnames(setting_data) <- c("eir", "Access", "Diagnostic", "seasonality")
# Save the information into variable setting
settings$eir <- unique(setting_data$eir)
settings$Access <- unique(setting_data$Access)
settings$Diagnostic <- unique(setting_data$Diagnostic)
settings$seasonality <- data.frame(sesonality1)
}
# Setting when aimed to estimate the selection coefficient
if (type_analysis == "SMC") {
# Seasonality profile
Seasonality <- c("sesonality2", "sesonality3")
# Reduction of coverage at each round of adaptive sampling
Coverage_reduction <- c(0, 0.1)
# Number of round of adaptive sampling (NB: 5.4 = 4 + 1 before, 4.5= 4+ 1 after)
Number_round <- c(4, 5.4, 4.5)
# Maximum age targeted by SMC
Age <- c(5, 10)
# EC50 of the resistant genotype
IC50_SP_R <- c(24.20)
# EC50 of the sensitive genotype
IC50_SP <- c(2.39)
# Merge all the information
setting_data <- Reduce( merge,
list(
as.data.frame(IC50_SP_R),
as.data.frame(IC50_SP),
as.data.frame(Age),
as.data.frame(Number_round),
as.data.frame(Coverage_reduction),
as.data.frame(Seasonality)))
# Name the columns
colnames(setting_data) <-c(
"IC50_SP_R",
"IC50_SP",
"Age",
"Number_round",
"Coverage_reduction",
"seasonality")
# Save the information into variable setting
settings$IC50_SP_R <- unique(setting_data$IC50_SP_R)
settings$IC50_SP <- unique(setting_data$IC50_SP)
settings$Age <- unique(Age)
settings$Number_round <- unique(setting_data$Number_round)
settings$Coverage_reduction <- unique(setting_data$Coverage_reduction)
settings$seasonality <- data.frame(sesonality2, sesonality3)
}
  # Setting when aiming to assess the efficacy of SMC
if (type_analysis == "Efficacy") {
# Seasonality Profile
Seasonality <- c("sesonality2")
# Reduction of coverage at each round of adaptive sampling
Coverage_reduction <- c(0.1)
# Number of round of adaptive sampling (NB: 5.4 = 4 + 1 before, 4.5= 4+ 1 after)
Number_round <- c(4)
# Maximum age targeted by SMC
Age <- c(5)
# EC50 of the sensitive genotype
IC50_SP <- c(60.12)
# Merge all the information
setting_data <-
Reduce(merge,
list(
as.data.frame(IC50_SP),
as.data.frame(Age),
as.data.frame(Number_round),
as.data.frame(Coverage_reduction),
as.data.frame(Seasonality)))
# Name the columns
colnames(setting_data) <- c(
"IC50_SP",
"Age",
"Number_round",
"Coverage_reduction",
"seasonality")
# Save the information into variable setting
settings$IC50_SP_R <- unique(setting_data$IC50_SP_R)
settings$IC50_SP <- unique(setting_data$IC50_SP)
settings$Age <- unique(Age)
settings$Number_round <- unique(setting_data$Number_round)
settings$Coverage_reduction <-unique(setting_data$Coverage_reduction)
settings$seasonality <- data.frame(sesonality2)
}
# Setting when aimed to replicate the trial of Zongo et al (2015)
if (type_analysis == "Trial") {
# EC50 of the resistant genotype
IC50_SP_R <- c(0.5)
# EC50 of the sensitive genotype
IC50_SP <- c(2.39, 0.5)
# SMC coverage
Coverage <- c(0, 1) #0 control group, 1 trial group
# Merge all the information
setting_data <- Reduce(merge,
list(
as.data.frame(IC50_SP_R),
as.data.frame(IC50_SP),
as.data.frame(Coverage)))
# Name the columns
colnames(setting_data) <- c("IC50_SP_R", "IC50_SP", "Coverage")
# Save the information into variable setting
settings$IC50_SP_R <- unique(setting_data$IC50_SP_R)
settings$IC50_SP <- unique(setting_data$IC50_SP)
settings$Coverage <- unique(setting_data$Coverage)
}
# Append settings to pm list
pm$settings <- settings
# Return pm
return(pm)
}
# -------------------------------------------------------------------
# Define the parameter space for parameters that are not constrained.
# -------------------------------------------------------------------
variable_parameter <- function(pm, type_analysis) {
# Parameter space if aimed to estimate the prophylactic period
if (type_analysis == "Propylaxis") {
# Parameter name
Parameter <- c("IC50_SP")
# Maximum values
max <- c(0.02) # 0.02, 0.3,100
# Minimum values
min <- c(0.0005) # 0.0005,0.002,0.01
}
# Parameter space if aimed to identify the key driver of SMC-resistance
if (type_analysis == "SMC") {
# Parameter names
Parameter <- c("Coverage", "Access", "eir", "half_life_long", "Dosage_long") # NB: dosae long do not vary at the end
# Maximum values
max <- c(1, 0.5, 500, 21, 30)
# Minimum values
min <- c(0.7, 0.04, 5, 7, 30)
}
if (type_analysis == "Efficacy") {
# Parameter names
Parameter <- c("Coverage", "Access", "eir", "half_life_long", "Dosage_long") # NB: dosae long do not vary at the end
# Maximum values
max <- c(1, 0.04, 5, 15, 30)
# Minimum values
min <- c(0.9, 0.5, 500, 15, 30)
}
if (type_analysis == "Trial") {
# Parameter names
Parameter <- c("eir") # NB: dosage long do not vary at the end
# Maximum values
max <- c(350)
# Minimum values
min <- c(350)
}
# Merge the information into a dataframe
program_data <- data.frame(Parameter, max, min)
# Convert dataframe to list
prog <- as.list(program_data)
# Names
prog$prog_names <- Parameter
# Easy access number of programs
prog$n_progs <- length(prog$prog_names)
# Append program details to pm list
pm$prog <- prog
# Return pm
return(pm)
}
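# Sketch of how the [min, max] ranges above could feed a Latin hypercube design
# (assumption: the actual sampling lives elsewhere in the workflow; the 'lhs'
# package and the illustrated ranges are only for demonstration).
library(lhs)
example_ranges <- data.frame(Parameter = c("Coverage", "Access", "eir"),
                             min = c(0.7, 0.04, 5), max = c(1, 0.5, 500))
unit_design <- randomLHS(n = 10, k = nrow(example_ranges))
scaled_design <- sweep(sweep(unit_design, 2, example_ranges$max - example_ranges$min, "*"),
                       2, example_ranges$min, "+")
colnames(scaled_design) <- example_ranges$Parameter
head(scaled_design)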
|
/Option.R
|
no_license
|
ThieryM95/SP_resistance_workflow
|
R
| false | false | 11,141 |
r
|
library(XLConnect)
library(ggplot2)
library(dplyr)
setwd("C:\\Users\\Pragan\\Dropbox\\bb")
draft_details <- readWorksheetFromFile("2018_draft.xlsx", sheet = 1, startRow = 1, endCol = 4)
ggplot(draft_details, aes(x = Pick_Num, y = Price)) + geom_point() + facet_wrap(~ Manager)
ggplot(draft_details, aes(y = Price, x = Pick_Num)) + geom_point(aes(color = factor(Manager)))
draft_details %>%
  group_by(Manager) %>%
  summarise(avg = mean(Price))
draft_details %>%
group_by(Manager) %>%
summarise(avg=median(Pick_Num))
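# Small extension sketch (not in the original script): total spend and number of
# picks per manager, using the same draft_details table.
draft_details %>%
  group_by(Manager) %>%
  summarise(total_spent = sum(Price), picks = n())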
|
/draft_summary.r
|
no_license
|
paulragan/fantasybaseball
|
R
| false | false | 539 |
r
|
tmAggregate <- function(dtfDT, indexList, type, ascending, drop.unused.levels, fun.aggregate, args) {
l <- s <- i <- k <- n <- NULL
depth <- length(indexList)
dats <- list()
for (d in 1:depth) {
datd <- tmAggregateStep(dtfDT, indexList[1:d], fun.aggregate, args)
if (d < depth) {
indexPlus <- indexList[(d+1):depth]
datd[, get("indexPlus"):=lapply(indexPlus, function(x)factor(NA, levels=levels(dtfDT[[x]])))]
setcolorder(datd, c(indexList, "s", "c", "i", "se"))
}
datd[, l:=d]
dats[[d]] <- datd
}
datlist <- rbindlist(dats)
datlist <- datlist[!is.na(datlist$index1), ]
datlist <- datlist[!is.na(datlist$s), ]
if (min(datlist$s) < 0) stop("vSize contains negative values.")
datlist <- datlist[datlist$s>0,]
if (drop.unused.levels && is.factor(datlist$c))
datlist$c <- datlist$c[, drop=TRUE]
if (type=="dens") {
# datlist[, c:=c/s]
datlist[is.nan(datlist$c), c:=0]
}
if (!ascending) {
datlist[, i:=-i]
}
  # add unique key (k)
datlist[, k:=as.factor(do.call("paste", c(as.list(datlist[, c(indexList, "l"), with=FALSE]), sep="__")))]
setkey(datlist, k)
# add label name (n)
datlist[, n:=apply(datlist, MARGIN=1, FUN=function(x) x[as.integer(x["l"])])]
datlist[, n:=ifelse(is.na(n), "", n)]
datlist
}
tmAggregateStep <- function(dtfDT, indexList, fun.aggregate, args) {
.SD <- s <- i <- se <- w <- NULL
fun <- match.fun(fun.aggregate)
isCat <- !is.numeric(dtfDT$c)
## aggregate numeric or categorical variable
fn <- function(x) {
if (any(is.na(x))) {
y <- NA
mode(y) <- mode(x)
y
} else if (is.numeric(x)) {
sum(x)
} else {
which.max(table(x))
}
}
fn_nna <- function(x) {
if (any(is.na(x)) && !args$na.rm) stop("NA values found in vSize variable", call. = FALSE)
if (is.numeric(x)) {
sum(x, na.rm = TRUE)
} else {
which.max(table(x))
}
}
if (fun.aggregate=="weighted.mean") {
if (isCat) {
dat <- dtfDT[ , list(s=fn_nna(s), c=fn(c), i=fn(i), se=do.call("fun", c(list(se, w), args))), by=indexList]
} else {
dat <- dtfDT[ , list(s=fn_nna(s), c=do.call("fun", c(list(c, w), args)), i=fn(i), se=do.call("fun", c(list(se, w), args))), by=indexList]
}
} else {
if (isCat) {
dat <- dtfDT[ , list(s=fn_nna(s), c=fn(c), i=fn(i), se=do.call("fun", c(list(se), args))), by=indexList]
} else {
dat <- dtfDT[ , list(s=fn_nna(s), c=do.call("fun", c(list(c), args)), i=fn(i), se=do.call("fun", c(list(se), args))), by=indexList]
}
}
## aggregate categorical variables: for each aggregate, get the mode
if (isCat) {
#fact <- factor(datCat$c, levels=1:nlevels(dtfDT$c), labels=levels(dtfDT$c))
dat[, c:=factor(c, levels=1:nlevels(dtfDT$c), labels=levels(dtfDT$c))]
}
dat
}
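# Standalone illustration (assumed toy data, not part of the treemap package):
# aggregate a numeric size (s) / colour (c) pair over one index level with
# fun.aggregate = "sum".
library(data.table)
toy <- data.table(index1 = factor(c("a", "a", "b")),
                  s = c(1, 2, 3), c = c(10, 20, 30),
                  i = c(1, 2, 3), se = c(0.1, 0.2, 0.3), w = c(1, 1, 1))
tmAggregateStep(toy, "index1", "sum", list(na.rm = TRUE))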
|
/pkg/R/tmAggregate.R
|
no_license
|
mtennekes/treemap
|
R
| false | false | 3,164 |
r
|
#Generalized SVD
rm(list = ls())
setwd("D:/Dropbox/Junrui Di/tensor analysis/GSVD/")
source("GSVDScripts/applyall.R")
load("Data/hr50.rda")
Y = scale(hr50[,-1], center = T, scale = F)
#1. 2nd order
SVD2 = svd(Y)
U2 = SVD2$u
V2 = SVD2$v
S2 = diag(SVD2$d)
#2. 3rd
moment3 = MGT3(Y)
V3 = hoevd(moment3,rank = 32)$u
core_v3 = hoevd(moment3,rank = 32)$z
unfold_core_v3_1 = k_unfold(as.tensor(core_v3),m=1)@data
unfold_core_v3_2 = k_unfold(as.tensor(core_v3),m=2)@data
unfold_core_v3_3 = k_unfold(as.tensor(core_v3),m=3)@data
core_v3_1 = core_v3[1,,]
core_v3_2 = core_v3[2,,]
core_v3_3 = core_v3[3,,]
U3 = Gram3_hosvd(Y)
S3 = t(U3) %*% Y %*% V3
util3 = svd(S3)$u
vtil3 = svd(S3)$v
u3u3t = U3 %*% util3
v3v3t = V3 %*% vtil3
#3. 4th
moment4 = MGT4(Y)
V4= hoevd(moment4,rank = 32)
U4 = Gram4_hosvd(Y)
S4 = t(U4) %*% Y %*% V4
util4 = svd(S4)$u
vtil4 = svd(S4)$v
u4u4t = U4 %*% util4
v4v4t = V4 %*% vtil4
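# NOTE (assumption): U and US are used in the corrplot code below but are never
# defined in this script. The column indices used later (1:10, 33:42, 65:74 out
# of 32-column blocks) suggest column-bound matrices across the three moment
# orders, so a plausible reconstruction is:
U = cbind(U2, U3, U4)
US = cbind(U2 %*% S2, U3 %*% S3, U4 %*% S4)  # assumed: left vectors scaled by their S
# V is reconstructed just before the final corrplot below, after the sign alignment.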
pdf(file = "result/0117/corplot_U.pdf", width = 28, height = 28)
par(mar = c(4,5,9,6))
par(oma = c(1,0,1,0))
corrplot::corrplot(cor(U),cl.pos = "b",cl.cex = 2,tl.cex = 1.6,cl.align.text = "r",na.label = "-")
dev.off()
pdf(file = "result/0117/center moment/corplot_US.pdf", width = 28, height = 28)
par(mar = c(4,5,9,6))
par(oma = c(1,0,1,0))
corrplot::corrplot(cor(US),cl.pos = "b",cl.cex = 2,tl.cex = 1.6,cl.align.text = "r",na.label = "-")
dev.off()
load("data/cov50.rda")
cov = subset(cov50, select = c(ID,Male,MobilityProblem,mortstat,cancer, diabetes))
col11 = rgb(0, 0, 238, alpha=100, maxColorValue=255)
col22 = rgb(205,15,20,maxColorValue=255)
u2 = U[,c(1:10)]
u3 = U[,c(33:42)]
u4 = U[,c(65:74)]
u = cbind(u2,u3,u4)
pdf(file = "result/0117/heamaps_S.pdf", width = 15, height = 15)
par(mfrow = c(3,3))
heatmap(diag(SVD2$d),Rowv = NA, Colv = NA)
heatmap(S3,Rowv = NA, Colv = NA)
heatmap(S4,Rowv = NA, Colv = NA)
dev.off()
pdf(file = "result/0117/Upairs_mortality.pdf", width = 15, height = 15)
par(mfrow = c(3,3))
for(i in 1:29){
for(j in (i+1):30 ){
plot(u[,i],u[,j],xlab = names(u)[i],ylab = names(u)[j],col=c(col11,col22)[as.factor(cov$mortstat)],pch = 19)
}
}
dev.off()
pdf(file = "result/0117/Upairs_mobility.pdf", width = 15, height = 15)
par(mfrow = c(3,3))
for(i in 1:29){
for(j in (i+1):30 ){
plot(u[,i],u[,j],xlab = names(u)[i],ylab = names(u)[j],col=c(col11,col22)[as.factor(cov$MobilityProblem)],pch = 19)
}
}
dev.off()
library(qdap)
library(timeDate)
library(lubridate)
TIME = char2end(as.character(timeSequence(from = hm("7:00"), to = hm("22:59"), by = "hour")),char = " ",noc=1)
TIME = beg2char(TIME,":",2)
for(i in 1:ncol(V2)){
sign2 = sign(V2[1,i])
sign23 = sign(V3[1,i])
sign234 = sign(V4[1,i])
if(sign2 != sign23){
V3[,i] = -V3[,i]
}
if(sign2 != sign234){
V4[,i] = -V4[,i]
}
}
pdf("result/0117/V_comparison.pdf",width = 10,height = 10)
par(mfrow = c(3,1))
for(i in 1:32){
plot(V2[,i],main = paste0("V2 - ",i),type = "l",xaxt = "n",ylab = "RSV")
axis(1, at = c(seq(1,32,2)),labels = TIME)
abline(h = 0,lty = 3)
plot(V3[,i],main = paste0("V3 - ",i),type = "l",xaxt = "n", ylab = "RSV")
axis(1, at = c(seq(1,32,2)),labels = TIME)
abline(h = 0,lty = 3)
plot(V4[,i],main = paste0("V4 - ",i),type = "l",xaxt = "n", ylab = "RSV")
axis(1, at = c(seq(1,32,2)),labels = TIME)
abline(h = 0,lty = 3)
}
dev.off()
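# NOTE (assumption): V is not defined above; bind the (sign-aligned) loading
# matrices from the three moment orders before plotting their correlations.
V = cbind(V2, V3, V4)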
pdf(file = "result/0117/corplot_V.pdf", width = 28, height = 28)
par(mar = c(4,5,9,6))
par(oma = c(1,0,1,0))
corrplot::corrplot(cor(V),cl.pos = "b",cl.cex = 2,tl.cex = 1.6,cl.align.text = "r",na.label = "-")
dev.off()
|
/GSVD.R
|
no_license
|
junruidi/GSVD_Scripts
|
R
| false | false | 3,694 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/redshiftserverless_service.R
\name{redshiftserverless}
\alias{redshiftserverless}
\title{Redshift Serverless}
\usage{
redshiftserverless(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.
\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
\item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
\item{\strong{region}:} {The AWS Region used in instantiating the client.}
\item{\strong{close_connection}:} {Immediately close all HTTP connections.}
\item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
\item{\strong{s3_force_path_style}:} {Set this to \code{true} to force the request to use path-style addressing, i.e., \verb{http://s3.amazonaws.com/BUCKET/KEY}.}
}}
}
\value{
A client for the service. You can call the service's operations using
syntax like \code{svc$operation(...)}, where \code{svc} is the name you've assigned
to the client. The available operations are listed in the
Operations section.
}
\description{
This is an interface reference for Amazon Redshift Serverless. It
contains documentation for one of the programming or command line
interfaces you can use to manage Amazon Redshift Serverless.
Amazon Redshift Serverless automatically provisions data warehouse
capacity and intelligently scales the underlying resources based on
workload demands. Amazon Redshift Serverless adjusts capacity in seconds
to deliver consistently high performance and simplified operations for
even the most demanding and volatile workloads. Amazon Redshift
Serverless lets you focus on using your data to acquire new insights for
your business and customers.
To learn more about Amazon Redshift Serverless, see \href{https://docs.aws.amazon.com/redshift/latest/mgmt/serverless-whatis.html}{What is Amazon Redshift Serverless}.
}
\section{Service syntax}{
\if{html}{\out{<div class="sourceCode">}}\preformatted{svc <- redshiftserverless(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string",
close_connection = "logical",
timeout = "numeric",
s3_force_path_style = "logical"
)
)
}\if{html}{\out{</div>}}
}
\section{Operations}{
\tabular{ll}{
\link[=redshiftserverless_convert_recovery_point_to_snapshot]{convert_recovery_point_to_snapshot} \tab Converts a recovery point to a snapshot\cr
\link[=redshiftserverless_create_endpoint_access]{create_endpoint_access} \tab Creates an Amazon Redshift Serverless managed VPC endpoint\cr
\link[=redshiftserverless_create_namespace]{create_namespace} \tab Creates a namespace in Amazon Redshift Serverless\cr
\link[=redshiftserverless_create_snapshot]{create_snapshot} \tab Creates a snapshot of all databases in a namespace\cr
\link[=redshiftserverless_create_usage_limit]{create_usage_limit} \tab Creates a usage limit for a specified Amazon Redshift Serverless usage type\cr
\link[=redshiftserverless_create_workgroup]{create_workgroup} \tab Creates a workgroup in Amazon Redshift Serverless\cr
\link[=redshiftserverless_delete_endpoint_access]{delete_endpoint_access} \tab Deletes an Amazon Redshift Serverless managed VPC endpoint\cr
\link[=redshiftserverless_delete_namespace]{delete_namespace} \tab Deletes a namespace from Amazon Redshift Serverless\cr
\link[=redshiftserverless_delete_resource_policy]{delete_resource_policy} \tab Deletes the specified resource policy\cr
\link[=redshiftserverless_delete_snapshot]{delete_snapshot} \tab Deletes a snapshot from Amazon Redshift Serverless\cr
\link[=redshiftserverless_delete_usage_limit]{delete_usage_limit} \tab Deletes a usage limit from Amazon Redshift Serverless\cr
\link[=redshiftserverless_delete_workgroup]{delete_workgroup} \tab Deletes a workgroup\cr
\link[=redshiftserverless_get_credentials]{get_credentials} \tab Returns a database user name and temporary password with temporary authorization to log in to Amazon Redshift Serverless\cr
\link[=redshiftserverless_get_endpoint_access]{get_endpoint_access} \tab Returns information, such as the name, about a VPC endpoint\cr
\link[=redshiftserverless_get_namespace]{get_namespace} \tab Returns information about a namespace in Amazon Redshift Serverless\cr
\link[=redshiftserverless_get_recovery_point]{get_recovery_point} \tab Returns information about a recovery point\cr
\link[=redshiftserverless_get_resource_policy]{get_resource_policy} \tab Returns a resource policy\cr
\link[=redshiftserverless_get_snapshot]{get_snapshot} \tab Returns information about a specific snapshot\cr
\link[=redshiftserverless_get_table_restore_status]{get_table_restore_status} \tab Returns information about a TableRestoreStatus object\cr
\link[=redshiftserverless_get_usage_limit]{get_usage_limit} \tab Returns information about a usage limit\cr
\link[=redshiftserverless_get_workgroup]{get_workgroup} \tab Returns information about a specific workgroup\cr
\link[=redshiftserverless_list_endpoint_access]{list_endpoint_access} \tab Returns an array of EndpointAccess objects and relevant information\cr
\link[=redshiftserverless_list_namespaces]{list_namespaces} \tab Returns information about a list of specified namespaces\cr
\link[=redshiftserverless_list_recovery_points]{list_recovery_points} \tab Returns an array of recovery points\cr
\link[=redshiftserverless_list_snapshots]{list_snapshots} \tab Returns a list of snapshots\cr
\link[=redshiftserverless_list_table_restore_status]{list_table_restore_status} \tab Returns information about an array of TableRestoreStatus objects\cr
\link[=redshiftserverless_list_tags_for_resource]{list_tags_for_resource} \tab Lists the tags assigned to a resource\cr
\link[=redshiftserverless_list_usage_limits]{list_usage_limits} \tab Lists all usage limits within Amazon Redshift Serverless\cr
\link[=redshiftserverless_list_workgroups]{list_workgroups} \tab Returns information about a list of specified workgroups\cr
\link[=redshiftserverless_put_resource_policy]{put_resource_policy} \tab Creates or updates a resource policy\cr
\link[=redshiftserverless_restore_from_recovery_point]{restore_from_recovery_point} \tab Restore the data from a recovery point\cr
\link[=redshiftserverless_restore_from_snapshot]{restore_from_snapshot} \tab Restores a namespace from a snapshot\cr
\link[=redshiftserverless_restore_table_from_snapshot]{restore_table_from_snapshot} \tab Restores a table from a snapshot to your Amazon Redshift Serverless instance\cr
\link[=redshiftserverless_tag_resource]{tag_resource} \tab Assigns one or more tags to a resource\cr
\link[=redshiftserverless_untag_resource]{untag_resource} \tab Removes a tag or set of tags from a resource\cr
\link[=redshiftserverless_update_endpoint_access]{update_endpoint_access} \tab Updates an Amazon Redshift Serverless managed endpoint\cr
\link[=redshiftserverless_update_namespace]{update_namespace} \tab Updates a namespace with the specified settings\cr
\link[=redshiftserverless_update_snapshot]{update_snapshot} \tab Updates a snapshot\cr
\link[=redshiftserverless_update_usage_limit]{update_usage_limit} \tab Update a usage limit in Amazon Redshift Serverless\cr
\link[=redshiftserverless_update_workgroup]{update_workgroup} \tab Updates a workgroup with the specified configuration settings
}
}
\examples{
\dontrun{
svc <- redshiftserverless()
svc$convert_recovery_point_to_snapshot(
Foo = 123
)
}
}
|
/man/redshiftserverless.Rd
|
no_license
|
cran/paws.database
|
R
| false | true | 7,955 |
rd
|
## Define genome and fitness landscape
n <- 10 #genome size
refgene <- rep(0,n) #reference genome as a vector of 0s
u <- 1e-3 #mutation rate
fl <- 0.4 #lethal fraction
fn <- 0.3 #neutral fraction
fb <- 1 - fl -fn #beneficial fraction
wref <- 2 #reference reproductivity
population_capacity <- 8000
generations <- 300
upper_slope_range_fraction_from_capacity <- 0.75 #for choosing end of slope
lower_slope_range_fraction_from_capacity <- 0.25 #for choosing start of slope
threshold_majority <- 0.8 #for finding the fixed mutations
repeat_sim <- 10
### DEFINE FITNESS VALUES
# for (i in 1:n) {
# ifelse(i < n*fl, assign(paste("s",i, sep = "_"), runif(1,-1,0)),
# ifelse(i < n*(fl+fn), assign(paste("s",i, sep = "_"), 0),
# assign(paste("s",i, sep = "_"), runif(1,0,100))))
# } #assign mutation fitness each position, Wg=Wref.Pi(1+si) = 1(1+si)^vi
s <- c()
for (i in 1:n) {
ifelse(i < n*fl, pawn <- runif(1,-1,0),
ifelse(i < n*(fl+fn), pawn <- 0, pawn <- runif(1,0,2)))
s <- c(s,pawn)
} #As a VECTOR assign mutation fitness each position, Wg=wref.Pi(1+si) = 1(1+si)^vi
s <- round(s, digits = 2)
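# Illustrative check (sketch, not part of the simulation): genome fitness is
# Wg = wref * prod((1 + s)^v), where v is the 0/1 mutation vector of a genome.
# The genome below is a made-up example, not one generated by the simulation.
example_genome <- c(1, rep(0, n - 1))                   # a single mutation at position 1
example_fitness <- wref * prod((1 + s)^example_genome)  # equals wref * (1 + s[1])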
##############################
slope_matrix <- matrix(nrow = n,ncol = repeat_sim)
mutant_numbers_rep <- list()
for (l in 1:repeat_sim) {
#Start population
data <- matrix(refgene)
#matrix store number of mutations in each position in each generation
mutant_number_each_position <- matrix(nrow = n,ncol = generations)
#fraction of mutant in 1 position compare to all mutant in that same generation
mutant_fraction_each_position <- matrix(nrow = n,ncol = generations)
  #All data on all genes in all generations
# store <- list()
for (i in 1:generations){
    #producing children as a Poisson process with mean given by the fitness; more realistic than rounding the fitness
    children_individuals <- sapply(wref*(apply((1+s)^data,2,prod)), function(x) {rpois(1,x)}) #number of children of each column
population_size <- sum(children_individuals)
while(population_size > population_capacity)
{
children_individuals[sample(1:length(children_individuals),50)] <- rep(0,50)
population_size <- sum(children_individuals)
}
data1 <- matrix(nrow = n, ncol = population_size)
survivals <- which(children_individuals != 0)
    children_individuals <- children_individuals[survivals] #offspring counts of the survivors only
data <- matrix(data[,survivals], nrow = n)
for (j in 1:ncol(data)){
data1[,(sum(children_individuals[0:(j-1)])+1):
(sum(children_individuals[0:(j-1)])+children_individuals[j])] <- rep(data[,j],children_individuals[j])
} #next generation without mutation
mutationvector <- runif(ncol(data1),0,1)
mutated_genes_position <- which(mutationvector < u)
    if(length(mutated_genes_position) > 0) for (k in mutated_genes_position) {
      mutated_site <- sample(1:n, 1) # pick one genome position and flip it in individual k
      data1[mutated_site, k] <- 1 - data1[mutated_site, k]
    }
data <- data1
mutant_number_each_position[,i] <- rowSums(data)
mutant_fraction_each_position[,i] <- mutant_number_each_position[,i]/ sum(mutant_number_each_position[,i])
print(i)
}
  #identify genome positions whose mutant count exceeded the majority threshold in at least one generation
positions_reached_majority <- sort(unique(which(mutant_number_each_position > population_capacity*threshold_majority, arr.ind = TRUE)[,1]))
for (i in positions_reached_majority) {
start_slope <- max(which(mutant_number_each_position[i,] < population_capacity*lower_slope_range_fraction_from_capacity))
stop_slope <- max(which(mutant_number_each_position[i,] < population_capacity*upper_slope_range_fraction_from_capacity))
slope_matrix[i,l] <- lm(mutant_number_each_position[i,start_slope:stop_slope] ~ c(start_slope:stop_slope))$coefficients[2]
}
mutant_numbers_rep[[l]] <- mutant_number_each_position
# initiate plot
plot(range(0:generations), range(0:max(mutant_number_each_position)), type="n", xlab="Generation", ylab="Mutants_number" )
colors <- rainbow(n)
linetype <- c(1:n)
# add lines
for (i in 1:n) {
lines(mutant_number_each_position[i,], type="l", lwd=2,
lty=linetype[i], col=colors[i])
}
# add a title and subtitle
title(paste("Mutation number",", Rep = ", as.character(l),"\n","s = ", paste(sapply(s, as.character), collapse = "_"),"\n","u = ", as.character(u),", Wref = ", as.character(wref), sep = ""))
# add a legend
legend("bottomright", as.character(c(1:n)), cex= 1, col=rainbow(n) , lty= 1:n, title="position")
}
slope_matrix
#slope is not a good estimation, too much clonal interference
##############################
#Start population
data <- matrix(refgene)
#matrix store number of mutations in each position in each generation
mutant_number_each_position <- matrix(nrow = n,ncol = generations)
#fraction of mutant in 1 position compare to all mutant in that same generation
mutant_fraction_each_position <- matrix(nrow = n,ncol = generations)
#All data on all genes in all generations
store <- list()
for (i in 1:generations){
  #producing children as a Poisson process with mean given by the fitness; more realistic than rounding the fitness
  children_individuals <- sapply(wref*(apply((1+s)^data,2,prod)), function(x) {rpois(1,x)}) #number of children of each column
population_size <- sum(children_individuals)
while(population_size > population_capacity)
{
children_individuals[sample(1:length(children_individuals),50)] <- rep(0,50)
population_size <- sum(children_individuals)
}
data1 <- matrix(nrow = n, ncol = population_size)
survivals <- which(children_individuals != 0)
  children_individuals <- children_individuals[survivals] #offspring counts of the survivors only
data <- matrix(data[,survivals], nrow = n)
for (j in 1:ncol(data)){
data1[,(sum(children_individuals[0:(j-1)])+1):
(sum(children_individuals[0:(j-1)])+children_individuals[j])] <- rep(data[,j],children_individuals[j])
} #next generation without mutation
mutationvector <- runif(ncol(data1),0,1)
mutated_genes_position <- which(mutationvector < u)
  if(length(mutated_genes_position) > 0) for (k in mutated_genes_position) {
    mutated_site <- sample(1:n, 1) # pick one genome position and flip it in individual k
    data1[mutated_site, k] <- 1 - data1[mutated_site, k]
  }
store[[i]] <- data <- data1
mutant_number_each_position[,i] <- rowSums(store[[i]])
mutant_fraction_each_position[,i] <- mutant_number_each_position[,i]/ sum(mutant_number_each_position[,i])
print(i)
}
#saveRDS(store, file = "store.rds")
#saveRDS(mutant_number_each_position, file = "mutant_number_each position.rds")
# initiate plot
plot(range(0:generations), range(0:max(mutant_number_each_position)), type="n", xlab="Generation", ylab="Mutants_number" )
colors <- rainbow(n)
linetype <- c(1:n)
# add lines
for (i in 1:n) {
lines(mutant_number_each_position[i,], type="l", lwd=2,
lty=linetype[i], col=colors[i])
}
# add a title and subtitle
title(paste("Mutation number","\n","s = ", paste(sapply(s, as.character), collapse = "_"),"\n","u = ", as.character(u),", Wref = ", as.character(wref), sep = ""))
# add a legend
legend("bottomright", as.character(c(1:n)), cex= 1, col=rainbow(n) , lty= 1:n, title="position")
#####2nd plot
# initiate plot
plot(range(0:generations), range(0:1), type="n", xlab="Generation", ylab="Mutants_fraction" )
colors <- rainbow(n)
linetype <- c(1:n)
# add lines
for (i in 1:n) {
lines(mutant_fraction_each_position[i,], type="l", lwd=2,
lty=linetype[i], col=colors[i])
}
# add a title and subtitle
title(paste("Mutation fraction","\n","s = ", paste(sapply(s, as.character), collapse = "_"),"\n","u = ", as.character(u),", Wref = ", as.character(wref), sep = ""))
# add a legend
legend("topright", as.character(c(1:n)), cex= 1, col=rainbow(n) , lty= 1:n, title="position")
#identify genome positions whose mutant count exceeded the majority threshold in at least one generation
positions_reached_majority <- sort(unique(which(mutant_number_each_position > population_capacity*threshold_majority, arr.ind = TRUE)[,1]))
slope_vector <- c()
dummy <- 1
for (i in positions_reached_majority) {
start_slope <- min(which(mutant_number_each_position[i,] > population_capacity*lower_slope_range_fraction_from_capacity))
stop_slope <- max(which(mutant_number_each_position[i,] < population_capacity*upper_slope_range_fraction_from_capacity))
slope_vector[dummy] <- lm(mutant_number_each_position[i,start_slope:stop_slope] ~ c(start_slope:stop_slope))$coefficients[2]
dummy <- dummy + 1
}
names(slope_vector) <- as.character(positions_reached_majority)
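# Optional follow-up (sketch): compare the fitted slopes with the selection coefficients
# of the positions that reached the majority threshold. Assumes at least one position
# crossed the threshold in the single run above.
if (length(slope_vector) > 0) {
  plot(s[positions_reached_majority], slope_vector,
       xlab = "Selection coefficient s", ylab = "Fitted slope (mutants per generation)",
       main = "Slope vs selection coefficient")
}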
|
/s_multiplicative_w_slopes.R
|
no_license
|
maithunguyen/Fitness-landscapes-and-DFEs
|
R
| false | false | 8,657 |
r
|
suppressPackageStartupMessages(library(tidyverse))
library(rray)
library(patchwork)
library(here)
theme_set(theme_minimal(base_size = 12))
map_draw_location = here('data', 'map_parameter_draws.RDS')
map_draws = readRDS(map_draw_location)
mcmc_draw_location = here('data', 'mcmc_parameter_draws.RDS')
mcmc_draws = readRDS(mcmc_draw_location)
#Dose sizes
DD = c(seq(0,10, 0.05),seq(11, 60, 0.5))
nD = length(DD)
# Function to evaluate risk of being below threshold at tmax
# This function uses the parameter draws from the posterior to compute the concentration curve
# Then, we can evaluate curves over dose sizes.
# Returns an array of risk which corresponds to dose size
get_risk_at_max = function(patient, draws, thresh){
n = nrow(draws[[1]])
  #PK parameters for this patient
ka = rray(draws$ka[,patient], dim=c(n,1))
ke = rray(draws$ke[,patient], dim=c(n,1))
cl = rray(draws$cl[,patient], dim=c(n,1))
time = log(ka/ke)/(ka - ke)
# Dose sizes to evaluate over
D = rray(DD, dim = c(1,nD))
# Array broadcasting for economy of thought
y = 1000*(ke*ka)/(2*cl*(ke-ka))*(exp(-ka*time) - exp(-ke*time))
yy = y*D
r= yy %>% rray_lesser(thresh) %>% rray_mean(axes = c(1))
as.numeric(r)
}
estimate_d = function(model, p){
roots = uniroot(function(x) model(x) - p, c(0, max(DD)))
roots$root
}
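# Quick illustration (toy numbers, not patient data): estimate_d() inverts a monotone
# risk-versus-dose curve with uniroot. Here a made-up decreasing risk curve over DD is
# built and the dose giving a 50% risk is recovered (about 20 * log(2)).
toy_risk <- exp(-DD / 20)
toy_spline <- splinefun(DD, toy_risk, method = "hyman")
toy_dose_50 <- estimate_d(toy_spline, 0.5)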
risk_at_max<-
tibble(patient = 1:100) %>%
mutate(
#Get risk for each patient
map_risk_at_max = map(patient, ~get_risk_at_max(.x, map_draws, 100)),
mcmc_risk_at_max = map(patient, ~get_risk_at_max(.x, mcmc_draws, 100)),
# Interpolate the risk as a function of dose using a hermite spline
D = list(DD),
mcmc_spline = map2(D, mcmc_risk_at_max, ~splinefun(.x, .y, method='hyman')),
map_spline = map2(D, map_risk_at_max, ~splinefun(.x, .y, method='hyman'))
)
doses_for_max = risk_at_max %>%
select(patient, mcmc_spline, map_spline) %>%
crossing(p = seq(0.05, 0.95, 0.05)) %>%
# This line uses root solving to find the dose required to achieve the desired risk level
mutate(mcmc_estimated_dose = map2_dbl(mcmc_spline, p, estimate_d),
map_estimated_dose = map2_dbl(map_spline, p, estimate_d),
delta = map_estimated_dose - mcmc_estimated_dose
)
doses_for_max%>%
select(patient, p, mcmc_estimated_dose, map_estimated_dose) %>%
write_csv(here("data","experiment_2_doses.csv"))
figure_7_right = doses_for_max %>%
ggplot(aes(p, delta, group = patient))+
geom_line()+
scale_x_continuous(labels = scales::percent, limits = c(0,0.5))+
geom_hline(aes(yintercept = 0), color = 'red')+
labs(x = 'Risk For Max Concentration',
y = 'MAP Dose - HMC Dose')
ggsave(filename = 'figure_7_right.pdf',
plot = figure_7_right,
path = here('figures'))
# ---- Calibration ----
dose_location = here( "data","experiment_2_doses.csv")
doses_for_max %>%
select(patient, p, mcmc_estimated_dose, map_estimated_dose) %>%
write_csv(dose_location)
experiment_2_doses = here("data","experiment_2_doses.csv") %>%
read_csv() %>%
filter(p<=0.5)
true_pk_params = here("data","simulated_data.csv") %>%
read_csv()
pkfunc<-function(dose, cl, ke, ka, t){
1000*dose*ke*ka/(2*cl*(ke - ka))*(exp(-ka*t) - exp(-ke*t))
}
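# Sanity-check sketch (illustrative parameter values, not model estimates): the time of
# maximum concentration used below is tmax = log(ka/ke)/(ka - ke), at which pkfunc()
# gives the peak concentration for a given dose.
demo_ka <- 1; demo_ke <- 0.2; demo_cl <- 3; demo_dose <- 5
demo_tmax <- log(demo_ka / demo_ke) / (demo_ka - demo_ke)
demo_cmax <- pkfunc(demo_dose, demo_cl, demo_ke, demo_ka, demo_tmax)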
# To determine calibration, we give each patient their recommended dose for the desired risk.
# Each dose was designed to elicit a given risk of exceeding the concentration threshold.
# The calibration is the proportion of those patients who do not exceed the threshold.
figure_8_right_data = experiment_2_doses %>%
left_join(true_pk_params, by = c('patient' = 'subjectids')) %>%
mutate(t = log(ka/ke)/(ka - ke),
conc_mcmc = pkfunc(mcmc_estimated_dose, cl, ke, ka, t),
conc_map = pkfunc(map_estimated_dose, cl, ke, ka, t),
calibration_mcmc = conc_mcmc<=100,
calibration_map = conc_map<=100) %>%
group_by(p) %>%
summarize(mcmc_calib = mean(calibration_mcmc),
map_calib = mean(calibration_map))
figure_8_right<-figure_8_right_data %>%
ggplot()+
geom_point(aes(p, mcmc_calib, color = 'HMC'))+
geom_point(aes(p, map_calib, color = 'MAP'))+
geom_abline()+
theme(aspect.ratio = 1, legend.position = 'top')+
scale_color_brewer(palette = 'Set1', direction = -1)+
xlab('Desired Risk')+
ylab('Calibration For Experiment 1')+
scale_y_continuous(labels = scales::percent, limits = c(0,1))+
scale_x_continuous(labels = scales::percent)+
labs(color = '')
ggsave(filename = 'figure_8_right.pdf',
plot = figure_8_right,
path = here("figures"))
|
/analysis/05_CMax_Calibration.R
|
no_license
|
Dpananos/PKBayes
|
R
| false | false | 4,604 |
r
|
library(XML)
Sys.setlocale(category='LC_ALL', locale='C')
url <- "https://play.google.com/store/apps/details?id=com.facebook.katana&hl=zh-TW"
html <- htmlParse(paste(readLines(url,warn = F),collapse = ""),encoding="utf8")
temp <- xpathSApply(html,"//div[@class='review-body']")
doc <- lapply(temp,function(u)xmlValue(u,trim = T))
doc <- gsub("完整評論","",doc)  # remove the "完整評論" ("Full review") label from each review
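# Possible next step (sketch): gather the cleaned review texts into a data frame for
# downstream text mining; the column name "text" is arbitrary.
reviews <- data.frame(text = doc, stringsAsFactors = FALSE)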
|
/GooglePlayCommentExtraction.R
|
no_license
|
parker00811/III-0329-TextMining
|
R
| false | false | 367 |
r
|
library(reshape)
library(gdata)
library(vegan)
data <- read.xls("Niwot_Fac_Data_20160802.xlsx", 1)
melted <- melt(data, id = c("PLOT", "SUBPLOT", "SPECIES"), measured = c("COUNT"))
#Beware duplicate data when casting rows
casted <- cast(melted, PLOT + SUBPLOT ~ SPECIES, fill = 0)
#We will probably want a better way to handle this so that we can
#use information in the PLOT column to make the ordination graph more readable
casted$PLOT <- NULL
casted$SUBPLOT <- NULL
ord <- metaMDS(casted, autotransform=FALSE)
plot(ord)
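# Possible refinement (sketch): keep the PLOT labels aside so they can be used to colour
# sites in the ordination plot, as suggested in the comment above. The cast() call is the
# same as before, so the row order matches the ordination scores.
site_info <- cast(melted, PLOT + SUBPLOT ~ SPECIES, fill = 0)
plot_ids <- factor(site_info$PLOT)
plot(ord, type = "n")
points(ord, display = "sites", col = as.integer(plot_ids), pch = 16)
legend("topright", legend = levels(plot_ids),
       col = seq_along(levels(plot_ids)), pch = 16, title = "PLOT")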
|
/diversity_reshape.R
|
no_license
|
achmurzy/Niwot_Facilitation
|
R
| false | false | 527 |
r
|
#' rastR_downloadR
#'
#' This function:
#' - reads in a list of GBIF taxonIDs
#' - searches for the corresponding occurrence maps using the map api
#' - checks if there is no occurrence data and skips those urls to prevent abortion
#' - writes .tif image files with the occurrence maps for each taxon into a new folder
#'
#' @param webpage A list of URLs to download map data from
#' @param rastR All output file names will start with this, followed by taxonID
#'
#' @export
#' @examples
#'
# load necessary packages
require(stringr)
require(dismo)
# read in data in form of a csv file containing the GBIF taxonIDs to search for
taxonID_big = read.csv('~/Dropbox/Spatial_Bioinformatics/GBIF_Project/TaxonID_tiny.csv')
taxonID_small = taxonID_big[,3]
# Create a list of URLs based on the taxonIDs, using the GBIF map api
webpage = paste0('https://api.gbif.org/v2/map/occurrence/density/0/0/0@1x.png?taxonKey=', taxonID_small, '&squareSize=64&style=classic.point')
# The loop will produce an error if an output file of the same name exists (overwrite = FALSE)
# Therefore, create a new folder to make sure it works
dir.create('rastR')
setwd('rastR')
# try(...) tests if the webpage provides an occurrence map
# class(...) assigns a new class "try-error", if there is an error for try()
# if(...) {next} skips the webpage[i] if there is no data
# print(paste(...)) prints out an error that indicates which taxonID did not have occurrence map data
# the function then creates a raster and writes out each file with the taxonID in the name
stack1 = NULL
for(i in 1:length(webpage)) {
if(class(try(raster(webpage[i]), silent = T)) == "try-error") {print(paste('could not find webpage with taxonID', taxonID_small[i], sep=' '));
next}
rastR = raster(webpage[i])
# stack1 = stack(rastR, stack1)
  writeRaster(rastR, paste('rastR_', taxonID_small[i], sep=''), format = 'GTiff')
}
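# Possible follow-up (sketch): instead of the commented-out stack1 accumulation above, the
# written GeoTIFFs can be collected into a single RasterStack afterwards. Assumes at least
# one file was written (writeRaster adds the .tif extension for format = 'GTiff').
tif_files <- list.files(pattern = '^rastR_.*\\.tif$', full.names = TRUE)
if (length(tif_files) > 0) occurrence_stack <- stack(tif_files)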
|
/R/rastR_downloadR.R
|
no_license
|
jsneumann/GBIF
|
R
| false | false | 1,898 |
r
|
# Script to read in the Individual household electric power consumption Data Set from the
# UC Irvine Machine Learning Repository, recreate a line chart of the Global Active Power
# variable over the two days, and save it to a .png file. Assumes the data file is in the working directory.
# Load data. data are on lines 66638 to 69517
epc<-read.csv("household_power_consumption.txt",header=FALSE,sep=";",na.strings = "?",skip=66637,nrows=2880,
col.names=c("Date","Time","Global_active_power","Global_reactive_power","Voltage",
"Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3"))
#Add date/time variable.
epc$DateTime<-strptime(paste(epc$Date,epc$Time),"%d/%m/%Y %H:%M:%S")
# Launch graphics device.
png("plot2.png",width=480,height=480)
# Call plotting function.
with(epc,plot(DateTime,Global_active_power,type='l',xlab='',
ylab="Global Active Power (kilowatts)"))
# No additional annotation needed for plot2.
# Close graphics device.
dev.off()
|
/plot2.R
|
no_license
|
becca50/ExData_Plotting1
|
R
| false | false | 1,006 |
r
|
complete <- function(directory,id=1:332){
#get list of files in the directory
filelist = list.files(path = directory,pattern = "*.csv",full.names = T)
#get the number of files
nf = length(filelist)
#read all files in one data frame
data <- data.frame()
for(i in id){
data <- rbind(data,read.csv(filelist[i]))
}
#get nrows of data
nrows = nrow(data)
#testing head
head(data,n=10)
#testing tail
tail(data,n =5)
#get the data of the id
output <- data.frame(matrix(ncol = 2, nrow = 0))
x <- c("id", "nobs")
for (i in id){
res <- data[which(data[,"ID"]== i),]
res <-res[complete.cases(res),]
#res <-data[complete.cases(data[which(data[,"ID"] == i ),]),]
output <- rbind(output,c(i,nrow(res)))
}
#naming the cols
names(output) <- x
#final output of the function
output
}
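# Example call (sketch): assumes the monitor files live in a "specdata" directory under
# the working directory and are named "001.csv", "002.csv", ...; the directory name is
# only illustrative.
# complete("specdata", c(1, 2, 5))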
|
/Scripts/complete.R
|
no_license
|
Adjeiinfo/courseraDataScience
|
R
| false | false | 874 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ml_feature_count_vectorizer.R
\name{ft_count_vectorizer}
\alias{ft_count_vectorizer}
\alias{ml_vocabulary}
\title{Feature Transformation -- CountVectorizer (Estimator)}
\usage{
ft_count_vectorizer(
x,
input_col = NULL,
output_col = NULL,
binary = FALSE,
min_df = 1,
min_tf = 1,
vocab_size = 2^18,
uid = random_string("count_vectorizer_"),
...
)
ml_vocabulary(model)
}
\arguments{
\item{x}{A \code{spark_connection}, \code{ml_pipeline}, or a \code{tbl_spark}.}
\item{input_col}{The name of the input column.}
\item{output_col}{The name of the output column.}
\item{binary}{Binary toggle to control the output vector values.
If \code{TRUE}, all nonzero counts (after \code{min_tf} filter applied)
are set to 1. This is useful for discrete probabilistic models that
model binary events rather than integer counts. Default: \code{FALSE}}
\item{min_df}{Specifies the minimum number of different documents a
term must appear in to be included in the vocabulary. If this is an
integer greater than or equal to 1, this specifies the number of
documents the term must appear in; if this is a double in [0,1), then
this specifies the fraction of documents. Default: 1.}
\item{min_tf}{Filter to ignore rare words in a document. For each
document, terms with frequency/count less than the given threshold
are ignored. If this is an integer greater than or equal to 1, then
this specifies a count (of times the term must appear in the document);
if this is a double in [0,1), then this specifies a fraction (out of
the document's token count). Default: 1.}
\item{vocab_size}{Build a vocabulary that only considers the top
\code{vocab_size} terms ordered by term frequency across the corpus.
Default: \code{2^18}.}
\item{uid}{A character string used to uniquely identify the feature transformer.}
\item{...}{Optional arguments; currently unused.}
\item{model}{A \code{ml_count_vectorizer_model}.}
}
\value{
The object returned depends on the class of \code{x}.
\itemize{
\item \code{spark_connection}: When \code{x} is a \code{spark_connection}, the function returns a \code{ml_transformer},
a \code{ml_estimator}, or one of their subclasses. The object contains a pointer to
a Spark \code{Transformer} or \code{Estimator} object and can be used to compose
\code{Pipeline} objects.
\item \code{ml_pipeline}: When \code{x} is a \code{ml_pipeline}, the function returns a \code{ml_pipeline} with
the transformer or estimator appended to the pipeline.
\item \code{tbl_spark}: When \code{x} is a \code{tbl_spark}, a transformer is constructed then
immediately applied to the input \code{tbl_spark}, returning a \code{tbl_spark}
}
\code{ml_vocabulary()} returns a vector of vocabulary built.
}
\description{
Extracts a vocabulary from document collections.
}
\details{
In the case where \code{x} is a \code{tbl_spark}, the estimator fits against \code{x}
to obtain a transformer, which is then immediately used to transform \code{x}, returning a \code{tbl_spark}.
}
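\examples{
\dontrun{
# Illustrative sketch (not an official package example): fit a CountVectorizer on a
# tokenized column and inspect the learned vocabulary. The data set and column names
# below are made up for the example.
sc <- spark_connect(master = "local")
docs <- copy_to(sc, data.frame(text = c("spark spark hello", "hello world")))
tokenized <- ft_tokenizer(docs, input_col = "text", output_col = "words")
cv_model <- ml_fit(
  ft_count_vectorizer(sc, input_col = "words", output_col = "features"),
  tokenized
)
ml_vocabulary(cv_model)
spark_disconnect(sc)
}
}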
\seealso{
See \url{https://spark.apache.org/docs/latest/ml-features.html} for
more information on the set of transformations available for DataFrame
columns in Spark.
Other feature transformers:
\code{\link{ft_binarizer}()},
\code{\link{ft_bucketizer}()},
\code{\link{ft_chisq_selector}()},
\code{\link{ft_dct}()},
\code{\link{ft_elementwise_product}()},
\code{\link{ft_feature_hasher}()},
\code{\link{ft_hashing_tf}()},
\code{\link{ft_idf}()},
\code{\link{ft_imputer}()},
\code{\link{ft_index_to_string}()},
\code{\link{ft_interaction}()},
\code{\link{ft_lsh}},
\code{\link{ft_max_abs_scaler}()},
\code{\link{ft_min_max_scaler}()},
\code{\link{ft_ngram}()},
\code{\link{ft_normalizer}()},
\code{\link{ft_one_hot_encoder_estimator}()},
\code{\link{ft_one_hot_encoder}()},
\code{\link{ft_pca}()},
\code{\link{ft_polynomial_expansion}()},
\code{\link{ft_quantile_discretizer}()},
\code{\link{ft_r_formula}()},
\code{\link{ft_regex_tokenizer}()},
\code{\link{ft_robust_scaler}()},
\code{\link{ft_sql_transformer}()},
\code{\link{ft_standard_scaler}()},
\code{\link{ft_stop_words_remover}()},
\code{\link{ft_string_indexer}()},
\code{\link{ft_tokenizer}()},
\code{\link{ft_vector_assembler}()},
\code{\link{ft_vector_indexer}()},
\code{\link{ft_vector_slicer}()},
\code{\link{ft_word2vec}()}
}
\concept{feature transformers}
|
/man/ft_count_vectorizer.Rd
|
permissive
|
sparklyr/sparklyr
|
R
| false | true | 4,403 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Group_specific_Var_AUC_estimation.R
\name{Group_specific_Var_AUC_estimation}
\alias{Group_specific_Var_AUC_estimation}
\title{Variance of the Area Under The Curve of Group-Specific Polynomial Marginal Dynamics}
\usage{
Group_specific_Var_AUC_estimation(
MEM_Pol_group,
time,
Groups = NULL,
method = "trapezoid",
Averaged = FALSE
)
}
\arguments{
\item{MEM_Pol_group}{A list with similar structure than the output provided by the function \link[DeltaAUCpckg]{MEM_Polynomial_Group_structure}.
A list containing: \tabular{ll}{
\code{Model_estimation} \tab either the variance-covariance matrix of the marginal (fixed) parameters (at least for the groups whose Variance of AUC is to estimate), or a list containing at least this matrix labeled \emph{'varFix'} (see \link[DeltaAUCpckg]{MEM_Polynomial_Group_structure} for details about the parameter order). \cr
\code{Model_features} \tab a list of at least 2 elements: \cr
\tab 1. \code{Groups} - a vector indicating the names of the groups whose variance of fixed parameters are given.\cr
\tab 2. \code{Marginal.dyn.feature} - a list summarizing the features of the marginal dynamics defined in the model: \cr
\tab \itemize{
\item \code{dynamic.type} - a character scalar indicating the chosen type of marginal dynamics. Options are 'polynomial' or 'spline'
\item \code{intercept} - a logical vector summarizing choices about global and group-specific intercepts, with (number of groups + 1) elements named ('global.intercept','group.intercept1', ..., 'group.interceptG') if G groups are defined in \code{MEM_Pol_group}. For each element of the vector, if TRUE, the corresponding intercept is included in the model.
If \code{dynamic.type} is defined as 'polynomial':
\item \code{polynomial.degree} - an integer vector indicating the degree of polynomial functions, one value for each group.
If \code{dynamic.type} is defined as 'spline':
\item \code{spline.degree} - an integer vector indicating the degree of B-spline curves, one for each group.
\item \code{knots} - a list of group-specific internal knots used to build B-spline basis (one numerical vector for each group) (see \link[splines]{bs} for more details).
\item \code{df} - a numerical vector of group-specific degrees of freedom used to build B-spline basis, (one for each group).
\item \code{boundary.knots} - a list of group-specific boundary knots used to build B-spline basis (one vector for each group) (see \link[splines]{bs} for more details).
} \cr
}}
\item{time}{a numerical vector of time points (x-axis coordinates) or a list of numerical vectors (with as many elements as the number of groups in \code{Groups}).}
\item{Groups}{a vector indicating the names of the groups belonging to the set of groups involved in \code{MEM_Pol_group} for which we want to estimate the AUC (a subset or the entire set of groups involved in the model can be considered). If NULL (default), the AUC for all the groups involved in the MEM is calculated.}
\item{method}{a character scalar indicating the interpolation method to use to estimate the AUC. Options are 'trapezoid' (default), 'lagrange' and 'spline'. In this version, the 'spline' interpolation is implemented with the "not-a-knot" spline boundary conditions.}
\item{Averaged}{a logical scalar. If TRUE, the function returns the normalized AUC (nAUC), computed as the AUC divided by the range of the calculation time. If FALSE (default), the classic AUC is calculated.}
}
\value{
A numerical vector containing the estimation of the variance of the AUC (or nAUC) for each group defined in the \code{Groups} vector.
}
\description{
This function calculates the variance of the area under the curve of marginal dynamics modeled by group-structured polynomials or B-spline curves in Mixed-Effects models
}
\examples{
# Download of data
data("HIV_Simu_Dataset_Delta01_cens")
data <- HIV_Simu_Dataset_Delta01_cens
# Change factors in character vectors
data$id <- as.character(data$id) ; data$Group <- as.character(data$Group)
# Example 1: We consider the variable 'MEM_Pol_Group' as the output of our function \link[DeltaAUCpckg]{MEM_Polynomial_Group_structure}
MEM_estimation <- MEM_Polynomial_Group_structure(y=data$VL,x=data$time,Group=data$Group,Id=data$id,Cens=data$cens)
Var_AUC_estimation <- Group_specific_Var_AUC_estimation(MEM_Pol_group=MEM_estimation,time=list(unique(data$time[which(data$Group == "Group1")]),
unique(data$time[which(data$Group == "Group2")])))
# Example 2: We consider results of MEM estimation from another source. We have to build the variable 'MEM_Pol_group' with the right structure
# We build the variable 'MEM_Pol_group.1' with the results of MEM estimation obtained for two groups
Covariance_Matrix_1 <- matrix(rnorm(7*7,mean=0,sd=0.01),ncol=7,nrow=7) # Generation of random matrix
Covariance_Matrix_1 <- Covariance_Matrix_1 \%*\% t(Covariance_Matrix_1) # Transform the matrix into a symmetric one
MEM_Pol_group.1 <- list(Model_estimation=Covariance_Matrix_1, # Covariance matrix of fixed effects for all parameters
Model_features=list(Groups=c("Group1","Group2"),
Marginal.dyn.feature=list(dynamic.type="polynomial",intercept=c(global.intercept=TRUE,group.intercept1=FALSE,group.intercept2=FALSE),polynomial.degree=c(3,3))))
Var_AUC_estimation_G1.1 <- Group_specific_Var_AUC_estimation(MEM_Pol_group.1,time=unique(data$time[which(data$Group == "Group1")]),Groups=c("Group1"))
# We build the variable 'MEM_Pol_group.2' with the results of MEM estimation obtained only for the group of interest (extraction)
Covariance_Matrix_2 <- matrix(rnorm(4*4,mean=0,sd=0.01),ncol=4,nrow=4) # Generation of random matrix
Covariance_Matrix_2 <- Covariance_Matrix_2 \%*\% t(Covariance_Matrix_2) # Transform the matrix into a symmetric one
MEM_Pol_group.2 <- list(Model_estimation=Covariance_Matrix_2, # Covariance matrix of fixed effects, only for the parameters from Group1
Model_features=list(Groups=c("Group1"),
Marginal.dyn.feature=list(dynamic.type="polynomial",intercept=c(global.intercept=TRUE,group.intercept1=FALSE),polynomial.degree=c(3))))
Var_AUC_estimation_G1.2 <- Group_specific_Var_AUC_estimation(MEM_Pol_group=MEM_Pol_group.2,time=unique(data$time[which(data$Group == "Group1")]))
}
\seealso{
\code{\link[splines]{bs}},
\code{\link[DeltaAUCpckg]{MEM_Polynomial_Group_structure}}
}
|
/man/Group_specific_Var_AUC_estimation.Rd
|
permissive
|
marie-alexandre/DeltaAUCpckg
|
R
| false | true | 6,762 |
rd
|
## makeCacheMatrix creates a special matrix object, and then cacheSolve
## calculates the inverse of the matrix.
## If the matrix inverse has already been calculated, it will instead
## find it in the cache and return it, and not calculate it again.
makeCacheMatrix <- function(x = matrix()) {
inv_x <- NULL
set <- function(y) {
x <<- y
inv_x <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv_x <<- inverse
getinverse <- function() inv_x
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## The function cacheSolve returns the inverse of a matrix A created with
## the makeCacheMatrix function.
## If the cached inverse is available, cacheSolve retrieves it, while if
## not, it computes, caches, and returns it.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv_x <- x$getinverse()
if (!is.null(inv_x)) {
message("getting cached inverse matrix")
return(inv_x)
} else {
inv_x <- solve(x$get(), ...)
x$setinverse(inv_x)
return(inv_x)
}
}
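## Illustrative usage (not part of the original assignment): run interactively to see the
## caching behaviour; the 2x2 matrix below is just an arbitrary invertible example.
if (interactive()) {
    m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
    print(cacheSolve(m))  ## first call computes and caches the inverse
    print(cacheSolve(m))  ## second call returns the cached inverse (prints a message)
}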
|
/cachematrix.R
|
no_license
|
liucong2016/cachematrix
|
R
| false | false | 1,118 |
r
|
## Course project of Getting and Cleaning Data (3rd Week)
## 1. Merges the training and the test sets to create one data set.
## Read activity labels
activityLabels <- read.table("./UCI HAR Dataset/activity_labels.txt")
colnames(activityLabels) <- c("activity_id","activity_code")
## Read features
features <- read.table("./UCI HAR Dataset/features.txt")
## Read Test Set
xTest <- read.table("./UCI HAR Dataset/test/X_test.txt")
yTest <- read.table("./UCI HAR Dataset/test/y_test.txt")
subjTest <- read.table("./UCI HAR Dataset/test/subject_test.txt")
## Read Train Set
xTrain <- read.table("./UCI HAR Dataset/train/X_train.txt")
yTrain <- read.table("./UCI HAR Dataset/train/y_train.txt")
subjTrain <- read.table("./UCI HAR Dataset/train/subject_train.txt")
## The training and test files of the same type are joined before starting
## work. We always add training files to test files, in the same order:
## type-X files contain features values
xTotal <- rbind(xTest,xTrain)
colnames(xTotal) <- features$V2 # 561 columns
## type-Y files contain activity id (from 1 to 6)
yTotal <- rbind(yTest,yTrain)
colnames(yTotal) <-"activity_id"
## subject files contain subject id (from 1 to 30)
subjTotal <- rbind( subjTest, subjTrain)
colnames(subjTotal) <-"subject_id"
dataSet <- cbind(xTotal,yTotal,subjTotal)
## 2. Extracts only the measurements on the mean and standard deviation for
## each measurement.
## We select the columns whose names contain "mean" or "std" and the two
## last columns (activity_id and subject_id)
meanAndStdSet <- dataSet[,c(grep("mean|std",features$V2),
ncol(dataSet)-1,ncol(dataSet))]
## 3. Uses descriptive activity names to name the activities in the data set.
## Merge activityLabels and meanAndStdSet by activity_id
meanAndStdSetActiv <- merge (meanAndStdSet, activityLabels,
by.x="activity_id", by.y="activity_id", all=TRUE)
## 4. Appropriately labels the data set with descriptive variable names.
## Data.frame already has column names
names(meanAndStdSetActiv)
## 5. From the data set in step 4, creates a second, independent tidy data
## set with the average of each variable for each activity and each subject.
## First delete 'activity_id' column
meanAndStdSetActiv <- meanAndStdSetActiv[,(2:ncol(meanAndStdSetActiv))]
## create groups by subject_id and activity_code
library(reshape2)
meltSet <- melt(meanAndStdSetActiv,id = c("subject_id","activity_code"))
## Apply mean for each variable by subject_id and activity_code
meltSetMean <- dcast(meltSet,subject_id + activity_code ~ variable,mean)
tidyData <- melt(meltSetMean,id = c("subject_id","activity_code"))
## Write .txt file
write.table(tidyData,"./UCI HAR Dataset/tidyDataSet.txt", row.names=FALSE)
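## Optional sanity check (illustrative, not required by the assignment): read the exported
## file back with header = TRUE to confirm the tidy structure
if (interactive()) {
    tidyCheck <- read.table("./UCI HAR Dataset/tidyDataSet.txt", header = TRUE)
    str(tidyCheck)
}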
|
/run_analysis.R
|
no_license
|
tania-maldonado/Getting-and-Cleaning-Data-Project
|
R
| false | false | 3,137 |
r
|
library(gtools)
K <- 10 # num of topics
M <- 100 # num of documents
V <- 144 # num of words
set.seed(123)
alpha <- rep(0.8, K)
beta <- rep(0.2, V)
theta <- rdirichlet(M, alpha)
phi <- rdirichlet(K, beta)
z.for.doc <- apply(theta, 1, which.max) # for Naive Bayes
num.word.v <- round(exp(rnorm(M, 3.0, 0.3))) # data1: low number of words per doc
# num.word.v <- round(exp(rnorm(M, 5.0, 0.3))) # data2: high number of words per doc
w.1 <- data.frame() # data type 1: stan manual's w
w.2 <- data.frame() # data type 2: counted w
for(m in 1:M){
z <- sample(K, num.word.v[m], prob=theta[m,], replace=T)
v <- sapply(z, function(k) sample(V, 1, prob=phi[k,]))
w.1 <- rbind(w.1, data.frame(Doc=m, Word=v))
w.2 <- rbind(w.2, data.frame(Doc=m, table(Word=v)))
}
w.2$Word <- as.integer(as.character(w.2$Word))
N.1 <- nrow(w.1) # total word instances
N.2 <- nrow(w.2) # total word instances
offset.1 <- t(sapply(1:M, function(m){ range(which(m==w.1$Doc)) }))
offset.2 <- t(sapply(1:M, function(m){ range(which(m==w.2$Doc)) }))
bow <- matrix(0, M, V) # data type 3: bag-of-words
for(n in 1:N.2)
bow[w.2$Doc[n], w.2$Word[n]] <- w.2$Freq[n]
library(rstan)
data <- list(
K=K,
M=M,
V=V,
N=N.1,
Z=z.for.doc,
W=w.1$Word,
Offset=offset.1,
Alpha=rep(1, K),
Beta=rep(0.5, V)
)
fit <- stan(
file='01_sample_nb.stan',
data=data,
iter=1000,
chains=1
)
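# Optional inspection of the fit (illustrative only; the parameter names and output depend
# on what '01_sample_nb.stan' declares, which is not shown in this script)
print(fit, probs = c(0.025, 0.5, 0.975))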
|
/cao/01_sample_nb.R
|
no_license
|
oleglr/forecast
|
R
| false | false | 1,403 |
r
|
## Loading of packages
library(fEcofin)
library(fExtremes)
## Data handling
data(DowJones30)
DJ <- timeSeries(DowJones30[, -1],
charvec = as.character(DowJones30[, 1]))
BALoss <- -1.0 * returns(DJ[, "BA"], percentage = TRUE,
trim = TRUE)
## MRL-plot
mrlPlot(BALoss, umin = -10, umax = 10)
## GPD
BAFit <- gpdFit(BALoss, u = 3)
## Diagnostic plots
plot(BAFit)
## Risk measures
gpdRiskMeasures(BAFit, prob = c(0.95, 0.99, 0.995))
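## Illustrative sensitivity check (not part of the original listing): refit with a higher
## threshold and compare the implied risk measures
BAFit4 <- gpdFit(BALoss, u = 4)
gpdRiskMeasures(BAFit4, prob = c(0.95, 0.99, 0.995))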
|
/FRAPO/7.3_POT-GPD for the losses of Boeing.r
|
no_license
|
highandhigh/RPractice
|
R
| false | false | 470 |
r
|
#' Fill in missing values
#'
#' Fills with NA missing values in a pivot table value array.
#'
#' A pivot table should only contain label rows and columns, and an array of
#' values, usually numeric data.
#'
#' To correctly carry out this operation, the number of rows and columns that
#' contain labels must be defined, and the table must only contain the pivot
#' table rows and columns.
#'
#' @param pt A `pivot_table` object.
#'
#' @return A `pivot_table` object.
#'
#' @family pivot table transformation functions
#'
#' @examples
#' library(tidyr)
#'
#' pt <-
#' pt_m4 %>%
#' remove_top(1) %>%
#' define_labels(n_col = 2, n_row = 2) %>%
#' fill_values()
#'
#' pt <-
#' pt_ine2871 %>%
#' remove_top(6) %>%
#' remove_bottom(9) %>%
#' define_labels(n_col = 1, n_row = 2) %>%
#' fill_values()
#'
#' @export
fill_values <- function(pt) {
UseMethod("fill_values")
}
#' @rdname fill_values
#' @export
fill_values.pivot_table <- function(pt) {
rows <- (attr(pt, "n_row_labels") + 1):nrow(pt)
cols <- (attr(pt, "n_col_labels") + 1):ncol(pt)
pt[rows, cols] <-
apply(pt[rows, cols, drop = FALSE], 2, function(x)
dplyr::na_if(stringr::str_trim(x), ""))
pt
}
|
/R/pivot_table_fill_values.R
|
no_license
|
cran/flattabler
|
R
| false | false | 1,252 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ILCA.R
\name{ILCA}
\alias{ILCA}
\title{Iterative latent-class analysis}
\usage{
ILCA(dat, Q, seed.num = 5)
}
\arguments{
\item{dat}{A required binary item response matrix.}
\item{Q}{A required binary item and attribute association matrix.}
\item{seed.num}{seed number}
}
\value{
Estimated attribute profiles.
}
\description{
This function implements an iterative latent class analysis (ILCA; Jiang, 2019) approach to estimating attributes for cognitive diagnosis.
}
\examples{
ILCA(sim10GDINA$simdat, sim10GDINA$simQ)
}
\references{
Jiang, Z. (2019). Using the iterative latent-class analysis approach to improve attribute accuracy in diagnostic classification models. \emph{Behavior research methods}, 1-10.
}
\author{
{Zhehan Jiang, The University of Alabama}
}
|
/man/ILCA.Rd
|
no_license
|
cywongnorman/GDINA
|
R
| false | true | 845 |
rd
|
#####################################
# LINEAR REGRESSION #
# SUBMITTED BY : RUPA KADAMBI #
# ROLL NO : DDA1810283 #
#####################################
##############################################################################################################
# PRELIMINARY STEPS - DATA IMPORTS #
##############################################################################################################
# SET LIBRARY FOR PROJECT #
library(lubridate)
library(ggplot2)
library(dplyr)
library(MASS)
library(car)
library(stringr)
library(DataExplorer)
library(purrr)
library(tidyr)
library(reshape2)
setwd("C:/Users/rupak/Documents/Training/Course3/assignment")
#IMPORT DATA INTO R #
carprice_data <- read.csv(file="CarPrice_Assignment.csv", head = TRUE, sep = ",")
##############################################################################################################
# PRELIMINARY STEPS - DATA CLEANING # #
##############################################################################################################
View(carprice_data)
str(carprice_data)
summary(carprice_data)
#CHECK FOR MISSING AND DUPLICATE VALUES
missing_data <- plot_missing(carprice_data)
duplicates <- sum(duplicated(carprice_data))
#FIX DUPLICATE OR MISSPELT CAR NAMES
carprice_data$CarName <- gsub("toyouta", "toyota", carprice_data$CarName)
carprice_data$CarName <- gsub("vw", "volkswagen", carprice_data$CarName)
carprice_data$CarName <- gsub("vokswagen", "volkswagen", carprice_data$CarName)
carprice_data$CarName <- gsub("maxda", "mazda", carprice_data$CarName)
carprice_data$CarName <- gsub("porcshce", "porsche", carprice_data$CarName)
###############################################################################################################
# EXPLORATORY DATA ANALYSIS #
###############################################################################################################
#SPLIT THE CAR BRAND NAME FROM THE COMBINED COLUMN
carprice_data$car_brand <- toupper(word(carprice_data$CarName,1))
table(carprice_data$car_brand)
##DUMMY VARIABLE CREATION##
#CONVERT 2 LEVEL FACTOR VARIABLES INTO NUMERIC VARIABLES WITH BINARY VALUES#
levels(carprice_data$fueltype)<-c(1,0)
carprice_data$fueltype <- as.numeric(levels(carprice_data$fueltype))[carprice_data$fueltype]
levels(carprice_data$aspiration)<-c(1,0)
carprice_data$aspiration <- as.numeric(levels(carprice_data$aspiration))[carprice_data$aspiration]
levels(carprice_data$doornumber)<-c(1,0)
carprice_data$doornumber <- as.numeric(levels(carprice_data$doornumber))[carprice_data$doornumber]
levels(carprice_data$enginelocation)<-c(1,0)
carprice_data$enginelocation <- as.numeric(levels(carprice_data$enginelocation))[carprice_data$enginelocation]
str(carprice_data)
#CONVERT 2+ LEVEL FACTOR VARIABLES INTO NUMERIC VARIABLES WITH BINARY VALUES
carbody_1 <- data.frame(model.matrix( ~carbody, data = carprice_data))
carbody_1 <- carbody_1[,-1]
drivewheel_1 <- data.frame(model.matrix( ~drivewheel, data = carprice_data))
drivewheel_1 <- drivewheel_1[,-1]
enginetype_1 <- data.frame(model.matrix( ~enginetype, data = carprice_data))
enginetype_1 <- enginetype_1[,-1]
cyl_num_1 <- data.frame(model.matrix( ~cylindernumber, data = carprice_data))
cyl_num_1 <- cyl_num_1[,-1]
fuelsystem_1 <- data.frame(model.matrix( ~fuelsystem, data = carprice_data))
fuelsystem_1 <- fuelsystem_1[,-1]
carbrand_1 <- data.frame(model.matrix( ~car_brand, data = carprice_data))
carbrand_1 <- carbrand_1[,-1]
#COMBINE ALL DUMMY VARIABLES WITH THE ORIGINAL DATAFRAME
subset_carprice <- subset(carprice_data,
select = -c(carbody, drivewheel, enginetype, cylindernumber, fuelsystem, car_brand))
carprice_combined <- cbind(subset_carprice, carbody_1, drivewheel_1, enginetype_1, cyl_num_1, fuelsystem_1, carbrand_1)
#CONDUCT OUTLIER ANALYSIS ON NUMERIC VARIABLES
boxplot(carprice_combined$wheelbase)
boxplot(carprice_combined$carlength)
boxplot(carprice_combined$carwidth)
boxplot(carprice_combined$carheight)
boxplot(carprice_combined$curbweight)
boxplot(carprice_combined$enginesize)
boxplot(carprice_combined$boreratio)
boxplot(carprice_combined$stroke)
boxplot(carprice_combined$compressionratio)
boxplot(carprice_combined$horsepower)
boxplot(carprice_combined$peakrpm)
boxplot(carprice_combined$citympg)
boxplot(carprice_combined$highwaympg)
boxplot(carprice_combined$price)
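#OPTIONAL (ILLUSTRATIVE) NUMERIC VIEW OF THE SAME OUTLIER CHECK: QUANTILES OF THE NUMERIC
#COLUMNS INSPECTED WITH THE BOXPLOTS ABOVE
num_cols <- c("wheelbase","carlength","carwidth","carheight","curbweight","enginesize",
              "boreratio","stroke","compressionratio","horsepower","peakrpm",
              "citympg","highwaympg","price")
sapply(carprice_combined[, num_cols], quantile, probs = c(0.01, 0.25, 0.5, 0.75, 0.99))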
###############################################################################################################
# LINEAR REGRESSION MODEL BUILDING #
###############################################################################################################
# CREATE TRAINING AND TEST DATASETS
carprice_model <- subset(carprice_combined, select = -c(CarName, car_ID))
set.seed(100)
# GENERATE ROW INDICES FOR 70% OF RECORDS
trainindices= sample(1:nrow(carprice_model), 0.7*nrow(carprice_model))
#GENERATE TRAINING DATA
traincar = carprice_model[trainindices,]
testcar = carprice_model[-trainindices,]
#BUILD FIRST MODEL WITH ALL VARIABLES
model_1 <-lm(price~.,data=traincar)
summary(model_1)
#USE STEPAIC TO CONDUCT MULTIPLE ITERATIONS OF FORWARD AND BACKWARD SELECTION AND OBTAIN THE FORMULA FOR
#CONSECUTIVE MODELS
step <- stepAIC(model_1, direction="both")
step
model_2 <- lm(formula = price ~ aspiration + enginelocation + carwidth +
curbweight + enginesize + boreratio + stroke + horsepower +
peakrpm + carbodyhardtop + carbodyhatchback + carbodysedan +
carbodywagon + drivewheelrwd + enginetypedohcv + enginetypel +
enginetypeohc + enginetypeohcf + enginetyperotor + cylindernumberfive +
cylindernumberthree + fuelsystem2bbl + fuelsystemmpfi + car_brandBMW +
car_brandBUICK + car_brandDODGE + car_brandHONDA + car_brandJAGUAR +
car_brandMAZDA + car_brandMITSUBISHI + car_brandNISSAN +
car_brandPLYMOUTH + car_brandRENAULT + car_brandSAAB + car_brandTOYOTA +
car_brandVOLKSWAGEN, data = traincar)
summary(model_2)
#DETERMINE VIF AND OBSERVE CORRELATION VALUES
vif(model_2)
#OBSERVE HIGH VIF VALUE VARIABLES (>5) AND NOTE THE CORRESPONDING P VALUES FOR THE VARIABLES. DROP
#THE ONES THAT ARE NOT SIGNIFICANT( p> 0.05) AND BUILD THE NEXT MODEL
model_3 <- lm(formula = price ~ aspiration + enginelocation + carwidth +
curbweight + enginesize + boreratio + stroke +
peakrpm + carbodyhardtop + carbodyhatchback + carbodysedan +
carbodywagon + drivewheelrwd + enginetypedohcv + enginetypel +
enginetypeohcf + enginetyperotor + cylindernumberfive +
cylindernumberthree + fuelsystem2bbl + car_brandBMW +
car_brandBUICK + car_brandDODGE + car_brandHONDA + car_brandJAGUAR +
car_brandMAZDA + car_brandMITSUBISHI + car_brandNISSAN +
car_brandPLYMOUTH + car_brandRENAULT + car_brandSAAB + car_brandTOYOTA +
car_brandVOLKSWAGEN, data = traincar)
summary(model_3)
#DETERMINE VIF AND OBSERVE CORRELATION VALUES
vif(model_3)
#REPEAT ITERATIONS OF ABOVE VARIABLE EXCLUSION AND MODEL BUILDING
model_4 <- lm(formula = price ~ aspiration + enginelocation + carwidth + enginesize + boreratio + stroke +
peakrpm + carbodyhardtop + carbodyhatchback + carbodysedan +
carbodywagon + drivewheelrwd + enginetypedohcv + enginetypel +
enginetypeohcf + enginetyperotor + cylindernumberfive +
cylindernumberthree + fuelsystem2bbl + car_brandBMW +
car_brandBUICK + car_brandDODGE + car_brandHONDA + car_brandJAGUAR +
car_brandMAZDA + car_brandMITSUBISHI + car_brandNISSAN +
car_brandPLYMOUTH + car_brandRENAULT + car_brandSAAB + car_brandTOYOTA +
car_brandVOLKSWAGEN, data = traincar)
summary(model_4)
vif(model_4)
#THE REMAINING VARIABLES WITH HIGH VIF VALUES HAVE LOW P VALUES. HOWEVER, THE CARBODY TYPE VARIABLES ARE LESS
#SIGNIFICANT THAN THE OTHER VARIABLES, SO DROP THESE AND CONTINUE
model_5 <- lm(formula = price ~ aspiration + enginelocation + carwidth + enginesize + boreratio + stroke +
peakrpm + carbodyhardtop + carbodyhatchback + drivewheelrwd + enginetypedohcv + enginetypel +
enginetypeohcf + enginetyperotor + cylindernumberfive +
cylindernumberthree + fuelsystem2bbl + car_brandBMW +
car_brandBUICK + car_brandDODGE + car_brandHONDA + car_brandJAGUAR +
car_brandMAZDA + car_brandMITSUBISHI + car_brandNISSAN +
car_brandPLYMOUTH + car_brandRENAULT + car_brandSAAB + car_brandTOYOTA +
car_brandVOLKSWAGEN, data = traincar)
summary(model_5)
vif(model_5)
# REMOVE VARIABLES WITH HIGH P VALUES THAT ARE NOT SIGNIFICANT
model_6 <- lm(formula = price ~ aspiration + enginelocation + carwidth + enginesize +
peakrpm +
enginetypeohcf + enginetyperotor + car_brandBMW +
car_brandBUICK + car_brandDODGE + car_brandHONDA + car_brandJAGUAR +
car_brandMAZDA + car_brandMITSUBISHI + car_brandNISSAN +
car_brandPLYMOUTH + car_brandRENAULT + car_brandTOYOTA +
car_brandVOLKSWAGEN, data = traincar)
summary(model_6)
vif(model_6)
model_7 <- lm(formula = price ~ aspiration + enginelocation + carwidth + enginesize +
peakrpm +
enginetypeohcf + enginetyperotor + car_brandBMW +
car_brandBUICK + car_brandDODGE + car_brandHONDA +
car_brandMAZDA + car_brandMITSUBISHI + car_brandNISSAN +
car_brandPLYMOUTH + car_brandRENAULT + car_brandTOYOTA ,data = traincar)
summary(model_7)
vif(model_7)
model_8 <- lm(formula = price ~ aspiration + enginelocation + carwidth + enginesize +
peakrpm + enginetyperotor + car_brandBMW +
car_brandBUICK + car_brandDODGE + car_brandHONDA +
car_brandMAZDA + car_brandMITSUBISHI + car_brandNISSAN +
car_brandPLYMOUTH + car_brandRENAULT + car_brandTOYOTA ,data = traincar)
summary(model_8)
vif(model_8)
model_9 <- lm(formula = price ~ aspiration + enginelocation + carwidth + enginesize +
peakrpm + enginetyperotor + car_brandBMW +
car_brandBUICK + car_brandDODGE + car_brandMITSUBISHI + car_brandNISSAN +
car_brandPLYMOUTH + car_brandRENAULT + car_brandTOYOTA ,data = traincar)
summary(model_9)
vif(model_9)
model_10 <- lm(formula = price ~ aspiration + enginelocation + carwidth + enginesize +
peakrpm + enginetyperotor + car_brandBMW +
car_brandBUICK + car_brandMITSUBISHI ,data = traincar)
summary(model_10)
vif(model_10)
model_11 <- lm(formula = price ~ aspiration + enginelocation + carwidth + enginesize +
peakrpm + enginetyperotor + car_brandBMW +
car_brandBUICK ,data = traincar)
summary(model_11)
vif(model_11)
model_12 <- lm(formula = price ~ enginelocation + carwidth + enginesize +
peakrpm + enginetyperotor + car_brandBMW +
car_brandBUICK ,data = traincar)
summary(model_12)
vif(model_12)
model_13 <- lm(formula = price ~ enginelocation + carwidth + enginesize +
peakrpm + car_brandBMW +
car_brandBUICK ,data = traincar)
summary(model_13)
vif(model_13)
#FINALLY ACCEPTED THE MODEL ABOVE WITH LOW VIF VALUES AND LOW P VALUES, AFTER MULTIPLE ITERATIONS ABOVE
#PREDICT THE RESULTS IN TEST
predict_price <- predict(model_13, testcar[,-1])
testcar$est_price <- predict_price
#CHECK CORRELATION IN TEST DATASET
r<- cor(testcar$price, testcar$est_price)
rsquared <- cor(testcar$price, testcar$est_price)^2
rsquared
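#OPTIONAL (ILLUSTRATIVE) ERROR METRICS ON THE TEST SET, COMPUTED WITH BASE R ONLY
rmse <- sqrt(mean((testcar$price - testcar$est_price)^2))
mape <- mean(abs(testcar$price - testcar$est_price)/testcar$price)
rmse
mape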
#THE RSQUARE VALUES ARE HIGH IN BOTH TRAINING AND TEST DATA. PLOT THE PREDICTED AND ACTUAL PRICES
testplot <- testcar
testplot$id_no <-rownames(testplot)
testplot %>%
gather(key, value, price, est_price) %>%
ggplot(aes(x=id_no, y = value, group = 1, colour = key)) + geom_line()
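#OPTIONAL (ILLUSTRATIVE) BASE-R VIEW OF THE SAME COMPARISON: ACTUAL VS PREDICTED PRICE
plot(testcar$price, testcar$est_price, xlab="Actual price", ylab="Predicted price")
abline(0, 1) #POINTS NEAR THE 45-DEGREE LINE INDICATE ACCURATE PREDICTIONS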
############################################################################################################
# MODELLING SUMMARY #
############################################################################################################
#The model built above has low VIF values (less than 5) and high statistical significance (p value < 0.05)
#The RSQUARE AND ADJUSTED RSQUARE values are very close, so there are no redundant variables
#The RSQUARE value in the test data is also high, and the predicted vs actual prices of the cars are close
#in most cases. Hence the model seems to be a reliable one that reflects the correlation between variables.
#The independent variables that affect the price of the cars can be seen in the final model and the
#same can be communicated to the marketing and business teams to take appropriate decisions.
|
/assignment6_linear_regression.R
|
no_license
|
rkadambi/PGDDS---R-Programming
|
R
| false | false | 14,176 |
r
|
\name{qat_save_boot_distribution_1d}
\alias{qat_save_boot_distribution_1d}
\title{Produce a savelist from a resultlist for a Boot Distribution Test}
\description{
This function takes the results produced by qat\_analyse\_boot\_distribution\_1d and constructs a savelist, which may be used to produce a netCDF output.}
\usage{
qat_save_boot_distribution_1d(resultlist_part, baseunit = "")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{resultlist_part}{A list with the results of the check}
\item{baseunit}{The unit of the original measurement vector}
}
\details{
This function takes the resultlist and transfers the content to a newly organized list. This list also contains additional information, which helps to generate an output such as a netCDF file.}
\value{
Returns a savelist with the content of the resultlist.
}
\author{Andre Duesterhus}
\seealso{\code{\link{qat_call_save_boot_distribution}}, \code{\link{qat_run_workflow_save}}}
\examples{
vec <- rnorm(1000)
result <- list(result=qat_analyse_boot_distribution_1d(vec, 1000))
savelist <- qat_save_boot_distribution_1d(result)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{utilities}
|
/man/qat_save_boot_distribution_1d.Rd
|
no_license
|
cran/qat
|
R
| false | false | 1,229 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/weighted_posteriors.R
\name{weighted_posteriors}
\alias{weighted_posteriors}
\alias{weighted_posteriors.data.frame}
\alias{weighted_posteriors.stanreg}
\alias{weighted_posteriors.brmsfit}
\alias{weighted_posteriors.blavaan}
\alias{weighted_posteriors.BFBayesFactor}
\title{Generate posterior distributions weighted across models}
\usage{
weighted_posteriors(..., prior_odds = NULL, missing = 0, verbose = TRUE)
\method{weighted_posteriors}{data.frame}(..., prior_odds = NULL, missing = 0, verbose = TRUE)
\method{weighted_posteriors}{stanreg}(
...,
prior_odds = NULL,
missing = 0,
verbose = TRUE,
effects = c("fixed", "random", "all"),
component = c("conditional", "zi", "zero_inflated", "all"),
parameters = NULL
)
\method{weighted_posteriors}{brmsfit}(
...,
prior_odds = NULL,
missing = 0,
verbose = TRUE,
effects = c("fixed", "random", "all"),
component = c("conditional", "zi", "zero_inflated", "all"),
parameters = NULL
)
\method{weighted_posteriors}{blavaan}(
...,
prior_odds = NULL,
missing = 0,
verbose = TRUE,
effects = c("fixed", "random", "all"),
component = c("conditional", "zi", "zero_inflated", "all"),
parameters = NULL
)
\method{weighted_posteriors}{BFBayesFactor}(
...,
prior_odds = NULL,
missing = 0,
verbose = TRUE,
iterations = 4000
)
}
\arguments{
\item{...}{Fitted models (see details), all fit on the same data, or a single
\code{BFBayesFactor} object.}
\item{prior_odds}{Optional vector of prior odds for the models compared to
the first model (or the denominator, for \code{BFBayesFactor} objects). For
\code{data.frame}s, this will be used as the basis of weighting.}
\item{missing}{An optional numeric value to use if a model does not contain a
parameter that appears in other models. Defaults to 0.}
\item{verbose}{Toggle off warnings.}
\item{effects}{Should results for fixed effects, random effects or both be
returned? Only applies to mixed models. May be abbreviated.}
\item{component}{Should results for all parameters, parameters for the
conditional model or the zero-inflated part of the model be returned? May
be abbreviated. Only applies to \pkg{brms}-models.}
\item{parameters}{Regular expression pattern that describes the parameters
that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are
filtered by default, so only parameters that typically appear in the
\code{summary()} are returned. Use \code{parameters} to select specific parameters
for the output.}
\item{iterations}{For \code{BayesFactor} models, how many posterior samples to draw.}
}
\value{
A data frame with posterior distributions (weighted across models).
}
\description{
Extract posterior samples of parameters, weighted across models. Weighting is
done by comparing posterior model probabilities, via \code{\link[=bayesfactor_models]{bayesfactor_models()}}.
}
\details{
Note that across models some parameters might play different roles. For
example, the parameter \code{A} plays a different role in the model \code{Y ~ A + B}
(where it is a main effect) than it does in the model \code{Y ~ A + B + A:B}
(where it is a simple effect). In many cases centering of predictors (mean
subtracting for continuous variables, and effects coding via \code{contr.sum} or
orthonormal coding via \code{\link{contr.equalprior_pairs}} for factors) can reduce this
issue. In any case you should be mindful of this issue.
\cr\cr
See \code{\link[=bayesfactor_models]{bayesfactor_models()}} details for more info on passed models.
\cr\cr
Note that for \code{BayesFactor} models, posterior samples cannot be generated
from intercept only models.
\cr\cr
This function is similar in function to \code{brms::posterior_average}.
}
\note{
For \verb{BayesFactor < 0.9.12-4.3}, in some instances there might be
some problems of duplicate columns of random effects in the resulting data
frame.
}
\examples{
\donttest{
if (require("rstanarm") && require("see")) {
stan_m0 <- stan_glm(extra ~ 1,
data = sleep,
family = gaussian(),
refresh = 0,
diagnostic_file = file.path(tempdir(), "df0.csv")
)
stan_m1 <- stan_glm(extra ~ group,
data = sleep,
family = gaussian(),
refresh = 0,
diagnostic_file = file.path(tempdir(), "df1.csv")
)
res <- weighted_posteriors(stan_m0, stan_m1)
plot(eti(res))
}
## With BayesFactor
if (require("BayesFactor")) {
extra_sleep <- ttestBF(formula = extra ~ group, data = sleep)
wp <- weighted_posteriors(extra_sleep)
describe_posterior(extra_sleep, test = NULL)
describe_posterior(wp$delta, test = NULL) # also considers the null
}
## weighted prediction distributions via data.frames
if (require("rstanarm")) {
m0 <- stan_glm(
mpg ~ 1,
data = mtcars,
family = gaussian(),
diagnostic_file = file.path(tempdir(), "df0.csv"),
refresh = 0
)
m1 <- stan_glm(
mpg ~ carb,
data = mtcars,
family = gaussian(),
diagnostic_file = file.path(tempdir(), "df1.csv"),
refresh = 0
)
# Predictions:
pred_m0 <- data.frame(posterior_predict(m0))
pred_m1 <- data.frame(posterior_predict(m1))
BFmods <- bayesfactor_models(m0, m1)
wp <- weighted_posteriors(pred_m0, pred_m1,
prior_odds = as.numeric(BFmods)[2]
)
# look at first 5 prediction intervals
hdi(pred_m0[1:5])
hdi(pred_m1[1:5])
hdi(wp[1:5]) # between, but closer to pred_m1
}
}
}
\references{
\itemize{
\item Clyde, M., Desimone, H., & Parmigiani, G. (1996). Prediction via
orthogonalized model mixing. Journal of the American Statistical
Association, 91(435), 1197-1208.
\item Hinne, M., Gronau, Q. F., van den Bergh, D., and Wagenmakers, E.
(2019, March 25). A conceptual introduction to Bayesian Model Averaging.
\doi{10.31234/osf.io/wgb64}
\item Rouder, J. N., Haaf, J. M., & Vandekerckhove, J. (2018). Bayesian
inference for psychology, part IV: Parameter estimation and Bayes factors.
Psychonomic bulletin & review, 25(1), 102-113.
\item van den Bergh, D., Haaf, J. M., Ly, A., Rouder, J. N., & Wagenmakers,
E. J. (2019). A cautionary note on estimating effect size.
}
}
\seealso{
\code{\link[=bayesfactor_inclusion]{bayesfactor_inclusion()}} for Bayesian model averaging.
}
|
/man/weighted_posteriors.Rd
|
no_license
|
cran/bayestestR
|
R
| false | true | 6,430 |
rd
|
# *********************************************************************************
# Project : Project Assignment 2 of Coursera class 'Exploratory Data Analysis'
#                  from Johns Hopkins.
# Author : Noor Ahmed
# Created Date : Sep 17, 2014
# Script Name : plot2.R
# Purpose : This script is to plot an answer to question #2
# ---------------------------------------------------------------------------------
# Question 2:
# Have total emissions from PM2.5 decreased in the Baltimore City, Maryland
# (fips == "24510") from 1999 to 2008? Use the base plotting system to make a plot
# answering this question.
# ---------------------------------------------------------------------------------
# setwd("E:/GitHubProjects/ExData_PM25/")
# Read the data file
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("./data/summarySCC_PM25.rds")
SCC <- readRDS("./data/Source_Classification_Code.rds")
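# SCC (the source classification table) is loaded here but not used in this plot.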
# subset Baltimore, Maryland '24510
BaltimoreCity <- subset(NEI, fips == "24510")
totalPM25ByYear <- tapply(BaltimoreCity$Emissions, BaltimoreCity$year, sum)
# plotting the graph
png("plot2.png") # Set the graphics device to png
plot(names(totalPM25ByYear), totalPM25ByYear, type="l",
xlab="Year", ylab=expression("Total" ~ PM[2.5] ~ "Emissions (tons)"),
main=expression("Total Baltimore City" ~ PM[2.5] ~ "Emissions by Year"))
dev.off()
# *********************************************************************************
|
/plot2.R
|
no_license
|
dsnoor/ExData_PM25
|
R
| false | false | 1,591 |
r
|
test_that("exposure control works", {
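  # For each exposure-control method, run adaptive assembly for 100 simulees and check that the
  # maximum item exposure rate stays at or below 0.35 (only method "NONE" is expected to exceed it).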
skip_on_cran()
skip_on_travis()
set.seed(1)
true_theta <- runif(100, -3.5, 3.5)
resp_bayes <- simResp(itempool_bayes, true_theta)
cfg <- createShadowTestConfig(
MIP = list(solver = "LPSOLVE"),
exposure_control = list(method = "NONE")
)
set.seed(1)
solution <- Shadow(cfg, constraints_bayes, true_theta, data = resp_bayes)
exposure_rate <- solution@exposure_rate[, 2]
expect_gt(
max(exposure_rate), 0.35
)
cfg <- createShadowTestConfig(
MIP = list(solver = "LPSOLVE"),
exposure_control = list(method = "ELIGIBILITY")
)
set.seed(1)
solution <- Shadow(cfg, constraints_bayes, true_theta, data = resp_bayes)
exposure_rate <- solution@exposure_rate[, 2]
expect_lte(
max(exposure_rate), 0.35
)
cfg <- createShadowTestConfig(
MIP = list(solver = "LPSOLVE"),
exposure_control = list(
method = "BIGM",
M = 100
)
)
set.seed(1)
solution <- Shadow(cfg, constraints_bayes, true_theta, data = resp_bayes)
exposure_rate <- solution@exposure_rate[, 2]
expect_lte(
max(exposure_rate), 0.35
)
cfg <- createShadowTestConfig(
MIP = list(solver = "LPSOLVE"),
exposure_control = list(method = "BIGM-BAYESIAN"),
interim_theta = list(method = "EB"))
set.seed(1)
solution <- Shadow(cfg, constraints_bayes, true_theta, data = resp_bayes)
exposure_rate <- solution@exposure_rate[, 2]
expect_lte(
max(exposure_rate), 0.35
)
cfg <- createShadowTestConfig(
MIP = list(solver = "LPSOLVE"),
exposure_control = list(method = "BIGM-BAYESIAN"),
interim_theta = list(method = "FB"))
set.seed(1)
solution <- Shadow(cfg, constraints_bayes, true_theta, data = resp_bayes)
exposure_rate <- solution@exposure_rate[, 2]
expect_lt(
max(exposure_rate), 0.35
)
})
|
/TestDesign/tests/testthat/test_exposure_methods.R
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false | false | 1,830 |
r
|
#####################################################
### Load Data source #######
#####################################################
source("C:/Users/e0g411k03dt/Desktop/Simulation P3 Mars 2017/Data/Data20092010.R")
source("C:/Users/e0g411k03dt/Desktop/Simulation P3 Mars 2017/Data/Data20112012.R")
#####################################################
###  Clean the workspace                      #######
#####################################################
rm(list=ls())
gc()
library(compiler)
enableJIT(1)
enableJIT(3)
library("fBasics")
#####################################################
### Load both data.contract and data.ret #######
#####################################################
load("DataPrice20092010.Rdata")
#####################################################
### Data set #######
#####################################################
Data.N=Data.N2
Data.N=Data.N2[-c(506,1462,1638,1645),]
#####################################################
### Source function to use #######
#####################################################
source("C:/Users/e0g411k03dt/Desktop/Simulation P3 Mars 2017/HN-GARCH/Gaussian Esscher returns-VIX/Loglik Return HN sous P.R")
source("C:/Users/e0g411k03dt/Desktop/Simulation P3 Mars 2017/HN-GARCH/Gaussian Esscher returns-VIX/Loglik VIX HN.R")
source("C:/Users/e0g411k03dt/Desktop/Simulation P3 Mars 2017/HN-GARCH/Gaussian Esscher returns-VIX/Loglik Mix Ret VIX HN.R")
#####################################################
### Parameters of the model #######
#####################################################
### a0=para_h[1]; a1=para_h[2]; gama=para_h[3]; b1= para_h[4] ; lamda0= para_h[5] ; ro=para_h[6]
### Initial parameter ####
para_h<-c(1.4579e-07, 1.28809e-06, 3.89124e+02, 6.564333e-01, 7.99959e+00, 9.36550e-01) ## RMSE2$rmse : 0.01548827 ## RMSE3$rmse : 0.01626734
para_h<-c(3.313135e-15, 1.366366e-06, 4.274284e+02, 7.324341e-01, 8.531595e+00, 9.95507e-01) ## RMSE2$rmse : 0.0154167 ## RMSE3$rmse : 0.01615634
para_h<-c(1.791603e-13, 1.366366e-06, 4.274284e+02, 7.323953e-01, 8.431636e+00, 9.9992e-01) ## RMSE2$rmse : 0.01542918 ## RMSE3$rmse : #0.0161758
### Solution
para_h<-c(1.791603e-11, 1.366366e-06, 3.274284e+02, 6.323953e-01, 8.141636e+00, 9.9999e-01) ## RMSE2$rmse : 0.01542918 ## RMSE3$rmse : #0.0161758
para_h<-c(1.302488e-11, 1.366365e-06, 4.275415e+02, 7.323849e-01, 8.431641e+00, 9.999201e-01) ## RMSE2$rmse : 0.0154288
para_h<-c(1.242516e-12, 1.366366e-06, 4.275503e+02, 7.323708e-01, 8.431643e+00,9.999247e-01)
para_h<-c(1.242516e-11, 1.366366e-06, 4.275503e+02, 7.323708e-01, 8.431643e+00,9.999247e-01) ## 0.01542917
para_h<-c( 5.881028e-07, 0.132e-08, 8.119e+00, 0.986, 0.080, 9.999247e-01) ## RMSE2$rmse : 0.02890324 RMSE3$rmse : 0.01818576
para_h<-c( 5.881028e-07, 0.132e-08, 8.119e+00, 0.986, 0.080, 9.999247e-01) ## RMSE2$rmse : 0.03124939 RMSE3$rmse : 0.01818576
para_h<-c( 5.881028e-04, 0.132e-06, 8.119e+01, 0.986, 0.060, 8.784247e-01) ## RMSE2$rmse : 0.02252047 RMSE3$rmse : 0.01818576
para_h<-c(5.282379e-08, 2.252557e-04 ,8.143868e+00 ,9.154310e-01 ,2.026485e-0, 8.784247e-01) ## RMSE2$rmse : 0.04819471 RMSE3$rmse : 0.01818576
para_h<-c(5.282379e-08, 2.252557e-04 ,8.143868e+00 ,9.154310e-01 ,2.026485e-03, 8.784247e-01) ## RMSE2$rmse : 0.04776296 RMSE3$rmse : 0.01818576
para_h<-c(5.282379e-08, 2.252557e-04 ,8.143868e+00 ,9.154310e-01 ,1.026485e-00, 8.784247e-01) ## RMSE2$rmse : 0.05033635 RMSE3$rmse : 0.01818576
para_h<-c(5.282379e-08, 2.252557e-04 ,8.143868e+00 ,9.154310e-01 ,1.826485e-00, 9.784247e-01) ## RMSE2$rmse : 0.05069971 RMSE3$rmse : 0.01818576
para_h<-c(5.282379e-08, 2.252557e-04 ,8.143868e+00 ,9.154310e-01 ,0.526485e-00, 9.784247e-01) ## RMSE2$rmse : 0.05106537 RMSE3$rmse : 0.01818576
para_h<-c(5.282379e-08, 2.252557e-04 ,8.143868e+00 ,9.154310e-01 ,0.626485e-00, 9.784247e-01) ## RMSE2$rmse : 0.05140848 RMSE3$rmse : 0.01818576
para_h<-c(5.282379e-08, 2.252557e-04 ,8.143868e+00 ,9.154310e-01 ,0.7426485e-00, 9.784247e-01) ## RMSE2$rmse :0.05127656 RMSE3$rmse : 0.01818576
para_h<-c(5.282379e-08, 2.252557e-04 ,8.143868e+00 ,9.154310e-01 ,0.6226485e-00, 9.784247e-01) ## RMSE2$rmse :0.05127656 RMSE3$rmse : 0.01818576
para_h<-c(2.176029e-13, 5.219194e-04, 8.622389e+00, 6.255603e-01, 0.7426485e-00,9.784247e-01) ## RMSE2$rmse :0.08123455 RMSE3$rmse : 0.01818576
para_h<-c(2.176029e-10, 3.219194e-04, 8.622389e+00, 6.255603e-01, 0.7426485e-00,9.784247e-01) ## RMSE2$rmse :0.0573787 RMSE3$rmse : 0.07463835
para_h<-c(2.176029e-12, 4.219194e-04, 8.622389e+00, 6.255603e-01, 0.7426485e-00,9.784247e-01) ## RMSE2$rmse :0.0649297 RMSE3$rmse : 0.01818576
#####################################################
### Volatility #######
#####################################################
ts.vol= shape_vol_P(para_h, Data.returns)
ts.plot(ts.vol, col = "steelblue", main = "HN-GARCH Model", xlab = "2009", ylab = "Volatility")
grid()
#####################################################
###  Log-likelihood values                    #######
#####################################################
start.time <- Sys.time()
ILK=Heston_likelihood_MixViX(para_h, Data.returns)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
Heston_likelihood_ret(para_h,Data.returns)
Heston_likelihood_vix(para_h,Data.returns)
#####################################################
### Optimization of the model #######
#####################################################
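# NB: optim() minimises by default, so Heston_likelihood_MixViX is presumably the negative joint log-likelihood of returns and VIX.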
start.time <- Sys.time()
Sol=optim(para_h, Heston_likelihood_MixViX , Data.returns=Data.returns, method="Nelder-Mead",control = list(maxit = 5000))
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
para_h1<-Sol$par
Sol
############################################################
####                 In-sample RMSE                ##
############################################################
source("C:/Users/e0g411k03dt/Desktop/Simulation P3 Mars 2017/HN-GARCH/Gaussian Esscher returns-VIX/RMSE HN-Gaussian.R")
start.time <- Sys.time()
RMSE1=RMSE(para_h1,Data.ret,Data.N)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
RMSE1$rmse
############################################################
#### Compare VIX ##
############################################################
source("C:/Users/e0g411k03dt/Desktop/Simulation P3 Mars 2017/HN-GARCH/Gaussian Esscher returns-VIX/Comparing VIX_HN_GARCH.R")
start.time <- Sys.time()
C_VIX= Compa_vix(para_h1,Data.returns)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
C_VIX
#####################################################
### Load both data.contract and data.ret #######
#####################################################
load("DataPrice20112012.Rdata")
############################################################
####                 Out-of-sample RMSE            ##
############################################################
Data.N=Data.N2
start.time <- Sys.time()
RMSE2=RMSE(para_h1,Data.ret,Data.N)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
RMSE2$rmse
|
/Simulation_juin_2018/Simulation Mai/Simulation P3 Mars 2017/HN-GARCH/Gaussian Esscher returns-VIX/Methode Return VIX-HN.R
|
no_license
|
Fanirisoa/dynamic_pricing
|
R
| false | false | 7,478 |
r
|
erpfatest <-
function (dta, design, design0 = NULL,
method = c("BH", "holm", "hochberg","hommel", "bonferroni", "BY", "fdr", "none"),
nbf = NULL,nsamples=200,significance=c("Satterthwaite","none"),
nbfmax = min(c(nsamples,nrow(design)))-ncol(design)-1,
alpha = 0.05, pi0 = 1, wantplot = ifelse(is.null(nbf),TRUE,FALSE), s0 = NULL,
          min.err = 1e-02, maxiter = 5, verbose = FALSE, svd.method = c("fast.svd", "irlba")) {
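  # fastiSB: for each row of mPsi, computes (diag(Psi) + B %*% t(B))^{-1} %*% B through an SVD
  # of diag(Psi)^(-1/2) %*% B (Woodbury-type shortcut); the SVDs themselves are also returned.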
fastiSB = function(mPsi, lB) {
nbdta = nrow(mPsi)
m = ncol(mPsi)
nbf = ncol(lB[[1]])
lbeta = lapply(1:nbdta,function(i,mPsi,lB,nbf) lB[[i]]/(sqrt(as.vector(mPsi[i,]))%*%t(rep(1,nbf))),mPsi=mPsi,lB=lB,nbf=nbf)
lsvdBeta = lapply(lbeta,fast.svd)
ltheta = lapply(lsvdBeta,function(s,m) (s$u*(rep(1,m)%*%t(s$d/(1+s$d^2))))%*%t(s$v),m=m)
liSB = lapply(1:nbdta,function(i,lt,mPsi,nbf) lt[[i]]/(sqrt(as.vector(mPsi[i,]))%*%t(rep(1,nbf))),lt=ltheta,mPsi=mPsi,nbf=nbf)
return(list(iSB=liSB,svdBeta=lsvdBeta))
}
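  # fastfa: EM estimation of a common factor model with nbf factors, applied to each centred
  # data matrix in ldta; iterates until the change in the uniquenesses Psi falls below min.err.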
fastfa = function(ldta, nbf, min.err = 1e-02, verbose = FALSE,svd.method=c("fast.svd","irlba")) { # data are centered, their type is matrix, nbf cannot be zero
m = ncol(ldta[[1]])
n = nrow(ldta[[1]])
nbdta = length(ldta)
vdta = matrix(unlist(lapply(ldta,function(dta,n) (crossprod(rep(1, n),dta^2)/n),n=n)),nrow=nbdta,byrow=TRUE)
sddta = sqrt(n/(n - 1)) * sqrt(vdta)
ltdta = lapply(ldta,t)
if (svd.method=="fast.svd") lsvddta = lapply(ltdta,function(x,n) fast.svd(x/sqrt(n-1)),n=n)
if (svd.method=="irlba") lsvddta = lapply(ltdta,function(x,n,nbf) irlba(x/sqrt(n-1),nu=nbf),n=n,nbf=nbf)
lB = lapply(lsvddta,function(s,nbf,m) s$u[, 1:nbf, drop=FALSE]*tcrossprod(rep(1,m),s$d[1:nbf]),nbf=nbf,m=m)
lB2 = lapply(lB,function(b,nbf) (b^2 %*% rep(1, nbf))[, 1],nbf=nbf)
mB2 = matrix(unlist(lB2),nrow=length(ldta),byrow=TRUE)
mPsi = sddta^2 - mB2
crit = rep(1,length(ldta))
mPsi[mPsi<=1e-16] = 1e-16
while (max(crit) > min.err) {
liSB = fastiSB(mPsi,lB)
lxiSB = Map(crossprod,ltdta,liSB$iSB)
lCyz = Map(function(x,y,n) crossprod(y,x)/(n-1),y=ldta,x=lxiSB,n=n)
lCzz1 = Map(crossprod,liSB$iSB,lCyz)
lCzz2 = Map(crossprod,lB,liSB$iSB)
lCzz2 = lapply(lCzz2,function(M,nbf) diag(nbf)-M,nbf=nbf)
lCzz = Map("+",lCzz1,lCzz2)
liCzz = lapply(lCzz,solve)
lBnew = Map(tcrossprod,lCyz,liCzz)
lB2 = lapply(lBnew,function(b,nbf) (b^2 %*% rep(1, nbf))[, 1],nbf=nbf)
mB2 = matrix(unlist(lB2),nrow=length(ldta),byrow=TRUE)
mPsinew = sddta^2 - mB2
crit = ((mPsi - mPsinew)^2)%*%rep(1,m)/m
lB = lBnew
mPsi = mPsinew
mPsi[mPsi<=1e-16] = 1e-16
if (verbose) print(paste("Convergence criterion: ",signif(max(crit),digits=ceiling(-log10(min.err))),sep=""))
}
liSB = fastiSB(mPsi,lB)
res = list(B = lB, Psi = mPsi, svdbeta = liSB$svdBeta)
return(res)
}
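  # fstat: SVD-based least-squares fits of 'design' and the nested 'design0'; returns pointwise
  # F statistics, the coefficients of the signal columns, the residuals and both residual degrees of freedom.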
fstat = function(erpdta,design,design0) {
n = nrow(erpdta)
idsignal = NULL
for (j in 1:ncol(design)) {
cj = apply(design0, 2, function(x, y) all(x == y), y = design[,j])
if (all(!cj))
idsignal = c(idsignal, j)
}
svd.design = fast.svd(design)
u = svd.design$u
iD = matrix(1/svd.design$d,nrow=1)
viD = svd.design$v*(rep(1,nrow(svd.design$v))%*%iD)
viDtu = tcrossprod(u,viD)
beta = crossprod(viDtu,erpdta)
beta = beta[idsignal,,drop=FALSE]
svd.design0 = fast.svd(design0)
u0 = svd.design0$u
tuy = crossprod(u,erpdta)
tu0y = crossprod(u0,erpdta)
fit = u%*%tuy
fit0 = u0%*%tu0y
res = erpdta-fit
res0 = erpdta-fit0
rdf1 = nrow(design) - length(svd.design$d)
rdf0 = nrow(design0) - length(svd.design0$d)
rss1 = (t(rep(1, n)) %*% res^2)[1,]
rss0 = (t(rep(1, n)) %*% res0^2)[1,]
F = ((rss0 - rss1)/(rdf0 - rdf1))/(rss1/rdf1)
return(list(F=F,beta=beta,residuals=res,rdf0=rdf0,rdf1=rdf1))
}
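  # pval.fstat: approximates the null distribution of F by refitting on row-permuted designs,
  # then moment-matches a scaled chi-square (Satterthwaite) to obtain p-values.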
pval.fstat = function(F,erpdta,design,design0,nsamples) {
n = nrow(erpdta)
svd.design = fast.svd(design)
svd.design0 = fast.svd(design0)
u = svd.design$u
u0 = svd.design0$u
rdf1 = nrow(design) - length(svd.design$d)
rdf0 = nrow(design0) - length(svd.design0$d)
lsamples = lapply(1:nsamples,function(i,n) sample(1:n),n=n)
lu = lapply(lsamples,function(s,d) d[s,],d=u)
ltu = lapply(lu,t)
ltuy = lapply(lu,function(u,y) crossprod(u,y),y=erpdta)
lfit = Map(crossprod,ltu,ltuy)
lres = lapply(lfit,function(fit,y) y-fit,y=erpdta)
lu0 = lapply(lsamples,function(s,d) d[s,],d=u0)
ltu0 = lapply(lu0,t)
ltu0y = lapply(lu0,function(u,y) crossprod(u,y),y=erpdta)
lfit0 = Map(crossprod,ltu0,ltu0y)
lres0 = lapply(lfit0,function(fit,y) y-fit,y=erpdta)
lrss1 = lapply(lres,function(res,n) as.vector(t(rep(1, n)) %*% res^2),n=n)
lrss0 = lapply(lres0,function(res,n) as.vector(t(rep(1, n)) %*% res^2),n=n)
lf0 = Map(function(rss1,rss0,rdf1,rdf0) {
((rss0 - rss1)/(rdf0 - rdf1))/(rss1/rdf1)
},lrss1,lrss0,rdf1=rdf1,rdf0=rdf0)
mf0 = matrix(unlist(lf0),nrow=nsamples,byrow=TRUE)
varf0 = apply(mf0,2,var)
meanf0 = apply(mf0,2,mean)
const = varf0/(2*meanf0)
nu = 2*meanf0^2/varf0
pval = pchisq(F/const,df=nu,lower.tail=FALSE)
return(pval)
}
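  # update.beta: re-estimates the signal coefficients after adjusting for latent factors fitted
  # (emfa) to the residuals, treating the frames in fs0 as null; returns kernel-smoothed F statistics
  # from the factor-augmented model, together with updated residuals and degrees of freedom.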
update.beta = function(erpdta,design,design0,nbf,fs0,min.err,verbose) {
n = nrow(erpdta)
idsignal = NULL
for (j in 1:ncol(design)) {
cj = apply(design0, 2, function(x, y) all(x == y), y = design[,j])
if (all(!cj))
idsignal = c(idsignal, j)
}
svd.design = fast.svd(design)
u = svd.design$u
R = diag(ncol(design))
R = R[idsignal, ,drop=FALSE]
iD = matrix(1/svd.design$d,nrow=1)
viD = svd.design$v*(rep(1,nrow(svd.design$v))%*%iD)
viDtu = tcrossprod(u,viD)
coef1 = crossprod(viDtu,erpdta)
beta = coef1[idsignal,,drop=FALSE]
itxx = tcrossprod(viD)
svd.designR = fast.svd(R%*%viD)
RiD = matrix(1/svd.designR$d,nrow=1)
RuiD = svd.designR$u*(rep(1,nrow(svd.designR$u))%*%RiD)
iRitxxtR = tcrossprod(RuiD)
fit = design%*%coef1
if (length(fs0)>0) {
res = erpdta-fit
meanres = (crossprod(rep(1,n),res)/n)[1,]
cres = res-rep(1,n)%*%t(meanres)
fa = emfa(cres,nbf=nbf,min.err=min.err,verbose=verbose,svd.method=svd.method)
Psi = fa$Psi
B = fa$B
B0 = B[fs0, ,drop=FALSE]
iSxx = ifa(Psi[fs0], B0)$iS
Sxy = tcrossprod(B0,B[-fs0, , drop=FALSE])
betaSigma0 = iSxx %*% Sxy
beta0 = beta
if (length(fs0)<ncol(erpdta)) beta0[, -fs0] = beta[, fs0,drop=FALSE] %*% betaSigma0
beta = beta - beta0
Rcoef = R%*%coef1
epsilon = beta-Rcoef
M = itxx %*% t(R) %*% iRitxxtR
Mepsilon = M%*%epsilon
coef1 = coef1+Mepsilon
fit = design%*%coef1
}
res = erpdta-fit
meanres = (crossprod(rep(1,n),res)/n)[1,]
cres = res-rep(1,n)%*%t(meanres)
sdres = sqrt(crossprod(rep(1,n),cres^2)/(n-1))[1,]
scres = cres/tcrossprod(rep(1,n),sdres)
fa = emfa(scres,nbf=nbf,min.err=min.err,verbose=verbose,svd.method=svd.method)
Psi = fa$Psi
B = fa$B
sB = t(B)/tcrossprod(rep(1,nbf),sqrt(Psi))
G = solve(diag(nbf)+tcrossprod(sB))
sB = t(B)/tcrossprod(rep(1,nbf),Psi)
GsB = crossprod(G,sB)
Factors = tcrossprod(scres,GsB)
designz0 = cbind(design0,Factors)
designz1 = cbind(designz0,design[,idsignal,drop=FALSE])
svd.designz0 = fast.svd(designz0)
svd.designz1 = fast.svd(designz1)
uz0 = svd.designz0$u
uz1 = svd.designz1$u
vz0 = svd.designz0$v
vz1 = svd.designz1$v
dz0 = svd.designz0$d
dz1 = svd.designz1$d
rdfz0 = nrow(designz0) - length(dz0)
rdfz1 = nrow(designz1) - length(dz1)
vz1id = vz1/(rep(1,ncol(designz1))%*%t(dz1))
pdesignz1 = tcrossprod(vz1id,uz1)
coefz1 = pdesignz1%*%erpdta
vz0id = vz0/(rep(1,ncol(designz0))%*%t(dz0))
pdesignz0 = tcrossprod(vz0id,uz0)
coefz0 = pdesignz0%*%erpdta
idsignalz1 = (ncol(designz1)-length(idsignal)+1):ncol(designz1)
fitz1 = designz1%*%coefz1
resz1 = erpdta-fitz1
rssz1 = (t(rep(1,n))%*%resz1^2)[1,]
fitz0 = designz0%*%coefz0
resz0 = erpdta-fitz0
rssz0 = (t(rep(1,n))%*%resz0^2)[1,]
F = ((rssz0 - rssz1)/(rdfz0 - rdfz1))/(rssz1/rdfz1)
F = ksmooth(1:T,F,bandwidth = 0.01 * diff(range(1:T)),x.points = 1:T)$y
return(list(F=F,residuals=resz1,rdf0=rdfz0,rdf1=rdfz1,beta=coefz1[idsignalz1,,drop=FALSE]))
}
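  # pval.fstatz: Satterthwaite-type p-values for the factor-adjusted F statistics, computed from
  # row-permuted designs with the factor scores re-estimated within each permutation.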
pval.fstatz = function(F,erpdta,design,design0,nbf,fs0,nsamples,min.err,verbose) {
n = nrow(erpdta)
idsignal = NULL
for (j in 1:ncol(design)) {
cj = apply(design0, 2, function(x, y) all(x == y), y = design[,j])
if (all(!cj))
idsignal = c(idsignal, j)
}
R = diag(ncol(design))
R = R[idsignal, ,drop=FALSE]
svd.design = fast.svd(design)
u = svd.design$u
lsamples = lapply(1:nsamples,function(i,n) sample(1:n),n=n)
lu = lapply(lsamples,function(s,u) u[s,],u=u)
iD = matrix(1/svd.design$d,nrow=1)
viD = svd.design$v*(rep(1,nrow(svd.design$v))%*%iD)
itxx = tcrossprod(viD)
svd.designR = fast.svd(R%*%viD)
RiD = matrix(1/svd.designR$d,nrow=1)
RuiD = svd.designR$u*(rep(1,nrow(svd.designR$u))%*%RiD)
iRitxxtR = tcrossprod(RuiD)
lviDtu = lapply(lu,function(u,m) tcrossprod(u,m),m=viD)
lcoef1 = lapply(lviDtu,function(u,y) crossprod(u,y),y=erpdta)
lbeta = lapply(lcoef1,function(beta,id) beta[id,,drop=FALSE],id=idsignal)
ldesign = lapply(lsamples,function(s,design) design[s,],design=design)
ldesign0 = lapply(lsamples,function(s,design) design[s,,drop=FALSE],
design=design[,-idsignal,drop=FALSE])
lfit = Map("%*%",ldesign,lcoef1)
if (length(fs0)>0) {
lres = lapply(lfit,function(fit,y) y-fit,y=erpdta)
lmeanres = lapply(lres,function(res,n) (crossprod(rep(1,n),res)/n)[1,],n=n)
lcres = Map(function(res,meanres,n) res-rep(1,n)%*%t(meanres),lres,lmeanres,n=n)
lfa = fastfa(lcres,nbf=nbf,min.err=min.err,verbose=FALSE,svd.method=svd.method)
lPsi = as.list(data.frame(t(lfa$Psi)))
lB = lfa$B
lB0 = lapply(lB,function(B,fs0) B[fs0, ,drop=FALSE],fs0=fs0)
lB1 = lapply(lB,function(B,fs0) B[-fs0, ,drop=FALSE],fs0=fs0)
lPsi0 = lapply(lPsi,function(Psi,fs0) Psi[fs0],fs0=fs0)
liSxx = Map(function(Psi,B) ifa(Psi,B)$iS,lPsi0,lB0)
lSxy = Map(tcrossprod,lB0,lB1)
lbetaSigma0 = Map(crossprod,liSxx,lSxy)
lbeta0 = lbeta
lbeta0c = lapply(lbeta0,function(beta0,fs0) beta0[, fs0,drop=FALSE],fs0=fs0)
lbeta0 = lapply(lbeta0,function(beta0,fs0) {
res = beta0
res[,-fs0] = 0
return(res)
},fs0=fs0)
lbeta0c = Map("%*%",lbeta0c,lbetaSigma0)
lbeta0c = lapply(lbeta0c,function(beta0c,fs0,T,p) {
res = matrix(0,nrow=p,ncol=T)
res[,-fs0] = beta0c
return(res)
},fs0=fs0,T=T,p=length(idsignal))
lbeta0 = Map("+",lbeta0,lbeta0c)
lbeta = Map("-",lbeta,lbeta0)
lRcoef1 = lapply(lcoef1,function(b,R) R%*%b,R=R)
lepsilon = Map("-",lbeta,lRcoef1)
M = itxx %*% t(R) %*% iRitxxtR
lMepsilon = lapply(lepsilon,function(epsilon,M) M%*%epsilon,M=M)
lcoef1 = Map("+",lcoef1,lMepsilon)
lfit = Map("%*%",ldesign,lcoef1)
}
lres = lapply(lfit,function(fit,y) y-fit,y=erpdta)
lmeanres = lapply(lres,function(res,n) (crossprod(rep(1,n),res)/n)[1,],n=n)
lcres = Map(function(res,meanres,n) res-rep(1,n)%*%t(meanres),lres,lmeanres,n=n)
lsdres = lapply(lcres,function(res,n) sqrt(crossprod(rep(1,n),res^2)/(n-1))[1,],n=n)
lscres = Map(function(cres,sdres,n) cres/tcrossprod(rep(1,n),sdres),lcres,lsdres,n=n)
lfa = fastfa(lscres,nbf=nbf,min.err=min.err,verbose=FALSE,svd.method=svd.method)
lPsi = as.list(data.frame(t(lfa$Psi)))
lB = lfa$B
lsB = Map(function(B,Psi) t(B)/tcrossprod(rep(1,ncol(B)),sqrt(Psi)),lB,lPsi)
lG = lapply(lsB,function(sb,nbf) solve(diag(nbf)+tcrossprod(sb)),nbf=nbf)
lsB = Map(function(B,Psi) t(B)/tcrossprod(rep(1,ncol(B)),Psi),lB,lPsi)
lGsB = Map(crossprod,lG,lsB)
lFactors = Map(tcrossprod,lscres,lGsB)
ldesignz0 = Map(function(design0,Factors) cbind(design0,Factors),ldesign0,lFactors)
ldesign1 = lapply(lsamples,function(s,design) design[s,,drop=FALSE],design=design[,idsignal,drop=FALSE])
ldesignz1 = Map(function(designz0,design1) cbind(designz0,design1),ldesignz0,ldesign1)
lsvd.designz0 = lapply(ldesignz0,fast.svd)
lsvd.designz1 = lapply(ldesignz1,fast.svd)
luz0 = lapply(lsvd.designz0,function(x) x$u)
luz1 = lapply(lsvd.designz1,function(x) x$u)
lvz0 = lapply(lsvd.designz0,function(x) x$v)
lvz1 = lapply(lsvd.designz1,function(x) x$v)
ldz0 = lapply(lsvd.designz0,function(x) x$d)
ldz1 = lapply(lsvd.designz1,function(x) x$d)
rdfz0 = nrow(ldesignz0[[1]]) - length(ldz0[[1]])
rdfz1 = nrow(ldesignz1[[1]]) - length(ldz1[[1]])
lvz1id = Map(function(v,d,p) v/(rep(1,p)%*%t(d)),lvz1,ldz1,p=nbf+ncol(design))
lpdesignz1 = Map(tcrossprod,lvz1id,luz1)
lcoefz1 = lapply(lpdesignz1,function(pdesign,y) pdesign%*%y,y=erpdta)
lvz0id = Map(function(v,d,p) v/(rep(1,p)%*%t(d)),lvz0,ldz0,p=nbf+ncol(design0))
lpdesignz0 = Map(tcrossprod,lvz0id,luz0)
lcoefz0 = lapply(lpdesignz0,function(pdesign,y) pdesign%*%y,y=erpdta)
idsignalz1 = (ncol(ldesignz1[[1]])-length(idsignal)+1):ncol(ldesignz1[[1]])
lfitz1 = Map("%*%",ldesignz1,lcoefz1)
lresz1 = lapply(lfitz1,function(fit,y) y-fit,y=erpdta)
lrssz1 = lapply(lresz1,function(res,n) (t(rep(1,n))%*%res^2)[1,],n=n)
lfitz0 = Map("%*%",ldesignz0,lcoefz0)
lresz0 = lapply(lfitz0,function(fit,y) y-fit,y=erpdta)
lrssz0 = lapply(lresz0,function(res,n) (t(rep(1,n))%*%res^2)[1,],n=n)
lfz0 = Map(function(rss1,rss0,rdf1,rdf0) {
((rss0 - rss1)/(rdf0 - rdf1))/(rss1/rdf1)
},lrssz1,lrssz0,rdf1=rdfz1,rdf0=rdfz0)
mfz0 = matrix(unlist(lfz0),nrow=nsamples,byrow=TRUE)
varfz0 = apply(mfz0,2,var)
meanfz0 = apply(mfz0,2,mean)
constz = varfz0/(2*meanfz0)
nuz = 2*meanfz0^2/varfz0
pvalz = pchisq(F/constz,df=nuz,lower.tail=FALSE)
return(pvalz)
}
method = match.arg(method, choices = c("BH", "holm", "hochberg",
"hommel", "bonferroni", "BY", "fdr", "none"))
significance = match.arg(significance,c("Satterthwaite","none"))
svd.method = match.arg(svd.method,choices=c("fast.svd","irlba"))
if (typeof(nsamples) != "double")
stop("nsamples sould be an integer, usually larger than 200.")
if (is.null(design0))
design0 = matrix(1, nrow = nrow(dta), ncol = 1)
erpdta = as.matrix(dta)
design = as.matrix(design)
design0 = as.matrix(design0)
if (typeof(erpdta) != "double")
stop("ERPs should be of type double")
if (nrow(erpdta) != nrow(design))
stop("dta and design should have the same number of rows")
if (nrow(erpdta) != nrow(design0))
stop("dta and design0 should have the same number of rows")
if (ncol(design) <= ncol(design0))
stop("design0 should have fewer columns than design")
idsignal = NULL
for (j in 1:ncol(design)) {
cj = apply(design0, 2, function(x, y) all(x == y), y = design[,
j])
if (all(!cj))
idsignal = c(idsignal, j)
}
if (length(idsignal) < (ncol(design) - ncol(design0)))
stop("the null model design0 should be nested into the non-null model design")
if (typeof(alpha) != "double")
stop("alpha should be of type double")
if ((alpha <= 0) | (alpha >= 1))
stop("alpha should be in ]0,1[, typically 0.05")
if (typeof(pi0) != "double")
stop("pi0 should be of type double")
if ((pi0 <= 0) | (pi0 > 1))
stop("pi0 should be in ]0,1]")
if (length(s0) == 1)
stop("s0 should be either NULL, or of length larger than 2")
frames = 1:ncol(erpdta)
if (is.null(s0))
fs0i = integer(0)
if (length(s0) > 2)
fs0i = s0
if (length(s0) == 2)
fs0i = which((frames <= s0[1] * diff(range(frames))) |
(frames >= s0[2] * diff(range(frames))))
nbfmaxtheo = min(c(nrow(design),nsamples))-ncol(design)-1
if (sum(is.element(nbfmax, 0:nbfmaxtheo)) != 1) {
warning(paste("nbfmax should be an integer in [0,", nbfmaxtheo,"]", sep = ""))
nbfmax = nbfmaxtheo
}
n = nrow(erpdta)
T = ncol(erpdta)
pval = NULL
qval = NULL
correctedpval=NULL
significant=integer(0)
test = NULL
r2 = NULL
p0 = 1
rdf1 = NULL
rdf0 = NULL
beta = NULL
test = NULL
if (is.null(nbf)) {
svd.design = fast.svd(design)
svd.design0 = fast.svd(design0)
P0 = diag(n)-svd.design0$u%*%t(svd.design0$u)
Z = design[,idsignal,drop=FALSE]
cZ = P0%*%Z
Szz = t(cZ)%*%cZ/n
svdcz = fast.svd(cZ)
if (length(idsignal)>1) sqrtcz = svdcz$v%*%diag(svdcz$d)%*%t(svdcz$v)
if (length(idsignal)==1) sqrtcz = svdcz$v%*%t(svdcz$v)*svdcz$d
vid = svd.design$v%*%diag(1/svd.design$d)
lsamples = lapply(1:nsamples,function(i,n) sample(1:n),n=n)
lu = lapply(lsamples,function(s,d) d[s,],d=svd.design$u)
ltu = lapply(lu,t)
ltuy = lapply(lu,function(u,y) crossprod(u,y),y=erpdta)
lfit = Map(crossprod,ltu,ltuy)
lres = lapply(lfit,function(fit,y) y-fit,y=erpdta)
lsdres = lapply(lres,function(res,n) sqrt(crossprod(rep(1,n),res^2)/(n-1))[1,],n=n)
lmsdres = lapply(lsdres,function(sdres,p) tcrossprod(rep(1,p),sdres),p=length(idsignal))
lbeta.ols = lapply(ltuy,function(tuy,m,select) crossprod(m,tuy)[select,,drop=FALSE],m=t(vid),select=idsignal)
lb.ols = lapply(lbeta.ols,function(beta,m) crossprod(m,beta),m=t(sqrtcz))
lb.ols = Map("/",lb.ols,lmsdres)
mb.ols = lapply(lb.ols,function(b,p) crossprod(rep(1,p),b)/p,p=length(idsignal))
mb.ols = matrix(unlist(mb.ols),ncol=T,byrow=TRUE)
meanmb.ols = (t(rep(1,nsamples))%*%mb.ols)/nsamples
cmb.ols = mb.ols-rep(1,nsamples)%*%meanmb.ols
sdmb.ols = sqrt(t(rep(1,nsamples))%*%cmb.ols^2/(nsamples-1))
scmb.ols = cmb.ols/(rep(1,nsamples)%*%sdmb.ols)
nbf = nbfactors(scmb.ols,maxnbfactors=nbfmax,diagnostic.plot=wantplot,verbose=verbose,min.err=min.err,svd.method="irlba")$optimalnbfactors
}
if (significance=="Satterthwaite") {
F = fstat(erpdta,design=design,design0=design0)
res = F$residuals
sdres = sqrt((t(rep(1,n))%*%res^2)[1,]/(n-1))
scres = res/(rep(1,n)%*%t(sdres))
beta = F$beta
F = F$F
pval = pval.fstat(F,erpdta,design,design0,nsamples)
if (is.null(pi0))
p0 = pval.estimate.eta0(pval, diagnostic.plot = FALSE)
qval = p0 * p.adjust(pval, method = method)
fs0 = sort(unique(c(fs0i, which(pval > 0.2))))
if (verbose)
print(paste("AFA with", nbf, "factors"))
if ((nbf > 0) & (maxiter > 0)) {
diff.fs0 = length(setdiff(fs0,integer(0)))/length(union(fs0,integer(0)))
fs1 = fs0
iter = 0
while ((diff.fs0>0.05) & (iter < maxiter)) {
iter = iter + 1
if (verbose)
print(paste(iter, "/", maxiter, " iterations",sep = ""))
upd = update.beta(erpdta,design,design0,nbf=nbf,fs0=fs0,min.err=min.err,verbose=FALSE)
F = upd$F
rdf0 = upd$rdf0
rdf1 = upd$rdf1
if (length(fs0)<T)
pval = pval.fstatz(F,erpdta,design,design0,nbf,fs0,nsamples,min.err,verbose=FALSE)
if (is.null(pi0))
p0 = pval.estimate.eta0(pval, diagnostic.plot = FALSE)
qval = p0 * p.adjust(pval, method = method)
fs0 = which(pval > 0.2)
diff.fs0 = length(setdiff(fs0,fs1))/length(union(fs0,fs1))
fs1 = fs0
if (verbose) print(paste("Convergence criterion: ",diff.fs0,". Tolerance: 0.05",sep=""))
beta = upd$beta
}
}
significant = which(qval <= alpha)
test = F
r2 = (1 - 1/(1 + F * ((rdf0 - rdf1)/rdf1)))
if (length(idsignal)==1) test = sign(beta[1,])*sqrt(F)
}
list(pval = pval, correctedpval = qval, significant = significant,
pi0 = p0, test = test, df1 = rdf1, df0 = rdf0,
nbf = nbf,signal = beta, r2=r2)
}
|
/R/erpfatest.R
|
no_license
|
ChingFanSheu/ERP
|
R
| false | false | 20,222 |
r
|
library(caret)
library(tree)
# Load iris dataset
dataset <- iris
# Print summary
summary(dataset)
# Density plots
featurePlot(dataset[,1:4], dataset[,5], plot="density", scales=list(x=list(relation="free"), y=list(relation="free")), auto.key=list(columns=3))
#### Classify the plants ####
# Variables to store the predictions and true classes.
trueClasses <- NULL; predictions <- NULL
# Set seed for reproducibility
set.seed(1234)
# Shuffle our data. Told you the sample function is very handy =) note the replace = F.
dataset <- dataset[sample(nrow(dataset), replace = F),]
# Generate folds for cross-validation
k <- 10
# Again, we can use the sample function. This time replace = T
folds <- sample(1:k, size = nrow(dataset), replace = T)
for(i in 1:k){
print(paste0("Fold: ",i,"/",k))
# Generate train and test sets
testset <- dataset[which(folds == i),]
trainset <- dataset[which(folds != i),]
# Train the model (a tree in this case)
classifier <- tree(Species ~., trainset)
tmp.preds <- as.character(predict(classifier, newdata = testset, type = "class"))
tmp.true <- as.character(testset$Species)
predictions <- c(predictions, tmp.preds)
trueClasses <- c(trueClasses, tmp.true)
}
# Print confusion matrix and some performance metrics
confusionMatrix(as.factor(predictions), reference = as.factor(trueClasses))
# References
#Density plots: https://machinelearningmastery.com/data-visualization-with-the-caret-r-package/
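# A possible alternative to the sample()-based fold assignment above (a sketch,
# not part of the original analysis): caret::createFolds() returns a list of
# stratified test-set indices, so each fold keeps roughly the class proportions
# of Species and roughly equal size.
stratFolds <- createFolds(dataset$Species, k = k)
sapply(stratFolds, length) # fold sizes are close to nrow(dataset)/k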
|
/r/classification_example.R
|
no_license
|
enriquegit/data-analysis-r
|
R
| false | false | 1,480 |
r
|
|
.addTask <- function(jobId, taskId, rCommand, ...) {
storageCredentials <- rAzureBatch::getStorageCredentials()
args <- list(...)
.doAzureBatchGlobals <- args$envir
argsList <- args$args
dependsOn <- args$dependsOn
cloudCombine <- args$cloudCombine
userOutputFiles <- args$outputFiles
if (!is.null(argsList)) {
assign("argsList", argsList, .doAzureBatchGlobals)
}
if (!is.null(cloudCombine)) {
assign("cloudCombine", cloudCombine, .doAzureBatchGlobals)
}
envFile <- paste0(taskId, ".rds")
saveRDS(argsList, file = envFile)
rAzureBatch::uploadBlob(jobId, paste0(getwd(), "/", envFile))
file.remove(envFile)
sasToken <- rAzureBatch::createSasToken("r", "c", jobId)
writeToken <- rAzureBatch::createSasToken("w", "c", jobId)
envFileUrl <-
rAzureBatch::createBlobUrl(storageCredentials$name, jobId, envFile, sasToken)
resourceFiles <-
list(rAzureBatch::createResourceFile(url = envFileUrl, fileName = envFile))
if (!is.null(args$dependsOn)) {
dependsOn <- list(taskIds = dependsOn)
}
resultFile <- paste0(taskId, "-result", ".rds")
accountName <- storageCredentials$name
downloadCommand <-
sprintf(
paste("/anaconda/envs/py35/bin/blobxfer %s %s %s --download --saskey $BLOBXFER_SASKEY",
"--remoteresource . --include result/*.rds"),
accountName,
jobId,
"$AZ_BATCH_TASK_WORKING_DIR"
)
containerUrl <-
rAzureBatch::createBlobUrl(
storageAccount = storageCredentials$name,
containerName = jobId,
sasToken = writeToken
)
outputFiles <- list(
list(
filePattern = resultFile,
destination = list(container = list(
path = paste0("result/", resultFile),
containerUrl = containerUrl
)),
uploadOptions = list(uploadCondition = "taskCompletion")
),
list(
filePattern = paste0(taskId, ".txt"),
destination = list(container = list(
path = paste0("logs/", taskId, ".txt"),
containerUrl = containerUrl
)),
uploadOptions = list(uploadCondition = "taskCompletion")
),
list(
filePattern = "../stdout.txt",
destination = list(container = list(
path = paste0("stdout/", taskId, "-stdout.txt"),
containerUrl = containerUrl
)),
uploadOptions = list(uploadCondition = "taskCompletion")
),
list(
filePattern = "../stderr.txt",
destination = list(container = list(
path = paste0("stderr/", taskId, "-stderr.txt"),
containerUrl = containerUrl
)),
uploadOptions = list(uploadCondition = "taskCompletion")
)
)
outputFiles <- append(outputFiles, userOutputFiles)
commands <-
c(downloadCommand,
rCommand)
commands <- linuxWrapCommands(commands)
sasToken <- rAzureBatch::createSasToken("rwcl", "c", jobId)
queryParameterUrl <- "?"
for (query in names(sasToken)) {
queryParameterUrl <-
paste0(queryParameterUrl,
query,
"=",
RCurl::curlEscape(sasToken[[query]]),
"&")
}
queryParameterUrl <-
substr(queryParameterUrl, 1, nchar(queryParameterUrl) - 1)
setting <- list(name = "BLOBXFER_SASKEY",
value = queryParameterUrl)
containerEnv <- list(name = "CONTAINER_NAME",
value = jobId)
rAzureBatch::addTask(
jobId,
taskId,
environmentSettings = list(setting, containerEnv),
resourceFiles = resourceFiles,
commandLine = commands,
dependsOn = dependsOn,
outputFiles = outputFiles
)
}
.addJob <- function(jobId,
poolId,
resourceFiles,
...) {
args <- list(...)
packages <- args$packages
poolInfo <- list("poolId" = poolId)
commands <- c("ls")
if (!is.null(packages)) {
jobPackages <- getJobPackageInstallationCommand("cran", packages)
commands <- c(commands, jobPackages)
}
jobPreparationTask <- list(
commandLine = linuxWrapCommands(commands),
userIdentity = list(autoUser = list(
scope = "pool",
elevationLevel = "admin"
)),
waitForSuccess = TRUE,
resourceFiles = resourceFiles,
constraints = list(maxTaskRetryCount = 2)
)
usesTaskDependencies <- TRUE
response <- rAzureBatch::addJob(
jobId,
poolInfo = poolInfo,
jobPreparationTask = jobPreparationTask,
usesTaskDependencies = usesTaskDependencies,
content = "text"
)
return(response)
}
.addPool <- function(pool, packages, environmentSettings, resourceFiles, ...) {
args <- list(...)
commands <- c(
"/anaconda/envs/py35/bin/pip install --no-dependencies blobxfer"
)
if (!is.null(args$commandLine)) {
commands <- c(commands, args$commandLine)
}
if (!is.null(packages)) {
commands <- c(commands, packages)
}
startTask <- list(
commandLine = linuxWrapCommands(commands),
userIdentity = list(autoUser = list(
scope = "pool",
elevationLevel = "admin"
)),
waitForSuccess = TRUE
)
if (!is.null(environmentSettings)) {
startTask$environmentSettings <- environmentSettings
}
if (length(resourceFiles) > 0) {
startTask$resourceFiles <- resourceFiles
}
virtualMachineConfiguration <- list(
imageReference = list(
publisher = "microsoft-ads",
offer = "linux-data-science-vm",
sku = "linuxdsvm",
version = "latest"
),
nodeAgentSKUId = "batch.node.centos 7"
)
response <- rAzureBatch::addPool(
pool$name,
pool$vmSize,
startTask = startTask,
virtualMachineConfiguration = virtualMachineConfiguration,
enableAutoScale = TRUE,
autoscaleFormula = getAutoscaleFormula(
pool$poolSize$autoscaleFormula,
pool$poolSize$dedicatedNodes$min,
pool$poolSize$dedicatedNodes$max,
pool$poolSize$lowPriorityNodes$min,
pool$poolSize$lowPriorityNodes$max,
maxTasksPerNode = pool$maxTasksPerNode
),
autoScaleEvaluationInterval = "PT5M",
maxTasksPerNode = pool$maxTasksPerNode,
content = "text"
)
return(response)
}
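# Rough illustration of the pool object consumed by .addPool(), inferred from
# the fields accessed above (the values shown are placeholders, not an official
# example):
# pool <- list(
#   name = "mypool",
#   vmSize = "Standard_D2_v2",
#   maxTasksPerNode = 1,
#   poolSize = list(
#     autoscaleFormula = "QUEUE",
#     dedicatedNodes = list(min = 0, max = 3),
#     lowPriorityNodes = list(min = 0, max = 3)
#   )
# )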
|
/R/helpers.R
|
permissive
|
dtenenba/doAzureParallel
|
R
| false | false | 6,070 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/collections_projects_teams.R
\name{get_projects}
\alias{get_projects}
\title{Get TFS Projects}
\usage{
get_projects(tfs_collection)
}
\arguments{
\item{tfs_collection}{TFS Collection}
}
\value{
\code{api_get} class of TFS Projects
}
\description{
Get a list of projects in a TFS Collection.
}
\examples{
get_projects('FinancialReportingCollection')
}
|
/man/get_projects.Rd
|
no_license
|
nickclark1000/rtfs
|
R
| false | true | 430 |
rd
|
|
library(dplyr)
data_electric<-read.table("./data/household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?",colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'))
data_electric$Date<-as.Date(data_electric$Date, "%d/%m/%Y")
filter_electric<-subset(data_electric, Date>=as.Date("2007-2-1")& Date<=as.Date("2007-2-2"))
## keep only complete cases (drop rows with NA values)
filter_electric<-filter_electric[complete.cases(filter_electric),]
##let's combine columns date and time
DateTime<- paste(filter_electric$Date, filter_electric$Time)
## give the combined column a name
DateTime<- setNames(DateTime, "DateTime")
##let's remove both column date and time and replace it with DateTime
filter_electric<-filter_electric[, (!names(filter_electric) %in% c("Date", "Time"))]
filter_electric<-cbind(DateTime, filter_electric)
##SET FORMAT
filter_electric$DateTime<-as.POSIXct(DateTime)
##LET'S DO THE PLOTS
par(mfrow=c(1,1))
##code that makes plot 1
hist(filter_electric$Global_active_power, col = "red", main = ("Global Active Power"), xlab = "Global Active Power (Kilowatts)" )
##code that creates the png file
dev.copy(png, file="plot1.png")##copy my plot to a png file
dev.off() #close the png file device
|
/plot1.R
|
no_license
|
alejandrodf1/ExData_Plotting1
|
R
| false | false | 1,201 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stepCounter.R
\name{stepCounter}
\alias{stepCounter}
\title{Step Counter}
\usage{
stepCounter(
AccData,
samplefreq = 100,
filterorder = 2,
boundaries = c(0.5, 5),
Rp = 3,
plot.it = FALSE,
hysteresis = 0.05,
verbose = verbose,
fun = c("GENEAcount", "mean", "sd", "mad")
)
}
\arguments{
\item{AccData}{The data to use for calculating the steps. This should be either an AccData object or a vector.}
\item{samplefreq}{The sampling frequency of the data, in hertz,
when calculating the step number (default 100).}
\item{filterorder}{single integer, order of the Chebyshev bandpass filter,
passed to argument n of \code{\link[signal]{cheby1}}.}
\item{boundaries}{length 2 numeric vector specifying lower and upper bounds
of the Chebyshev filter (default \code{c(0.5, 5)} Hz),
passed to argument W of \code{\link[signal]{butter}} or \code{\link[signal]{cheby1}}.}
\item{Rp}{the decibel level that the cheby filter takes, see \code{\link[signal]{cheby1}}.}
\item{plot.it}{single logical create plot of data and zero crossing points (default \code{FALSE}).}
\item{hysteresis}{The hysteresis applied after zero crossing. (default 100mg)}
\item{verbose}{single logical should additional progress reporting be printed at the console? (default FALSE).}
\item{fun}{character vector naming functions by which to summarize steps.
"count" is an internally implemented summarizing function that returns step count.}
}
\value{
Returns a vector with the same length as \code{fun}.
}
\description{
Function to calculate the number and variance of the steps in the data.
}
\examples{
d1 <- sin(seq(0.1, 100, 0.1))/2 + rnorm(1000)/10 + 1
Steps4 = stepCounter(d1)
length(Steps4)
mean(Steps4)
sd(Steps4)
plot(Steps4)
}
|
/man/stepCounter.Rd
|
no_license
|
cran/GENEAclassify
|
R
| false | true | 1,829 |
rd
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alter.r
\name{lighter}
\alias{lighter}
\title{Make a munsell colour lighter}
\usage{
lighter(col, steps = 1)
}
\arguments{
\item{col}{character vector of Munsell colours}
\item{steps}{number of steps to take in increasing value}
}
\value{
character vector of Munsell colours
}
\description{
Increases the value of the Munsell colour.
}
\examples{
lighter("5PB 2/4")
cols <- c("5PB 2/4", "5Y 6/8")
p <- plot_mnsl(c(cols, lighter(cols), lighter(cols, 2)))
p + ggplot2::facet_wrap(~ names, ncol = 2)
# lighter and darker are usually reversible
lighter(darker("5PB 2/4"))
# unless you try to pass through white or black
lighter(darker("5PB 1/4"))
}
|
/man/lighter.Rd
|
no_license
|
john-s-christensen/munsell
|
R
| false | true | 724 |
rd
|
|
## Copyright 2015 <Jeremy Yee> <jeremyyee@outlook.com.au>
## Finding the expected value function using fast methods
################################################################################
FastExpected <- function(grid, value, disturb, weight, r_index,
Neighbour, smooth = 1, SmoothNeighbour) {
## Making sure inputs are in correct format
if (ncol(r_index) != 2) stop("ncol(r_index) != 2")
if (smooth < 1) stop("smooth must be >= 1")
if (smooth >= nrow(grid)) stop("smooth must be < nrow(grid)")
## Call the C++ functions
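  ## Defaults below: a 1-nearest-neighbour search for mapping points onto the
  ## grid and a 'smooth'-nearest-neighbour search for local smoothing, both via
  ## rflann k-d trees.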
if (missing(Neighbour)) {
Neighbour <- function(query, ref) {
rflann::Neighbour(query, ref, 1, "kdtree", 0, 1)$indices
}
}
if (missing(SmoothNeighbour)) {
SmoothNeighbour <- function(query, ref) {
rflann::Neighbour(query, ref, smooth, "kdtree", 0, 1)$indices
}
}
.Call('rcss_FastExpected', PACKAGE = 'rcss', grid, value, r_index,
disturb, weight, Neighbour, smooth, SmoothNeighbour)
}
|
/R/FastExpected.R
|
no_license
|
IanMadlenya/rcss
|
R
| false | false | 1,050 |
r
|
|
#' Eliminating strictly dominated choices
#'
#' This function eliminates strictly dominated choices.
#'
#' @param n an integer representing the number of choices of player 1
#' @param m an integer representing the number of choices of player 2
#' @param A an nxm matrix representing the payoff matrix of player 1
#' @param choices.A a vector of length n representing the names of player 1's choices
#' @param B an nxm matrix representing the payoff matrix of player 2
#' @param choices.B a vector of length m representing the names of player 2's choices
#' @param iteration an integer representing the iteration number of algorithm
#' @return The players' reduced payoff matrices obtained after eliminating strictly dominated choices
#' @author Bilge Baser
#' @details This function works for games with two players.
#' @export "esdc"
#' @importFrom "stats" runif
#' @importFrom "utils" combn
#' @examples
#' a=4
#' b=4
#' pay.A=matrix(c(0,3,2,1,4,0,2,1,4,3,0,1,4,3,2,0),4,4)
#' ch.A=c("Blue","Green","Red","Yellow")
#' pay.B=matrix(c(5,4,4,4,3,5,3,3,2,2,5,2,1,1,1,5),4,4)
#' ch.B=c("Blue","Green","Red","Yellow")
#' iter=5
#' esdc(a,b,pay.A,ch.A,pay.B,ch.B,iter)
esdc<-function(n,m,A,choices.A,B,choices.B,iteration){
rownames(A)<-choices.A
rownames(B)<-rownames(A)
colnames(B)<-choices.B
colnames(A)<-colnames(B)
br=dim(B)[1]
ac=dim(A)[2]
t=1
elimination=0
Dim_A=dim(A)
Dim_B=dim(B)
print("The utility matrix of Player 1:")
print(A)
print("The utility matrix of Player 2:")
print(B)
repeat{
if(n<2){
br=1
}
else{
br=dim(B)[1]
while((n-t)>=1){ # if the row player's number of strategies is greater than 2
row=nrow(A)
C<-t(combn(n,n-t)) # to build the combination matrices
N<-t(matrix(rep(1:n, ncol(combn(n,n-t))),n,ncol(combn(n,n-t))))
if((ncol(N)-ncol(C))<2){ # if the matrix D has a single column
D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
for(dif in 1:nrow(C)){
D[dif]=setdiff(N[dif,],C[dif,])# elements in N but not in C
D<-as.matrix(D)
}
}else{ # if the matrix D has more than one column
D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
for(dif in 1:nrow(C)){
D[dif, ]=setdiff(1:n,C[dif, ])
}
}
Prob<-vector(mode="numeric",length=(n-t))
R<-vector(mode="numeric",length=(n-t))
P<-vector(mode="numeric",length=(n-t))
interval=1
csa=1
while(csa<=nrow(C)){
if(ncol(C)<2){
if(ncol(D)<2){
if(m<2){
if(n<2){
A<-t(as.matrix(A))
break
}
else if (n==2){# if n=2
if(A[1]<A[2]){
A=A[-1]
elimination=elimination+1
n=n-1
t=1
if(n==1){
A<-t(as.matrix(A))
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[1],"is strictly dominated by the choice",choices.A[2],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
break
}
else{
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[1],"is strictly dominated by the choice",choices.A[2],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
}
}
else if(A[1]>A[2]) {
A=A[-2]
elimination=elimination+1
n=n-1
t=1
if(n==1){
A<-t(as.matrix(A))
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[2],"is strictly dominated by the choice",choices.A[1],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
break
}
else{
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[2],"is strictly dominated by the choice",choices.A[1],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
}
}
else{
}
}
else{
max=which(A==max(A),arr.ind = T)
max<-as.vector(max)
A=A[max[1],]
elimination=elimination+1
print(paste("Elimination For Player 1:",elimination))
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
n=1
break
}
}#if m=1
else{#if m>=2
if(n<2){
A<-as.matrix(A)
print(A)
print(B)
break
}
else if(n==2){
if(all(A[1,]<A[2,])){
A=A[-1,]
B=B[-1,]
elimination=elimination+1
n=n-1
t=1
if(n==1){
A<-t(as.matrix(A))
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[1],"is strictly dominated by the choice",choices.A[2],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
break
}
else{
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[1],"is strictly dominated by the choice",choices.A[2],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
}
}
else if(all(A[2,]<A[1,])){
A=A[-2,]
B=B[-2,]
elimination=elimination+1
n=n-1
t=1
if(n==1){
A<-t(as.matrix(A))
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[2],"is strictly dominated by the choice",choices.A[1],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
break
}
else{
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[2],"is strictly dominated by the choice",choices.A[1],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
}
}
else{
}
}
else{
}
C<-t(combn(n,n-t))
N<-t(matrix(rep(1:n,ncol(combn(n,n-t))),n,ncol(combn(n,n-t))))
if((ncol(N)-ncol(C))<2){ # if the matrix D has a single column
D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
for(dif in 1:nrow(C)){
D[dif]=setdiff(N[dif,],C[dif,])# elements in N but not in C
D<-as.matrix(D)
}
}else{ # if the matrix D has more than one column
D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
for(dif in 1:nrow(C)){
D[dif, ]=setdiff(1:n,C[dif, ])
}
}
}#if m>=2
}#D<2
else{#if D>=2
C<-t(combn(n,n-t))
N<-t(matrix(rep(1:n,ncol(combn(n,n-t))),n,ncol(combn(n,n-t))))
if((ncol(N)-ncol(C))<2){ # if the matrix D has a single column
D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
for(dif in 1:nrow(C)){
D[dif]=setdiff(N[dif,],C[dif,])# elements in N but not in C
D<-as.matrix(D)
}
}else{ # if the matrix D has more than one column
D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
for(dif in 1:nrow(C)){
D[dif, ]=setdiff(1:n,C[dif, ])
}
}
for(z in 1:nrow(D)){
k=1
while(k<ncol(D)){
if(all(A[D[z,k],]<A[D[z,k+1],])){
A=A[-D[z,k],]
B=B[-D[z,k],]
n=n-1
t=1
elimination=elimination+1
if(n==1){
A<-t(as.matrix(A))
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[D[z,k]],"is strictly dominated by the choice",choices.A[D[z,k+1]],"with the probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
break
}
else{
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[D[z,k]],"is strictly dominated by the choice",choices.A[D[z,k+1]],"with the probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
}
C<-t(combn(n,n-t))
N<-t(matrix(rep(1:n,ncol(combn(n,n-t))),n,ncol(combn(n,n-t))))
if((ncol(N)-ncol(C))<2){ # if the matrix D has a single column
D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
for(dif in 1:nrow(C)){
D[dif]=setdiff(N[dif,],C[dif,])# elements in N but not in C
D<-as.matrix(D)
}
}else{ # if the matrix D has more than one column
D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
for(dif in 1:nrow(C)){
D[dif, ]=setdiff(1:n,C[dif, ])
}
}
}#if
else if(all(A[D[z,k],]>A[D[z,k+1],])){
A=A[-D[z,k+1],]
B=B[-D[z,k+1],]
n=n-1
t=1
elimination=elimination+1
if(n==1){
A<-t(as.matrix(A))
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[D[z,k+1]],"is strictly dominated by the choice",choices.A[D[z,k]],"with the probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
break
}
else{
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[D[z,k+1]],"is strictly dominated by the choice",choices.A[D[z,k]],"with the probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
}
C<-t(combn(n,n-t))
N<-t(matrix(rep(1:n,ncol(combn(n,n-t))),n,ncol(combn(n,n-t))))
if((ncol(N)-ncol(C))<2){ # if the matrix D has a single column
D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
for(dif in 1:nrow(C)){
D[dif]=setdiff(N[dif,],C[dif,])# elements in N but not in C
D<-as.matrix(D)
}
}else{ # if the matrix D has more than one column
D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
for(dif in 1:nrow(C)){
D[dif, ]=setdiff(1:n,C[dif, ])
}
}
}#else if
else{
}
k=k+1
}#k<ncol(D)
}#z
}#else D>2
}#C<2
else{
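# Randomized dominance check for the row player: for each candidate support set
# C[csa,], draw 'iteration' random probability vectors and test whether the
# resulting mixed strategy gives a strictly higher expected utility than the
# excluded pure strategy (or strategies) in D[csa,] against every column of the
# opponent.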
for(s in 1:(iteration)){ # generate a probability vector 'iteration' times for each row of the combination matrix
Prob<-0
P<-0
R<-0
k=1
sum=0
Ex<-vector(mode="numeric",length=m)
Mul<-vector(mode="numeric",length=(n-t))
counter=1
for(j in 1:(n-t)){
R[j]<-C[csa,j]
P[R[j]]<-runif(1,min=0,max=interval)
Prob[k]<-P[R[j]]
sum<-sum+P[R[j]]
interval<-(1-sum)
counter=counter+1
if(k==(n-(t+1))){
R[counter]<-C[csa,counter]
P[R[counter]]<-interval
Prob[(k+1)]<-P[R[counter]]
break
}
else {
k=k+1
}
}#for_j
if(ncol(D)<2){
Ex<-vector(mode="numeric",length=m)
val<-vector(mode="numeric",length=m)
for(e in 1:m){
c=1
while(c<=ncol(C)){ # compute the expected utility under the drawn probabilities
Mul[c]=Prob[c]*A[C[csa,c],e]
Ex[e]=Ex[e]+Mul[c]
c<-c+1
}#while_c
val[e]=A[D[csa],e]
} #e
if(all(Ex>val)){
elimination=elimination+1
A=A[-D[csa], ]
B=B[-D[csa], ]
n=n-1
if(n==1){
A<-t(as.matrix(A))
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[D[csa]],"is strictly dominated by the randomization of the choices",choices.A[C[csa, ]],"with the probabilities",Prob,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
break
}else{
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[D[csa]],"is strictly dominated by the randomization of the choices",choices.A[C[csa, ]],"with the probabilities",Prob,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
}
C<-t(combn(n,n-t))
N<-t(matrix(rep(1:n, ncol(combn(n,n-t))),n,ncol(combn(n,n-t))))
D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
for(dif in 1:nrow(C)){
D[dif, ]=setdiff(1:n,C[dif, ])
}
csa=0
break # exit the s loop; if strictly dominated, reduce the game and rebuild the combination matrix, otherwise continue with the iterations
}
else{
}
} #if ncol(D)
else{#if ncol(D)>=2
for(ds in 1:ncol(D)){
val<-vector(mode="numeric",length=m)
for(e in 1:m){
c=1
while(c<=ncol(C)){ # compute the expected utility under the drawn probabilities
Mul[c]=Prob[c]*A[C[csa,c],e]
Ex[e]=Ex[e]+Mul[c]
c<-c+1
}#while_c
val[e]=A[D[csa,ds],e]
}#for e
if(all(Ex>val)){
elimination=elimination+1
A=A[-D[csa,ds], ]
B=B[-D[csa,ds], ]
n=n-1
if(n==1){
A<-t(as.matrix(A))
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[D[csa,ds]],"is strictly dominated by the randomization of choices",choices.A[C[csa, ]],"with the probabilities",Prob,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
break
}else{
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[D[csa,ds]],"is strictly dominated by the randomization of choices",choices.A[C[csa, ]],"with the probabilities",Prob,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
}
C<-t(combn(n,n-t))
N<-t(matrix(rep(1:n, ncol(combn(n,n-t))),n,ncol(combn(n,n-t))))
D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
for(dif in 1:nrow(C)){
D[dif, ]=setdiff(1:n,C[dif, ])
}
csa=0
break # exit the s loop; if strictly dominated, reduce the game and rebuild the combination matrix, otherwise continue with the iterations
}#if all
else{
}
} #for ds
break
}#else
} #s
}#C>=2
csa=csa+1
}#csa
if(n==1){
break
}
else{
}
if(row==nrow(A)){
t=t+1
}else{
t=1
}
}#while(n-t)
}
A<-as.matrix(A)
B<-as.matrix(B)
if(m<2){
ac=1
}
else{
ac=dim(A)[2]
t=1
while((m-t)>=1){ # if the column player's number of strategies is greater than 2
col=ncol(B)
CC<-t(combn(m,m-t)) # to build the combination matrices
NN<-t(matrix(rep(1:m, ncol(combn(m,m-t))),m,ncol(combn(m,m-t))))
if((ncol(NN)-ncol(CC))<2){ # if the matrix DD has a single column
DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
for(dif in 1:nrow(CC)){
DD[dif]=setdiff(NN[dif,],CC[dif, ])# elements in NN but not in CC
DD<-as.matrix(DD)
}
}else{ # if the matrix DD has more than one column
DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
for(dif in 1:nrow(CC)){
DD[dif, ]=setdiff(1:m,CC[dif, ])
}
}
Prob<-vector(mode="numeric",length=(m-t))
R<-vector(mode="numeric",length=(m-t))
P<-vector(mode="numeric",length=(m-t))
interval=1
csu=1
while(csu<=nrow(CC)){
if(ncol(CC)<2){
if(ncol(DD)<2){
if(n<2){
if(m<2){
B<-as.matrix(B)
break
}
else if(m==2){
if(B[1]<B[2]){
B=B[-1]
elimination=elimination+1
m=m-1
t=1
if(m==1){
B<-as.matrix(B)
print(paste("Elimination For Player 2:",elimination))
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
break
}
else{
print(paste("Elimination For Player 2:",elimination))
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
}
}
else if(B[2]<B[1]){
B=B[-2]
elimination=elimination+1
m=m-1
t=1
if(m==1){
B<-as.matrix(B)
print(paste("Elimination For Player 2:",elimination))
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
break
}
else{
print(paste("Elimination For Player 2:",elimination))
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
}
}
else{
}
}#if m=2
else{
maxx=which(B==max(B),arr.ind = T)
maxx<-as.vector(maxx)
B=B[,maxx[2]]
elimination=elimination+1
print(paste("Elimination For Player 2:",elimination))
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
m=1
break
}
}#if n<2
else{# if n>=2
if(m==1){
B<-as.matrix(B)
print(B)
break
}
else if(m==2){
if(all(B[,1]<B[,2])){
B=B[,-1]
A=A[,-1]
elimination=elimination+1
m=m-1
t=1
if(m==1){
B<-as.matrix(B)
print(paste("Elimination:",elimination))
cat("For Player 2:",choices.B[1],"is strictly dominated by the choice",choices.B[2],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
break
}
else{
print(paste("Elimination:",elimination))
cat("For Player 2:",choices.B[1],"is strictly dominated by the choice",choices.B[2],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
}
}
else if(all(B[,2]<B[,1])){
A=A[,-2]
B=B[,-2]
elimination=elimination+1
m=m-1
t=1
if(m==1){
B<-as.matrix(B)
choices.B<-colnames(B)
print(paste("Elimination:",elimination))
cat("For Player 2:",choices.B[2],"is strictly dominated by the choice",choices.B[1],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
break
}
else{
choices.B<-colnames(B)
print(paste("Elimination:",elimination))
cat("For Player 2:",choices.B[2],"is strictly dominated by the choice",choices.B[1],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
}
}
else{
}
}
else{
}
CC<-t(combn(m,m-t)) # to build the combination matrices
NN<-t(matrix(rep(1:m, ncol(combn(m,m-t))),m,ncol(combn(m,m-t))))
if((ncol(NN)-ncol(CC))<2){ # if the matrix DD has a single column
DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
for(dif in 1:nrow(CC)){
DD[dif]=setdiff(NN[dif,],CC[dif, ])# elements in NN but not in CC
DD<-as.matrix(DD)
}
}else{ # if the matrix DD has more than one column
DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
for(dif in 1:nrow(CC)){
DD[dif, ]=setdiff(1:m,CC[dif, ])
}
}
}#if n>=2
}#DD<2
else{#if DD>=2
CC<-t(combn(m,m-t)) # to build the combination matrices
NN<-t(matrix(rep(1:m, ncol(combn(m,m-t))),m,ncol(combn(m,m-t))))
if((ncol(NN)-ncol(CC))<2){ # if the matrix DD has a single column
DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
for(dif in 1:nrow(CC)){
DD[dif]=setdiff(NN[dif,],CC[dif, ])# elements in NN but not in CC
DD<-as.matrix(DD)
}
}else{ # if the matrix DD has more than one column
DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
for(dif in 1:nrow(CC)){
DD[dif, ]=setdiff(1:m,CC[dif, ])
}
}
for(z in 1:nrow(DD)){
k=1
while(k<ncol(DD)){
if(all(B[,DD[z,k]]<B[,DD[z,k+1]])){
B=B[,-DD[z,k]]
A=A[,-DD[z,k]]
elimination=elimination+1
m=m-1
t=1
if(m==1){
B<-as.matrix(B)
print(paste("Elimination:",elimination))
cat("For Player 2:",choices.B[DD[z,k]],"is strictly dominated by the choice",choices.B[DD[z,k+1]],"with probability",1,'\n')
print(A)
print(B)
choices.B<-colnames(B)
break
}
else{
}
CC<-t(combn(m,m-t)) # to build the combination matrices
NN<-t(matrix(rep(1:m, ncol(combn(m,m-t))),m,ncol(combn(m,m-t))))
if((ncol(NN)-ncol(CC))<2){ # if the matrix DD has a single column
DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
for(dif in 1:nrow(CC)){
DD[dif]=setdiff(NN[dif,],CC[dif, ])# elements in NN but not in CC
DD<-as.matrix(DD)
}
}else{ # if the matrix DD has more than one column
DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
for(dif in 1:nrow(CC)){
DD[dif, ]=setdiff(1:m,CC[dif, ])
}
}
}#if
else if(all(B[,DD[z,k]]>B[,DD[z,k+1]])){
A=A[,-DD[z,k+1]]
B=B[,-DD[z,k+1]]
elimination=elimination+1
m=m-1
t=1
if(m==1){
B<-as.matrix(B)
print(paste("Elimination:",elimination))
cat("For Player 2:",choices.B[DD[z,k+1]],"is strictly dominated by the choice",choices.B[DD[z,k]],"with probability",1,'\n')
print(A)
print(B)
choices.B<-rownames(B)
break
}
else{
}
CC<-t(combn(m,m-t)) # to build the combination matrices
NN<-t(matrix(rep(1:m, ncol(combn(m,m-t))),m,ncol(combn(m,m-t))))
if((ncol(NN)-ncol(CC))<2){ # if the matrix DD has a single column
DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
for(dif in 1:nrow(CC)){
DD[dif]=setdiff(NN[dif,],CC[dif, ])# elements in NN but not in CC
DD<-as.matrix(DD)
}
}else{ # if the matrix DD has more than one column
DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
for(dif in 1:nrow(CC)){
DD[dif, ]=setdiff(1:m,CC[dif, ])
}
}
}#else if
else{
}
k=k+1
}#k<ncol(DD)
}#z
}#else DD>2
}#CC<2
else{#CC>=2
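# Randomized dominance check for the column player, mirroring the row-player
# block above: draw 'iteration' random probability vectors over each support set
# CC[csu,] and test whether the mixture strictly dominates the excluded pure
# strategies in DD[csu,] against every row of the opponent.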
for(s in 1:(iteration)){ # generate a probability vector 'iteration' times for each row of the combination matrix
Prob<-0
P<-0
R<-0
k=1
sum=0
Ex<-vector(mode="numeric",length=n)
Mul<-vector(mode="numeric",length=(m-t))
counter=1
for(j in 1:(m-t)){
R[j]<-CC[csu,j]
P[R[j]]<-runif(1,min=0,max=interval)
Prob[k]<-P[R[j]]
sum<-sum+P[R[j]]
interval<-(1-sum)
counter=counter+1
if(k==(m-(t+1))){
R[counter]<-CC[csu,counter]
P[R[counter]]<-interval
Prob[(k+1)]<-P[R[counter]]
break
}
else {
k=k+1
}
}#for_j
if(ncol(DD)<2){
Ex<-vector(mode="numeric",length=n)
val<-vector(mode="numeric",length=n)
for(e in 1:n){
c=1
while(c<=ncol(CC)){ # compute the expected utility under the drawn probabilities
Mul[c]=Prob[c]*B[e,CC[csu,c]]
Ex[e]=Ex[e]+Mul[c]
c<-c+1
}#while_c
val[e]=B[e,DD[csu]]
} #e
if(all(Ex>val)){
elimination=elimination+1
print(paste("Elimination:",elimination))
cat("For Player 2:",choices.B[DD[csu]],"is strictly dominated by the randomization of the choices",choices.B[CC[csu, ]],"with the probabilities",Prob,'\n')
B=B[,-DD[csu]]
A=A[,-DD[csu]]
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.B<-colnames(B)
m=m-1
if(m==1){
B<-as.matrix(B)
break
}else{
}
CC<-t(combn(m,m-t))
NN<-t(matrix(rep(1:m, ncol(combn(m,m-t))),m,ncol(combn(m,m-t))))
DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
for(dif in 1:nrow(CC)){
DD[dif, ]=setdiff(1:m,CC[dif, ])
}
csu=0
break # exit the s loop; if strictly dominated, reduce the game and rebuild the combination matrix, otherwise continue with the iterations
}#all
else{
}
} #if ncol(DD)<2
else{#if ncol(DD)>=2
for(ds in 1:ncol(DD)){
val<-vector(mode="numeric",length=n)
for(e in 1:n){
c=1
while(c<=ncol(CC)){ # compute the expected utility under the drawn probabilities
Mul[c]=Prob[c]*B[e,CC[csu,c]]
Ex[e]=Ex[e]+Mul[c]
c<-c+1
}#while_c
val[e]=B[e,DD[csu,ds]]
}#for e
if(all(Ex>val)){
elimination=elimination+1
print(paste("Elimination:",elimination))
cat("For Player 2:",choices.B[DD[csu,ds]],"is strictly dominated by the randomization of the choices",choices.B[CC[csu, ]],"with the probabilities",Prob,'\n')
B=B[,-DD[csu,ds]]
A=A[,-DD[csu,ds]]
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.B<-colnames(B)
m=m-1
if(m==1){
B<-as.matrix(B)
break
}else{
}
CC<-t(combn(m,m-t))
NN<-t(matrix(rep(1:m, ncol(combn(m,m-t))),m,ncol(combn(m,m-t))))
DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
for(dif in 1:nrow(CC)){
DD[dif, ]=setdiff(1:m,CC[dif, ])
}
csu=0
break # exit the s loop; if strictly dominated, reduce the game and rebuild the combination matrix, otherwise continue with the iterations
}#if all
else{
}
} #for ds
break
}#else ncol(DD)>2
} #s
}#CC>=2
csu=csu+1
}#csu
if(m==1){
break
}
else{
}
if(col==ncol(B)){
t=t+1
}else{
t=1
}
} #while(m-t)
}
ac_new=m
br_new=n
if(ac_new==ac&&br_new==br){
break
}
else{
}
}
print("ELIMINATION IS OVER.")
print("The Last Reduced Matrix For Player 1:")
if(n==1){
print(A)
}
else{
print(A)
}
print("The Last Reduced Matrix For Player 2:")
print(B)
}
#'Finding types that express common belief in rationality for optimal choices
#'
#'This function takes the reduced payoff matrices and finds out the probabilities for the types that expresses common belief in rationality for optimal choices.
#'
#'
#' @param A an nxm matrix representing the reduced payoff matrix of player 1
#' @param B an nxm matrix representing the reduced payoff matrix of player 2
#' @param choices.A a vector of length n representing the names of player 1's choices
#' @param choices.B a vector of length m representing the names of player 2's choices
#' @return Probabilities of the types that expresses common belief in rationality for optimal choices
#' @author Bilge Baser
#' @details This function works for games with two players. It returns an infeasible solution for irrational choices.
#' @export "type"
#' @importFrom "lpSolve" lp
#' @importFrom "utils" install.packages
#' @seealso \code{lp}
#' @examples
#' Ar=matrix(c(0,3,2,4,0,2,4,3,0),3,3)
#' choices.Ar=c("Blue","Green","Red")
#' Br=matrix(c(5,4,4,3,5,3,2,2,5),3,3)
#' choices.Br=c("Blue","Green","Red")
#' type(Ar,Br,choices.Ar,choices.Br)
type<-function(A,B,choices.A,choices.B){
rownames(A)<-choices.A
rownames(B)<-rownames(A)
colnames(B)<-choices.B
colnames(A)<-colnames(B)
S<-vector(mode="numeric",length=ncol(A))
Fa<-vector(mode="numeric",length=ncol(A)-1)
SS<-vector(mode = "numeric",length = nrow(B))
Fb<-vector(mode="numeric",length=nrow(B)-1)
S<-c(1:ncol(A))
SS<-c(1:nrow(B))
print("The utility matrix of Player 1:")
print(A)
for (i in 1:nrow(A)) {
Fark<-matrix(nrow=nrow(A)-1,ncol=ncol(A))
for (j in 1:nrow(A)-1) {
Fa<-setdiff(S,i)
Fark[j,]<-A[i,]-A[Fa[j],]
}
if(nrow(A)>1){
print(paste("The difference between the coefficients of the utility functions for the strategy",rownames(A)[i],":"))
print(Fark)
}
else{
}
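# Linear program for choice i of Player 1: search for a belief (a probability
# vector over the opponent's choices) under which choice i is optimal. The
# constraints require the utility differences in Fark to be >= 0 and the
# probabilities to sum to 1; the objective maximizes the expected utility of
# choice i under that belief.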
Kat<-rbind(Fark,rep(1,ncol(A)))
f.obj<-vector(mode="numeric",length=ncol(A))
f.obj<-c(A[i,])
f.obj<-as.data.frame(f.obj)
f.con<-matrix(Kat,nrow=nrow(A),byrow=TRUE)
f.dir<-c(rep(">=",nrow(A)-1),"=")
f.rhs<-c(rep(0,nrow(A)-1),1)
Sols<-lp("max",f.obj,f.con,f.dir,f.rhs,transpose.constraints = FALSE)$solution
cat("Player 1's type for the strategy",rownames(A)[i],":",Sols,"\n")
Sol<-lp("max",f.obj,f.con,f.dir,f.rhs,transpose.constraints = FALSE)
print(Sol)
}
print("The utility matrix of Player 2:")
print(B)
for (i in 1:ncol(B)) {
Farkb<-matrix(nrow=nrow(B),ncol=ncol(B)-1)
for (j in 1:ncol(B)-1) {
Fb<-setdiff(SS,i)
Farkb[,j]<-B[,i]-B[,Fb[j]]
}
if(ncol(B)>1){
print(paste("The difference between the coefficients of the utility functions for the strategy",colnames(B)[i],":"))
print(Farkb)
}
else{
}
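# Same linear program as above, now for Player 2: beliefs are probability
# vectors over Player 1's choices, and the columns of Farkb hold the utility
# differences that must stay >= 0 for choice i to be optimal.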
Katb<-cbind(Farkb,rep(1,nrow(B)))
fb.obj<-vector(mode="numeric",length=nrow(B))
fb.obj<-c(B[,i])
fb.obj<-as.data.frame(fb.obj)
fb.con<-matrix(Katb,ncol=ncol(B))
fb.dir<-c(rep(">=",ncol(B)-1),"=")
fb.rhs<-c(rep(0,ncol(B)-1),1)
Solsb<-lp("max",fb.obj,fb.con,fb.dir,fb.rhs,transpose.constraints = FALSE)$solution
cat("Player 2's Type For The Strategy",colnames(B)[i],":",Solsb,"\n")
Solb<-lp("max",fb.obj,fb.con,fb.dir,fb.rhs,transpose.constraints = FALSE)
print(Solb)
}
}
|
/R/EpistemicGameTheory.R
|
no_license
|
cran/EpistemicGameTheory
|
R
| false | false | 41,218 |
r
|
#' Eliminating strictly dominated choices
#'
#' This function eliminates strictly dominated choices.
#'
#' @param n an integer representing the number of choices of player 1
#' @param m an integer representing the number of choices of player 2
#' @param A an nxm matrix representing the payoff matrix of player 1
#' @param choices.A a vector of length n representing the names of player 1's choices
#' @param B an nxm matrix representing the payoff matrix of player 2
#' @param choices.B a vector of length m representing the names of player 2's choices
#' @param iteration an integer representing the iteration number of algorithm
#' @return The reduced matrices of players' that are obtained after eliminating strictly dominated choices
#' @author Bilge Baser
#' @details This function works for the games with two players.
#' @export "esdc"
#' @importFrom "stats" runif
#' @importFrom "utils" combn
#' @examples
#' a=4
#' b=4
#' pay.A=matrix(c(0,3,2,1,4,0,2,1,4,3,0,1,4,3,2,0),4,4)
#' ch.A=c("Blue","Green","Red","Yellow")
#' pay.B=matrix(c(5,4,4,4,3,5,3,3,2,2,5,2,1,1,1,5),4,4)
#' ch.B=c("Blue","Green","Red","Yellow")
#' iter=5
#' esdc(a,b,pay.A,ch.A,pay.B,ch.B,iter)
esdc<-function(n,m,A,choices.A,B,choices.B,iteration){
rownames(A)<-choices.A
rownames(B)<-rownames(A)
colnames(B)<-choices.B
colnames(A)<-colnames(B)
br=dim(B)[1]
ac=dim(A)[2]
t=1
elimination=0
Dim_A=dim(A)
Dim_B=dim(B)
print("The utility matrix of Player 1:")
print(A)
print("The utility matrix of Player 2:")
print(B)
repeat{
if(n<2){
br=1
}
else{
br=dim(B)[1]
while((n-t)>=1){ #Satir oyuncusunun strateji sayisi 2'den buyukse
row=nrow(A)
C<-t(combn(n,n-t)) #Kombinasyon matrisleri olusturmak icin
N<-t(matrix(rep(1:n, ncol(combn(n,n-t))),n,ncol(combn(n,n-t))))
if((ncol(N)-ncol(C))<2){ #D matrisinin sutun sayisi 1 ise
D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
for(dif in 1:nrow(C)){
D[dif]=setdiff(N[dif,],C[dif,])# Nde olup Cde olmayan
D<-as.matrix(D)
}
}else{ # D matrisinin sutun sayisi 1'den buyuk ise
D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
for(dif in 1:nrow(C)){
D[dif, ]=setdiff(1:n,C[dif, ])
}
}
Prob<-vector(mode="numeric",length=(n-t))
R<-vector(mode="numeric",length=(n-t))
P<-vector(mode="numeric",length=(n-t))
interval=1
csa=1
while(csa<=nrow(C)){
if(ncol(C)<2){
if(ncol(D)<2){
if(m<2){
if(n<2){
A<-t(as.matrix(A))
break
}
else if (n==2){# if n=2
if(A[1]<A[2]){
A=A[-1]
elimination=elimination+1
n=n-1
t=1
if(n==1){
A<-t(as.matrix(A))
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[1],"is strictly dominated by the choice",choices.A[2],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
break
}
else{
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[1],"is strictly dominated by the choice",choices.A[2],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
}
}
else if(A[1]>A[2]) {
A=A[-2]
elimination=elimination+1
n=n-1
t=1
if(n==1){
A<-t(as.matrix(A))
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[2],"is strictly dominated by the choice",choices.A[1],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
break
}
else{
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[2],"is strictly dominated by the choice",choices.A[1],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
}
}
else{
}
}
else{
max=which(A==max(A),arr.ind = T)
max<-as.vector(max)
A=A[max[1],]
elimination=elimination+1
print(paste("Elimination For Player 1:",elimination))
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
n=1
break
}
}#if m=1
else{#if m>=2
if(n<2){
A<-as.matrix(A)
print(A)
print(B)
break
}
else if(n==2){
if(all(A[1,]<A[2,])){
A=A[-1,]
B=B[-1,]
elimination=elimination+1
n=n-1
t=1
if(n==1){
A<-t(as.matrix(A))
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[1],"is strictly dominated by the choice",choices.A[2],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
break
}
else{
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[1],"is strictly dominated by the choice",choices.A[2],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
}
}
else if(all(A[2,]<A[1,])){
A=A[-2,]
B=B[-2,]
elimination=elimination+1
n=n-1
t=1
if(n==1){
A<-t(as.matrix(A))
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[2],"is strictly dominated by the choice",choices.A[1],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
break
}
else{
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[2],"is strictly dominated by the choice",choices.A[1],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
}
}
else{
}
}
else{
}
C<-t(combn(n,n-t))
N<-t(matrix(rep(1:n,ncol(combn(n,n-t))),n,ncol(combn(n,n-t))))
if((ncol(N)-ncol(C))<2){ #D matrisinin sutun sayisi 1 ise
D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
for(dif in 1:nrow(C)){
D[dif]=setdiff(N[dif,],C[dif,])# Nde olup Cde olmayan
D<-as.matrix(D)
}
}else{ # D matrisinin sutun sayisi 1'den buyuk ise
D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
for(dif in 1:nrow(C)){
D[dif, ]=setdiff(1:n,C[dif, ])
}
}
}#if m>=2
}#D<2
else{#if D>=2
C<-t(combn(n,n-t))
N<-t(matrix(rep(1:n,ncol(combn(n,n-t))),n,ncol(combn(n,n-t))))
      if((ncol(N)-ncol(C))<2){ # if matrix D has a single column
        D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
        for(dif in 1:nrow(C)){
          D[dif]=setdiff(N[dif,],C[dif,])# elements in N that are not in C
          D<-as.matrix(D)
        }
      }else{ # if matrix D has more than one column
D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
for(dif in 1:nrow(C)){
D[dif, ]=setdiff(1:n,C[dif, ])
}
}
for(z in 1:nrow(D)){
k=1
while(k<ncol(D)){
if(all(A[D[z,k],]<A[D[z,k+1],])){
A=A[-D[z,k],]
B=B[-D[z,k],]
n=n-1
t=1
elimination=elimination+1
if(n==1){
A<-t(as.matrix(A))
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[D[z,k]],"is strictly dominated by the choice",choices.A[D[z,k+1]],"with the probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
break
}
else{
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[D[z,k]],"is strictly dominated by the choice",choices.A[D[z,k+1]],"with the probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
}
C<-t(combn(n,n-t))
N<-t(matrix(rep(1:n,ncol(combn(n,n-t))),n,ncol(combn(n,n-t))))
              if((ncol(N)-ncol(C))<2){ # if matrix D has a single column
                D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
                for(dif in 1:nrow(C)){
                  D[dif]=setdiff(N[dif,],C[dif,])# elements in N that are not in C
                  D<-as.matrix(D)
                }
              }else{ # if matrix D has more than one column
D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
for(dif in 1:nrow(C)){
D[dif, ]=setdiff(1:n,C[dif, ])
}
}
}#if
else if(all(A[D[z,k],]>A[D[z,k+1],])){
A=A[-D[z,k+1],]
B=B[-D[z,k+1],]
n=n-1
t=1
elimination=elimination+1
if(n==1){
A<-t(as.matrix(A))
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[D[z,k+1]],"is strictly dominated by the choice",choices.A[D[z,k]],"with the probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
break
}
else{
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[D[z,k+1]],"is strictly dominated by the choice",choices.A[D[z,k]],"with the probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
}
C<-t(combn(n,n-t))
N<-t(matrix(rep(1:n,ncol(combn(n,n-t))),n,ncol(combn(n,n-t))))
              if((ncol(N)-ncol(C))<2){ # if matrix D has a single column
                D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
                for(dif in 1:nrow(C)){
                  D[dif]=setdiff(N[dif,],C[dif,])# elements in N that are not in C
                  D<-as.matrix(D)
                }
              }else{ # if matrix D has more than one column
D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
for(dif in 1:nrow(C)){
D[dif, ]=setdiff(1:n,C[dif, ])
}
}
}#else if
else{
}
k=k+1
}#k<ncol(D)
}#z
}#else D>2
}#C<2
else{
      for(s in 1:(iteration)){ # generate probability vectors iteration times for each row of the combination matrix
Prob<-0
P<-0
R<-0
k=1
sum=0
Ex<-vector(mode="numeric",length=m)
Mul<-vector(mode="numeric",length=(n-t))
counter=1
for(j in 1:(n-t)){
R[j]<-C[csa,j]
P[R[j]]<-runif(1,min=0,max=interval)
Prob[k]<-P[R[j]]
sum<-sum+P[R[j]]
interval<-(1-sum)
counter=counter+1
if(k==(n-(t+1))){
R[counter]<-C[csa,counter]
P[R[counter]]<-interval
Prob[(k+1)]<-P[R[counter]]
break
}
else {
k=k+1
}
}#for_j
if(ncol(D)<2){
Ex<-vector(mode="numeric",length=m)
val<-vector(mode="numeric",length=m)
for(e in 1:m){
c=1
          while(c<=ncol(C)){ # compute the expected value with the sampled probabilities
Mul[c]=Prob[c]*A[C[csa,c],e]
Ex[e]=Ex[e]+Mul[c]
c<-c+1
}#while_c
val[e]=A[D[csa],e]
} #e
if(all(Ex>val)){
elimination=elimination+1
A=A[-D[csa], ]
B=B[-D[csa], ]
n=n-1
if(n==1){
A<-t(as.matrix(A))
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[D[csa]],"is strictly dominated by the randomization of the choices",choices.A[C[csa, ]],"with the probabilities",Prob,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
break
}else{
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[D[csa]],"is strictly dominated by the randomization of the choices",choices.A[C[csa, ]],"with the probabilities",Prob,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
}
C<-t(combn(n,n-t))
N<-t(matrix(rep(1:n, ncol(combn(n,n-t))),n,ncol(combn(n,n-t))))
D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
for(dif in 1:nrow(C)){
D[dif, ]=setdiff(1:n,C[dif, ])
}
csa=0
            break # break exits the s loop: if a strictly dominated choice was found, reduce the matrix and rebuild C; otherwise continue the iterations
}
else{
}
} #if ncol(D)
else{#if ncol(D)>=2
for(ds in 1:ncol(D)){
val<-vector(mode="numeric",length=m)
for(e in 1:m){
c=1
            while(c<=ncol(C)){ # compute the expected value with the sampled probabilities
Mul[c]=Prob[c]*A[C[csa,c],e]
Ex[e]=Ex[e]+Mul[c]
c<-c+1
}#while_c
val[e]=A[D[csa,ds],e]
}#for e
if(all(Ex>val)){
elimination=elimination+1
A=A[-D[csa,ds], ]
B=B[-D[csa,ds], ]
n=n-1
if(n==1){
A<-t(as.matrix(A))
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[D[csa,ds]],"is strictly dominated by the randomization of choices",choices.A[C[csa, ]],"with the probabilities",Prob,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
break
}else{
print(paste("Elimination:",elimination))
cat("For Player 1:",choices.A[D[csa,ds]],"is strictly dominated by the randomization of choices",choices.A[C[csa, ]],"with the probabilities",Prob,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.A<-rownames(A)
}
C<-t(combn(n,n-t))
N<-t(matrix(rep(1:n, ncol(combn(n,n-t))),n,ncol(combn(n,n-t))))
D<-matrix(nrow=nrow(C),ncol=ncol(N)-ncol(C))
for(dif in 1:nrow(C)){
D[dif, ]=setdiff(1:n,C[dif, ])
}
csa=0
              break # break exits the s loop: if a strictly dominated choice was found, reduce the matrix and rebuild C; otherwise continue the iterations
}#if all
else{
}
} #for ds
break
}#else
} #s
}#C>=2
csa=csa+1
}#csa
if(n==1){
break
}
else{
}
if(row==nrow(A)){
t=t+1
}else{
t=1
}
}#while(n-t)
}
A<-as.matrix(A)
B<-as.matrix(B)
if(m<2){
ac=1
}
else{
ac=dim(A)[2]
t=1
    while((m-t)>=1){ # if the column player has more than two strategies
      col=ncol(B)
      CC<-t(combn(m,m-t)) # to build the combination matrices
      NN<-t(matrix(rep(1:m, ncol(combn(m,m-t))),m,ncol(combn(m,m-t))))
      if((ncol(NN)-ncol(CC))<2){ # if matrix DD has a single column
        DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
        for(dif in 1:nrow(CC)){
          DD[dif]=setdiff(NN[dif,],CC[dif, ])# elements in NN that are not in CC
          DD<-as.matrix(DD)
        }
      }else{ # if matrix DD has more than one column
DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
for(dif in 1:nrow(CC)){
DD[dif, ]=setdiff(1:m,CC[dif, ])
}
}
Prob<-vector(mode="numeric",length=(m-t))
R<-vector(mode="numeric",length=(m-t))
P<-vector(mode="numeric",length=(m-t))
interval=1
csu=1
while(csu<=nrow(CC)){
if(ncol(CC)<2){
if(ncol(DD)<2){
if(n<2){
if(m<2){
B<-as.matrix(B)
break
}
else if(m==2){
if(B[1]<B[2]){
B=B[-1]
elimination=elimination+1
m=m-1
t=1
if(m==1){
B<-as.matrix(B)
print(paste("Elimination For Player 2:",elimination))
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
break
}
else{
print(paste("Elimination For Player 2:",elimination))
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
}
}
else if(B[2]<B[1]){
B=B[-2]
elimination=elimination+1
m=m-1
t=1
if(m==1){
B<-as.matrix(B)
print(paste("Elimination For Player 2:",elimination))
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
break
}
else{
print(paste("Elimination For Player 2:",elimination))
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
}
}
else{
}
}#if m=2
else{
              maxx=which(B==max(B),arr.ind = T)
              maxx<-as.vector(maxx)
              B=B[,maxx[2]]
elimination=elimination+1
print(paste("Elimination For Player 2:",elimination))
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
m=1
break
}
}#if n<2
else{# if n>=2
if(m==1){
B<-as.matrix(B)
print(B)
break
}
else if(m==2){
if(all(B[,1]<B[,2])){
B=B[,-1]
A=A[,-1]
elimination=elimination+1
m=m-1
t=1
if(m==1){
B<-as.matrix(B)
print(paste("Elimination:",elimination))
cat("For Player 2:",choices.B[1],"is strictly dominated by the choice",choices.B[2],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
break
}
else{
print(paste("Elimination:",elimination))
cat("For Player 2:",choices.B[1],"is strictly dominated by the choice",choices.B[2],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
}
}
else if(all(B[,2]<B[,1])){
A=A[,-2]
B=B[,-2]
elimination=elimination+1
m=m-1
t=1
if(m==1){
B<-as.matrix(B)
choices.B<-colnames(B)
print(paste("Elimination:",elimination))
cat("For Player 2:",choices.B[2],"is strictly dominated by the choice",choices.B[1],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
break
}
else{
choices.B<-colnames(B)
print(paste("Elimination:",elimination))
cat("For Player 2:",choices.B[2],"is strictly dominated by the choice",choices.B[1],"with probability",1,'\n')
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
}
}
else{
}
}
else{
}
        CC<-t(combn(m,m-t)) # to build the combination matrices
        NN<-t(matrix(rep(1:m, ncol(combn(m,m-t))),m,ncol(combn(m,m-t))))
        if((ncol(NN)-ncol(CC))<2){ # if matrix DD has a single column
          DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
          for(dif in 1:nrow(CC)){
            DD[dif]=setdiff(NN[dif,],CC[dif, ])# elements in NN that are not in CC
            DD<-as.matrix(DD)
          }
        }else{ # if matrix DD has more than one column
DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
for(dif in 1:nrow(CC)){
DD[dif, ]=setdiff(1:m,CC[dif, ])
}
}
}#if n>=2
}#DD<2
else{#if DD>=2
        CC<-t(combn(m,m-t)) # to build the combination matrices
        NN<-t(matrix(rep(1:m, ncol(combn(m,m-t))),m,ncol(combn(m,m-t))))
        if((ncol(NN)-ncol(CC))<2){ # if matrix DD has a single column
          DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
          for(dif in 1:nrow(CC)){
            DD[dif]=setdiff(NN[dif,],CC[dif, ])# elements in NN that are not in CC
            DD<-as.matrix(DD)
          }
        }else{ # if matrix DD has more than one column
DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
for(dif in 1:nrow(CC)){
DD[dif, ]=setdiff(1:m,CC[dif, ])
}
}
for(z in 1:nrow(DD)){
k=1
while(k<ncol(DD)){
if(all(B[,DD[z,k]]<B[,DD[z,k+1]])){
B=B[,-DD[z,k]]
A=A[,-DD[z,k]]
elimination=elimination+1
m=m-1
t=1
if(m==1){
B<-as.matrix(B)
print(paste("Elimination:",elimination))
cat("For Player 2:",choices.B[DD[z,k]],"is strictly dominated by the choice",choices.B[DD[z,k+1]],"with probability",1,'\n')
print(A)
print(B)
choices.B<-colnames(B)
break
}
else{
}
                CC<-t(combn(m,m-t)) # to build the combination matrices
                NN<-t(matrix(rep(1:m, ncol(combn(m,m-t))),m,ncol(combn(m,m-t))))
                if((ncol(NN)-ncol(CC))<2){ # if matrix DD has a single column
                  DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
                  for(dif in 1:nrow(CC)){
                    DD[dif]=setdiff(NN[dif,],CC[dif, ])# elements in NN that are not in CC
                    DD<-as.matrix(DD)
                  }
                }else{ # if matrix DD has more than one column
DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
for(dif in 1:nrow(CC)){
DD[dif, ]=setdiff(1:m,CC[dif, ])
}
}
}#if
else if(all(B[,DD[z,k]]>B[,DD[z,k+1]])){
A=A[,-DD[z,k+1]]
B=B[,-DD[z,k+1]]
elimination=elimination+1
m=m-1
t=1
if(m==1){
B<-as.matrix(B)
print(paste("Elimination:",elimination))
cat("For Player 2:",choices.B[DD[z,k+1]],"is strictly dominated by the choice",choices.B[DD[z,k]],"with probability",1,'\n')
print(A)
print(B)
choices.B<-rownames(B)
break
}
else{
}
                CC<-t(combn(m,m-t)) # to build the combination matrices
                NN<-t(matrix(rep(1:m, ncol(combn(m,m-t))),m,ncol(combn(m,m-t))))
                if((ncol(NN)-ncol(CC))<2){ # if matrix DD has a single column
                  DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
                  for(dif in 1:nrow(CC)){
                    DD[dif]=setdiff(NN[dif,],CC[dif, ])# elements in NN that are not in CC
                    DD<-as.matrix(DD)
                  }
                }else{ # if matrix DD has more than one column
DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
for(dif in 1:nrow(CC)){
DD[dif, ]=setdiff(1:m,CC[dif, ])
}
}
}#else if
else{
}
k=k+1
}#k<ncol(DD)
}#z
}#else DD>2
}#CC<2
else{#CC>=2
      for(s in 1:(iteration)){ # generate probability vectors iteration times for each row of the combination matrix
Prob<-0
P<-0
R<-0
k=1
sum=0
Ex<-vector(mode="numeric",length=n)
Mul<-vector(mode="numeric",length=(m-t))
counter=1
for(j in 1:(m-t)){
R[j]<-CC[csu,j]
P[R[j]]<-runif(1,min=0,max=interval)
Prob[k]<-P[R[j]]
sum<-sum+P[R[j]]
interval<-(1-sum)
counter=counter+1
if(k==(m-(t+1))){
R[counter]<-CC[csu,counter]
P[R[counter]]<-interval
Prob[(k+1)]<-P[R[counter]]
break
}
else {
k=k+1
}
}#for_j
if(ncol(DD)<2){
Ex<-vector(mode="numeric",length=n)
val<-vector(mode="numeric",length=n)
for(e in 1:n){
c=1
            while(c<=ncol(CC)){ # compute the expected value with the sampled probabilities
Mul[c]=Prob[c]*B[e,CC[csu,c]]
Ex[e]=Ex[e]+Mul[c]
c<-c+1
}#while_c
val[e]=B[e,DD[csu]]
} #e
if(all(Ex>val)){
elimination=elimination+1
print(paste("Elimination:",elimination))
cat("For Player 2:",choices.B[DD[csu]],"is strictly dominated by the randomization of the choices",choices.B[CC[csu, ]],"with the probabilities",Prob,'\n')
B=B[,-DD[csu]]
A=A[,-DD[csu]]
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.B<-colnames(B)
m=m-1
if(m==1){
B<-as.matrix(B)
break
}else{
}
CC<-t(combn(m,m-t))
NN<-t(matrix(rep(1:m, ncol(combn(m,m-t))),m,ncol(combn(m,m-t))))
DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
for(dif in 1:nrow(CC)){
DD[dif, ]=setdiff(1:m,CC[dif, ])
}
csu=0
              break # break exits the s loop: if a strictly dominated choice was found, reduce the matrix and rebuild CC; otherwise continue the iterations
}#all
else{
}
} #if ncol(DD)<2
else{#if ncol(DD)>=2
for(ds in 1:ncol(DD)){
val<-vector(mode="numeric",length=n)
for(e in 1:n){
c=1
              while(c<=ncol(CC)){ # compute the expected value with the sampled probabilities
Mul[c]=Prob[c]*B[e,CC[csu,c]]
Ex[e]=Ex[e]+Mul[c]
c<-c+1
}#while_c
val[e]=B[e,DD[csu,ds]]
}#for e
if(all(Ex>val)){
elimination=elimination+1
print(paste("Elimination:",elimination))
cat("For Player 2:",choices.B[DD[csu,ds]],"is strictly dominated by the randomization of the choices",choices.B[CC[csu, ]],"with the probabilities",Prob,'\n')
B=B[,-DD[csu,ds]]
A=A[,-DD[csu,ds]]
print("The reduced utility matrix of Player 1:")
print(A)
print("The reduced utility matrix of Player 2:")
print(B)
choices.B<-colnames(B)
m=m-1
if(m==1){
B<-as.matrix(B)
break
}else{
}
CC<-t(combn(m,m-t))
NN<-t(matrix(rep(1:m, ncol(combn(m,m-t))),m,ncol(combn(m,m-t))))
DD<-matrix(nrow=nrow(CC),ncol=ncol(NN)-ncol(CC))
for(dif in 1:nrow(CC)){
DD[dif, ]=setdiff(1:m,CC[dif, ])
}
csu=0
                break # break exits the s loop: if a strictly dominated choice was found, reduce the matrix and rebuild CC; otherwise continue the iterations
}#if all
else{
}
} #for ds
break
}#else ncol(DD)>2
} #s
}#CC>=2
csu=csu+1
}#csu
if(m==1){
break
}
else{
}
if(col==ncol(B)){
t=t+1
}else{
t=1
}
} #while(m-t)
}
ac_new=m
br_new=n
if(ac_new==ac&&br_new==br){
break
}
else{
}
}
print("ELIMINATION IS OVER.")
print("The Last Reduced Matrix For Player 1:")
if(n==1){
print(A)
}
else{
print(A)
}
print("The Last Reduced Matrix For Player 2:")
print(B)
}
#'Finding types that express common belief in rationality for optimal choices
#'
#'This function takes the reduced payoff matrices and finds the probabilities for the types that express common belief in rationality for optimal choices.
#'
#'
#' @param A an nxm matrix representing the reduced payoff matrix of player 1
#' @param B an nxm matrix representing the reduced payoff matrix of player 2
#' @param choices.A a vector of length n representing the names of player 1's choices
#' @param choices.B a vector of length m representing the names of player 2's choices
#' @return Probabilities of the types that express common belief in rationality for optimal choices
#' @author Bilge Baser
#' @details This function works for games with two players. It returns an infeasible solution for irrational choices.
#' @export "type"
#' @importFrom "lpSolve" lp
#' @importFrom "utils" install.packages
#' @seealso \code{lp}
#' @examples
#' Ar=matrix(c(0,3,2,4,0,2,4,3,0),3,3)
#' choices.Ar=c("Blue","Green","Red")
#' Br=matrix(c(5,4,4,3,5,3,2,2,5),3,3)
#' choices.Br=c("Blue","Green","Red")
#' type(Ar,Br,choices.Ar,choices.Br)
type<-function(A,B,choices.A,choices.B){
rownames(A)<-choices.A
rownames(B)<-rownames(A)
colnames(B)<-choices.B
colnames(A)<-colnames(B)
S<-vector(mode="numeric",length=ncol(A))
Fa<-vector(mode="numeric",length=ncol(A)-1)
SS<-vector(mode = "numeric",length = nrow(B))
Fb<-vector(mode="numeric",length=nrow(B)-1)
S<-c(1:ncol(A))
SS<-c(1:nrow(B))
print("The utility matrix of Player 1:")
print(A)
for (i in 1:nrow(A)) {
Fark<-matrix(nrow=nrow(A)-1,ncol=ncol(A))
for (j in 1:nrow(A)-1) {
Fa<-setdiff(S,i)
Fark[j,]<-A[i,]-A[Fa[j],]
}
if(nrow(A)>1){
print(paste("The difference between the coefficients of the utility functions for the strategy",rownames(A)[i],":"))
print(Fark)
}
else{
}
Kat<-rbind(Fark,rep(1,ncol(A)))
f.obj<-vector(mode="numeric",length=ncol(A))
f.obj<-c(A[i,])
f.obj<-as.data.frame(f.obj)
f.con<-matrix(Kat,nrow=nrow(A),byrow=TRUE)
f.dir<-c(rep(">=",nrow(A)-1),"=")
f.rhs<-c(rep(0,nrow(A)-1),1)
Sols<-lp("max",f.obj,f.con,f.dir,f.rhs,transpose.constraints = FALSE)$solution
cat("Player 1's type for the strategy",rownames(A)[i],":",Sols,"\n")
Sol<-lp("max",f.obj,f.con,f.dir,f.rhs,transpose.constraints = FALSE)
print(Sol)
}
print("The utility matrix of Player 2:")
print(B)
for (i in 1:ncol(B)) {
Farkb<-matrix(nrow=nrow(B),ncol=ncol(B)-1)
for (j in 1:ncol(B)-1) {
Fb<-setdiff(SS,i)
Farkb[,j]<-B[,i]-B[,Fb[j]]
}
if(ncol(B)>1){
print(paste("The difference between the coefficients of the utility functions for the strategy",colnames(B)[i],":"))
print(Farkb)
}
else{
}
Katb<-cbind(Farkb,rep(1,nrow(B)))
fb.obj<-vector(mode="numeric",length=nrow(B))
fb.obj<-c(B[,i])
fb.obj<-as.data.frame(fb.obj)
fb.con<-matrix(Katb,ncol=ncol(B))
fb.dir<-c(rep(">=",ncol(B)-1),"=")
fb.rhs<-c(rep(0,ncol(B)-1),1)
Solsb<-lp("max",fb.obj,fb.con,fb.dir,fb.rhs,transpose.constraints = FALSE)$solution
cat("Player 2's Type For The Strategy",colnames(B)[i],":",Solsb,"\n")
Solb<-lp("max",fb.obj,fb.con,fb.dir,fb.rhs,transpose.constraints = FALSE)
print(Solb)
}
}
|
my_messy_data <- read_csv("https://raw.githubusercontent.com/ajstewartlang/03_data_wrangling/master/data/my_data.csv")
head(my_messy_data)
#Recode our four conditions
my_messy_data %>%
mutate(condition = recode(condition,
"1" = "PrimeA_TargetB",
"2" = "PrimeA_TargetB",
"3" = "PrimeB_TargetA",
"4" = "PrimeB_TargetB")) %>%
#Separating our condition columns
separate(col = "condition", into = c("Prime", "Target"), sep = "_") %>%
#Mutate our condition columns into factors
mutate(Prime = factor(Prime), Target = factor(Target))
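# For reference, a minimal sketch of what separate() does to one recoded value
# (assumes the tidyverse is already loaded; the tibble here is purely illustrative):
# tibble(condition = c("PrimeA_TargetA", "PrimeB_TargetB")) %>%
#   separate(col = "condition", into = c("Prime", "Target"), sep = "_")
# # gives two columns: Prime = "PrimeA"/"PrimeB", Target = "TargetA"/"TargetB"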
|
/recoding_variables_script.R
|
no_license
|
shimthal61/data-wrangling
|
R
| false | false | 648 |
r
|
my_messy_data <- read_csv("https://raw.githubusercontent.com/ajstewartlang/03_data_wrangling/master/data/my_data.csv")
head(my_messy_data)
#Recode our four conditions
my_messy_data %>%
mutate(condition = recode(condition,
"1" = "PrimeA_TargetB",
"2" = "PrimeA_TargetB",
"3" = "PrimeB_TargetA",
"4" = "PrimeB_TargetB")) %>%
#Separating our condition columns
separate(col = "condition", into = c("Prime", "Target"), sep = "_") %>%
#Mutate our condition columns into factors
mutate(Prime = factor(Prime), Target = factor(Target))
|
#' @export
read_gamelogs <- function(player = NULL) {
dat <- plyr::ldply(list.files("data/gamelogs"), function(x) {
t <- read.csv(file = paste0("data/gamelogs/", x), header = T, stringsAsFactors = F)
t$year <- as.numeric(substr(x, 1, 4))
return(t)
})
dat <- dat %>% filter(!is.na(game_num))
  if(!is.null(player)) {
    # pin the argument first: `filter(player == player)` would compare the column with itself
    player_name <- player
    dat <- dat %>% filter(player == player_name)
  }
dat$player[dat$player == "Odell Beckham"] <- "Odell Beckham Jr."
dat <- dat %>% select(player, year, game_num, rush_yds, rush_td, rec, rec_yds, rec_td, two_pt_md, pass_yds, pass_td, pass_int, kick_ret_yds, kick_ret_td, punt_ret_yds, punt_ret_td)
dat[is.na(dat)] <- 0
dat$pts <- weekly_fantasy_points(dat)
return(dat)
}
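# Minimal usage sketch (assumes CSV gamelogs exist under data/gamelogs/ and that
# weekly_fantasy_points() is defined elsewhere in the package):
# all_logs <- read_gamelogs()
# obj_logs <- read_gamelogs(player = "Odell Beckham Jr.")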
|
/R/read_gamelogs.R
|
no_license
|
ctloftin/FantasyFootballData
|
R
| false | false | 716 |
r
|
#' @export
read_gamelogs <- function(player = NULL) {
dat <- plyr::ldply(list.files("data/gamelogs"), function(x) {
t <- read.csv(file = paste0("data/gamelogs/", x), header = T, stringsAsFactors = F)
t$year <- as.numeric(substr(x, 1, 4))
return(t)
})
dat <- dat %>% filter(!is.na(game_num))
if(!is.null(player)) {
dat <- dat %>% filter(player == player)
}
dat$player[dat$player == "Odell Beckham"] <- "Odell Beckham Jr."
dat <- dat %>% select(player, year, game_num, rush_yds, rush_td, rec, rec_yds, rec_td, two_pt_md, pass_yds, pass_td, pass_int, kick_ret_yds, kick_ret_td, punt_ret_yds, punt_ret_td)
dat[is.na(dat)] <- 0
dat$pts <- weekly_fantasy_points(dat)
return(dat)
}
|
# determination of P(k,N,w)
pval <- function(k,N,w)
{
return((k/w-N-1)*b(k,N,w)+2*Gb(k,N,w))
}
# helper function
b<-function(k,N,w)
{
return(choose(N,k)*w^k*(1-w)^(N-k))
}
# helper function
Gb<-function(k,N,w)
{
sum<-0
for(i in k:N)
{
sum <- sum + b(i,N,w)
}
return(sum)
}
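# Taken together, pval() approximates the tail probability of the scan statistic:
# P(k, N, w) ~ (k/w - N - 1) * b(k, N, w) + 2 * Gb(k, N, w),
# where b() is the Binomial(N, w) probability that exactly k of the N points fall into a
# window covering a fraction w of the range, and Gb() is the corresponding upper tail.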
# If two significant overlapping windows were found, these windows are
# merged. If the windows do not overlap, two different windows are stored
# in a list
listadapt <- function(lcur,lnew)
{
if(length(lcur)==0)
{
lcur=lnew
return(lcur)
}
else
{
if(lnew[[1]][1]<=lcur[[length(lcur)]][2])
{
lcur[[length(lcur)]][2]<-lnew[[1]][2]
if(lcur[[length(lcur)]][3]>lnew[[1]][3])
{
lcur[[length(lcur)]][3] <- lnew[[1]][3]
}
return(lcur)
}
else
{
lcur<-append(lcur,lnew)
return(lcur)
}
}
}
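# Quick illustration of the merge rule above (illustrative numbers only):
# listadapt(list(c(1, 3, 0.05)), list(c(2.5, 5, 0.02)))  # overlapping -> merged to c(1, 5, 0.02)
# listadapt(list(c(1, 3, 0.05)), list(c(4, 6, 0.02)))    # disjoint -> both windows kept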
# This method searches for data accumulations by shifting a window with
# window size <w> across the data and deciding at each position if there
# is a data accumulation. To test this, a scan statistic with significance
# level <sign.level> is used.
scanStatistic <- function(vect, w=0.25, sign.level=0.1)
{
temp<-vect
vect <-unlist(vect)
vsort <- sort(vect)
N <- length(vect)
range <- (max(vect)) - (min(vect))
windowsize <- range*w
N <- length(vect)
binarizeddata<-temp
res<-list()
lcur<-list()
# shift a fixed window over the data
# the window is moved from point to point
for(i in seq_along(vect))
{
start <- vsort[i]
stop <- vsort[i] + windowsize
k <- length(vect[(vect >= start) & (vect <= stop)])
p <- pval(k,N,w)
if(p>1)
{
p=0.99
}
if(p<=sign.level & p>0 & k >= (N*w-1) & k > 2)
{
res <- listadapt(res,list(c(start,stop,p)))
}
}
# if no accumulation for a fixed <sign.level> was found, the
# binarization is rejected, and we search for a accumulation
# with a higher sign.level.
if(length(res)==0)
{
while(TRUE)
{
sign.level=sign.level+0.05
if(sign.level>2)
{
binarizeddata<-(sapply(vect,function(x) 0))
return(list(bindata=binarizeddata,thresholds=NA,reject=TRUE))
}
for(i in seq_along(vect))
{
start <- vsort[i]
stop <- vsort[i] + windowsize
k <- length(vect[(vect >= start) & (vect <= stop)])
p <- pval(k,N,w)
if(p>1)
{
p=0.99
}
if(p<=sign.level & p>0 & k >= (N*w-1) & k > 2)
{
#res <- append(res,list(c(start=start,stop=stop,pval=p)))
res <- listadapt(res,list(c(start,stop,p)))
}
}
if(length(res)!=0)
break
}
reject<-TRUE
}
else
{
reject<-FALSE
}
# search the window with the smallest sign.level.
# this window is used for the binarization
min=1000
ind=0
for(i in seq_along(res))
{
if(res[[i]][3]<min)
{
ind=i
min=res[[i]][3]
}
}
# are more points on the left or on the right side
# of the window? Based on this, the binarization is performed
bigger <- length(vect[vect > res[[ind]][2]])
smaller <- length(vect[vect < res[[ind]][1]])
if(bigger > smaller)
{
threshold<-res[[ind]][2]
small<-tail(vsort[vsort<=threshold],n=1)
big<-vsort[vsort>threshold][1]
thres<-(big+small)/2
for(i in seq_along(vect))
{
if(vect[i]<=threshold)
{
binarizeddata[i]<-0
}
else
{
binarizeddata[i]<-1
}
}
}
else
{
threshold<-res[[ind]][1]
small<-tail(vsort[vsort<threshold],n=1)
big<-vsort[vsort>=threshold][1]
thres<-(big+small)/2
for(i in seq_along(vect))
{
if(vect[i]>=threshold)
{
binarizeddata[i]<-1
}
else
{
binarizeddata[i]<-0
}
}
}
return(list(bindata=binarizeddata,thresholds=as.numeric(thres),reject=reject))
}
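# Minimal usage sketch (toy data; the helper functions above must be in scope):
# x <- c(9.8, 10.1, 10.0, 9.9, 10.2, 1.2, 15.3, 20.7)
# res <- scanStatistic(x, w = 0.25, sign.level = 0.1)
# res$bindata     # 0/1 binarization of x
# res$thresholds  # threshold separating the two groups
# res$reject      # TRUE if the significance level had to be relaxed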
|
/R/scanStatistic.R
|
no_license
|
cran/BoolNet
|
R
| false | false | 3,735 |
r
|
# determination of P(k,N,w)
pval <- function(k,N,w)
{
return((k/w-N-1)*b(k,N,w)+2*Gb(k,N,w))
}
# helper function
b<-function(k,N,w)
{
return(choose(N,k)*w^k*(1-w)^(N-k))
}
# helper function
Gb<-function(k,N,w)
{
sum<-0
for(i in k:N)
{
sum <- sum + b(i,N,w)
}
return(sum)
}
# If two significant overlapping windows were found, these windows are
# merged. If the windows do not overlap, two different windows are stored
# in a list
listadapt <- function(lcur,lnew)
{
if(length(lcur)==0)
{
lcur=lnew
return(lcur)
}
else
{
if(lnew[[1]][1]<=lcur[[length(lcur)]][2])
{
lcur[[length(lcur)]][2]<-lnew[[1]][2]
if(lcur[[length(lcur)]][3]>lnew[[1]][3])
{
lcur[[length(lcur)]][3] <- lnew[[1]][3]
}
return(lcur)
}
else
{
lcur<-append(lcur,lnew)
return(lcur)
}
}
}
# This method searches for data accumulations by shifting a window with
# window size <w> across the data and deciding at each position if there
# is a data accumulation. To test this, a scan statistic with significance
# level <sign.level> is used.
scanStatistic <- function(vect, w=0.25, sign.level=0.1)
{
temp<-vect
vect <-unlist(vect)
vsort <- sort(vect)
N <- length(vect)
range <- (max(vect)) - (min(vect))
windowsize <- range*w
N <- length(vect)
binarizeddata<-temp
res<-list()
lcur<-list()
# shift a fixed window over the data
# the window is moved from point to point
for(i in seq_along(vect))
{
start <- vsort[i]
stop <- vsort[i] + windowsize
k <- length(vect[(vect >= start) & (vect <= stop)])
p <- pval(k,N,w)
if(p>1)
{
p=0.99
}
if(p<=sign.level & p>0 & k >= (N*w-1) & k > 2)
{
res <- listadapt(res,list(c(start,stop,p)))
}
}
# if no accumulation for a fixed <sign.level> was found, the
# binarization is rejected, and we search for a accumulation
# with a higher sign.level.
if(length(res)==0)
{
while(TRUE)
{
sign.level=sign.level+0.05
if(sign.level>2)
{
binarizeddata<-(sapply(vect,function(x) 0))
return(list(bindata=binarizeddata,thresholds=NA,reject=TRUE))
}
for(i in seq_along(vect))
{
start <- vsort[i]
stop <- vsort[i] + windowsize
k <- length(vect[(vect >= start) & (vect <= stop)])
p <- pval(k,N,w)
if(p>1)
{
p=0.99
}
if(p<=sign.level & p>0 & k >= (N*w-1) & k > 2)
{
#res <- append(res,list(c(start=start,stop=stop,pval=p)))
res <- listadapt(res,list(c(start,stop,p)))
}
}
if(length(res)!=0)
break
}
reject<-TRUE
}
else
{
reject<-FALSE
}
# search the window with the smallest sign.level.
# this window is used for the binarization
min=1000
ind=0
for(i in seq_along(res))
{
if(res[[i]][3]<min)
{
ind=i
min=res[[i]][3]
}
}
# are more points on the left or on the right side
# of the window? Based on this, the binarization is performed
bigger <- length(vect[vect > res[[ind]][2]])
smaller <- length(vect[vect < res[[ind]][1]])
if(bigger > smaller)
{
threshold<-res[[ind]][2]
small<-tail(vsort[vsort<=threshold],n=1)
big<-vsort[vsort>threshold][1]
thres<-(big+small)/2
for(i in seq_along(vect))
{
if(vect[i]<=threshold)
{
binarizeddata[i]<-0
}
else
{
binarizeddata[i]<-1
}
}
}
else
{
threshold<-res[[ind]][1]
small<-tail(vsort[vsort<threshold],n=1)
big<-vsort[vsort>=threshold][1]
thres<-(big+small)/2
for(i in seq_along(vect))
{
if(vect[i]>=threshold)
{
binarizeddata[i]<-1
}
else
{
binarizeddata[i]<-0
}
}
}
return(list(bindata=binarizeddata,thresholds=as.numeric(thres),reject=reject))
}
|
library(DBI)
library(RPostgres)
library(tidyverse)
library(dbplyr)
library(bakeoff)
# Download a PostgreSQL Database engine
# https://www.postgresql.org/
# Open PG Admin using the username + pswd you set-up
# when installing
# Create a new DB called great_brit_bakeoff_pg
# Connect to an existing database
# to write our data
con <- dbConnect(drv = Postgres(),
host = "localhost",
port = "5432",
user = "your_user_name",
password = "your_pswd",
dbname = "great_brit_bakeoff_pg")
episodes <- as_tibble(bakeoff::all_episodes)
baker_results <- as_tibble(bakeoff::baker_results)
bakers <- as_tibble(bakeoff::bakers)
bakes <- as_tibble(bakeoff::bakes)
challenge_results <- as_tibble(bakeoff::challenge_results)
challenges <- as_tibble(bakeoff::challenges)
episode_results <- as_tibble(bakeoff::episode_results)
ratings <- as_tibble(bakeoff::ratings)
ratings_seasons <- as_tibble(bakeoff::ratings_seasons)
results <- as_tibble(bakeoff::results)
seasons <- as_tibble(bakeoff::seasons)
series <- as_tibble(bakeoff::series)
DBI::dbWriteTable(conn = con, "episodes",
episodes, overwrite = TRUE)
DBI::dbWriteTable(conn = con, "baker_results",
baker_results, overwrite = TRUE)
DBI::dbWriteTable(conn = con, "bakers",
bakers, overwrite = TRUE)
DBI::dbWriteTable(conn = con, "bakes",
bakes, overwrite = TRUE)
DBI::dbWriteTable(conn = con, "challenge_results",
challenge_results, overwrite = TRUE)
DBI::dbWriteTable(conn = con, "episode_results",
episode_results, overwrite = TRUE)
DBI::dbWriteTable(conn = con, "ratings",
ratings, overwrite = TRUE)
DBI::dbWriteTable(conn = con, "ratings_seasons",
ratings_seasons, overwrite = TRUE)
DBI::dbWriteTable(conn = con, "results",
results, overwrite = TRUE)
DBI::dbWriteTable(conn = con, "challenges",
challenges, overwrite = TRUE)
DBI::dbWriteTable(conn = con, "seasons",
seasons, overwrite = TRUE)
DBI::dbWriteTable(conn = con, "series",
series, overwrite = TRUE)
# Verify Tables were created
DBI::dbListTables(con)
tbl(con, "seasons")
dbDisconnect(con)
# In R Script / Rmd make an odbc connection
# You will need to download the odbc driver
# for Postgres
# con_pg <- dbConnect(
# odbc::odbc(),
# Driver = "PostgreSQL ODBC Driver(UNICODE)",
# Database = "great_brit_bakeoff_pg",
# Server = "localhost",
# UID = "your_user_name",
# PWD = "your_pswd",
# port = 5432
# )
#
# dbListTables(con_pg)
#
# RPostgres::dbGetQuery(conn = con_pg,
# statement = 'SELECT * FROM bakers LIMIT 10')
#
# dbDisconnect(con_pg)
# Alternately use the RPostgres package to
# make a connection
# con <- dbConnect(
# drv = RPostgres::Postgres(),
# dbname = "great_brit_bakeoff_pg",
# host = "localhost",
# user = "your_user_name",
# password = "your_pwsd",
# port = "5432"
# )
#
# odbc::odbcListDrivers()
|
/db_create_scripts/create_postgres_db.R
|
no_license
|
Faithmagut1/intro-to-dbs-r
|
R
| false | false | 3,089 |
r
|
library(DBI)
library(RPostgres)
library(tidyverse)
library(dbplyr)
library(bakeoff)
# Download a PostgreSQL Database engine
# https://www.postgresql.org/
# Open PG Admin using the username + pswd you set-up
# when installing
# Create a new DB called great_brit_bakeoff_pg
# Connect to an existing database
# to write our data
con <- dbConnect(drv = Postgres(),
host = "localhost",
port = "5432",
user = "your_user_name",
password = "your_pswd",
dbname = "great_brit_bakeoff_pg")
episodes <- as_tibble(bakeoff::all_episodes)
baker_results <- as_tibble(bakeoff::baker_results)
bakers <- as_tibble(bakeoff::bakers)
bakes <- as_tibble(bakeoff::bakes)
challenge_results <- as_tibble(bakeoff::challenge_results)
challenges <- as_tibble(bakeoff::challenges)
episode_results <- as_tibble(bakeoff::episode_results)
ratings <- as_tibble(bakeoff::ratings)
ratings_seasons <- as_tibble(bakeoff::ratings_seasons)
results <- as_tibble(bakeoff::results)
seasons <- as_tibble(bakeoff::seasons)
series <- as_tibble(bakeoff::series)
DBI::dbWriteTable(conn = con, "episodes",
episodes, overwrite = TRUE)
DBI::dbWriteTable(conn = con, "baker_results",
baker_results, overwrite = TRUE)
DBI::dbWriteTable(conn = con, "bakers",
bakers, overwrite = TRUE)
DBI::dbWriteTable(conn = con, "bakes",
bakes, overwrite = TRUE)
DBI::dbWriteTable(conn = con, "challenge_results",
challenge_results, overwrite = TRUE)
DBI::dbWriteTable(conn = con, "episode_results",
episode_results, overwrite = TRUE)
DBI::dbWriteTable(conn = con, "ratings",
ratings, overwrite = TRUE)
DBI::dbWriteTable(conn = con, "ratings_seasons",
ratings_seasons, overwrite = TRUE)
DBI::dbWriteTable(conn = con, "results",
results, overwrite = TRUE)
DBI::dbWriteTable(conn = con, "challenges",
challenges, overwrite = TRUE)
DBI::dbWriteTable(conn = con, "seasons",
seasons, overwrite = TRUE)
DBI::dbWriteTable(conn = con, "series",
series, overwrite = TRUE)
# Verify Tables were created
DBI::dbListTables(con)
tbl(con, "seasons")
dbDisconnect(con)
# In R Script / Rmd make an odbc connection
# You will need to download the odbc driver
# for Postgres
# con_pg <- dbConnect(
# odbc::odbc(),
# Driver = "PostgreSQL ODBC Driver(UNICODE)",
# Database = "great_brit_bakeoff_pg",
# Server = "localhost",
# UID = "your_user_name",
# PWD = "your_pswd",
# port = 5432
# )
#
# dbListTables(con_pg)
#
# RPostgres::dbGetQuery(conn = con_pg,
# statement = 'SELECT * FROM bakers LIMIT 10')
#
# dbDisconnect(con_pg)
# Alternately use the RPostgres package to
# make a connection
# con <- dbConnect(
# drv = RPostgres::Postgres(),
# dbname = "great_brit_bakeoff_pg",
# host = "localhost",
# user = "your_user_name",
# password = "your_pwsd",
# port = "5432"
# )
#
# odbc::odbcListDrivers()
|
#' @title Report for Distribuidora del Sur, S.A.
#' @author Fernanda González (20180190)
#' @description Questions to answer:
#' 1) Hire more staff?
#' 2) Buy more vehicles? Which ones?
#' 3) Are the current rates adequate?
#' 4) Do the drivers steal?
#' 5) 80-20 of clients
#' 6) 80-20 of drivers
#' PREPARE THE WORK ENVIRONMENT
install.packages("tidyverse")
library(tidyverse)
#' IMPORT DATA
Entregas2017 <- read.csv("/Users/baroness/Documents/201802/CS-DS001 Data Wrangling (A)/Lab03/Reporte2017/Entregas2017.csv")
#' EXPLORE
summary(Entregas2017)
#' TIDY THE DATA
Entregas2017<-Entregas2017 %>%
tidyr::separate(CLIENTE, c('CLIENTE', 'ESTATUS_ENTREGA', 'ESTATUS_ENTREGA2'), sep="([/|])+")%>%
  #' Some client and status values were left with trailing whitespace.
dplyr::mutate(CLIENTE = str_trim(CLIENTE)) %>%
dplyr::mutate(ESTATUS_ENTREGA = str_trim(ESTATUS_ENTREGA)) %>%
dplyr::mutate_all(toupper) %>%
  #' Every delivery that reported missing product was dispatched first.
  #' Recording them as both dispatched and missing is redundant.
dplyr::mutate(ESTATUS_ENTREGA = dplyr::case_when(ESTATUS_ENTREGA2=="FALTANTE" ~ "FALTANTE",
TRUE ~ ESTATUS_ENTREGA)) %>%
dplyr::select(-X, -COD_VIAJE, -ESTATUS_ENTREGA2) %>%
dplyr::mutate(UBICACION = dplyr::case_when(UBICACION==76001 ~ 1,
UBICACION==76002 ~ 2,
TRUE ~ as.numeric(UBICACION)))
head(Entregas2017)
#' TRANSFORM
#' 1) To check whether the rates are adequate and to find the 80-20 clients.
#' 1.1) Total orders, totals per location, total clients, units and Q, average credit.
Resumen2017<-Entregas2017%>%
dplyr::select(Fecha, UBICACION, CLIENTE, CANTIDAD, Q, CREDITO) %>%
dplyr::summarise(PEDIDOS=sum(n()),
CLIENTES=n_distinct(CLIENTE),
CLIENTES20=n_distinct(CLIENTE)*0.2,
UBI1=sum(UBICACION== 1),
UBI2=sum(UBICACION== 2),
PRODUCTO=sum(as.numeric(CANTIDAD)),
MEDIA_CRED=mean(as.numeric(CREDITO)),
Q_TOTAL=sum(as.numeric(Q)),
Q80=sum(as.numeric(Q))*0.8)
head(Resumen2017)
#' 1.2) Missing/returned/dispatched product per client.
Entregas2017$ESTATUS_ENTREGA<-as.factor(Entregas2017$ESTATUS_ENTREGA)
Clientes2017<-Entregas2017 %>%
dplyr::group_by(CLIENTE) %>%
dplyr::count(ESTATUS_ENTREGA) %>%
tidyr::spread(key=ESTATUS_ENTREGA, value=n)%>%
dplyr::arrange(CLIENTE)
head(Clientes2017)
Clientes2017pt2<-Entregas2017 %>%
dplyr::select(CLIENTE, CANTIDAD, Q, CREDITO) %>%
dplyr::group_by(CLIENTE) %>%
dplyr::summarise(CANTIDAD=sum(as.numeric(CANTIDAD)),
Q=sum(as.numeric(Q)),
CREDITO=mean(as.numeric(CREDITO))) %>%
dplyr::arrange(CLIENTE)
head(Clientes2017pt2)
Clientes2017<-merge(x=Clientes2017,
y=Clientes2017pt2[, c("CLIENTE","CANTIDAD", "Q", "CREDITO")],
by="CLIENTE", all.x = TRUE)
Clientes2017[is.na(Clientes2017)]<-0
head(Clientes2017)
ClientesCrono <- Entregas2017 %>%
dplyr::select(Fecha, CLIENTE, ESTATUS_ENTREGA) %>%
dplyr::group_by(Fecha, CLIENTE) %>%
dplyr::count(ESTATUS_ENTREGA) %>%
tidyr::spread(key=ESTATUS_ENTREGA, value=n) %>%
dplyr::arrange(CLIENTE)
ClientesCrono[is.na(ClientesCrono)]<-0
head(ClientesCrono)
ClientesCronoPt2 <- Entregas2017 %>%
dplyr::select(Fecha, CLIENTE, Q) %>%
dplyr::group_by(Fecha, CLIENTE) %>%
dplyr::summarise(Q=sum(as.numeric(Q))) %>%
dplyr::arrange(CLIENTE)
head(ClientesCronoPt2)
ClientesCrono<-ClientesCronoPt2 %>%
dplyr::select(Q) %>%
dplyr::bind_cols(ClientesCrono)
head(ClientesCrono)
ClientesCrono<-subset(ClientesCrono, select=c(Fecha, CLIENTE, Q, `DESPACHO A CLIENTE`,`DEVOLUCION`,`FALTANTE`,`<NA>`))
#' 2) To find the 80-20 drivers, whether they steal, and the need for more drivers/vehicles.
#' 2.1) Driver summary
ResumenPilotos <- Entregas2017 %>%
dplyr::select(Fecha, PILOTO, UNIDAD) %>%
dplyr::summarise(PILOTOS=n_distinct(PILOTO),
PILOTOS20=n_distinct(PILOTO)*0.2,
ENTREGA_PANEL=sum(str_count(UNIDAD, "PANEL")),
ENTREGA_GRANDE=sum(str_count(UNIDAD, "CAMION GRANDE")),
ENTREGA_PEQUE=sum(str_count(UNIDAD, "CAMION PEQUENO")))
head(ResumenPilotos)
#' 2.2) Driver detail sheet.
#' Deliveries per hour per driver
Entregas2017$ESTATUS_ENTREGA<-as.factor(Entregas2017$ESTATUS_ENTREGA)
Pilotos2017<-Entregas2017 %>%
dplyr::group_by(PILOTO) %>%
dplyr::count(ESTATUS_ENTREGA) %>%
tidyr::spread(key=ESTATUS_ENTREGA, value=n)%>%
dplyr::arrange(PILOTO)
head(Pilotos2017)
#' 2.3) Vehicle types
#' Deliveries per hour per vehicle type
Vehiculos<-Entregas2017 %>%
dplyr::group_by(PILOTO, UNIDAD) %>%
dplyr::count(ESTATUS_ENTREGA) %>%
tidyr::spread(key=ESTATUS_ENTREGA, value=n)%>%
dplyr::arrange(PILOTO)
head(Vehiculos)
Vehiculos2<-Entregas2017%>%
dplyr::group_by(UNIDAD)%>%
dplyr::summarise(Q=sum(as.numeric(Q)))
head(Vehiculos2)
#' VISUALIZE
#' MODEL
#' COMMUNICATE
write.csv(Clientes2017,"Clientes2017.csv")
write.csv(ClientesCrono, "ClientesCrono.csv")
write.csv(Pilotos2017,"Pilotos2017.csv")
write.csv(Entregas2017,"Entregas2017_TIDY.csv")
write.csv(Vehiculos,"Vehiculos.csv")
write.csv(Vehiculos2,"Vehiculos2.csv")
|
/Case studies/Distribuidora del Sur, S. A./DataPrep.R
|
no_license
|
armi3/data-wrangling-templates
|
R
| false | false | 5,408 |
r
|
#' @title Reporte para Distribuidora del Sur, S.A.
#' @author Fernanda Gonzรกlez (20180190)
#' @description Preguntas a responder:
#' 1) ยฟContratar mรกs personal?
#' 2) ยฟComprar mรกs vehรญculos? ยฟCuรกles?
#' 3) ยฟEstรกn bien las tarifas actuales?
#' 4) ยฟRoban los pilotos?
#' 5) 80-20 de clientes
#' 6) 80-20 de pilotos
#' PREPARAR AMBIENTE DE TRABAJO
install.packages("tidyverse")
library(tidyverse)
#' IMPORTAR DATOS
Entregas2017 <- read.csv("/Users/baroness/Documents/201802/CS-DS001 Data Wrangling (A)/Lab03/Reporte2017/Entregas2017.csv")
#' EXPLORAR
summary(Entregas2017)
#' HACER TIDY
Entregas2017<-Entregas2017 %>%
tidyr::separate(CLIENTE, c('CLIENTE', 'ESTATUS_ENTREGA', 'ESTATUS_ENTREGA2'), sep="([/|])+")%>%
#' Algunos clientes y estatus quedaron con whitespaces al final del nombre.
dplyr::mutate(CLIENTE = str_trim(CLIENTE)) %>%
dplyr::mutate(ESTATUS_ENTREGA = str_trim(ESTATUS_ENTREGA)) %>%
dplyr::mutate_all(toupper) %>%
#' Todas las entregas que reportaron producto faltante fueron primero despachadas.
#' Registrarlas como despachadas y faltante es redundante.
dplyr::mutate(ESTATUS_ENTREGA = dplyr::case_when(ESTATUS_ENTREGA2=="FALTANTE" ~ "FALTANTE",
TRUE ~ ESTATUS_ENTREGA)) %>%
dplyr::select(-X, -COD_VIAJE, -ESTATUS_ENTREGA2) %>%
dplyr::mutate(UBICACION = dplyr::case_when(UBICACION==76001 ~ 1,
UBICACION==76002 ~ 2,
TRUE ~ as.numeric(UBICACION)))
head(Entregas2017)
#' TRANSFORM
#' 1) Para ver si las tarifas estรกn bien y los 80-20 clientes.
#' 1.1) Total de pedidos, total por ubicacion, total de clientes, unidades y Q, crรฉdito promedio.
Resumen2017<-Entregas2017%>%
dplyr::select(Fecha, UBICACION, CLIENTE, CANTIDAD, Q, CREDITO) %>%
dplyr::summarise(PEDIDOS=sum(n()),
CLIENTES=n_distinct(CLIENTE),
CLIENTES20=n_distinct(CLIENTE)*0.2,
UBI1=sum(UBICACION== 1),
UBI2=sum(UBICACION== 2),
PRODUCTO=sum(as.numeric(CANTIDAD)),
MEDIA_CRED=mean(as.numeric(CREDITO)),
Q_TOTAL=sum(as.numeric(Q)),
Q80=sum(as.numeric(Q))*0.8)
head(Resumen2017)
#' 1.2) Faltante/devoluciรณn/despacho por cliente.
Entregas2017$ESTATUS_ENTREGA<-as.factor(Entregas2017$ESTATUS_ENTREGA)
Clientes2017<-Entregas2017 %>%
dplyr::group_by(CLIENTE) %>%
dplyr::count(ESTATUS_ENTREGA) %>%
tidyr::spread(key=ESTATUS_ENTREGA, value=n)%>%
dplyr::arrange(CLIENTE)
head(Clientes2017)
Clientes2017pt2<-Entregas2017 %>%
dplyr::select(CLIENTE, CANTIDAD, Q, CREDITO) %>%
dplyr::group_by(CLIENTE) %>%
dplyr::summarise(CANTIDAD=sum(as.numeric(CANTIDAD)),
Q=sum(as.numeric(Q)),
CREDITO=mean(as.numeric(CREDITO))) %>%
dplyr::arrange(CLIENTE)
head(Clientes2017pt2)
Clientes2017<-merge(x=Clientes2017,
y=Clientes2017pt2[, c("CLIENTE","CANTIDAD", "Q", "CREDITO")],
by="CLIENTE", all.x = TRUE)
Clientes2017[is.na(Clientes2017)]<-0
head(Clientes2017)
ClientesCrono <- Entregas2017 %>%
dplyr::select(Fecha, CLIENTE, ESTATUS_ENTREGA) %>%
dplyr::group_by(Fecha, CLIENTE) %>%
dplyr::count(ESTATUS_ENTREGA) %>%
tidyr::spread(key=ESTATUS_ENTREGA, value=n) %>%
dplyr::arrange(CLIENTE)
ClientesCrono[is.na(ClientesCrono)]<-0
head(ClientesCrono)
ClientesCronoPt2 <- Entregas2017 %>%
dplyr::select(Fecha, CLIENTE, Q) %>%
dplyr::group_by(Fecha, CLIENTE) %>%
dplyr::summarise(Q=sum(as.numeric(Q))) %>%
dplyr::arrange(CLIENTE)
head(ClientesCronoPt2)
ClientesCrono<-ClientesCronoPt2 %>%
dplyr::select(Q) %>%
dplyr::bind_cols(ClientesCrono)
head(ClientesCrono)
ClientesCrono<-subset(ClientesCrono, select=c(Fecha, CLIENTE, Q, `DESPACHO A CLIENTE`,`DEVOLUCION`,`FALTANTE`,`<NA>`))
#' 2) Para encontrar 80-20 pilotos, si roban, necesidad de mรกs pilotos/vehรญculos.
#' 2.1) Resumen de pilotos
ResumenPilotos <- Entregas2017 %>%
dplyr::select(Fecha, PILOTO, UNIDAD) %>%
dplyr::summarise(PILOTOS=n_distinct(PILOTO),
PILOTOS20=n_distinct(PILOTO)*0.2,
ENTREGA_PANEL=sum(str_count(UNIDAD, "PANEL")),
ENTREGA_GRANDE=sum(str_count(UNIDAD, "CAMION GRANDE")),
ENTREGA_PEQUE=sum(str_count(UNIDAD, "CAMION PEQUENO")))
head(ResumenPilotos)
#' 2.2) Sรกbana de pilotos.
#' Entregas por hora por piloto
Entregas2017$ESTATUS_ENTREGA<-as.factor(Entregas2017$ESTATUS_ENTREGA)
Pilotos2017<-Entregas2017 %>%
dplyr::group_by(PILOTO) %>%
dplyr::count(ESTATUS_ENTREGA) %>%
tidyr::spread(key=ESTATUS_ENTREGA, value=n)%>%
dplyr::arrange(PILOTO)
head(Pilotos2017)
#' 2.3) Tipos de vehรญculo
#' Entregas por hora por tipo de vehรญculo
Vehiculos<-Entregas2017 %>%
dplyr::group_by(PILOTO, UNIDAD) %>%
dplyr::count(ESTATUS_ENTREGA) %>%
tidyr::spread(key=ESTATUS_ENTREGA, value=n)%>%
dplyr::arrange(PILOTO)
head(Vehiculos)
Vehiculos2<-Entregas2017%>%
dplyr::group_by(UNIDAD)%>%
dplyr::summarise(Q=sum(as.numeric(Q)))
head(Vehiculos2)
#' VISUALIZE
#' MODEL
#' COMMUNICATE
write.csv(Clientes2017,"Clientes2017.csv")
write.csv(ClientesCrono, "ClientesCrono.csv")
write.csv(Pilotos2017,"Pilotos2017.csv")
write.csv(Entregas2017,"Entregas2017_TIDY.csv")
write.csv(Vehiculos,"Vehiculos.csv")
write.csv(Vehiculos2,"Vehiculos2.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cli.R
\name{cli_end}
\alias{cli_end}
\title{Close a CLI container}
\usage{
cli_end(id = NULL)
}
\arguments{
\item{id}{Id of the container to close. If missing, the current
container is closed, if any.}
}
\description{
Containers auto-close by default, but sometimes you need to explicitly
close them. Closing a container also closes all of its nested
containers.
}
\details{
\subsection{Explicit closing}{\if{html}{\out{<div class="sourceCode r">}}\preformatted{cnt <- cli_par()
cli_text("First paragraph.")
cli_end(cnt)
cnt <- cli_par()
cli_text("Second paragraph.")
cli_end(cnt)
}\if{html}{\out{</div>}}
\if{html}{\figure{cli-end.svg}}
}
\subsection{Closing a stack of containers}{\if{html}{\out{<div class="sourceCode r">}}\preformatted{list <- cli_ul()
cli_li("Item one:")
cli_li("Item two:")
cli_par()
cli_text("Still item two.")
cli_end(list)
cli_text("Not in the list any more")
}\if{html}{\out{</div>}}
\if{html}{\figure{cli-end-many.svg}}
}
\subsection{Omitting \code{id}}{
If \code{id} is omitted, the container that was opened last will be closed.\if{html}{\out{<div class="sourceCode r">}}\preformatted{cli_par()
cli_text("First paragraph")
cli_end()
cli_par()
cli_text("Second paragraph")
cli_end()
}\if{html}{\out{</div>}}
\if{html}{\figure{cli-end-noid.svg}}
}
\subsection{Debugging containers}{
You can use the internal \code{cli:::cli_debug_doc()} function to see the
currently open containers.\if{html}{\out{<div class="sourceCode r">}}\preformatted{fun <- function() \{
cli_div(id = "mydiv")
cli_par(class = "myclass")
cli:::cli_debug_doc()
\}
fun()
}\if{html}{\out{</div>}}
\if{html}{\figure{cli-end-debug.svg}}
}
}
|
/man/cli_end.Rd
|
permissive
|
isabella232/cli-12
|
R
| false | true | 1,730 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cli.R
\name{cli_end}
\alias{cli_end}
\title{Close a CLI container}
\usage{
cli_end(id = NULL)
}
\arguments{
\item{id}{Id of the container to close. If missing, the current
container is closed, if any.}
}
\description{
Containers aut0-close by default, but sometimes you need to explicitly
close them. Closing a container also closes all of its nested
containeers.
}
\details{
\subsection{Explicit closing}{\if{html}{\out{<div class="sourceCode r">}}\preformatted{cnt <- cli_par()
cli_text("First paragraph.")
cli_end(cnt)
cnt <- cli_par()
cli_text("Second paragraph.")
cli_end(cnt)
}\if{html}{\out{</div>}}
\if{html}{\figure{cli-end.svg}}
}
\subsection{Closing a stack of containers}{\if{html}{\out{<div class="sourceCode r">}}\preformatted{list <- cli_ul()
cli_li("Item one:")
cli_li("Item two:")
cli_par()
cli_text("Still item two.")
cli_end(list)
cli_text("Not in the list any more")
}\if{html}{\out{</div>}}
\if{html}{\figure{cli-end-many.svg}}
}
\subsection{Omitting \code{id}}{
If \code{id} is omitted, the container that was opened last will be closed.\if{html}{\out{<div class="sourceCode r">}}\preformatted{cli_par()
cli_text("First paragraph")
cli_end()
cli_par()
cli_text("Second paragraph")
cli_end()
}\if{html}{\out{</div>}}
\if{html}{\figure{cli-end-noid.svg}}
}
\subsection{Debugging containers}{
You can use the internal \code{cli:::cli_debug_doc()} function to see the
currently open containers.\if{html}{\out{<div class="sourceCode r">}}\preformatted{fun <- function() \{
cli_div(id = "mydiv")
cli_par(class = "myclass")
cli:::cli_debug_doc()
\}
fun()
}\if{html}{\out{</div>}}
\if{html}{\figure{cli-end-debug.svg}}
}
}
|
testlist <- list(id = NULL, id = NULL, booklet_id = c(8168473L, 2127314835L, 171177770L, -1942759639L, -1815221204L, 601253144L, -804651186L, 2094281728L, 860713787L, -971707632L), person_id = -1415711445L)
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result)
|
/dexterMST/inst/testfiles/is_person_booklet_sorted/AFL_is_person_booklet_sorted/is_person_booklet_sorted_valgrind_files/1615938659-test.R
|
no_license
|
akhikolla/updatedatatype-list1
|
R
| false | false | 285 |
r
|
testlist <- list(id = NULL, id = NULL, booklet_id = c(8168473L, 2127314835L, 171177770L, -1942759639L, -1815221204L, 601253144L, -804651186L, 2094281728L, 860713787L, -971707632L), person_id = -1415711445L)
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result)
|
#' Reverse factor levels
#'
#' @param x A vector of factors
#'
#' @return vector as a factor
#' @export
#'
#' @examples
#' a <- factor(seq(1,10))
#' reverse_factor_levels(a)
reverse_factor_levels <- function(x) {
return(factor(x, levels = rev(levels(factor(x)))))
}
|
/R/conversions.R
|
no_license
|
joelgsponer/waRRior2
|
R
| false | false | 240 |
r
|
#' Reverse factor levels
#'
#' @param x A vector of factors
#'
#' @return vector as a factor
#' @export
#'
#' @examples
#' a <- factor(seq(1,10))
reverse_factor_levels <- function(x) {
return(factor(x, levels = rev(levels(factor(x)))))
}
|
\name{almpubmedid}
\alias{almpubmedid}
\title{Get PubMed article ID by inputting the doi for the article.}
\usage{
almpubmedid(doi, url = "http://alm.plos.org/articles",
key = getOption("PlosApiKey", stop("need an API key for PLoS Journals")),
..., curl = getCurlHandle())
}
\arguments{
\item{doi}{digital object identifier for an article in
PLoS Journals}
\item{key}{your PLoS API key; supply it directly or it is loaded from
.Rprofile}
\item{url}{the PLoS API url for the function (should be
left to default)}
\item{...}{optional additional curl options (debugging
tools mostly)}
\item{curl}{If using in a loop, call getCurlHandle()
first and pass the returned value in here (avoids
unnecessary footprint)}
}
\value{
The PubMed article ID.
}
\description{
Get PubMed article ID by inputting the doi for the
article.
}
\examples{
\dontrun{
almpubmedid('10.1371/journal.pbio.0000012')
}
}
|
/man/almpubmedid.Rd
|
no_license
|
phillord/rplos
|
R
| false | false | 920 |
rd
|
\name{almpubmedid}
\alias{almpubmedid}
\title{Get PubMed article ID by inputting the doi for the article.}
\usage{
almpubmedid(doi, url = "http://alm.plos.org/articles",
key = getOption("PlosApiKey", stop("need an API key for PLoS Journals")),
..., curl = getCurlHandle())
}
\arguments{
\item{doi}{digital object identifier for an article in
PLoS Journals}
\item{key}{your PLoS API key, either enter, or loads from
.Rprofile}
\item{url}{the PLoS API url for the function (should be
left to default)}
\item{...}{optional additional curl options (debugging
tools mostly)}
\item{curl}{If using in a loop, call getCurlHandle()
first and pass the returned value in here (avoids
unnecessary footprint)}
}
\value{
The PubMed article ID.
}
\description{
Get PubMed article ID by inputting the doi for the
article.
}
\examples{
\dontrun{
almpubmedid('10.1371/journal.pbio.0000012')
}
}
|
library(plyr)
library(dplyr)
library(ggplot2)
options(scipen=999)
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
df <- subset(SCC, select = c("SCC", "Short.Name"))
NEI <- merge(NEI, df, by.x="SCC", by.y="SCC", all=TRUE)
rm(SCC)
## Q2
# Have total emissions from PM2.5 decreased in the Baltimore City, Maryland (fips=="24510")
# from 1999 to 2008? Use the base plotting system to make a plot answering this question.
# Data prep
plot2 <- subset(NEI, fips == "24510", c("Emissions", "year","type"))
plot2 <- aggregate(Emissions ~ year, plot2, sum)
png(file = "plot2.png")
# Plot the chart
plot((Emissions / 1000) ~ year,
data = plot2,
type = "l",
#ylim = c(min(df1[ ,-1]), max(df1[ ,-1])),
xlab = "Year",
ylab = "Total emissions (/1000)",
main = "Baltimore City PM2.5 Emissions",
xaxt="n")
axis(side=1, at=c("1999", "2002", "2005", "2008"))
# Save the file.
dev.off()
|
/Course 4 - Exploratory Data Analysis/Week4_Assignment/plot2.R
|
no_license
|
Leijtenss/Coursera-JHU-Data-Science-Specialization
|
R
| false | false | 1,029 |
r
|
library(plyr)
library(dplyr)
library(ggplot2)
options(scipen=999)
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
df <- subset(SCC, select = c("SCC", "Short.Name"))
NEI <- merge(NEI, df, by.x="SCC", by.y="SCC", all=TRUE)
rm(SCC)
## Q2
# Have total emissions from PM2.5 decreased in the Baltimore City, Maryland (fips=="24510")
# from 1999 to 2008? Use the base plotting system to make a plot answering this question.
# Data prep
plot2 <- subset(NEI, fips == "24510", c("Emissions", "year","type"))
plot2 <- aggregate(Emissions ~ year, plot2, sum)
png(file = "plot2.png")
# Plot the chart
plot((Emissions / 1000) ~ year,
data = plot2,
type = "l",
#ylim = c(min(df1[ ,-1]), max(df1[ ,-1])),
xlab = "Year",
ylab = "Total emissions (/1000)",
main = "Baltimore City PM2.5 Emissions",
xaxt="n")
axis(side=1, at=c("1999", "2002", "2005", "2008"))
# Save the file.
dev.off()
|
active_new <- function(
pipeline = NULL,
meta = NULL,
names = NULL,
shortcut = NULL,
queue = NULL,
reporter = NULL,
seconds_meta = NULL,
seconds_reporter = NULL,
garbage_collection = NULL,
envir = NULL
) {
active_class$new(
pipeline = pipeline,
meta = meta,
names = names,
shortcut = shortcut,
queue = queue,
reporter = reporter,
seconds_meta = seconds_meta,
seconds_reporter = seconds_reporter,
garbage_collection = garbage_collection,
envir = envir
)
}
active_class <- R6::R6Class(
classname = "tar_active",
inherit = algorithm_class,
portable = FALSE,
cloneable = FALSE,
public = list(
garbage_collection = NULL,
envir = NULL,
exports = NULL,
process = NULL,
seconds_start = NULL,
seconds_dequeued = NULL,
initialize = function(
pipeline = NULL,
meta = NULL,
names = NULL,
shortcut = NULL,
queue = NULL,
reporter = NULL,
seconds_meta = NULL,
seconds_reporter = NULL,
envir = NULL,
garbage_collection = NULL
) {
super$initialize(
pipeline = pipeline,
meta = meta,
names = names,
shortcut = shortcut,
queue = queue,
reporter = reporter,
seconds_meta = seconds_meta,
seconds_reporter = seconds_reporter
)
self$garbage_collection <- garbage_collection
self$envir <- envir
},
ensure_meta = function() {
new_store <- !file.exists(self$meta$store)
self$meta$database$sync(prefer_local = TRUE, verbose = FALSE)
self$meta$migrate_database()
self$meta$validate()
self$meta$database$preprocess(write = TRUE)
if (new_store) {
self$write_gitignore()
self$write_user()
}
self$meta$record_imports(self$pipeline$imports, self$pipeline)
self$meta$restrict_records(self$pipeline)
},
dequeue_meta = function() {
self$meta$database$dequeue_rows(upload = TRUE)
self$scheduler$progress$database$dequeue_rows(upload = TRUE)
},
dequeue_meta_time = function() {
self$seconds_dequeued <- self$seconds_dequeued %|||% -Inf
now <- time_seconds_local()
if ((now - self$seconds_dequeued) > self$seconds_meta) {
self$dequeue_meta()
self$seconds_dequeued <- time_seconds_local()
}
},
dequeue_meta_file = function(target) {
if (target_allow_meta(target)) {
self$dequeue_meta()
}
},
write_gitignore = function() {
writeLines(
c("*", "!.gitignore", "!meta", "meta/*", "!meta/meta"),
path_gitignore(self$meta$store)
)
},
write_user = function() {
dir_create(path_user_dir(self$meta$store))
},
ensure_process = function() {
self$process <- process_init(path_store = self$meta$store)
self$process$record_process()
self$process$database$upload(verbose = FALSE)
},
produce_exports = function(envir, path_store, is_globalenv = NULL) {
map(names(envir), ~force(envir[[.x]])) # try to nix high-mem promises
if (is_globalenv %|||% identical(envir, globalenv())) {
out <- as.list(envir, all.names = TRUE)
out <- out[fltr(names(out), ~!is_internal_name(.x, envir))]
out[[".tar_envir_5048826d"]] <- "globalenv"
} else {
discard <- fltr(names(envir), ~is_internal_name(.x, envir))
remove(list = discard, envir = envir)
out <- list(.tar_envir_5048826d = envir)
}
out[[".tar_path_store_5048826d"]] <- path_store
out[[".tar_fun_5048826d"]] <- tar_runtime$fun
out[[".tar_options_5048826d"]] <- tar_options$export()
out[[".tar_envvars_5048826d"]] <- tar_envvars()
out
},
update_exports = function() {
self$exports <- self$produce_exports(
envir = self$envir,
path_store = self$meta$store
)
},
ensure_exports = function() {
if (is.null(self$exports)) {
self$update_exports()
}
},
unload_transient = function() {
pipeline_unload_transient(self$pipeline)
},
unmarshal_target = function(target) {
builder_unmarshal_value(target)
},
skip_target = function(target) {
target_skip(
target = target,
pipeline = self$pipeline,
scheduler = self$scheduler,
meta = self$meta,
active = TRUE
)
target_sync_file_meta(target, self$meta)
},
process_target = function(name) {
self$scheduler$backoff$reset()
target <- pipeline_get_target(self$pipeline, name)
target_debug(target)
target_update_depend(target, self$pipeline, self$meta)
if (target_should_run(target, self$meta)) {
self$dequeue_meta_file(target)
self$run_target(name)
} else {
self$skip_target(target)
}
},
backoff = function() {
self$scheduler$backoff$wait()
},
start = function() {
self$seconds_start <- time_seconds()
pipeline_prune_names(self$pipeline, self$names)
self$ensure_meta()
self$update_scheduler()
self$bootstrap_shortcut_deps()
self$ensure_process()
self$scheduler$progress$database$reset_storage()
self$scheduler$reporter$report_start()
},
end = function() {
scheduler <- self$scheduler
pipeline_unload_loaded(self$pipeline)
self$meta$database$dequeue_rows(upload = FALSE)
self$meta$database$deduplicate_storage()
self$meta$database$sync(prefer_local = TRUE, verbose = FALSE)
self$scheduler$progress$database$dequeue_rows(upload = TRUE)
path_scratch_del(path_store = self$meta$store)
compare_working_directories()
tar_assert_objects_files(self$meta$store)
seconds_elapsed <- time_seconds() - self$seconds_start
scheduler$reporter$report_end(scheduler$progress, seconds_elapsed)
},
validate = function() {
super$validate()
if (!is.null(self$process)) {
self$process$validate()
}
tar_assert_lgl(self$garbage_collection)
tar_assert_scalar(self$garbage_collection)
tar_assert_none_na(self$garbage_collection)
}
)
)
|
/R/class_active.R
|
permissive
|
ropensci/targets
|
R
| false | false | 6,169 |
r
|
active_new <- function(
pipeline = NULL,
meta = NULL,
names = NULL,
shortcut = NULL,
queue = NULL,
reporter = NULL,
seconds_meta = NULL,
seconds_reporter = NULL,
garbage_collection = NULL,
envir = NULL
) {
active_class$new(
pipeline = pipeline,
meta = meta,
names = names,
shortcut = shortcut,
queue = queue,
reporter = reporter,
seconds_meta = seconds_meta,
seconds_reporter = seconds_reporter,
garbage_collection = garbage_collection,
envir = envir
)
}
active_class <- R6::R6Class(
classname = "tar_active",
inherit = algorithm_class,
portable = FALSE,
cloneable = FALSE,
public = list(
garbage_collection = NULL,
envir = NULL,
exports = NULL,
process = NULL,
seconds_start = NULL,
seconds_dequeued = NULL,
initialize = function(
pipeline = NULL,
meta = NULL,
names = NULL,
shortcut = NULL,
queue = NULL,
reporter = NULL,
seconds_meta = NULL,
seconds_reporter = NULL,
envir = NULL,
garbage_collection = NULL
) {
super$initialize(
pipeline = pipeline,
meta = meta,
names = names,
shortcut = shortcut,
queue = queue,
reporter = reporter,
seconds_meta = seconds_meta,
seconds_reporter = seconds_reporter
)
self$garbage_collection <- garbage_collection
self$envir <- envir
},
ensure_meta = function() {
new_store <- !file.exists(self$meta$store)
self$meta$database$sync(prefer_local = TRUE, verbose = FALSE)
self$meta$migrate_database()
self$meta$validate()
self$meta$database$preprocess(write = TRUE)
if (new_store) {
self$write_gitignore()
self$write_user()
}
self$meta$record_imports(self$pipeline$imports, self$pipeline)
self$meta$restrict_records(self$pipeline)
},
dequeue_meta = function() {
self$meta$database$dequeue_rows(upload = TRUE)
self$scheduler$progress$database$dequeue_rows(upload = TRUE)
},
dequeue_meta_time = function() {
self$seconds_dequeued <- self$seconds_dequeued %|||% -Inf
now <- time_seconds_local()
if ((now - self$seconds_dequeued) > self$seconds_meta) {
self$dequeue_meta()
self$seconds_dequeued <- time_seconds_local()
}
},
dequeue_meta_file = function(target) {
if (target_allow_meta(target)) {
self$dequeue_meta()
}
},
write_gitignore = function() {
writeLines(
c("*", "!.gitignore", "!meta", "meta/*", "!meta/meta"),
path_gitignore(self$meta$store)
)
},
write_user = function() {
dir_create(path_user_dir(self$meta$store))
},
ensure_process = function() {
self$process <- process_init(path_store = self$meta$store)
self$process$record_process()
self$process$database$upload(verbose = FALSE)
},
produce_exports = function(envir, path_store, is_globalenv = NULL) {
map(names(envir), ~force(envir[[.x]])) # try to nix high-mem promises
if (is_globalenv %|||% identical(envir, globalenv())) {
out <- as.list(envir, all.names = TRUE)
out <- out[fltr(names(out), ~!is_internal_name(.x, envir))]
out[[".tar_envir_5048826d"]] <- "globalenv"
} else {
discard <- fltr(names(envir), ~is_internal_name(.x, envir))
remove(list = discard, envir = envir)
out <- list(.tar_envir_5048826d = envir)
}
out[[".tar_path_store_5048826d"]] <- path_store
out[[".tar_fun_5048826d"]] <- tar_runtime$fun
out[[".tar_options_5048826d"]] <- tar_options$export()
out[[".tar_envvars_5048826d"]] <- tar_envvars()
out
},
update_exports = function() {
self$exports <- self$produce_exports(
envir = self$envir,
path_store = self$meta$store
)
},
ensure_exports = function() {
if (is.null(self$exports)) {
self$update_exports()
}
},
unload_transient = function() {
pipeline_unload_transient(self$pipeline)
},
unmarshal_target = function(target) {
builder_unmarshal_value(target)
},
skip_target = function(target) {
target_skip(
target = target,
pipeline = self$pipeline,
scheduler = self$scheduler,
meta = self$meta,
active = TRUE
)
target_sync_file_meta(target, self$meta)
},
process_target = function(name) {
self$scheduler$backoff$reset()
target <- pipeline_get_target(self$pipeline, name)
target_debug(target)
target_update_depend(target, self$pipeline, self$meta)
if (target_should_run(target, self$meta)) {
self$dequeue_meta_file(target)
self$run_target(name)
} else {
self$skip_target(target)
}
},
backoff = function() {
self$scheduler$backoff$wait()
},
start = function() {
self$seconds_start <- time_seconds()
pipeline_prune_names(self$pipeline, self$names)
self$ensure_meta()
self$update_scheduler()
self$bootstrap_shortcut_deps()
self$ensure_process()
self$scheduler$progress$database$reset_storage()
self$scheduler$reporter$report_start()
},
end = function() {
scheduler <- self$scheduler
pipeline_unload_loaded(self$pipeline)
self$meta$database$dequeue_rows(upload = FALSE)
self$meta$database$deduplicate_storage()
self$meta$database$sync(prefer_local = TRUE, verbose = FALSE)
self$scheduler$progress$database$dequeue_rows(upload = TRUE)
path_scratch_del(path_store = self$meta$store)
compare_working_directories()
tar_assert_objects_files(self$meta$store)
seconds_elapsed <- time_seconds() - self$seconds_start
scheduler$reporter$report_end(scheduler$progress, seconds_elapsed)
},
validate = function() {
super$validate()
if (!is.null(self$process)) {
self$process$validate()
}
tar_assert_lgl(self$garbage_collection)
tar_assert_scalar(self$garbage_collection)
tar_assert_none_na(self$garbage_collection)
}
)
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataflow_objects.R
\name{TaskRunnerSettings}
\alias{TaskRunnerSettings}
\title{TaskRunnerSettings Object}
\usage{
TaskRunnerSettings(taskUser = NULL, taskGroup = NULL, oauthScopes = NULL,
baseUrl = NULL, dataflowApiVersion = NULL,
parallelWorkerSettings = NULL, baseTaskDir = NULL,
continueOnException = NULL, logToSerialconsole = NULL,
alsologtostderr = NULL, logUploadLocation = NULL, logDir = NULL,
tempStoragePrefix = NULL, harnessCommand = NULL,
workflowFileName = NULL, commandlinesFileName = NULL, vmId = NULL,
languageHint = NULL, streamingWorkerMainClass = NULL)
}
\arguments{
\item{taskUser}{The UNIX user ID on the worker VM to use for tasks launched by taskrunner; e}
\item{taskGroup}{The UNIX group ID on the worker VM to use for tasks launched by taskrunner; e}
\item{oauthScopes}{OAuth2 scopes to be requested by the taskrunner in order to access the dataflow API}
\item{baseUrl}{The base URL for the taskrunner to use when accessing Google Cloud APIs}
\item{dataflowApiVersion}{API version of endpoint, e}
\item{parallelWorkerSettings}{Settings to pass to the parallel worker harness}
\item{baseTaskDir}{Location on the worker for task-specific subdirectories}
\item{continueOnException}{Do we continue taskrunner if an exception is hit?}
\item{logToSerialconsole}{Send taskrunner log into to Google Compute Engine VM serial console?}
\item{alsologtostderr}{Also send taskrunner log info to stderr?}
\item{logUploadLocation}{Indicates where to put logs}
\item{logDir}{Directory on the VM to store logs}
\item{tempStoragePrefix}{The prefix of the resources the taskrunner should use for temporary storage}
\item{harnessCommand}{Command to launch the worker harness}
\item{workflowFileName}{Store the workflow in this file}
\item{commandlinesFileName}{Store preprocessing commands in this file}
\item{vmId}{ID string of VM}
\item{languageHint}{Suggested backend language}
\item{streamingWorkerMainClass}{Streaming worker main class name}
}
\value{
TaskRunnerSettings object
}
\description{
TaskRunnerSettings Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Taskrunner configuration settings.
}
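\examples{
\dontrun{
# A sketch (not part of the generated file); the field values below are
# illustrative assumptions only.
settings <- TaskRunnerSettings(taskUser = "root", logDir = "/var/log/dataflow")
}
}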
|
/googledataflowv1b3.auto/man/TaskRunnerSettings.Rd
|
permissive
|
Phippsy/autoGoogleAPI
|
R
| false | true | 2,258 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataflow_objects.R
\name{TaskRunnerSettings}
\alias{TaskRunnerSettings}
\title{TaskRunnerSettings Object}
\usage{
TaskRunnerSettings(taskUser = NULL, taskGroup = NULL, oauthScopes = NULL,
baseUrl = NULL, dataflowApiVersion = NULL,
parallelWorkerSettings = NULL, baseTaskDir = NULL,
continueOnException = NULL, logToSerialconsole = NULL,
alsologtostderr = NULL, logUploadLocation = NULL, logDir = NULL,
tempStoragePrefix = NULL, harnessCommand = NULL,
workflowFileName = NULL, commandlinesFileName = NULL, vmId = NULL,
languageHint = NULL, streamingWorkerMainClass = NULL)
}
\arguments{
\item{taskUser}{The UNIX user ID on the worker VM to use for tasks launched by taskrunner; e}
\item{taskGroup}{The UNIX group ID on the worker VM to use for tasks launched by taskrunner; e}
\item{oauthScopes}{OAuth2 scopes to be requested by the taskrunner in order to access the dataflow API}
\item{baseUrl}{The base URL for the taskrunner to use when accessing Google Cloud APIs}
\item{dataflowApiVersion}{API version of endpoint, e}
\item{parallelWorkerSettings}{Settings to pass to the parallel worker harness}
\item{baseTaskDir}{Location on the worker for task-specific subdirectories}
\item{continueOnException}{Do we continue taskrunner if an exception is hit?}
\item{logToSerialconsole}{Send taskrunner log into to Google Compute Engine VM serial console?}
\item{alsologtostderr}{Also send taskrunner log info to stderr?}
\item{logUploadLocation}{Indicates where to put logs}
\item{logDir}{Directory on the VM to store logs}
\item{tempStoragePrefix}{The prefix of the resources the taskrunner should use for temporary storage}
\item{harnessCommand}{Command to launch the worker harness}
\item{workflowFileName}{Store the workflow in this file}
\item{commandlinesFileName}{Store preprocessing commands in this file}
\item{vmId}{ID string of VM}
\item{languageHint}{Suggested backend language}
\item{streamingWorkerMainClass}{Streaming worker main class name}
}
\value{
TaskRunnerSettings object
}
\description{
TaskRunnerSettings Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Taskrunner configuration settings.
}
|
#' Near-Zero Variance Filter
#'
#' `step_nzv` creates a *specification* of a recipe step
#' that will potentially remove variables that are highly sparse
#' and unbalanced.
#'
#' @inheritParams step_center
#' @param freq_cut,unique_cut Numeric parameters for the filtering process. See
#' the Details section below.
#' @param options A list of options for the filter (see Details
#' below).
#' @param removals A character string that contains the names of
#' columns that should be removed. These values are not determined
#' until [prep()] is called.
#' @template step-return
#' @template filter-steps
#' @family variable filter steps
#' @export
#'
#' @details This step diagnoses predictors that have one unique
#' value (i.e. are zero variance predictors) or predictors that have
#' both of the following characteristics:
#' \enumerate{
#' \item they have very few unique values relative to the number
#' of samples and
#' \item the ratio of the frequency of the most common value to
#' the frequency of the second most common value is large.
#' }
#'
#' For example, a near-zero variance predictor is one
#' that, for 1000 samples, has two distinct values and 999 of them
#' are a single value.
#'
#' To be flagged, first, the frequency of the most prevalent value
#' over the second most frequent value (called the "frequency
#' ratio") must be above `freq_cut`. Secondly, the "percent of
#' unique values," the number of unique values divided by the total
#' number of samples (times 100), must also be below
#' `unique_cut`.
#'
#' In the above example, the frequency ratio is 999 and the unique
#' value percent is 0.2%.
#'
#' # Tidying
#'
#' When you [`tidy()`][tidy.recipe()] this step, a tibble with column
#' `terms` (the columns that will be removed) is returned.
#'
#' @template case-weights-unsupervised
#'
#' @examplesIf rlang::is_installed("modeldata")
#' data(biomass, package = "modeldata")
#'
#' biomass$sparse <- c(1, rep(0, nrow(biomass) - 1))
#'
#' biomass_tr <- biomass[biomass$dataset == "Training", ]
#' biomass_te <- biomass[biomass$dataset == "Testing", ]
#'
#' rec <- recipe(HHV ~ carbon + hydrogen + oxygen +
#' nitrogen + sulfur + sparse,
#' data = biomass_tr
#' )
#'
#' nzv_filter <- rec %>%
#' step_nzv(all_predictors())
#'
#' filter_obj <- prep(nzv_filter, training = biomass_tr)
#'
#' filtered_te <- bake(filter_obj, biomass_te)
#' any(names(filtered_te) == "sparse")
#'
#' tidy(nzv_filter, number = 1)
#' tidy(filter_obj, number = 1)
step_nzv <-
function(recipe,
...,
role = NA,
trained = FALSE,
freq_cut = 95 / 5,
unique_cut = 10,
options = list(freq_cut = 95 / 5, unique_cut = 10),
removals = NULL,
skip = FALSE,
id = rand_id("nzv")) {
exp_list <- list(freq_cut = 95 / 5, unique_cut = 10)
if (!isTRUE(all.equal(exp_list, options))) {
freq_cut <- options$freq_cut
unique_cut <- options$unique_cut
lifecycle::deprecate_stop(
"0.1.7",
"step_nzv(options)",
details = "Please use the arguments `freq_cut` and `unique_cut` instead."
)
}
add_step(
recipe,
step_nzv_new(
terms = enquos(...),
role = role,
trained = trained,
freq_cut = freq_cut,
unique_cut = unique_cut,
options = options,
removals = removals,
skip = skip,
id = id,
case_weights = NULL
)
)
}
step_nzv_new <-
function(terms, role, trained, freq_cut, unique_cut, options,
removals, skip, id, case_weights) {
step(
subclass = "nzv",
terms = terms,
role = role,
trained = trained,
freq_cut = freq_cut,
unique_cut = unique_cut,
options = options,
removals = removals,
skip = skip,
id = id,
case_weights = case_weights
)
}
#' @export
prep.step_nzv <- function(x, training, info = NULL, ...) {
col_names <- recipes_eval_select(x$terms, training, info)
wts <- get_case_weights(info, training)
were_weights_used <- are_weights_used(wts, unsupervised = TRUE)
if (isFALSE(were_weights_used)) {
wts <- NULL
}
filter <- nzv(
x = training[, col_names],
wts = wts,
freq_cut = x$freq_cut,
unique_cut = x$unique_cut
)
step_nzv_new(
terms = x$terms,
role = x$role,
trained = TRUE,
freq_cut = x$freq_cut,
unique_cut = x$unique_cut,
options = x$options,
removals = filter,
skip = x$skip,
id = x$id,
case_weights = were_weights_used
)
}
#' @export
bake.step_nzv <- function(object, new_data, ...) {
if (length(object$removals) > 0) {
new_data <- new_data[, !(colnames(new_data) %in% object$removals)]
}
new_data
}
print.step_nzv <-
function(x, width = max(20, options()$width - 38), ...) {
if (x$trained) {
title <- "Sparse, unbalanced variable filter removed "
} else {
title <- "Sparse, unbalanced variable filter on "
}
print_step(x$removals, x$terms, x$trained, title, width,
case_weights = x$case_weights)
invisible(x)
}
nzv <- function(x,
wts,
freq_cut = 95 / 5,
unique_cut = 10) {
if (is.null(dim(x))) {
x <- matrix(x, ncol = 1)
}
fr_foo <- function(data) {
t <- weighted_table(data[!is.na(data)], wts = wts)
if (length(t) <= 1) {
return(0)
}
w <- which.max(t)
return(max(t, na.rm = TRUE) / max(t[-w], na.rm = TRUE))
}
freq_ratio <- vapply(x, fr_foo, c(ratio = 0))
uni_foo <- function(data) {
length(unique(data[!is.na(data)]))
}
lunique <- vapply(x, uni_foo, c(num = 0))
pct_unique <- 100 * lunique / vapply(x, length, c(num = 0))
zero_func <- function(data) {
all(is.na(data))
}
zero_var <- (lunique == 1) | vapply(x, zero_func, c(zv = TRUE))
out <-
which((freq_ratio > freq_cut &
pct_unique <= unique_cut) | zero_var)
names(out) <- NULL
colnames(x)[out]
}
#' @rdname tidy.recipe
#' @export
tidy.step_nzv <- tidy_filter
#' @export
tunable.step_nzv <- function(x, ...) {
tibble::tibble(
name = c("freq_cut", "unique_cut"),
call_info = list(
list(pkg = "dials", fun = "freq_cut"),
list(pkg = "dials", fun = "unique_cut")
),
source = "recipe",
component = "step_nzv",
component_id = x$id
)
}
|
/R/nzv.R
|
permissive
|
DavisVaughan/recipes
|
R
| false | false | 6,392 |
r
|
#' Near-Zero Variance Filter
#'
#' `step_nzv` creates a *specification* of a recipe step
#' that will potentially remove variables that are highly sparse
#' and unbalanced.
#'
#' @inheritParams step_center
#' @param freq_cut,unique_cut Numeric parameters for the filtering process. See
#' the Details section below.
#' @param options A list of options for the filter (see Details
#' below).
#' @param removals A character string that contains the names of
#' columns that should be removed. These values are not determined
#' until [prep()] is called.
#' @template step-return
#' @template filter-steps
#' @family variable filter steps
#' @export
#'
#' @details This step diagnoses predictors that have one unique
#' value (i.e. are zero variance predictors) or predictors that have
#' both of the following characteristics:
#' \enumerate{
#' \item they have very few unique values relative to the number
#' of samples and
#' \item the ratio of the frequency of the most common value to
#' the frequency of the second most common value is large.
#' }
#'
#' For example, a near-zero variance predictor is one
#' that, for 1000 samples, has two distinct values and 999 of them
#' are a single value.
#'
#' To be flagged, first, the frequency of the most prevalent value
#' over the second most frequent value (called the "frequency
#' ratio") must be above `freq_cut`. Secondly, the "percent of
#' unique values," the number of unique values divided by the total
#' number of samples (times 100), must also be below
#' `unique_cut`.
#'
#' In the above example, the frequency ratio is 999 and the unique
#' value percent is 0.2%.
#'
#' # Tidying
#'
#' When you [`tidy()`][tidy.recipe()] this step, a tibble with column
#' `terms` (the columns that will be removed) is returned.
#'
#' @template case-weights-unsupervised
#'
#' @examplesIf rlang::is_installed("modeldata")
#' data(biomass, package = "modeldata")
#'
#' biomass$sparse <- c(1, rep(0, nrow(biomass) - 1))
#'
#' biomass_tr <- biomass[biomass$dataset == "Training", ]
#' biomass_te <- biomass[biomass$dataset == "Testing", ]
#'
#' rec <- recipe(HHV ~ carbon + hydrogen + oxygen +
#' nitrogen + sulfur + sparse,
#' data = biomass_tr
#' )
#'
#' nzv_filter <- rec %>%
#' step_nzv(all_predictors())
#'
#' filter_obj <- prep(nzv_filter, training = biomass_tr)
#'
#' filtered_te <- bake(filter_obj, biomass_te)
#' any(names(filtered_te) == "sparse")
#'
#' tidy(nzv_filter, number = 1)
#' tidy(filter_obj, number = 1)
step_nzv <-
function(recipe,
...,
role = NA,
trained = FALSE,
freq_cut = 95 / 5,
unique_cut = 10,
options = list(freq_cut = 95 / 5, unique_cut = 10),
removals = NULL,
skip = FALSE,
id = rand_id("nzv")) {
exp_list <- list(freq_cut = 95 / 5, unique_cut = 10)
if (!isTRUE(all.equal(exp_list, options))) {
freq_cut <- options$freq_cut
unique_cut <- options$unique_cut
lifecycle::deprecate_stop(
"0.1.7",
"step_nzv(options)",
details = "Please use the arguments `freq_cut` and `unique_cut` instead."
)
}
add_step(
recipe,
step_nzv_new(
terms = enquos(...),
role = role,
trained = trained,
freq_cut = freq_cut,
unique_cut = unique_cut,
options = options,
removals = removals,
skip = skip,
id = id,
case_weights = NULL
)
)
}
step_nzv_new <-
function(terms, role, trained, freq_cut, unique_cut, options,
removals, skip, id, case_weights) {
step(
subclass = "nzv",
terms = terms,
role = role,
trained = trained,
freq_cut = freq_cut,
unique_cut = unique_cut,
options = options,
removals = removals,
skip = skip,
id = id,
case_weights = case_weights
)
}
#' @export
prep.step_nzv <- function(x, training, info = NULL, ...) {
col_names <- recipes_eval_select(x$terms, training, info)
wts <- get_case_weights(info, training)
were_weights_used <- are_weights_used(wts, unsupervised = TRUE)
if (isFALSE(were_weights_used)) {
wts <- NULL
}
filter <- nzv(
x = training[, col_names],
wts = wts,
freq_cut = x$freq_cut,
unique_cut = x$unique_cut
)
step_nzv_new(
terms = x$terms,
role = x$role,
trained = TRUE,
freq_cut = x$freq_cut,
unique_cut = x$unique_cut,
options = x$options,
removals = filter,
skip = x$skip,
id = x$id,
case_weights = were_weights_used
)
}
#' @export
bake.step_nzv <- function(object, new_data, ...) {
if (length(object$removals) > 0) {
new_data <- new_data[, !(colnames(new_data) %in% object$removals)]
}
new_data
}
print.step_nzv <-
function(x, width = max(20, options()$width - 38), ...) {
if (x$trained) {
title <- "Sparse, unbalanced variable filter removed "
} else {
title <- "Sparse, unbalanced variable filter on "
}
print_step(x$removals, x$terms, x$trained, title, width,
case_weights = x$case_weights)
invisible(x)
}
nzv <- function(x,
wts,
freq_cut = 95 / 5,
unique_cut = 10) {
if (is.null(dim(x))) {
x <- matrix(x, ncol = 1)
}
fr_foo <- function(data) {
t <- weighted_table(data[!is.na(data)], wts = wts)
if (length(t) <= 1) {
return(0)
}
w <- which.max(t)
return(max(t, na.rm = TRUE) / max(t[-w], na.rm = TRUE))
}
freq_ratio <- vapply(x, fr_foo, c(ratio = 0))
uni_foo <- function(data) {
length(unique(data[!is.na(data)]))
}
lunique <- vapply(x, uni_foo, c(num = 0))
pct_unique <- 100 * lunique / vapply(x, length, c(num = 0))
zero_func <- function(data) {
all(is.na(data))
}
zero_var <- (lunique == 1) | vapply(x, zero_func, c(zv = TRUE))
out <-
which((freq_ratio > freq_cut &
pct_unique <= unique_cut) | zero_var)
names(out) <- NULL
colnames(x)[out]
}
#' @rdname tidy.recipe
#' @export
tidy.step_nzv <- tidy_filter
#' @export
tunable.step_nzv <- function(x, ...) {
tibble::tibble(
name = c("freq_cut", "unique_cut"),
call_info = list(
list(pkg = "dials", fun = "freq_cut"),
list(pkg = "dials", fun = "unique_cut")
),
source = "recipe",
component = "step_nzv",
component_id = x$id
)
}
|
#' @S3method qi cloglog.net
qi.cloglog.net <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
inv <- linkinv(param)
beta <- coef(param)
# Compute expected values
#
# ...
# @param simulations ...
# @param alpha ...
# @param x
# @return ...
compute.ev <- function (simulations, x) {
# Ensure 'setx' object is valid
if (is.null(x) || is.na(x))
return(NA)
# Construct eta, the value of the linear predictors before the
# inverse-link function is applied
    eta <- simulations %*% t(x)
    # Construct theta, the f^-1(eta)
    theta <- matrix(inv(eta), nrow=nrow(simulations))
# Properly name the matrix dimensions
dimnames(theta) <- dimnames(eta)
# Return
theta
}
ev1 <- compute.ev(beta, x)
ev2 <- compute.ev(beta, x1)
list(
"Expected Value (for X): E(Y|X)" = ev1,
"Expected Value (for X1): E(Y|X1)" = ev2,
"First Differences: E(Y|X1)-E(Y|X)" = ev2 - ev1
)
}
|
/R/qi.cloglog.net.R
|
no_license
|
IQSS/ZeligNetwork
|
R
| false | false | 961 |
r
|
#' @S3method qi cloglog.net
qi.cloglog.net <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
inv <- linkinv(param)
beta <- coef(param)
# Compute expected values
#
# ...
# @param simulations ...
# @param alpha ...
# @param x
# @return ...
compute.ev <- function (simulations, x) {
# Ensure 'setx' object is valid
if (is.null(x) || is.na(x))
return(NA)
# Construct eta, the value of the linear predictors before the
# inverse-link function is applied
    eta <- simulations %*% t(x)
    # Construct theta, the f^-1(eta)
    theta <- matrix(inv(eta), nrow=nrow(simulations))
# Properly name the matrix dimensions
dimnames(theta) <- dimnames(eta)
# Return
theta
}
ev1 <- compute.ev(beta, x)
ev2 <- compute.ev(beta, x1)
list(
"Expected Value (for X): E(Y|X)" = ev1,
"Expected Value (for X1): E(Y|X1)" = ev2,
"First Differences: E(Y|X1)-E(Y|X)" = ev2 - ev1
)
}
|
source('utils.R')
ex <- get_aoc_input(12, 2019, Sys.getenv('COOKIE_PATH'))
input = gsub('x=|y=|z=|<|>| ', '', ex) %>%
strsplit(split = ',|\n') %>%
`[[`(1) %>%
as.numeric() %>%
matrix(byrow = T, ncol = 3) %>%
as.data.frame() %>%
`names<-`(c('x', 'y', 'z'))
# part 1
dat = input
dimens = c('x', 'y', 'z')
pairs = combn(1:4, 2, simplify = F)
n_steps = 1000
dat_new = list()
vel_new = list()
for (dimen in dimens) {
velocities = rep(0, 4)
dat_dim = dat[, dimen]
for (t in seq.int(n_steps)) {
for (pairi in pairs) {
diff = dat_dim[pairi[2]] - dat_dim[pairi[1]]
if (diff != 0) {
velocities[pairi] = velocities[pairi] +
(2 * (diff > 0) - 1) * c(1,-1)
}
}
dat_dim = dat_dim + velocities
dat_new[[dimen]] = dat_dim
vel_new[[dimen]] = velocities
  }
}
sum(rowSums(abs(dplyr::bind_cols(dat_new))) *
rowSums(abs(dplyr::bind_cols(vel_new))))
# part 2
n_steps = 200000
dat = input
ts = vector(mode = 'numeric')
for (dimen in dimens) {
velocities = rep(0, 4)
dat_dim = dat[, dimen]
for (t in seq.int(n_steps)) {
for (pairi in pairs) {
diff = dat_dim[pairi[2]] - dat_dim[pairi[1]]
if (diff != 0) {
velocities[pairi] = velocities[pairi] +
(2 * (diff > 0) - 1) * c(1,-1)
}
}
dat_dim = dat_dim + velocities
if (identical(velocities, rep(0, 4)))
break
}
ts = c(ts, t)
}
print(Reduce(pracma::Lcm, ts)*2, digits = 15)
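## Note (added): part 2 relies on each axis evolving independently and
## periodically. The first step at which all velocities on an axis return to
## zero is half that axis' period, so the answer is twice the LCM of the
## per-axis step counts collected in `ts`.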
|
/2019/day12.R
|
no_license
|
trangdata/adventofcode
|
R
| false | false | 1,472 |
r
|
source('utils.R')
ex <- get_aoc_input(12, 2019, Sys.getenv('COOKIE_PATH'))
input = gsub('x=|y=|z=|<|>| ', '', ex) %>%
strsplit(split = ',|\n') %>%
`[[`(1) %>%
as.numeric() %>%
matrix(byrow = T, ncol = 3) %>%
as.data.frame() %>%
`names<-`(c('x', 'y', 'z'))
# part 1
dat = input
dimens = c('x', 'y', 'z')
pairs = combn(1:4, 2, simplify = F)
n_steps = 1000
dat_new = list()
vel_new = list()
for (dimen in dimens) {
velocities = rep(0, 4)
dat_dim = dat[, dimen]
for (t in seq.int(n_steps)) {
for (pairi in pairs) {
diff = dat_dim[pairi[2]] - dat_dim[pairi[1]]
if (diff != 0) {
velocities[pairi] = velocities[pairi] +
(2 * (diff > 0) - 1) * c(1,-1)
}
}
dat_dim = dat_dim + velocities
dat_new[[dimen]] = dat_dim
vel_new[[dimen]] = velocities
  }
}
sum(rowSums(abs(dplyr::bind_cols(dat_new))) *
rowSums(abs(dplyr::bind_cols(vel_new))))
# part 2
n_steps = 200000
dat = input
ts = vector(mode = 'numeric')
for (dimen in dimens) {
velocities = rep(0, 4)
dat_dim = dat[, dimen]
for (t in seq.int(n_steps)) {
for (pairi in pairs) {
diff = dat_dim[pairi[2]] - dat_dim[pairi[1]]
if (diff != 0) {
velocities[pairi] = velocities[pairi] +
(2 * (diff > 0) - 1) * c(1,-1)
}
}
dat_dim = dat_dim + velocities
if (identical(velocities, rep(0, 4)))
break
}
ts = c(ts, t)
}
print(Reduce(pracma::Lcm, ts)*2, digits = 15)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/examen_pdf.R
\name{examen_pdf}
\alias{examen_pdf}
\alias{examen_pdf_solution}
\title{Convert to an examen PDF document}
\usage{
examen_pdf(
solution = FALSE,
suffix = "_question",
id = FALSE,
mcq = "oneparchoices",
includes = NULL,
pandoc_args = NULL,
...
)
examen_pdf_solution(suffix = "_solution", ...)
}
\arguments{
\item{solution}{Turn ON or OFF the rendering of solution chunks (default is \code{FALSE})}
\item{suffix}{Suffix which is added to the filename (default is '_question' for 'unilur::examen_pdf' and '_solution' for 'unilur::examen_pdf_solution')}
\item{id}{Draw a student identification box}
\item{mcq}{Theme for the multiple choice questions (\code{oneparchoices}, \code{oneparchoicesalt}, \code{oneparcheckboxesalt} or \code{oneparcheckboxes})}
\item{includes}{Named list of additional content to include within the
document (typically created using the \code{\link[rmarkdown]{includes}} function).}
\item{pandoc_args}{Additional command line options to pass to pandoc}
\item{...}{Arguments passed to \code{pdf_document()}.}
}
\value{
R Markdown output format to pass to \code{\link[rmarkdown]{render}}
}
\description{
Format for converting from R Markdown to an examen PDF document.
}
\details{
See the inherited `rmarkdown::pdf_document` help page for additional arguments.
}
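\examples{
\dontrun{
# A sketch (not part of the original file): rendering one exam source to both
# the question and the solution PDF.
rmarkdown::render("exam.Rmd", output_format = examen_pdf())
rmarkdown::render("exam.Rmd", output_format = examen_pdf_solution())
}
}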
|
/man/examen_pdf.Rd
|
no_license
|
koncina/unilur
|
R
| false | true | 1,395 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/examen_pdf.R
\name{examen_pdf}
\alias{examen_pdf}
\alias{examen_pdf_solution}
\title{Convert to an examen PDF document}
\usage{
examen_pdf(
solution = FALSE,
suffix = "_question",
id = FALSE,
mcq = "oneparchoices",
includes = NULL,
pandoc_args = NULL,
...
)
examen_pdf_solution(suffix = "_solution", ...)
}
\arguments{
\item{solution}{Turn ON or OFF the rendering of solution chunks (default is \code{FALSE})}
\item{suffix}{Suffix which is added to the filename (default is '_question' for 'unilur::examen_pdf' and '_solution' for 'unilur::examen_pdf_solution')}
\item{id}{Draw a student identification box}
\item{mcq}{Theme for the multiple choice questions (\code{oneparchoices}, \code{oneparchoicesalt}, \code{oneparcheckboxesalt} or \code{oneparcheckboxes})}
\item{includes}{Named list of additional content to include within the
document (typically created using the \code{\link[rmarkdown]{includes}} function).}
\item{pandoc_args}{Additional command line options to pass to pandoc}
\item{...}{Arguments passed to \code{pdf_document()}.}
}
\value{
R Markdown output format to pass to \code{\link[rmarkdown]{render}}
}
\description{
Format for converting from R Markdown to an examen PDF document.
}
\details{
See the inherited `rmarkdown::pdf_document` help page for additional arguments.
}
|
## terse.R
##
## code to control terseness and layout of printed output
##
## $Revision: 1.11 $ $Date: 2016/09/23 02:07:24 $
##
## paragraph break in long output e.g. ppm
parbreak <- function(terse = spatstat.options("terse")) {
if(waxlyrical('space', terse)) cat("\n")
return(invisible(NULL))
}
waxlyrical <- local({
## Values of spatstat.options('terse'):
## 0 default
## 1 suppress obvious wastage e.g. 'gory details'
## 2 contract space between paragraphs in long output
## 3 suppress extras e.g. standard errors and CI
## 4 suppress error messages eg failed to converge
TerseCutoff <- list(gory=1,
space=2,
extras=3,
errors=4)
waxlyrical <- function(type, terse = spatstat.options("terse")) {
if(!(type %in% names(TerseCutoff)))
stop(paste("Internal error: unrecognised permission request",
sQuote(type)),
call.=TRUE)
return(terse < TerseCutoff[[type]])
}
waxlyrical
})
ruletextline <- function(ch="-", n=getOption('width'),
terse=spatstat.options('terse')) {
if(waxlyrical('space', terse)) {
chn <- paste(rep(ch, n), collapse="")
chn <- substr(chn, 1, n)
cat(chn, fill=TRUE)
}
return(invisible(NULL))
}
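## Example (added sketch, not part of the original file): how the terseness
## level gates output. Cutoffs are gory=1, space=2, extras=3, errors=4, and
## waxlyrical() returns TRUE only when terse is strictly below the cutoff.
# waxlyrical('gory',   terse = 2)   # FALSE: gory details suppressed
# waxlyrical('extras', terse = 2)   # TRUE:  extras such as SEs still print
# parbreak(terse = 0)               # prints a paragraph break
# ruletextline("=", n = 40)         # prints a 40-character rule when allowed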
|
/R/terse.R
|
no_license
|
rubak/spatstat
|
R
| false | false | 1,356 |
r
|
## terse.R
##
## code to control terseness and layout of printed output
##
## $Revision: 1.11 $ $Date: 2016/09/23 02:07:24 $
##
## paragraph break in long output e.g. ppm
parbreak <- function(terse = spatstat.options("terse")) {
if(waxlyrical('space', terse)) cat("\n")
return(invisible(NULL))
}
waxlyrical <- local({
## Values of spatstat.options('terse'):
## 0 default
## 1 suppress obvious wastage e.g. 'gory details'
## 2 contract space between paragraphs in long output
## 3 suppress extras e.g. standard errors and CI
## 4 suppress error messages eg failed to converge
TerseCutoff <- list(gory=1,
space=2,
extras=3,
errors=4)
waxlyrical <- function(type, terse = spatstat.options("terse")) {
if(!(type %in% names(TerseCutoff)))
stop(paste("Internal error: unrecognised permission request",
sQuote(type)),
call.=TRUE)
return(terse < TerseCutoff[[type]])
}
waxlyrical
})
ruletextline <- function(ch="-", n=getOption('width'),
terse=spatstat.options('terse')) {
if(waxlyrical('space', terse)) {
chn <- paste(rep(ch, n), collapse="")
chn <- substr(chn, 1, n)
cat(chn, fill=TRUE)
}
return(invisible(NULL))
}
|
library(tidyquant)
library(httr)        # GET()
library(jsonlite)    # fromJSON()
library(data.table)  # as.data.table()
url_btc = "https://min-api.cryptocompare.com/data/histoday?fsym=BTC&tsym=BRL&allData=true"
url_eth = "https://min-api.cryptocompare.com/data/histoday?fsym=ETH&tsym=BRL&allData=true"
btc = fromJSON(
rawToChar(
GET(url_btc)$content
)
)
eth = fromJSON(
rawToChar(
GET(url_eth)$content
)
)
dt_btc = as.data.table(btc$Data)
dt_btc[, diff := (open - close)/open]
dt_btc[, date := as.Date(as.POSIXct(time, origin = "1970-01-01", tz = "GMT"), format="%Y-%m-%d")]
dt_eth = as.data.table(eth$Data)
dt_eth[, diff := (open - close)/open]
dt_eth[, date := as.Date(as.POSIXct(time, origin = "1970-01-01", tz = "GMT"), format="%Y-%m-%d")]
ggplot(dt_btc[date >= today() - 6*30], aes(x = date, y = close, open = open, high = high, low = low, close = close)) +
geom_candlestick() +
geom_bbands(ma_fun = EMA, sd = 2, n = 30, color_ma = "grey30", color_bands = "grey70") +
geom_smooth(method = "lm", se = F, colour = "black", linetype = "dashed", size = 0.7)+
ggtitle("BTC", subtitle = "Last 6 months") +
ylab("Closing Price (BRL)") +
xlab("") +
scale_x_date(date_breaks = '7 days', date_labels = "%d/%m/%y") +
theme(
panel.grid = element_blank(),
panel.grid.major.y = element_line(colour = "grey80"),
panel.background = element_blank(),
axis.line = element_line(colour = "black"),
axis.text.x = element_text(angle = 45, hjust = 1)
)
dt_btc[, coin := "BTC"]
dt_eth[, coin := "ETH"]
dt_coins = rbind(dt_btc, dt_eth)
ggplot(dt_coins[date >= today() - 1*30], aes(x = date, y = close, open = open, high = high, low = low, close = close)) +
geom_candlestick() +
geom_bbands(ma_fun = EMA, sd = 2, n = 7, color_ma = "grey30", color_bands = "grey70") +
geom_smooth(method = "lm", se = F, colour = "black", linetype = "dashed", size = 0.7)+
ggtitle("Cryptocurrencies", subtitle = "Last Months") +
ylab("Closing Price (BRL)") +
xlab("") +
facet_wrap(~coin, scales = "free_y", ncol = 1) +
scale_x_date(date_breaks = '1 day', date_labels = "%d/%m/%y") +
theme(
panel.grid = element_blank(),
panel.grid.major.y = element_line(colour = "grey80"),
panel.background = element_blank(),
axis.line = element_line(colour = "black"),
axis.text.x = element_text(angle = 45, hjust = 1)
)
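## A possible extension (added sketch, not in the original script): summarise
## the 30-day price range per coin from the combined table built above.
# dt_coins[date >= today() - 30,
#          .(min_close = min(close), max_close = max(close),
#            swing_pct = 100 * (max(close) - min(close)) / min(close)),
#          by = coin]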
|
/candlestick.R
|
no_license
|
ChristopherSP/telegramBitCoin
|
R
| false | false | 2,267 |
r
|
library(tidyquant)
library(httr)        # GET()
library(jsonlite)    # fromJSON()
library(data.table)  # as.data.table()
url_btc = "https://min-api.cryptocompare.com/data/histoday?fsym=BTC&tsym=BRL&allData=true"
url_eth = "https://min-api.cryptocompare.com/data/histoday?fsym=ETH&tsym=BRL&allData=true"
btc = fromJSON(
rawToChar(
GET(url_btc)$content
)
)
eth = fromJSON(
rawToChar(
GET(url_eth)$content
)
)
dt_btc = as.data.table(btc$Data)
dt_btc[, diff := (open - close)/open]
dt_btc[, date := as.Date(as.POSIXct(time, origin = "1970-01-01", tz = "GMT"), format="%Y-%m-%d")]
dt_eth = as.data.table(eth$Data)
dt_eth[, diff := (open - close)/open]
dt_eth[, date := as.Date(as.POSIXct(time, origin = "1970-01-01", tz = "GMT"), format="%Y-%m-%d")]
ggplot(dt_btc[date >= today() - 6*30], aes(x = date, y = close, open = open, high = high, low = low, close = close)) +
geom_candlestick() +
geom_bbands(ma_fun = EMA, sd = 2, n = 30, color_ma = "grey30", color_bands = "grey70") +
geom_smooth(method = "lm", se = F, colour = "black", linetype = "dashed", size = 0.7)+
ggtitle("BTC", subtitle = "Last 6 months") +
ylab("Closing Price (BRL)") +
xlab("") +
scale_x_date(date_breaks = '7 days', date_labels = "%d/%m/%y") +
theme(
panel.grid = element_blank(),
panel.grid.major.y = element_line(colour = "grey80"),
panel.background = element_blank(),
axis.line = element_line(colour = "black"),
axis.text.x = element_text(angle = 45, hjust = 1)
)
dt_btc[, coin := "BTC"]
dt_eth[, coin := "ETH"]
dt_coins = rbind(dt_btc, dt_eth)
ggplot(dt_coins[date >= today() - 1*30], aes(x = date, y = close, open = open, high = high, low = low, close = close)) +
geom_candlestick() +
geom_bbands(ma_fun = EMA, sd = 2, n = 7, color_ma = "grey30", color_bands = "grey70") +
geom_smooth(method = "lm", se = F, colour = "black", linetype = "dashed", size = 0.7)+
ggtitle("Cryptocurrencies", subtitle = "Last Months") +
ylab("Closing Price (BRL)") +
xlab("") +
facet_wrap(~coin, scales = "free_y", ncol = 1) +
scale_x_date(date_breaks = '1 day', date_labels = "%d/%m/%y") +
theme(
panel.grid = element_blank(),
panel.grid.major.y = element_line(colour = "grey80"),
panel.background = element_blank(),
axis.line = element_line(colour = "black"),
axis.text.x = element_text(angle = 45, hjust = 1)
)
|
#' Feature Transformation -- MaxAbsScaler (Estimator)
#'
#' Rescale each feature individually to range [-1, 1] by dividing through the
#' largest maximum absolute value in each feature. It does not shift/center the
#' data, and thus does not destroy any sparsity.
#'
#' @template roxlate-ml-feature-input-output-col
#' @template roxlate-ml-feature-transformer
#' @template roxlate-ml-feature-estimator-transformer
#' @export
ft_max_abs_scaler <- function(
x, input_col, output_col,
dataset = NULL,
uid = random_string("max_abs_scaler_"), ...) {
UseMethod("ft_max_abs_scaler")
}
#' @export
ft_max_abs_scaler.spark_connection <- function(
x, input_col, output_col,
dataset = NULL,
uid = random_string("max_abs_scaler_"), ...) {
ml_ratify_args()
estimator <- ml_new_transformer(x, "org.apache.spark.ml.feature.MaxAbsScaler",
input_col, output_col, uid) %>%
new_ml_max_abs_scaler()
if (is.null(dataset))
estimator
else
ml_fit(estimator, dataset)
}
#' @export
ft_max_abs_scaler.ml_pipeline <- function(
x, input_col, output_col,
dataset = NULL,
uid = random_string("max_abs_scaler_"), ...
) {
stage <- ml_new_stage_modified_args()
ml_add_stage(x, stage)
}
#' @export
ft_max_abs_scaler.tbl_spark <- function(
x, input_col, output_col,
dataset = NULL,
uid = random_string("max_abs_scaler_"), ...
) {
dots <- rlang::dots_list(...)
stage <- ml_new_stage_modified_args()
if (is_ml_transformer(stage))
ml_transform(stage, x)
else
ml_fit_and_transform(stage, x)
}
new_ml_max_abs_scaler <- function(jobj) {
new_ml_estimator(jobj, subclass = "ml_max_abs_scaler")
}
new_ml_max_abs_scaler_model <- function(jobj) {
new_ml_transformer(jobj, subclass = "ml_max_abs_scaler_model")
}
ml_validator_max_abs_scaler <- function(args, nms) {
args %>%
ml_extract_args(nms)
}
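## Example usage (added sketch, not part of the original file); the connection
## and column names below are illustrative assumptions. MaxAbsScaler expects a
## vector column, hence the ft_vector_assembler() step first.
# library(sparklyr)
# sc <- spark_connect(master = "local")
# iris_tbl <- sdf_copy_to(sc, iris, overwrite = TRUE)
# iris_tbl %>%
#   ft_vector_assembler(
#     input_cols = c("Petal_Width", "Petal_Length"),
#     output_col = "features"
#   ) %>%
#   ft_max_abs_scaler(input_col = "features", output_col = "features_scaled")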
|
/R/ml_feature_max_abs_scaler.R
|
permissive
|
iffmainak/sparklyr
|
R
| false | false | 1,875 |
r
|
#' Feature Transformation -- MaxAbsScaler (Estimator)
#'
#' Rescale each feature individually to range [-1, 1] by dividing through the
#' largest maximum absolute value in each feature. It does not shift/center the
#' data, and thus does not destroy any sparsity.
#'
#' @template roxlate-ml-feature-input-output-col
#' @template roxlate-ml-feature-transformer
#' @template roxlate-ml-feature-estimator-transformer
#' @export
ft_max_abs_scaler <- function(
x, input_col, output_col,
dataset = NULL,
uid = random_string("max_abs_scaler_"), ...) {
UseMethod("ft_max_abs_scaler")
}
#' @export
ft_max_abs_scaler.spark_connection <- function(
x, input_col, output_col,
dataset = NULL,
uid = random_string("max_abs_scaler_"), ...) {
ml_ratify_args()
estimator <- ml_new_transformer(x, "org.apache.spark.ml.feature.MaxAbsScaler",
input_col, output_col, uid) %>%
new_ml_max_abs_scaler()
if (is.null(dataset))
estimator
else
ml_fit(estimator, dataset)
}
#' @export
ft_max_abs_scaler.ml_pipeline <- function(
x, input_col, output_col,
dataset = NULL,
uid = random_string("max_abs_scaler_"), ...
) {
stage <- ml_new_stage_modified_args()
ml_add_stage(x, stage)
}
#' @export
ft_max_abs_scaler.tbl_spark <- function(
x, input_col, output_col,
dataset = NULL,
uid = random_string("max_abs_scaler_"), ...
) {
dots <- rlang::dots_list(...)
stage <- ml_new_stage_modified_args()
if (is_ml_transformer(stage))
ml_transform(stage, x)
else
ml_fit_and_transform(stage, x)
}
new_ml_max_abs_scaler <- function(jobj) {
new_ml_estimator(jobj, subclass = "ml_max_abs_scaler")
}
new_ml_max_abs_scaler_model <- function(jobj) {
new_ml_transformer(jobj, subclass = "ml_max_abs_scaler_model")
}
ml_validator_max_abs_scaler <- function(args, nms) {
args %>%
ml_extract_args(nms)
}
|
## I wanted to save people the trouble of doing lots of typing
## so I wrote up something quick that would read in the string
## quickly into one big chunk.
options(stringsAsFactors = FALSE)
a <- "73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450"
a <- gsub('\n', '', a)
## Define find_answer here.
## print(find_answer(a))
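## A possible implementation (added sketch, not in the original file). It
## assumes the goal is Project Euler problem 8, the greatest product of `k`
## adjacent digits; the window size `k` is an assumption (13 in the current
## problem statement, 5 in the original one).
# find_answer <- function(digit_string, k = 13) {
#   digits <- as.numeric(strsplit(digit_string, "")[[1]])
#   windows <- seq_len(length(digits) - k + 1)
#   max(vapply(windows, function(i) prod(digits[i:(i + k - 1)]), numeric(1)))
# }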
|
/02-R/problem_8/tom.R
|
no_license
|
ml-ai-nlp-ir/gadsdc1
|
R
| false | false | 1,300 |
r
|
## I wanted to save people the trouble of doing lots of typing
## so I wrote up something quick that would read in the string
## quickly into one big chunk.
options(stringsAsFactors = FALSE)
a <- "73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450"
a <- gsub('\n', '', a)
## Define find_answer here.
## print(find_answer(a))
|
## File Name: likelihood_adjustment.R
## File Version: 0.15
####################################################################
# likelihood adjustment
likelihood.adjustment <- function( likelihood, theta=NULL, prob.theta=NULL,
adjfac=rep(1,nrow(likelihood)), extreme.item=5,
target.EAP.rel=NULL, min_tuning=.2, max_tuning=3,
maxiter=100, conv=.0001, trait.normal=TRUE ){
like0 <- likelihood
eps <- 1E-30
normal_approx <- trait.normal
if ( is.null(theta) ){
theta <- attr( likelihood, "theta" )[,1]
}
if ( is.null(prob.theta) ){
prob.theta <- attr( likelihood, "prob.theta" )
}
attr(likelihood,"prob.theta") <- NULL
attr(likelihood,"theta") <- NULL
attr(likelihood,"G") <- NULL
#**********************
# add extreme item
N <- nrow(like0)
TP <- length(theta)
thetaM <- matrix( theta, nrow=N, ncol=TP, byrow=TRUE)
S1 <- stats::plogis( thetaM + extreme.item ) *
( 1 - stats::plogis( thetaM - extreme.item ) )
likelihood <- likelihood * S1
# Revalpr( "mean(abs( like0 - likelihood) )")
# likelihood adjustment
like2 <- likelihood_adjustment_compute( likelihood, theta, thetaM, adjfac )
#*** compute posterior given likelihood and empirical prior
if ( ! is.null( target.EAP.rel ) ){
probs <- prob.theta
probsM <- matrix( prob.theta, nrow=N, ncol=TP, byrow=TRUE)
tuning1 <- min_tuning
tuning2 <- max_tuning
EAP_rel1 <- likelihood_adjustment_tuning( likelihood, theta, thetaM, adjfac,
tuningfac=tuning1, probs=probs, normal_approx=normal_approx )
EAP_rel2 <- likelihood_adjustment_tuning( likelihood, theta, thetaM, adjfac,
tuningfac=tuning2, probs=probs, normal_approx=normal_approx)
iter <- 0
change <- 1
while( ( iter < maxiter ) & ( change > conv ) ){
tuning0 <- ( tuning1 + tuning2 ) / 2
res1 <- likelihood_adjustment_tuning( likelihood, theta, thetaM, adjfac,
tuningfac=tuning0, probs=probs, normal_approx=normal_approx)
EAP_rel0 <- res1$EAP.rel
like2 <- res1$likelihood
if ( EAP_rel0 < target.EAP.rel ){
tuning2 <- tuning0
} else {
tuning1 <- tuning0
}
iter <- iter + 1
change <- abs( EAP_rel0 - target.EAP.rel )
cat("Iteration ", iter, " | EAP reliability=", round( EAP_rel0, 4 ), "\n")
flush.console()
}
}
res <- like2
attr( res, "theta" ) <- matrix( theta, ncol=1)
attr(like0,"prob.theta") -> attr( res, "prob.theta")
attr(like0,"G") -> attr(res, "G")
return(res)
}
####################################################################
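## Example usage (added sketch, not part of the original file): 'mod' is
## assumed to be a fitted IRT model whose IRT.likelihood() method returns an
## object carrying the 'theta' and 'prob.theta' attributes expected above.
# like <- IRT.likelihood(mod)
# ladj <- likelihood.adjustment(like, target.EAP.rel = 0.75)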
|
/R/likelihood_adjustment.R
|
no_license
|
rosefu79/sirt
|
R
| false | false | 2,950 |
r
|
## File Name: likelihood_adjustment.R
## File Version: 0.15
####################################################################
# likelihood adjustment
likelihood.adjustment <- function( likelihood, theta=NULL, prob.theta=NULL,
adjfac=rep(1,nrow(likelihood)), extreme.item=5,
target.EAP.rel=NULL, min_tuning=.2, max_tuning=3,
maxiter=100, conv=.0001, trait.normal=TRUE ){
like0 <- likelihood
eps <- 1E-30
normal_approx <- trait.normal
if ( is.null(theta) ){
theta <- attr( likelihood, "theta" )[,1]
}
if ( is.null(prob.theta) ){
prob.theta <- attr( likelihood, "prob.theta" )
}
attr(likelihood,"prob.theta") <- NULL
attr(likelihood,"theta") <- NULL
attr(likelihood,"G") <- NULL
#**********************
# add extreme item
N <- nrow(like0)
TP <- length(theta)
thetaM <- matrix( theta, nrow=N, ncol=TP, byrow=TRUE)
S1 <- stats::plogis( thetaM + extreme.item ) *
( 1 - stats::plogis( thetaM - extreme.item ) )
likelihood <- likelihood * S1
# Revalpr( "mean(abs( like0 - likelihood) )")
# likelihood adjustment
like2 <- likelihood_adjustment_compute( likelihood, theta, thetaM, adjfac )
#*** compute posterior given likelihood and empirical prior
if ( ! is.null( target.EAP.rel ) ){
probs <- prob.theta
probsM <- matrix( prob.theta, nrow=N, ncol=TP, byrow=TRUE)
tuning1 <- min_tuning
tuning2 <- max_tuning
EAP_rel1 <- likelihood_adjustment_tuning( likelihood, theta, thetaM, adjfac,
tuningfac=tuning1, probs=probs, normal_approx=normal_approx )
EAP_rel2 <- likelihood_adjustment_tuning( likelihood, theta, thetaM, adjfac,
tuningfac=tuning2, probs=probs, normal_approx=normal_approx)
iter <- 0
change <- 1
while( ( iter < maxiter ) & ( change > conv ) ){
tuning0 <- ( tuning1 + tuning2 ) / 2
res1 <- likelihood_adjustment_tuning( likelihood, theta, thetaM, adjfac,
tuningfac=tuning0, probs=probs, normal_approx=normal_approx)
EAP_rel0 <- res1$EAP.rel
like2 <- res1$likelihood
if ( EAP_rel0 < target.EAP.rel ){
tuning2 <- tuning0
} else {
tuning1 <- tuning0
}
iter <- iter + 1
change <- abs( EAP_rel0 - target.EAP.rel )
cat("Iteration ", iter, " | EAP reliability=", round( EAP_rel0, 4 ), "\n")
flush.console()
}
}
res <- like2
attr( res, "theta" ) <- matrix( theta, ncol=1)
attr(like0,"prob.theta") -> attr( res, "prob.theta")
attr(like0,"G") -> attr(res, "G")
return(res)
}
####################################################################
|
## These functions cache the inverse of a matrix so that the inverse does not
## have to be recomputed every time it is needed.
## makeCacheMatrix() creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## cacheSolve() returns the inverse of the special "matrix" created by
## makeCacheMatrix(), reusing the cached inverse when it is available.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinverse()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinverse(inv)
inv
}
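## Example usage (added, not part of the original assignment file):
# m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
# cacheSolve(m)   # computes the inverse and caches it
# cacheSolve(m)   # prints "getting cached data" and returns the cached inverse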
|
/cachematrix.R
|
no_license
|
akhilyengal/ProgrammingAssignment2
|
R
| false | false | 893 |
r
|
## These functions cache the inverse of a matrix so that the inverse does not
## have to be recomputed every time it is needed.
## makeCacheMatrix() creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## cacheSolve() returns the inverse of the special "matrix" created by
## makeCacheMatrix(), reusing the cached inverse when it is available.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinverse()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinverse(inv)
inv
}
|
/aulasNivelHard/aula16 - selecionando_elementos_nas_matrizes.R
|
no_license
|
rafaelandradeslv/AprendendoR
|
R
| false | false | 1,202 |
r
| ||
#
# _ _ _ _ _
# (_) | | | | | | | |
# _ __ ___ _ _ __ | |_ | |__ | | __ _ _ __ | | __
# | '_ \ / _ \ | || '_ \ | __|| '_ \ | | / _` || '_ \ | |/ /
# | |_) || (_) || || | | || |_ | |_) || || (_| || | | || <
# | .__/ \___/ |_||_| |_| \__||_.__/ |_| \__,_||_| |_||_|\_\
# | |
# |_|
#
# This file is part of the 'rich-iannone/pointblank' package.
#
# (c) Richard Iannone <riannone@me.com>
#
# For full copyright and license information, please look at
# https://rich-iannone.github.io/pointblank/LICENSE.html
#
#' Collect data extracts from a validation step
#'
#' @description
#' In an agent-based workflow (i.e., initiating with [create_agent()]), after
#' interrogation with [interrogate()], we can extract the row data that didn't
#' pass row-based validation steps with the `get_data_extracts()` function.
#' There is one discrete extract per row-based validation step and the amount of
#' data available in a particular extract depends on both the fraction of test
#' units that didn't pass the validation step and the level of sampling or
#' explicit collection from that set of units. These extracts can be collected
#' programmatically through `get_data_extracts()` but they may also be
#' downloaded as CSV files from the HTML report generated by the agent's print
#' method or through the use of [get_agent_report()].
#'
#' The availability of data extracts for each row-based validation step depends
#' on whether `extract_failed` is set to `TRUE` within the [interrogate()] call
#' (it is by default). The number of *fail* rows extracted depends on the
#' collection parameters in [interrogate()], and the default behavior is to
#' collect up to the first 5000 *fail* rows.
#'
#' Row-based validation steps are based on those validation functions of the
#' form `col_vals_*()` and also include [conjointly()] and [rows_distinct()].
#' Only functions from that combined set of validation functions can yield data
#' extracts.
#'
#' @param agent An agent object of class `ptblank_agent`. It should have had
#' [interrogate()] called on it, such that the validation steps were carried
#' out and any sample rows from non-passing validations could potentially be
#' available in the object.
#' @param i The validation step number, which is assigned to each validation
#' step in the order of definition. If `NULL` (the default), all data extract
#' tables will be provided in a list object.
#'
#' @return A list of tables if `i` is not provided, or, a standalone table if
#' `i` is given.
#'
#' @examples
#' # Create a series of two validation
#' # steps focused on testing row values
#' # for part of the `small_table` object;
#' # `interrogate()` immediately
#' agent <-
#' create_agent(
#' read_fn = ~ small_table %>%
#' dplyr::select(a:f),
#' label = "`get_data_extracts()`"
#' ) %>%
#' col_vals_gt(vars(d), value = 1000) %>%
#' col_vals_between(
#' vars(c),
#' left = vars(a), right = vars(d),
#' na_pass = TRUE
#' ) %>%
#' interrogate()
#'
#' # Using `get_data_extracts()` with its
#' # defaults returns of a list of tables,
#' # where each table is named after the
#' # validation step that has an extract
#' # available
#' agent %>% get_data_extracts()
#'
#' # We can get an extract for a specific
#' # step by specifying it in the `i`
#' # argument; let's get the failing rows
#' # from the first validation step
#' # (`col_vals_gt`)
#' agent %>% get_data_extracts(i = 1)
#'
#' @family Post-interrogation
#' @section Function ID:
#' 8-2
#'
#' @export
get_data_extracts <- function(agent,
i = NULL) {
# Stop function if the agent hasn't
# yet performed an interrogation
if (!inherits(agent, "has_intel")) {
stop(
"The `agent` has not yet performed an interrogation.",
call. = FALSE
)
}
# Get the number of validation steps
validation_steps <- unique(agent$validation_set$i)
if (is.null(i)) {
return(agent$extracts)
}
# Stop function if the `i`th step does not exist in `agent`
if (!(i %in% seq(validation_steps))) {
stop("The provided step number does not exist.", call. = FALSE)
}
# Get the names of the extracts
extract_names <- names(agent$extracts)
# Stop function if the `i`th step does not have an extract available
if (!(as.character(i) %in% extract_names)) {
stop(
"The provided step number does not have an associated extract.",
call. = FALSE
)
}
# Get the data extract
agent$extracts[[as.character(i)]]
}
|
/R/get_data_extracts.R
|
permissive
|
guptam/pointblank
|
R
| false | false | 4,756 |
r
|
#
# _ _ _ _ _
# (_) | | | | | | | |
# _ __ ___ _ _ __ | |_ | |__ | | __ _ _ __ | | __
# | '_ \ / _ \ | || '_ \ | __|| '_ \ | | / _` || '_ \ | |/ /
# | |_) || (_) || || | | || |_ | |_) || || (_| || | | || <
# | .__/ \___/ |_||_| |_| \__||_.__/ |_| \__,_||_| |_||_|\_\
# | |
# |_|
#
# This file is part of the 'rich-iannone/pointblank' package.
#
# (c) Richard Iannone <riannone@me.com>
#
# For full copyright and license information, please look at
# https://rich-iannone.github.io/pointblank/LICENSE.html
#
#' Collect data extracts from a validation step
#'
#' @description
#' In an agent-based workflow (i.e., initiating with [create_agent()]), after
#' interrogation with [interrogate()], we can extract the row data that didn't
#' pass row-based validation steps with the `get_data_extracts()` function.
#' There is one discrete extract per row-based validation step and the amount of
#' data available in a particular extract depends on both the fraction of test
#' units that didn't pass the validation step and the level of sampling or
#' explicit collection from that set of units. These extracts can be collected
#' programmatically through `get_data_extracts()` but they may also be
#' downloaded as CSV files from the HTML report generated by the agent's print
#' method or through the use of [get_agent_report()].
#'
#' The availability of data extracts for each row-based validation step depends
#' on whether `extract_failed` is set to `TRUE` within the [interrogate()] call
#' (it is by default). The number of *fail* rows extracted depends on the
#' collection parameters in [interrogate()], and the default behavior is to
#' collect up to the first 5000 *fail* rows.
#'
#' Row-based validation steps are based on those validation functions of the
#' form `col_vals_*()` and also include [conjointly()] and [rows_distinct()].
#' Only functions from that combined set of validation functions can yield data
#' extracts.
#'
#' @param agent An agent object of class `ptblank_agent`. It should have had
#' [interrogate()] called on it, such that the validation steps were carried
#' out and any sample rows from non-passing validations could potentially be
#' available in the object.
#' @param i The validation step number, which is assigned to each validation
#' step in the order of definition. If `NULL` (the default), all data extract
#' tables will be provided in a list object.
#'
#' @return A list of tables if `i` is not provided, or, a standalone table if
#' `i` is given.
#'
#' @examples
#' # Create a series of two validation
#' # steps focused on testing row values
#' # for part of the `small_table` object;
#' # `interrogate()` immediately
#' agent <-
#' create_agent(
#' read_fn = ~ small_table %>%
#' dplyr::select(a:f),
#' label = "`get_data_extracts()`"
#' ) %>%
#' col_vals_gt(vars(d), value = 1000) %>%
#' col_vals_between(
#' vars(c),
#' left = vars(a), right = vars(d),
#' na_pass = TRUE
#' ) %>%
#' interrogate()
#'
#' # Using `get_data_extracts()` with its
#' # defaults returns of a list of tables,
#' # where each table is named after the
#' # validation step that has an extract
#' # available
#' agent %>% get_data_extracts()
#'
#' # We can get an extract for a specific
#' # step by specifying it in the `i`
#' # argument; let's get the failing rows
#' # from the first validation step
#' # (`col_vals_gt`)
#' agent %>% get_data_extracts(i = 1)
#'
#' @family Post-interrogation
#' @section Function ID:
#' 8-2
#'
#' @export
get_data_extracts <- function(agent,
i = NULL) {
# Stop function if the agent hasn't
# yet performed an interrogation
if (!inherits(agent, "has_intel")) {
stop(
"The `agent` has not yet performed an interrogation.",
call. = FALSE
)
}
# Get the number of validation steps
validation_steps <- unique(agent$validation_set$i)
if (is.null(i)) {
return(agent$extracts)
}
# Stop function if the `i`th step does not exist in `agent`
if (!(i %in% seq(validation_steps))) {
stop("The provided step number does not exist.", call. = FALSE)
}
# Get the names of the extracts
extract_names <- names(agent$extracts)
# Stop function if the `i`th step does not have an extract available
if (!(as.character(i) %in% extract_names)) {
stop(
"The provided step number does not have an associated extract.",
call. = FALSE
)
}
# Get the data extract
agent$extracts[[as.character(i)]]
}
|
# packages needed by the functions in this script
# (ggthemes is only used via the ggthemes:: namespace below)
library(leaflet)
library(data.table)

DeviceMapping <- function(dataframe, basemap = "Esri.WorldTopoMap") {
dat <- dataframe[complete.cases(dataframe[, c("long_x", "lat_y")]), ]
unique.id <- unique(dat$ndowid)
pal <- ggthemes::gdocs_pal()(20)
device.map <- leaflet() %>%
addProviderTiles(basemap, group = "topo") %>%
addProviderTiles("MapQuestOpen.Aerial", group = "satelite")
layer.group <- list()
for(i in 1:length(unique.id)) {
df <- dat[dat$ndowid == unique.id[i], ]
df <- df[order(df$timestamp), ]
device.map <- addPolylines(device.map,
lng = df$long_x, lat = df$lat_y,
group = as.character(unique.id[i]),
color = "grey",
weight = 2
)
device.map <- addCircleMarkers(device.map,
lng = df$long_x, lat = df$lat_y,
group = as.character(unique.id[i]),
radius = 4,
stroke = FALSE,
fillOpacity = .8,
color = pal[i],
popup = paste(sep = "<br>",
paste("<b>NDOW ID:</b> ", unique.id[i]),
paste("<b>timestamp:</b> ", df$timestamp),
paste("<b>LocID</b>: ", df$locid))
)
layer.group <- c(layer.group, as.character(unique.id[i]))
}
device.map <- addLayersControl(device.map,
baseGroup = c("topo", "satellite"),
overlayGroups = layer.group)
return(device.map)
}
# DeviceMapping(dat)  # commented out: `dat` is only read in further below
# for(i in 1:length(unique.id)) {
# df <- dat[dat$ndowID == unique.id[i], ]
# df <- df[order(df$timestamp), ]
# if (shape == "points") {
# } else if (shape == "lines") {
# device.map <- addPolylines(device.map,
# lng = df$long_x, lat = df$lat_y,
# group = unique.id[i],
# weight = 2,
# color = pal[i])
# }
# layer.group <- c(layer.group, unique.id[i])
# }
# device.map <- addLayersControl(device.map,
# baseGroup = c("topo", "satellite"),
# overlayGroups = layer.group,
# options = layersControlOptions(collapsed = FALSE))
# return(device.map)
# }
dat <- read.csv('CollarData (2).csv')
DeviceMapping(dat)
## I WANT TO TRY AND USE LAPPLY TO CONSTRUCT THE MAP...
# builder function for lapply to call
build_leaflet_layers <- function(x, df, map, geometry = "points") {
df <- df[df$ndowid == x, ]
map <- addCircleMarkers(map,
lng = df$long_x, lat = df$lat_y,
group = as.character(x),
radius = 3,
stroke = FALSE,
fillOpacity = .8,
color = "navy",
popup = as.character(x))
}
device_map <- function(dataframe) {
dat <- dataframe[complete.cases(dataframe[, c("long_x", "lat_y")]), ]
unique.id <- unique(dat$ndowid)
device.map <- leaflet() %>%
addProviderTiles('Esri.WorldTopoMap')
device.map <- lapply(unique.id, function(x) build_leaflet_layers(x, dat, device.map, "points"))
return(device.map)
}
device_map(dat)
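## NOTE (added sketch, not in the original script): the lapply version above
## returns a list of single-layer maps rather than one accumulated map. One
## way to keep the functional style and still build a single map is Reduce(),
## assuming the same columns (ndowid, long_x, lat_y) used elsewhere here:
device_map_reduce <- function(dataframe) {
  dat <- dataframe[complete.cases(dataframe[, c("long_x", "lat_y")]), ]
  base.map <- leaflet() %>% addProviderTiles("Esri.WorldTopoMap")
  # each step receives the map built so far and adds one animal's markers
  Reduce(
    function(map, id) {
      df <- dat[dat$ndowid == id, ]
      addCircleMarkers(map,
                       lng = df$long_x, lat = df$lat_y,
                       group = as.character(id),
                       radius = 3, stroke = FALSE, fillOpacity = .8,
                       color = "navy", popup = as.character(id))
    },
    unique(dat$ndowid),
    init = base.map
  )
}
# device_map_reduce(dat)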
leaflet() %>% addTiles() %>% addCircleMarkers(dat$long_x, dat$lat_y)
## USING DATA.TABLE TO ONLY PLOT EVERY 20 POINTS, THE LINE IS GOING TO BE EVERY POINT
# I don't know if this is really necessary. It looks okay, but isn't really that great,
# may be better when there are a ton of points. The big change here however is that I'm using
# data.table for the dataframe manipulations. I'll leave it in, but comment out the line.
DeviceMapping <- function(dataframe, basemap = "Esri.WorldTopoMap") {
dat <- as.data.table(dataframe)
dat <- dat[complete.cases(dat[, .(long_x, lat_y)])]
unique.id <- unique(dat$ndowid)
pal <- ggthemes::gdocs_pal()(20)
device.map <- leaflet() %>%
addProviderTiles(basemap, group = "topo") %>%
addProviderTiles("MapQuestOpen.Aerial", group = "satelite")
layer.group <- list()
for(i in 1:length(unique.id)) {
df <- dat[ndowid == unique.id[i]]
device.map <- addPolylines(device.map,
lng = df$long_x, lat = df$lat_y,
group = as.character(unique.id[i]),
color = "grey",
weight = 2
)
#df <- df[, .SD[c(seq(1, .N, 5), .N)]]
device.map <- addCircleMarkers(device.map,
lng = df$long_x, lat = df$lat_y,
group = as.character(unique.id[i]),
radius = 3,
stroke = FALSE,
fillOpacity = .5,
color = pal[i],
popup = paste(sep = "<br>",
paste("<b>NDOW ID:</b> ", unique.id[i]),
paste("<b>timestamp:</b> ", df$timestamp),
paste("<b>LocID</b>: ", df$locid))
)
layer.group <- c(layer.group, as.character(unique.id[i]))
}
device.map <- addLayersControl(device.map,
baseGroup = c("topo", "satellite"),
overlayGroups = layer.group)
return(device.map)
}
DeviceMapping(dat)
|
/scripts/DeviceMap-fixes.R
|
no_license
|
mgritts/CollarDataExport
|
R
| false | false | 5,816 |
r
|
# compute mml using glm
# glm selects the 1st value of a variable as the reference when fitting a model, hence when dealing with binary data
# an indicator matrix that contains 0 and 1 is required, where 0 -- A and 1 -- B, since "A" appears to be the 1st value
# when our dataset contains values such as "A", "B", "C", ...
# should obtain same answer as using optim, because glm also use mle
################################################## pre process data ############################################
# transform binary data into 0 and 1
getIndicator = function(data) {
indicatorMatrix = matrix(nrow = nrow(data), ncol = ncol(data), dimnames = list(NULL, colnames(data)))
for (i in 1:ncol(data)) {
    # as.numeric converts categorical data values to 1, 2, 3, ...
    # subtracting 1 shifts them to 0, 1, 2, ...
indicatorMatrix[, i] = as.numeric(data[, i]) - 1
}
# add an extra column of 1s in front for the intercept of a logit model
#indicatorMatrix = cbind(1, indicatorMatrix)
return(indicatorMatrix)
}
################################################## negative log likelihood
# inner product of vectors X and Beta with the same length, where
# # X[1] = 1 and beta[1] = beta0
innerProd = function(beta, X) {
summation = 0
for (i in 1:length(beta)) summation = summation + X[i] * beta[i]
return(summation)
}
# log likelihood for a single row of data
logLikeSingle = function(indicatorMatrix, yIndex, xIndices, beta, rowIndex, base) {
betaDotX = innerProd(beta, c(1, indicatorMatrix[rowIndex, xIndices]))
logLike = -log(1 + exp(betaDotX), base) + indicatorMatrix[rowIndex, yIndex] * betaDotX
return(logLike)
}
# negative log likelihood for the entire dataset
negLogLike = function(indicatorMatrix, yIndex, xIndices, beta, base) {
logLike = 0
# cumulative sum log likelihood for the entire data set
for (i in 1:nrow(indicatorMatrix)) {
logLike = logLike + logLikeSingle(indicatorMatrix, yIndex, xIndices, beta, i, base)
}
return(-logLike)
}
# 2nd derivative of the negative log likelihood for computing the fisher information matrix
# differentiate w.r.t. j and k, where j, k = 1, 2, ..., m+1, and m+1 = length(beta)
negLoglike2ndDerivativeSingle = function(indicatorMatrix, xIndices, beta, rowIndex, j, k) {
betaDotX = innerProd(beta, c(1, indicatorMatrix[rowIndex, xIndices]))
# the jth coordinate of the vector x_i
x_ij = c(1, indicatorMatrix[rowIndex, ])[j]
# the kth coordinate of the vector x_i
x_ik = c(1, indicatorMatrix[rowIndex, ])[k]
nll2ndSingle = (exp(betaDotX) / (1 + exp(betaDotX)) ^ 2) * x_ij * x_ik
return(nll2ndSingle)
}
#
#
negLoglike2ndDerivative = function(indicatorMatrix, xIndices, beta, j, k) {
nll2nd = 0
for (i in 1:nrow(indicatorMatrix)) {
nll2nd = nll2nd + negLoglike2ndDerivativeSingle(indicatorMatrix, xIndices, beta, i, j, k)
}
return(nll2nd)
}
################################################## computing entries of fisher information matrix
fisherMatrix = function(indicatorMatrix, yIndex, xIndices, beta) {
FIM = matrix(NA, length(beta), length(beta))
#fill in the (1, 1) entry of FIM
FIM[1, 1] = negLoglike2ndDerivative(indicatorMatrix, xIndices, beta, 1, 1)
# fill in the lower triangular FIM
for (j in 2:nrow(FIM)) {
for (k in 2:j) {
FIM[j, k] = negLoglike2ndDerivative(indicatorMatrix, xIndices, beta, j, k)
} # end for k
} # end for j
  # the 1st column is identical to the diagonal of FIM
FIM[, 1] = diag(FIM)
# the upper triangular is identical to the lower triangular FIM
FIM[upper.tri(FIM)] = t(FIM)[upper.tri(t(FIM))]
return(FIM) # return the complete FIM
}
# calculate log of the determinant of a matrix
# matrix has to be symmetric positive definite
# use cholesky decomposition to decompose matrix FIM = L*transpose(L)
logDeterminant = function(matrix) {
choleskeyUpper = chol(matrix)
logDet = 0
for (i in 1:nrow(matrix)) {
    logDet = logDet + 2 * log(diag(choleskeyUpper)[i], base = 2) # det(A) = prod(diag(chol(A)))^2
}
return(logDet)
}
###################################### msg len with no predictor #####################################
# check for this
msgLenWithNoPredictors = function(data, indicatorMatrix, yIndex, cardinalities, allNodes, sigma, base) {
# formula for empty model
formula = paste(allNodes[yIndex], "~ 1")
# estimate parameter of logit model using glm
beta = glm(formula, family = binomial(link = "logit"), data = data)$coefficients
# value for the negative log likelihood
nll = negLogLike(indicatorMatrix, yIndex, NULL, beta, base)
# fisher information matrix
fisherInfoMatrix = negLoglike2ndDerivative(indicatorMatrix, NULL, beta, 1, 1)
# log of the determinant of the FIM
logFisher = log(fisherInfoMatrix, base)
# computing mml
mml = 0.5 * log(2 * pi, base) + log(sigma, base) - 0.5 * log(cardinalities[yIndex], base) +
0.5 * beta ^ 2 / sigma ^ 2 + 0.5 * logFisher + nll + 0.5 * (1 + log(0.083333, base))
# store results in a list
lst = list(beta, nll, logFisher, mml)
names(lst) = c("par", "nll", "logFisher", "mml")
return(lst)
}
################################################## msg len ############################################
msgLenWithPredictors = function(data, indicatorMatrix, yIndex, xIndices, cardinalities,
allNodes, sigma, base) {
# arity of dependent variable y
arityOfY = cardinalities[yIndex]
# this is for binary case
nFreePar = length(xIndices) + 1
# lattice constant
k = c(0.083333, 0.080188, 0.077875, 0.07609, 0.07465, 0.07347, 0.07248, 0.07163)
if (nFreePar <= length(k)) {
latticeConst = k[nFreePar]
} else {
latticeConst = min(k)
}
# create formula for fitting logit model using glm
formula = paste(allNodes[yIndex], "~", paste0(allNodes[xIndices], collapse = "+"))
# parameter estimation of negative log likelihood using GLM
# glm always use the first level (in this case "A") for reference when estimating coefficients
# the reference can be changed by change the order of levels in data frame using relevel()
fittedLogit = glm(formula, family = binomial(link = "logit"), data = data)
# value for the negative log likelihood
nll = negLogLike(indicatorMatrix, yIndex, xIndices, fittedLogit$coefficients, base)
# fisher information matrix
fisherInfoMatrix = fisherMatrix(indicatorMatrix, yIndex, xIndices, fittedLogit$coefficients)
# log of the determinant of the FIM
logFisher = logDeterminant(fisherInfoMatrix)
# computing mml
mmlFixedPart = 0.5 * nFreePar * log(2 * pi, base) + nFreePar * log(sigma, base) - 0.5 * log(arityOfY, base) -
0.5 * sum((cardinalities[xIndices] - 1) * log(arityOfY, base) +
(arityOfY - 1) * log(cardinalities[xIndices], base)) + 0.5 * nFreePar*(1 + log(latticeConst, base))
# sum of logit parameters square
sumParSquare = 0
  for (i in 1:nFreePar) sumParSquare = sumParSquare + fittedLogit$coefficients[i] ^ 2
mmlNonFixedPart = 0.5 * sumParSquare / sigma ^ 2 + 0.5 * logFisher + nll
mml = mmlFixedPart + mmlNonFixedPart
# store results in a list
lst = list(fittedLogit$coefficients, nll, logFisher, mml)
names(lst) = c("par", "nll", "logFisher", "mml")
return(lst)
}
#################################################### function ####################################
# search for mb using mml
mbMMLLogit = function(data, indicatorMatrix, y, sigma = 3, base = 2, debug = FALSE) {
allNodes = colnames(data)
numNodes = length(allNodes)
yIndex = which(allNodes == y)
unUsedNodesIndexes = (1:numNodes)[-yIndex]
cardinalities = rep(2, numNodes)
for (j in 1:ncol(data)) cardinalities[j] = nlevels(data[, j])
cmb = c()
# x = c()
if (debug) {
cat("----------------------------------------------------------------\n")
cat("* learning the Markov blanket of", y, "\n")
} # then
# msg len to encode the size k of mb
# k takes integer value from [0, n - 1] uniformly
# logK = log(numNodes)
# msg len of empty markov blanket
mmlMini = msgLenWithNoPredictors(data, indicatorMatrix, yIndex, cardinalities, allNodes, sigma, base)$mml[[1]]
if (debug) {
cat(" > empty MB has msg len:", round(mmlMini, 2), "\n")
} # then
repeat{ # use greedy search for an optimal mb
if (debug) {
cat(" * calculating msg len \n")
} # end debug
toAdd = NULL
# msg len to encode the mb
# logK is explained above
# the second part is the msg len to encode which k nodes to choose from all n - 1 nodes
# log (n - 1 choose k) is the second part
# logK + log(choose(numNodes - 1, length(cmb) + 1))
for (i in 1:length(unUsedNodesIndexes)) { # combine each remaining node with current mb and compute mml
mmlCurrent = msgLenWithPredictors(data, indicatorMatrix, yIndex, c(unUsedNodesIndexes[i], cmb),
cardinalities, allNodes, sigma, base)$mml
if (debug) {
cat(" >", allNodes[unUsedNodesIndexes[i]], "has msg len:", round(mmlCurrent, 2), "\n")
} # end debug
if (mmlCurrent < mmlMini) { # if adding this node decreases the mml score, then replace mml and add into cmb
mmlMini = mmlCurrent
toAdd = i
}
}
# stop when there is nothing to add from the remaining nodes
# that is when mml score does not decrease
# it indicates adding more nodes into cmb does not make current model better
if (is.null(toAdd)) {
print("No better candidate to add \n")
break
} # end if
cmb = c(cmb, unUsedNodesIndexes[toAdd])
if (debug) {
cat(" @", allNodes[unUsedNodesIndexes[toAdd]], "include in the Markov blanket", "\n")
cat(" > Markov blanket (", length(cmb), "nodes ) now is '", allNodes[cmb], "'\n")
} # end debug
# remove added node index from unchecked nodes indices
unUsedNodesIndexes = unUsedNodesIndexes[-toAdd]
# if 0 node left for inclusion stop
if (length(unUsedNodesIndexes) == 0) {
print("No more node to add \n")
break
} # end if
} # end repeat
if (debug) {
cat(" * Algorithm stops! \n")
}
return(allNodes[cmb])
}
# when testing on asia net, glm shows warnings, glm.fit: fitted probabilities numerically 0 or 1 occurred
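# --- Added usage sketch (not part of the original script) ---
# The data below are simulated purely for illustration; column names and sizes
# are arbitrary, but every column must be a binary factor, as assumed above.
# set.seed(1)
# toy = data.frame(A = factor(sample(c("A", "B"), 100, replace = TRUE)),
#                  B = factor(sample(c("A", "B"), 100, replace = TRUE)),
#                  C = factor(sample(c("A", "B"), 100, replace = TRUE)))
# indicatorToy = getIndicator(toy)
# mbMMLLogit(toy, indicatorToy, y = "A", sigma = 3, base = 2, debug = TRUE)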
|
/RStudioProjects/mbDiscoveryR/others/mbMML with GLM.R
|
no_license
|
kelvinyangli/PhDProjects
|
R
| false | false | 10,682 |
r
|
#################
# RiMod Frontal Neuron ChIP-seq analysis
# Using DiffBind
#################
library(DiffBind)
library(stringr)
setwd("/media/kevin/89a56127-927e-42c0-80de-e8a834dc81e8/rimod/chipseq/frontal_sakib/")
##
# Create Metadata file
##
chip.md <- read.csv("../chipseq_frontal_neuron_md.txt")
chip.md$Human_ID <- str_pad(chip.md$Human_ID, width = 5, side = "left", pad = "0")
# rimod overall sample sheet
md <- read.csv("~/rimod/files/FTD_Brain.csv")
md$SAMPLEID <- str_pad(md$SAMPLEID, width = 5, side = "left", pad = "0")
md <- md[md$SAMPLEID %in% chip.md$Human_ID,]
md <- md[md$REGION == "frontal",]
md <- data.frame(sample=md$SAMPLEID, age=md$AGE, sex=md$GENDER, mutation=md$GENE)
# fill out missing sample 11014
#tmp <- data.frame(sample="11014", age=58, sex="M", mutation="Ser82Val")
#md <- rbind(md, tmp)
md <- merge(chip.md, md, by.x="Human_ID", by.y="sample")
# adjust sample name
md$Sample_name <- str_split(md$Sample_name, pattern="sr_", simplify = T)[,2]
# get design file to match with chip-seq file
design <- read.table("../analysis_neuron_290120/rimod_chipseq/design_rimod_chipseq_frontal_neuron.csv", sep=",", header=T)
design$sample <- paste(design$group, paste0("R",design$replicate), sep="_")
design <- design[!grepl("Input", design$sample),]
design$fastq_1 <- str_split(design$fastq_1, pattern="sakibm_", simplify = T)[,2]
design$fastq_1 <- str_split(design$fastq_1, pattern="_S", simplify = T)[,1]
design$fastq_1 <- gsub("/home/kevin/Raw_FASTQ_files_H3K4me3_Frontal_FTLD/", "", design$fastq_1)
design$fastq_1 <- gsub(".fastq.gz", "", design$fastq_1)
design <- design[, c(3, 7)]
# final merge of design and md
md <- merge(md, design, by.x="Sample_name", by.y="fastq_1")
rownames(md) <- md$sample
# Fit metadata for DiffBind
data_dir <- "/media/kevin/89a56127-927e-42c0-80de-e8a834dc81e8/rimod/chipseq/frontal_sakib/results/bwa/mergedLibrary/macs/narrowPeak/"
bam_dir <- "/media/kevin/89a56127-927e-42c0-80de-e8a834dc81e8/rimod/chipseq/frontal_sakib/results/bwa/mergedLibrary/"
md$Peaks <- paste(data_dir, md$sample, "_peaks.xls", sep="")
md$bamReads <- paste(bam_dir, md$sample, ".mLb.clN.sorted.bam", sep="")
md$PeakCaller <- rep("macs", nrow(md))
md$PeakFormat <- rep("macs", nrow(md))
md$SampleID <- md$sample
md$Condition <- md$group
write.csv(md, "chipseq_samplesheet.csv")
####
# MAPT analysis
####
md.mapt <- md[md$group %in% c("GRN", "NDC"),]
write.csv(md.mapt, "MAPT_chipseq_samplesheet.csv")
# load
mapt <- dba(sampleSheet = "MAPT_chipseq_samplesheet.csv")
# count
mapt <- dba.count(mapt)
# contrast
mapt <- dba.contrast(mapt)
# analyze
mapt <- dba.analyze(mapt)
mapt.res <- dba.report(mapt)
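# (added sketch) dba.report() returns a GRanges object of the differentially
# bound sites; if a flat file is wanted, one option (assuming the default
# report columns are sufficient) is:
# write.csv(as.data.frame(mapt.res), "diffbind_report.csv", row.names = FALSE)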
# make object
# (left commented out: `frontal` is never created in this script, so these
#  calls would fail as written)
# mapt.mask <- dba.mask(frontal, DBA_CONDITION, "MAPT")
# frontal <- dba.count(frontal)
|
/kevin/rimod-analysis/chipseq/frontal_neuron_chipseq_analysis_DiffBind.R
|
no_license
|
dznetubingen/analysis_scripts
|
R
| false | false | 2,745 |
r
|
#======================================================
# Use systematic investors toolbox (SIT)
# Load Systematic Investor Toolbox (SIT)
# http://www.r-bloggers.com/backtesting-minimum-variance-portfolios/
# https://systematicinvestor.wordpress.com/2011/12/13/backtesting-minimum-variance-portfolios/
# https://systematicinvestor.wordpress.com/2013/03/22/maximum-sharpe-portfolio/
#=========================================================
#setInternet2(TRUE)
# Load Systematic Investor Toolbox (SIT)
rm(list=ls())
#setInternet2(TRUE)
con = gzcon(url('https://github.com/systematicinvestor/SIT/raw/master/sit.gz', 'rb'))
source(con)
close(con)
#*****************************************************************
# Load historical data
#******************************************************************
load.packages('quantmod,quadprog,lpSolve')
tickers = spl('SPY,QQQ,EEM,IWM,EFA,TLT,IYR,GLD')
data <- new.env()
getSymbols(tickers, src = 'yahoo', from = '1980-01-01', env = data, auto.assign = T)
for(i in ls(data)) data[[i]] = adjustOHLC(data[[i]], use.Adjusted=T)
data.weekly <- new.env()
for(i in tickers) data.weekly[[i]] = to.weekly(data[[i]], indexAt='endof')
bt.prep(data, align='remove.na', dates='1990::2018')
bt.prep(data.weekly, align='remove.na', dates='1990::2018') # same range as the daily data so the weekly lookup below finds every date
#*****************************************************************
# Code Strategies
#******************************************************************
prices = data$prices
n = ncol(prices)
n
# find week ends
week.ends = endpoints(prices, 'weeks')
week.ends = week.ends[week.ends > 0]
# Equal Weight 1/N Benchmark
data$weight[] = NA
data$weight[week.ends,] = ntop(prices[week.ends,], n)
capital = 100000
data$weight[] = (capital / prices) * data$weight
equal.weight = bt.run(data, type='share')
#*****************************************************************
# Create Constraints
#*****************************************************************
constraints = new.constraints(n, lb = -Inf, ub = +Inf)
# SUM x.i = 1
constraints = add.constraints(rep(1, n), 1, type = '=', constraints)
ret = prices / mlag(prices) - 1
weight = coredata(prices)
weight[] = NA
for( i in week.ends[week.ends >= (63 + 1)] ) {
# one quarter is 63 days
hist = ret[ (i- 63 +1):i, ]
# create historical input assumptions
ia = create.historical.ia(hist, 252)
s0 = apply(coredata(hist),2,sd)
ia$cov = cor(coredata(hist), use='complete.obs',method='pearson') * (s0 %*% t(s0))
weight[i,] = min.risk.portfolio(ia, constraints)
}
# Minimum Variance
data$weight[] = weight
capital = 100000
data$weight[] = (capital / prices) * data$weight
min.var.daily = bt.run(data, type='share', capital=capital)
#
#*****************************************************************
# Code Strategies: Weekly
#******************************************************************
retw = data.weekly$prices / mlag(data.weekly$prices) - 1
weightw = coredata(prices)
weightw[] = NA
# i = 1793 # leftover debugging value; the loop below sets i itself
for( i in week.ends[week.ends >= (63 + 1)] ) {
# map
j = which(index(ret[i,]) == index(retw))
# one quarter = 13 weeks
hist = retw[ (j- 13 +1):j, ]
# create historical input assumptions
ia = create.historical.ia(hist, 52)
s0 = apply(coredata(hist),2,sd)
ia$cov = cor(coredata(hist), use='complete.obs',method='pearson') * (s0 %*% t(s0))
weightw[i,] = min.risk.portfolio(ia, constraints)
}
data$weight[] = weightw
capital = 100000
data$weight[] = (capital / prices) * data$weight
min.var.weekly = bt.run(data, type='share', capital=capital)
#*****************************************************************
# Create Report
#******************************************************************
plotbt.custom.report.part1(min.var.weekly, min.var.daily, equal.weight)
# plot Daily and Weekly transition maps
layout(1:2)
plotbt.transition.map(min.var.daily$weight)
legend('topright', legend = 'min.var.daily', bty = 'n')
plotbt.transition.map(min.var.weekly$weight)
legend('topright', legend = 'min.var.weekly', bty = 'n')
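# (added) An optional side-by-side performance summary, assuming the SIT helper
# below is available in the loaded toolbox version:
# plotbt.strategy.sidebyside(min.var.weekly, min.var.daily, equal.weight)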
|
/bt_mvp.R
|
no_license
|
nana574/portfolio_2019_spring
|
R
| false | false | 4,064 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/praiseme.r
\name{praiseme}
\alias{praiseme}
\title{Gives praise}
\usage{
praiseme()
}
\description{
Gives praise in times of need.
}
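% Minimal example added for illustration; praiseme() takes no arguments (see \usage).
\examples{
praiseme()
}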
|
/man/praiseme.Rd
|
no_license
|
stephstammel/praiseme-1
|
R
| false | true | 242 |
rd
|
# library for str_pad - allows adding leading zeros
library(stringr)
# unzip source file (downloaded from coursera website)
unzip("rprog-data-specdata.zip")
# returns a table containing all requested files.
readfiles <- function(dir = "specdata", id = 1:332) {
res_table <- c()
for(f in id){
# modify number to 3 digits zerofilled character
f000 <- str_pad(f, 3, side = "left", pad = 0)
file_name <- paste(dir, "/", f000, ".csv", sep="")
cur_file <- read.csv(file_name)
res_table <- rbind(res_table, cur_file)
}
res_table
}
pollutantmean <- function(directory, pollutant, id = 1:332){
pollut_data <- readfiles(directory, id)
round(mean(pollut_data[,pollutant], na.rm = TRUE), 3)
}
complete <- function(directory = "specdata", id = 1:332){
  # create data frame with the right number of rows (id column holds the monitor IDs)
  result <- data.frame(id = id, nobs = NA)
# k - counter, will help to fill "result" data frame
k <- 1
for(i in id){
pollut_data <- readfiles(directory, i)
#file for this particular id (only required columns)
pollution_for_i <- pollut_data[, c("sulfate", "nitrate")]
# adding this complete cases for this id to result data frame
result$nobs[k] <- sum(complete.cases(pollution_for_i))
# counter for following rows
k <- k+1
}
result
}
corr <- function(directory = "specdata", threshold = 0){
  com_case <- complete(directory)
files_for_corr <- com_case$id[com_case$nobs > threshold]
result <- c()
class(result) <- "numeric"
for(i in files_for_corr){
pollut_data <- readfiles(directory, i)
pollut_data_cc <- pollut_data[
complete.cases(pollut_data),
c("sulfate", "nitrate")]
result <- c(result,
cor(pollut_data_cc$sulfate, pollut_data_cc$nitrate))
}
result
}
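# --- Added example calls (kept commented; results depend on the actual data) ---
# These assume the unzipped "specdata" directory created above sits in the
# working directory:
# pollutantmean("specdata", "sulfate", 1:10)
# head(complete("specdata", 30:25))
# cr <- corr("specdata", 150)
# summary(cr)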
|
/PA1/corr.R
|
no_license
|
vmugue/Coursera_R
|
R
| false | false | 1,972 |
r
|
\name{qb.hpdone}
\alias{qb.hpdone}
\alias{summary.qb.hpdone}
\alias{print.qb.hpdone}
\alias{plot.qb.hpdone}
\title{Highest probability density (HPD) region.}
\description{
Determine HPD region across genome, including position of posterior mode.
}
\usage{
qb.hpdone(qbObject, level = 0.5, profile = "2logBF",
effects = "cellmean", scan = "sum", chr, smooth = 3, \dots)
\method{summary}{qb.hpdone}(object, chr, digits = 3, \dots)
\method{print}{qb.hpdone}(x, \dots)
\method{plot}{qb.hpdone}(x, chr, \dots)
}
\arguments{
\item{qbObject}{Object of class \code{qb}.}
\item{object}{Object of class \code{qb.hpdone}.}
\item{x}{Object of class \code{qb.hpdone}.}
\item{level}{Value between 0 and 1 of HPD coverage.}
\item{scan}{Elements to scan; usually one of \code{"sum"},
\code{"mean"}, \code{"epistasis"}, \code{"GxE"}.}
\item{smooth}{Degree of smoothing.}
\item{chr}{Chromosomes to include; default determined by HPD region.}
\item{effects}{Effects are \code{"cellmean"} for means by genotype;
\code{"estimate"} for estimates of Cockerham main effects.}
\item{profile}{Objective profile for plot; default is \code{"2logBF"};
other choices found in option \code{type} for
\code{\link{qb.scanone}}.}
\item{digits}{Number of digits for \code{\link[base]{round}}.}
\item{\dots}{Extra parameters passed along to plot.}
}
\details{
Determine 100*\code{level} percent HPD region. Subset chromosomes based
on HPD region. Create genome scans for \code{profile} and \code{effects}.
}
\value{
\code{qb.hpdone} is a list with a \code{hpd.region} summary matrix and
\code{\link{qb.scanone}} objects for the \code{profile} and
\code{effects}. A summary of a \code{qb.hpdone} object yields a matrix
with columns for
\item{chr}{chromosome number}
\item{n.qtl}{estimated number of QTL on chromosome}
\item{pos}{estimated position of QTL}
\item{lo.nn\%}{lower nn\% HPD limit}
\item{hi.nn\%}{upper nn\% HPD limit}
\item{profile}{Peak of profile, identified by the profile type.}
\item{effects}{Columns for the effects, appropriately labeled.}
}
\references{http://www.qtlbim.org}
\author{Brian S. Yandell}
\seealso{\code{\link{qb.scanone}}, \code{\link{qb.hpdchr}}}
\examples{
data(qbExample)
temp <- qb.hpdone(qbExample)
summary(temp)
plot(temp)
}
\keyword{hplot}
|
/man/hpd.Rd
|
permissive
|
fboehm/qtlbim
|
R
| false | false | 2,306 |
rd
|
#' @docType class
#' @title Pig
#'
#' @description Pig Class
#'
#' @format An \code{R6Class} generator object
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
Pig <- R6::R6Class(
"Pig",
public = list(
#' @field actual_instance the object stored in this instance.
actual_instance = NULL,
#' @field actual_type the type of the object stored in this instance.
actual_type = NULL,
#' @field one_of a list of types defined in the oneOf schema.
one_of = list("BasquePig", "DanishPig"),
#' Initialize a new Pig.
#'
#' @description
#' Initialize a new Pig.
#'
#' @param instance an instance of the object defined in the oneOf schemas: "BasquePig", "DanishPig"
#' @export
initialize = function(instance = NULL) {
if (is.null(instance)) {
# do nothing
} else if (get(class(instance)[[1]], pos = -1)$classname == "BasquePig") {
self$actual_instance <- instance
self$actual_type <- "BasquePig"
} else if (get(class(instance)[[1]], pos = -1)$classname == "DanishPig") {
self$actual_instance <- instance
self$actual_type <- "DanishPig"
} else {
stop(paste("Failed to initialize Pig with oneOf schemas BasquePig, DanishPig. Provided class name: ",
get(class(instance)[[1]], pos = -1)$classname))
}
},
#' Deserialize JSON string into an instance of Pig.
#'
#' @description
#' Deserialize JSON string into an instance of Pig.
#' An alias to the method `fromJSON` .
#'
#' @param input The input JSON.
#' @return An instance of Pig.
#' @export
fromJSONString = function(input) {
self$fromJSON(input)
},
#' Deserialize JSON string into an instance of Pig.
#'
#' @description
#' Deserialize JSON string into an instance of Pig.
#'
#' @param input The input JSON.
#' @return An instance of Pig.
#' @export
fromJSON = function(input) {
matched <- 0 # match counter
matched_schemas <- list() #names of matched schemas
error_messages <- list()
instance <- NULL
BasquePig_result <- tryCatch({
BasquePig$public_methods$validateJSON(input)
BasquePig_instance <- BasquePig$new()
instance <- BasquePig_instance$fromJSON(input)
instance_type <- "BasquePig"
matched_schemas <- append(matched_schemas, "BasquePig")
matched <- matched + 1
},
error = function(err) err
)
if (!is.null(BasquePig_result["error"])) {
error_messages <- append(error_messages, BasquePig_result["message"])
}
DanishPig_result <- tryCatch({
DanishPig$public_methods$validateJSON(input)
DanishPig_instance <- DanishPig$new()
instance <- DanishPig_instance$fromJSON(input)
instance_type <- "DanishPig"
matched_schemas <- append(matched_schemas, "DanishPig")
matched <- matched + 1
},
error = function(err) err
)
if (!is.null(DanishPig_result["error"])) {
error_messages <- append(error_messages, DanishPig_result["message"])
}
if (matched == 1) {
# successfully match exactly 1 schema specified in oneOf
self$actual_instance <- instance
self$actual_type <- instance_type
} else if (matched > 1) {
# more than 1 match
stop("Multiple matches found when deserializing the payload into Pig with oneOf schemas BasquePig, DanishPig.")
} else {
# no match
stop(paste("No match found when deserializing the payload into Pig with oneOf schemas BasquePig, DanishPig. Details: ",
paste(error_messages, collapse = ", ")))
}
self
},
#' Serialize Pig to JSON string.
#'
#' @description
#' Serialize Pig to JSON string.
#'
#' @return JSON string representation of the Pig.
#' @export
toJSONString = function() {
if (!is.null(self$actual_instance)) {
as.character(jsonlite::minify(self$actual_instance$toJSONString()))
} else {
NULL
}
},
#' Serialize Pig to JSON.
#'
#' @description
#' Serialize Pig to JSON.
#'
#' @return JSON representation of the Pig.
#' @export
toJSON = function() {
if (!is.null(self$actual_instance)) {
self$actual_instance$toJSON()
} else {
NULL
}
},
#' Validate the input JSON with respect to Pig.
#'
#' @description
#' Validate the input JSON with respect to Pig and
#' throw exception if invalid.
#'
#' @param input The input JSON.
#' @export
validateJSON = function(input) {
# backup current values
actual_instance_bak <- self$actual_instance
actual_type_bak <- self$actual_type
# if it's not valid, an error will be thrown
self$fromJSON(input)
# no error thrown, restore old values
self$actual_instance <- actual_instance_bak
self$actual_type <- actual_type_bak
},
#' Returns the string representation of the instance.
#'
#' @description
#' Returns the string representation of the instance.
#'
#' @return The string representation of the instance.
#' @export
toString = function() {
jsoncontent <- c(
sprintf('"actual_instance": %s', if (is.null(self$actual_instance)) NULL else self$actual_instance$toJSONString()),
sprintf('"actual_type": "%s"', self$actual_type),
sprintf('"one_of": "%s"', paste(unlist(self$one_of), collapse = ", "))
)
jsoncontent <- paste(jsoncontent, collapse = ",")
as.character(jsonlite::prettify(paste("{", jsoncontent, "}", sep = "")))
},
#' Print the object
#'
#' @description
#' Print the object
#'
#' @export
print = function() {
print(jsonlite::prettify(self$toJSONString()))
invisible(self)
}
),
# Lock the class to prevent modifications to the method or field
lock_class = TRUE
)
## Uncomment below to unlock the class to allow modifications of the method or field
#Pig$unlock()
#
## Below is an example to define the print function
#Pig$set("public", "print", function(...) {
# print(jsonlite::prettify(self$toJSONString()))
# invisible(self)
#})
## Uncomment below to lock the class to prevent modifications to the method or field
#Pig$lock()
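## Added usage sketch (kept commented; assumes the generated BasquePig and
## DanishPig classes from the same package are loaded, and that the JSON below
## validates against exactly one of their schemas)
#pig <- Pig$new()
#pig$fromJSONString('{"className": "BasquePig", "color": "black"}')
#pig$actual_type # expected: "BasquePig"
#pig$toJSONString()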
|
/samples/client/petstore/R/R/pig.R
|
permissive
|
tjquinno/openapi-generator
|
R
| false | false | 6,434 |
r
|