content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
---|---|---|---|---|---|---|---|---|---|
vblpcmdrawpie <- function(center,radius,probs,n=50,colours=1:length(probs))
{
x <- c(0,cumsum(probs)/sum(probs))
dx <- diff(x)
np <- length(probs)
for (i in 1:np)
{
t2p <- 2 * pi * seq(x[i], x[i + 1], length = n)
xc <- center[1] + c(cos(t2p), 0) * radius
yc <- center[2] + c(sin(t2p), 0) * radius
polygon(xc, yc, border = FALSE, col = colours[i])
}
}
plot.vblpcm<-function(x, ..., R2=0.2, main="Variational-Bayes Positions", alpha=0.5, colours=1:x$G, RET=FALSE)
{
model<-x$model
d<-x$d
N<-x$N
P_n<-x$P_n
G<-x$G
Y<-x$Y
V_xi_n<-x$V_xi_n
V_z<-x$V_z
V_eta<-x$V_eta
V_lambda<-x$V_lambda
V_alpha<-x$V_alpha
inv_sigma02<-x$inv_sigma02
omega2<-x$omega2
mu_nought<-0
if (d<=2)
{
V_z_2D<-V_z
V_eta_2D<-V_eta
}
if (d>2)
{
cat("Dimension of latent space is greater than 2: using cmdscale of positions for 2D plot\n")
V_z_2D<-cmdscale(dist(V_z))
V_eta_2D<-matrix(NaN,G,2)
for (g in 1:G)
{
for (dd in 1:2)
{
tmpsum1 = 0
tmpsum2 = 0
for (i in 1:N)
{
tmpsum1 = tmpsum1 + 0.5*V_lambda[g,i]*inv_sigma02*V_alpha[g]*V_z_2D[i,dd]
tmpsum2 = tmpsum2 + V_lambda[g,i]*0.5*inv_sigma02*V_alpha[g]
}
V_eta_2D[g,dd] = (tmpsum1 + 0.5*mu_nought/ omega2)/(tmpsum2+0.5/ omega2)
}
}
}
XLIM=c(min(V_z_2D[,1]),max(V_z_2D[,1]))
YLIM=c(min(V_z_2D[,2]),max(V_z_2D[,2]))
XLIM=XLIM*1.1
YLIM=YLIM*1.1
pad<-formals(plot.network.default)$pad
#if (model=="plain")
object.scale = formals(plot.network.default)$object.scale
piesize <- R2
if (model=="rreceiver" | model=="rsender")
{
#piesize <- R2*(exp(scale(V_xi_n)))
tmp<-scale(V_xi_n)
piesize <- R2+R2*tmp
}
if (model=="rsocial")
{
#piesize <- R2*(exp(scale(V_xi_n[seq(1,2*N,2)]+V_xi_n[seq(2,2*N,2)])))
#piesize <- R2*(exp(scale(V_xi_n[,1]+V_xi_n[,2])))
tmp<-scale(V_xi_n[,1]+V_xi_n[,2])
piesize <- R2+R2*tmp
}
pie.order <- order(piesize, decreasing = TRUE)
vertex.cex=0
if (model!="plain") vertex.cex=piesize*3.5 else vertex.cex=(piesize*diff(XLIM))/10
plot.network(x$net, coord = V_z_2D, main = main,
xlim = XLIM, ylim = YLIM,
suppress.axes = 0, edge.col = rgb(t(rep(190/255,3)),alpha=alpha), vertex.cex=vertex.cex, vertex.col=colours, ...)
# add unknown links to plot
for (i in 1:N)
for (j in 1:N)
if (is.na(Y[i,j])) #
points(V_z_2D[c(i,j),1], V_z_2D[c(i,j),2], col=8, lty=2, t='l')
plot_matrix<-cbind(V_z_2D,vertex.cex/2,t(V_lambda))
plot_func<-function(x) vblpcmdrawpie(center=x[1:2], radius=x[2+1], probs=x[(2+2):(2+1+G)], n = 20, colours=colours)
apply(plot_matrix, 1, plot_func)
points(V_eta_2D, col=colours, pch=3)
symbols(V_eta_2D, circles=2*sqrt(1/(x$inv_sigma02*x$V_alpha)), add=1, fg=colours, inches=FALSE)
#symbols(V_eta_2D, circles=sqrt(x$V_lambda%*%x$V_sigma2*x$sigma02), add=1, fg=colours, inches=FALSE)
#points(0,0,pch=3)
if (RET)
return(list(positions=V_z_2D,clusters=V_eta_2D))
}
|
/R/plot_network.R
|
no_license
|
cran/VBLPCM
|
R
| false | false | 3,068 |
r
|
vblpcmdrawpie <- function(center,radius,probs,n=50,colours=1:length(probs))
{
x <- c(0,cumsum(probs)/sum(probs))
dx <- diff(x)
np <- length(probs)
for (i in 1:np)
{
t2p <- 2 * pi * seq(x[i], x[i + 1], length = n)
xc <- center[1] + c(cos(t2p), 0) * radius
yc <- center[2] + c(sin(t2p), 0) * radius
polygon(xc, yc, border = FALSE, col = colours[i])
}
}
plot.vblpcm<-function(x, ..., R2=0.2, main="Variational-Bayes Positions", alpha=0.5, colours=1:x$G, RET=FALSE)
{
model<-x$model
d<-x$d
N<-x$N
P_n<-x$P_n
G<-x$G
Y<-x$Y
V_xi_n<-x$V_xi_n
V_z<-x$V_z
V_eta<-x$V_eta
V_lambda<-x$V_lambda
V_alpha<-x$V_alpha
inv_sigma02<-x$inv_sigma02
omega2<-x$omega2
mu_nought<-0
if (d<=2)
{
V_z_2D<-V_z
V_eta_2D<-V_eta
}
if (d>2)
{
cat("Dimension of latent space is greater than 2: using cmdscale of positions for 2D plot\n")
V_z_2D<-cmdscale(dist(V_z))
V_eta_2D<-matrix(NaN,G,2)
for (g in 1:G)
{
for (dd in 1:2)
{
tmpsum1 = 0
tmpsum2 = 0
for (i in 1:N)
{
tmpsum1 = tmpsum1 + 0.5*V_lambda[g,i]*inv_sigma02*V_alpha[g]*V_z_2D[i,dd]
tmpsum2 = tmpsum2 + V_lambda[g,i]*0.5*inv_sigma02*V_alpha[g]
}
V_eta_2D[g,dd] = (tmpsum1 + 0.5*mu_nought/ omega2)/(tmpsum2+0.5/ omega2)
}
}
}
XLIM=c(min(V_z_2D[,1]),max(V_z_2D[,1]))
YLIM=c(min(V_z_2D[,2]),max(V_z_2D[,2]))
XLIM=XLIM*1.1
YLIM=YLIM*1.1
pad<-formals(plot.network.default)$pad
#if (model=="plain")
object.scale = formals(plot.network.default)$object.scale
piesize <- R2
if (model=="rreceiver" | model=="rsender")
{
#piesize <- R2*(exp(scale(V_xi_n)))
tmp<-scale(V_xi_n)
piesize <- R2+R2*tmp
}
if (model=="rsocial")
{
#piesize <- R2*(exp(scale(V_xi_n[seq(1,2*N,2)]+V_xi_n[seq(2,2*N,2)])))
#piesize <- R2*(exp(scale(V_xi_n[,1]+V_xi_n[,2])))
tmp<-scale(V_xi_n[,1]+V_xi_n[,2])
piesize <- R2+R2*tmp
}
pie.order <- order(piesize, decreasing = TRUE)
vertex.cex=0
if (model!="plain") vertex.cex=piesize*3.5 else vertex.cex=(piesize*diff(XLIM))/10
plot.network(x$net, coord = V_z_2D, main = main,
xlim = XLIM, ylim = YLIM,
suppress.axes = 0, edge.col = rgb(t(rep(190/255,3)),alpha=alpha), vertex.cex=vertex.cex, vertex.col=colours, ...)
# add unknown links to plot
for (i in 1:N)
for (j in 1:N)
if (is.na(Y[i,j])) #
points(V_z_2D[c(i,j),1], V_z_2D[c(i,j),2], col=8, lty=2, t='l')
plot_matrix<-cbind(V_z_2D,vertex.cex/2,t(V_lambda))
plot_func<-function(x) vblpcmdrawpie(center=x[1:2], radius=x[2+1], probs=x[(2+2):(2+1+G)], n = 20, colours=colours)
apply(plot_matrix, 1, plot_func)
points(V_eta_2D, col=colours, pch=3)
symbols(V_eta_2D, circles=2*sqrt(1/(x$inv_sigma02*x$V_alpha)), add=1, fg=colours, inches=FALSE)
#symbols(V_eta_2D, circles=sqrt(x$V_lambda%*%x$V_sigma2*x$sigma02), add=1, fg=colours, inches=FALSE)
#points(0,0,pch=3)
if (RET)
return(list(positions=V_z_2D,clusters=V_eta_2D))
}
|
library(tidymodels)
stack_train <- readRDS("data/c2_train.rds")
stack_recipe <- recipe(remote ~ ., data = stack_train) %>%
step_downsample(remote)
## Build a logistic regression model
glm_spec <- ___ %>%
set_engine("glm")
## Start a workflow (recipe only)
stack_wf <- ___ %>%
add_recipe(stack_recipe)
## Add the model and fit the workflow
stack_glm <- stack_wf %>%
add_model(___) %>%
fit(data = stack_train)
# Print the fitted model
stack_glm
|
/exercises/exc_02_11_1.R
|
permissive
|
snowdj/supervised-ML-case-studies-course
|
R
| false | false | 470 |
r
|
library(tidymodels)
stack_train <- readRDS("data/c2_train.rds")
stack_recipe <- recipe(remote ~ ., data = stack_train) %>%
step_downsample(remote)
## Build a logistic regression model
glm_spec <- ___ %>%
set_engine("glm")
## Start a workflow (recipe only)
stack_wf <- ___ %>%
add_recipe(stack_recipe)
## Add the model and fit the workflow
stack_glm <- stack_wf %>%
add_model(___) %>%
fit(data = stack_train)
# Print the fitted model
stack_glm
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AA_Generics.R
\name{simulationFilter}
\alias{simulationFilter}
\title{Create \linkS4class{SimulationFilter} class}
\usage{
simulationFilter(product = "character", ...)
}
\arguments{
\item{product}{One of "directions", "rb3D", "images".}
\item{...}{}
}
\description{
Function for creating the \linkS4class{SimulationFilter} class
}
\examples{
}
\seealso{
\code{\link{SimulationFilter-class}}
}
|
/man/simulationFilter.Rd
|
no_license
|
kitbenjamin/daRt
|
R
| false | true | 473 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AA_Generics.R
\name{simulationFilter}
\alias{simulationFilter}
\title{Create \linkS4class{SimulationFilter} class}
\usage{
simulationFilter(product = "character", ...)
}
\arguments{
\item{product}{One of "directions", "rb3D", "images".}
\item{...}{}
}
\description{
Function for creating the \linkS4class{SimulationFilter} class
}
\examples{
}
\seealso{
\code{\link{SimulationFilter-class}}
}
|
# Exercise 4: practicing with dplyr
# Install the `"nycflights13"` package. Load (`library()`) the package.
# You'll also need to load `dplyr`
install.packages("nycflights13")
library(nycflights13)
library(dplyr)
# The data frame `flights` should now be accessible to you.
# Use functions to inspect it: how many rows and columns does it have?
# What are the names of the columns?
# Use `??flights` to search for documentation on the data set (for what the
# columns represent)
nrow(flights)
ncol(flights)
colnames(flights)
?flights
# Use `dplyr` to give the data frame a new column that is the amount of time
# gained or lost while flying (that is: how much of the delay arriving occured
# during flight, as opposed to before departing).
flights <- mutate(flights, time_change = arr_delay + dep_delay)
# Use `dplyr` to sort your data frame in descending order by the column you just
# created. Remember to save this as a variable (or in the same one!)
flights <- arrange(flights, desc(time_change))
# For practice, repeat the last 2 steps in a single statement using the pipe
# operator. You can clear your environmental variables to "reset" the data frame
flights <- flights %>% mutate(time_change = arr_delay + dep_delay) %>% arrange(desc(time_change))
# Make a histogram of the amount of time gained using the `hist()` function
time_change_hist <- hist(flights$time_change)
# On average, did flights gain or lose time?
# Note: use the `na.rm = TRUE` argument to remove NA values from your aggregation
mean(flights$time_change, na.rm = TRUE)
# Gained time
# Create a data.frame of flights headed to SeaTac ('SEA'), only including the
# origin, destination, and the "gain_in_air" column you just created
flights_seatac <- flights %>% select(origin,dest, time_change) %>% filter(dest == "SEA")
# On average, did flights to SeaTac gain or loose time?
mean(flights_seatac$time_change, na.rm = TRUE)
# Gained time
# Consider flights from JFK to SEA. What was the average, min, and max air time
# of those flights? Bonus: use pipes to answer this question in one statement
# (without showing any other data)!
flights_jfk_to_sea <- filter(flights, origin == "JFK", dest == "SEA") %>% summarize(avg_time = mean(air_time, na.rm = TRUE), min_time = min(air_time, na.rm = TRUE), max_time = max(air_time, na.rm = TRUE))
|
/chapter-11-exercises/exercise-4/exercise.R
|
permissive
|
gtjrrui/book-exercises
|
R
| false | false | 2,332 |
r
|
# Exercise 4: practicing with dplyr
# Install the `"nycflights13"` package. Load (`library()`) the package.
# You'll also need to load `dplyr`
install.packages("nycflights13")
library(nycflights13)
library(dplyr)
# The data frame `flights` should now be accessible to you.
# Use functions to inspect it: how many rows and columns does it have?
# What are the names of the columns?
# Use `??flights` to search for documentation on the data set (for what the
# columns represent)
nrow(flights)
ncol(flights)
colnames(flights)
?flights
# Use `dplyr` to give the data frame a new column that is the amount of time
# gained or lost while flying (that is: how much of the delay arriving occured
# during flight, as opposed to before departing).
flights <- mutate(flights, time_change = arr_delay + dep_delay)
# Use `dplyr` to sort your data frame in descending order by the column you just
# created. Remember to save this as a variable (or in the same one!)
flights <- arrange(flights, desc(time_change))
# For practice, repeat the last 2 steps in a single statement using the pipe
# operator. You can clear your environmental variables to "reset" the data frame
flights <- flights %>% mutate(time_change = arr_delay + dep_delay) %>% arrange(desc(time_change))
# Make a histogram of the amount of time gained using the `hist()` function
time_change_hist <- hist(flights$time_change)
# On average, did flights gain or lose time?
# Note: use the `na.rm = TRUE` argument to remove NA values from your aggregation
mean(flights$time_change, na.rm = TRUE)
# Gained time
# Create a data.frame of flights headed to SeaTac ('SEA'), only including the
# origin, destination, and the "gain_in_air" column you just created
flights_seatac <- flights %>% select(origin,dest, time_change) %>% filter(dest == "SEA")
# On average, did flights to SeaTac gain or loose time?
mean(flights_seatac$time_change, na.rm = TRUE)
# Gained time
# Consider flights from JFK to SEA. What was the average, min, and max air time
# of those flights? Bonus: use pipes to answer this question in one statement
# (without showing any other data)!
flights_jfk_to_sea <- filter(flights, origin == "JFK", dest == "SEA") %>% summarize(avg_time = mean(air_time, na.rm = TRUE), min_time = min(air_time, na.rm = TRUE), max_time = max(air_time, na.rm = TRUE))
|
#----------------------------------------------------------------------
# Purpose: This test exercises the RF model downloaded as java code
# for the dhisttest data set. It checks whether the generated
# java correctly splits categorical predictors into non-
# contiguous groups at each node.
#
# Notes: Assumes unix environment.
# curl, javac, java must be installed.
# java must be at least 1.6.
#----------------------------------------------------------------------
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../h2o-runit.R")
test.drf.javapredict.smallcat <-
function() {
#----------------------------------------------------------------------
# Parameters for the test.
#----------------------------------------------------------------------
training_file <- test_file <- locate("smalldata/gbm_test/alphabet_cattest.csv")
training_frame <- h2o.importFile(training_file)
test_frame <- h2o.importFile(test_file)
params <- list()
params$ntrees <- 100
params$max_depth <- 5
params$min_rows <- 10
params$x <- c("X")
params$y <- "y"
params$training_frame <- training_frame
#----------------------------------------------------------------------
# Run the test
#----------------------------------------------------------------------
doJavapredictTest("randomForest",test_file,test_frame,params)
}
doTest("RF test", test.drf.javapredict.smallcat)
|
/h2o-r/tests/testdir_javapredict/runit_DRF_javapredict_smallcat.R
|
permissive
|
tamseo/h2o-3
|
R
| false | false | 1,573 |
r
|
#----------------------------------------------------------------------
# Purpose: This test exercises the RF model downloaded as java code
# for the dhisttest data set. It checks whether the generated
# java correctly splits categorical predictors into non-
# contiguous groups at each node.
#
# Notes: Assumes unix environment.
# curl, javac, java must be installed.
# java must be at least 1.6.
#----------------------------------------------------------------------
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../h2o-runit.R")
test.drf.javapredict.smallcat <-
function() {
#----------------------------------------------------------------------
# Parameters for the test.
#----------------------------------------------------------------------
training_file <- test_file <- locate("smalldata/gbm_test/alphabet_cattest.csv")
training_frame <- h2o.importFile(training_file)
test_frame <- h2o.importFile(test_file)
params <- list()
params$ntrees <- 100
params$max_depth <- 5
params$min_rows <- 10
params$x <- c("X")
params$y <- "y"
params$training_frame <- training_frame
#----------------------------------------------------------------------
# Run the test
#----------------------------------------------------------------------
doJavapredictTest("randomForest",test_file,test_frame,params)
}
doTest("RF test", test.drf.javapredict.smallcat)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blocks.pop.R
\docType{data}
\name{blocks.pop}
\alias{blocks}
\alias{blocks.pop}
\alias{pop}
\title{pop: Over 11 million Census Bureau 2010 block-level values in a single data.frame}
\format{A vector with 11078297 elements (Census 2010 blocks). If all the related datasets are compiled as a blocks data.frame, they provide the following:
\itemize{
\item [1] "fips" (numeric - can be converted to character with leading zeroes via lead.zeroes(blocks$fips, 15)
\item [2] "pop" (integer) - Population count in Census 2010
\item [3] "urban" (logical)
\item [4] "lat" (numeric) - decimal degrees
\item [5] "lon" (numeric) - decimal degrees
\item [6] "area" (numeric) - units? Need to check. ****
}}
\source{
2010 Census from Census Bureau \url{http://www.census.gov}
obtained 2014/2015 compiled from multiple Census files of State-level population, area, internal point, or urban code. \cr \cr
Population was obtained from files such as tabblock2010_01_pophu.dbf from within \cr
http://www2.census.gov/geo/tiger/TIGER2010BLKPOPHU/tabblock2010_01_pophu.zip \cr
as linked from here: \url{http://www.census.gov/geo/maps-data/data/tiger-line.html} \cr \cr
See \url{http://www.census.gov/geo/maps-data/data/tiger.html} for various related data products.
See \url{http://tigerweb.geo.census.gov/tigerwebmain/TIGERweb_county_based_files.html} for html formatted versions.
The data in this package is based on those TIGER files
slightly modified to store FIPS as numeric field, pop as integer, and urban as logical, to save RAM.
}
\usage{
blocks <- get.blocks()
# or to load into memory just this one vector:
data(blocks.pop)
}
\description{
These data sets provide population count, size of block (area), latitude and longitude of internal point,
whether the block is urban, for each US block, based on Census Bureau Census 2010 data,
each of these fields as a single data file (RData), all sorted in the same order, enabling quick combination into a data.frame.
All States/DC are compiled into a single data.frame.
}
\seealso{
See \code{\link{get.blocks}} in \pkg{UScensus2010blocks} to assemble this and other fields into a blocks data.frame.
See the \code{\link[UScensus2010]{UScensus2010}} package and related datasets, some of which are on CRAN and others only here: \url{http://lakshmi.calit2.uci.edu/census2000/}
but note that package provides spatial data in a single file per State,
while this package provides non-spatial data (just lat/lon) that can quickly be assembled into a single large data.frame.
}
\keyword{datasets}
|
/man/blocks.pop.Rd
|
no_license
|
Geschwindigkeitsbegrenzung/UScensus2010blocks
|
R
| false | true | 2,657 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blocks.pop.R
\docType{data}
\name{blocks.pop}
\alias{blocks}
\alias{blocks.pop}
\alias{pop}
\title{pop: Over 11 million Census Bureau 2010 block-level values in a single data.frame}
\format{A vector with 11078297 elements (Census 2010 blocks). If all the related datasets are compiled as a blocks data.frame, they provide the following:
\itemize{
\item [1] "fips" (numeric - can be converted to character with leading zeroes via lead.zeroes(blocks$fips, 15)
\item [2] "pop" (integer) - Population count in Census 2010
\item [3] "urban" (logical)
\item [4] "lat" (numeric) - decimal degrees
\item [5] "lon" (numeric) - decimal degrees
\item [6] "area" (numeric) - units? Need to check. ****
}}
\source{
2010 Census from Census Bureau \url{http://www.census.gov}
obtained 2014/2015 compiled from multiple Census files of State-level population, area, internal point, or urban code. \cr \cr
Population was obtained from files such as tabblock2010_01_pophu.dbf from within \cr
http://www2.census.gov/geo/tiger/TIGER2010BLKPOPHU/tabblock2010_01_pophu.zip \cr
as linked from here: \url{http://www.census.gov/geo/maps-data/data/tiger-line.html} \cr \cr
See \url{http://www.census.gov/geo/maps-data/data/tiger.html} for various related data products.
See \url{http://tigerweb.geo.census.gov/tigerwebmain/TIGERweb_county_based_files.html} for html formatted versions.
The data in this package is based on those TIGER files
slightly modified to store FIPS as numeric field, pop as integer, and urban as logical, to save RAM.
}
\usage{
blocks <- get.blocks()
# or to load into memory just this one vector:
data(blocks.pop)
}
\description{
These data sets provide population count, size of block (area), latitude and longitude of internal point,
whether the block is urban, for each US block, based on Census Bureau Census 2010 data,
each of these fields as a single data file (RData), all sorted in the same order, enabling quick combination into a data.frame.
All States/DC are compiled into a single data.frame.
}
\seealso{
See \code{\link{get.blocks}} in \pkg{UScensus2010blocks} to assemble this and other fields into a blocks data.frame.
See the \code{\link[UScensus2010]{UScensus2010}} package and related datasets, some of which are on CRAN and others only here: \url{http://lakshmi.calit2.uci.edu/census2000/}
but note that package provides spatial data in a single file per State,
while this package provides non-spatial data (just lat/lon) that can quickly be assembled into a single large data.frame.
}
\keyword{datasets}
|
##### For moderation regime, take first 13 years as training and last 5 years as test =>
##### cutoff year is 2004 (~72% share in training set).
#####
##### For zirp regume, take first 5 years as training and last 2 years as test =>
##### cutoff year is 2013 (~71% share in training set).
# Import regime data (change to your local directory with RDS files)
moderation <- readRDS("C:/Users/DATA/moderation.rds")
zirp <- readRDS("C:/Users/DATA/zirp.rds")
# Import libraries
library(xgboost)
library(randomForest)
library(glmnet)
##### Split macroeconomic data by Fed policy regimes #####
# Split data from Great Moderation into training and test sets
train_moderation <- as.data.frame(moderation["/2003"])
test_moderation <- as.data.frame(moderation["2004/"])
# train_moderation <- as.data.frame(data["/2007"])
# test_moderation <- as.data.frame(data["2008/"])
# Split data from Zero Interest Rate Policy regime into training and test sets
train_zirp <- as.data.frame(zirp["/2013"])
test_zirp <- as.data.frame(zirp["2014/"])
# Calculate RMSE for market baselines
RMSE.baseline.moderation <- (mean(test_moderation$UST10Y - test_moderation$FF3Y)^2)^.5
RMSE.baseline.zirp <- (mean(test_zirp$UST10Y - test_zirp$FF3Y)^2)^.5
##### MACHINE LEARNING MODELS #####
baggedTrees <- function(training_data, testing_data){
### BAGGED TREES ###
# Train the bagged model
bag <- randomForest(UST10Y ~ ., data = subset(training_data, select = -c(FF3Y)),
mtry = ncol(training_data)-2, importance = TRUE)
importance(bag)
# Testing the fitted model
yhat.bag <- predict(bag, newdata = subset(testing_data, select = -c(FF3Y)))
# Calculate RMSE
RMSE.bag <- (mean(testing_data$UST10Y - yhat.bag)^2)^.5
return(list(bag, yhat.bag, RMSE.bag))
}
randomForests <- function(training_data, testing_data){
### RANDOM FOREST ###
# Train the bagged model
rf <- randomForest(UST10Y ~ ., data = subset(training_data, select = -c(FF3Y)),
mtry = round(sqrt(ncol(training_data)-2)), importance = TRUE)
importance(rf)
# Testing the fitted model
yhat.rf <- predict(rf, newdata = subset(testing_data, select = -c(FF3Y)))
# Calculate RMSE
RMSE.rf <- (mean(testing_data$UST10Y - yhat.rf)^2)^.5
return(list(rf, yhat.rf, RMSE.rf))
}
boostedTrees <- function(training_data, testing_data){
### BOOSTED TREES ####
# Create training and test matrices
train <- xgb.DMatrix(as.matrix(subset(training_data, select = -c(FF3Y, UST10Y))), label = training_data$UST10Y)
test <- xgb.DMatrix(as.matrix(subset(testing_data, select = -c(FF3Y, UST10Y))), label = testing_data$UST10Y)
# Establish default parameters
params <- list(booster = "gbtree", objective = "reg:linear", eta=0.3, gamma=0, max_depth=10,
min_child_weight=1, subsample=1, colsample_bytree=1)
# Cross-validate for optimal number of rounds
xgbcv <- xgb.cv(params = params, data = train, nrounds = 150, nfold = 10, showsd = T,
stratified = T, print_every_n = 10, early_stopping_rounds = 20, maximize = F)
best_nrounds <- xgbcv[8]$best_iteration
# Fit the boosted model using gradient descent
boost <- xgb.train(params = params, data = train, nrounds = best_nrounds,
watchlist = list(val = test,train = train), print_every_n = 10,
early_stopping_rounds = 20, maximize = F, eval_metric = "rmse")
# Generate predictions from boosted model
yhat.boost <- predict(boost, test)
# Calculate RMSE
RMSE.boost <- (mean(testing_data$UST10Y - yhat.boost)^2)^.5
# Generate feature importance matrix
mat.boost <- xgb.importance(model = boost,
feature_names = colnames(as.matrix(subset(training_data, select = -c(FF3Y, UST10Y)))))
return(list(boost, yhat.boost, RMSE.boost, mat.boost))
}
fitLASSO <- function(training_data, testing_data){
# Convert training and test sets
x.train <- model.matrix(UST10Y ~. - FF3Y, data = training_data)
y.train <- training_data[,"UST10Y"]
x.test <- model.matrix(UST10Y ~. - FF3Y, data = testing_data)
y.test <- testing_data[,'UST10Y']
# Define grid of lambdas to retrieve optimal lambda
grid <- 10^(seq(10, -10, length = 100))
# Fit model, cross-validate for optimal lambda
lasso.mod <- glmnet(x.train, y.train, alpha = 1, lambda = grid)
cv.out <- cv.glmnet(x.train, y.train , alpha = 1)
bestlam <- cv.out$lambda.min
# Predict and record RMSE
lasso.pred <- predict(lasso.mod, s = bestlam , newx = x.test)
RMSE.lasso.mod <- (mean(lasso.pred - y.test)^2)^.5
# Obtain summary of coefficients and clean up
lasso.mod.coef <- predict(lasso.mod ,type = "coefficients", s = bestlam)
lasso.coefs <- data.frame(rownames(as.matrix(lasso.mod.coef))[-2],
as.vector(lasso.mod.coef)[-2])
colnames(lasso.coefs) <- c("variables", "coefficients")
lasso.coefs <- lasso.coefs[order(-abs(lasso.coefs$coefficients)),]
return(list(lasso.mod, lasso.pred, RMSE.lasso.mod, lasso.coefs))
}
##### RUNNING THE MACHINE LEARNING MODELS #####
set.seed(1)
### Fit bagged, random forest, and boosted models on data within the Great Moderation regime ###
# Bagged Model
bagModel.moderation <- baggedTrees(train_moderation, test_moderation)
RMSE.bag.moderation <- bagModel.moderation[[3]]
cat(as.character(bagModel.moderation[[3]]), "is the RMSE for the bagged model over the Great Moderation.")
# Random Forest Model
rfModel.moderation <- randomForests(train_moderation, test_moderation)
RMSE.rf.moderation <- rfModel.moderation[[3]]
cat(as.character(rfModel.moderation[[3]]), "is the RMSE for the random forest model over the Great Moderation.")
# Boosted Model
boostModel.moderation <- boostedTrees(train_moderation, test_moderation)
RMSE.boost.moderation <- boostModel.moderation[[3]]
cat(as.character(boostModel.moderation[[3]]), "is the RMSE for the boosted model over the Great Moderation.")
# Lasso
lassoModel.moderation <- fitLASSO(train_moderation, test_moderation)
RMSE.lasso.moderation <- lassoModel.moderation[[3]]
cat(as.character(lassoModel.moderation[[3]]), "is the RMSE for the boosted model over the Great Moderation.")
### Fit bagged, random forest, and boosted models on data within the Zero Interest Rate Policy regime ###
# Bagged Model
bagModel.zirp <- baggedTrees(train_zirp, test_zirp)
RMSE.bag.zirp <- bagModel.zirp[[3]]
cat(as.character(bagModel.zirp[[3]]), "is the RMSE for the bagged model over the Zero Interest Rate Policy regime.")
# Random Forest Model
rfModel.zirp <- randomForests(train_zirp, test_zirp)
RMSE.rf.zirp <- rfModel.zirp[[3]]
cat(as.character(rfModel.zirp[[3]]), "is the RMSE for the random forest model over the Zero Interest Rate Policy regime.")
# Boosted Model
boostModel.zirp <- boostedTrees(train_zirp, test_zirp)
RMSE.boost.zirp <- boostModel.zirp[[3]]
cat(as.character(boostModel.zirp[[3]]), "is the RMSE for the boosted model over the Zero Interest Rate Policy regime.")
# Lasso
lassoModel.zirp <- fitLASSO(train_zirp, test_zirp)
RMSE.lasso.zirp <- lassoModel.zirp[[3]]
cat(as.character(lassoModel.zirp[[3]]), "is the RMSE for the boosted model over the Zero Interest Rate Policy regime.")
##### Plot forecast curves #####
forecastCurve <- function(testing_data, model){
plot(as.Date(rownames(testing_data)), testing_data$UST10Y,
xaxt = 'n', type = 'l', xlab = "", ylab = "", lwd = 2,
main = "Actual vs. Baseline vs. Forecast")
lines(as.Date(rownames(testing_data)), testing_data$FF3Y, xaxt = 'n', lwd = 2, col = 'brown') # Uncomment to see forward rates
lines(as.Date(rownames(testing_data)), model[[2]], xaxt = 'n', lwd = 2, col = 'orange')
axis(1, as.Date(rownames(testing_data)), format(as.Date(rownames(testing_data)), "%b %Y"), cex.axis = .8)
}
### Plot forecast curves for the Great Moderation ###
forecastCurve(test_moderation, bagModel.moderation)
{legend("bottomleft", c("UST10Y", "Baseline", "Bagged Trees"),
col = c("black", "brown", "orange"), lty=1, cex = .75)}
forecastCurve(test_moderation, rfModel.moderation) # Lowest RMSE: 0.14
{legend("bottomleft", c("UST10Y", "Baseline", "Random Forest"),
col = c("black", "brown", "orange"), lty=1, cex = .75)}
forecastCurve(test_moderation, boostModel.moderation)
{legend("bottomleft", c("UST10Y", "Baseline", "Boosted Trees"),
col = c("black", "brown", "orange"), lty=1, cex = .75)}
forecastCurve(test_moderation, lassoModel.moderation)
{legend("bottomleft", c("UST10Y", "Baseline", "LASSO"),
col = c("black", "brown", "orange"), lty=1, cex = .75)}
### Plot forecast curves for the Zero Interest Rate Policy Regime ###
forecastCurve(test_zirp, bagModel.zirp) # Lowest RMSE: 0.1
{legend("topright", c("UST10Y", "Baseline", "Bagged Trees"),
col = c("black", "brown", "orange"), lty=1, cex = .75)
}
forecastCurve(test_zirp, rfModel.zirp)
{legend("topright", c("UST10Y", "Baseline", "Random Forest"),
col = c("black", "brown", "orange"), lty=1, cex = .75)
}
forecastCurve(test_zirp, boostModel.zirp)
{legend("topright", c("UST10Y", "Baseline", "Boosted Trees"),
col = c("black", "brown", "orange"), lty=1, cex = .75)
}
forecastCurve(test_zirp, lassoModel.zirp)
{legend("topright", c("UST10Y", "Baseline", "LASSO"),
col = c("black", "brown", "orange"), lty=1, cex = .75)
}
##### Plot relative importance plots for the best model in each regime #####
varImpPlot(bagModel.moderation[[1]], main = "Relative Importance: Bagged Trees")
varImpPlot(rfModel.moderation[[1]], main = "Relative Importance: Random Forest")
xgb.plot.importance(boostModel.moderation[[4]])
title(main = "Relative Importance: Boosted Trees")
lassoModel.moderation[[4]]
varImpPlot(bagModel.zirp[[1]], main = "Relative Importance: Bagged Trees")
varImpPlot(rfModel.zirp[[1]], main = "Relative Importance: Random Forest")
xgb.plot.importance(boostModel.zirp[[4]])
title(main = "Relative Importance: Boosted Trees")
lassoModel.zirp[[4]]
|
/Programs/R/moderation_zirp.R
|
no_license
|
jiantinker/forecasting-US10Y
|
R
| false | false | 10,342 |
r
|
##### For moderation regime, take first 13 years as training and last 5 years as test =>
##### cutoff year is 2004 (~72% share in training set).
#####
##### For zirp regume, take first 5 years as training and last 2 years as test =>
##### cutoff year is 2013 (~71% share in training set).
# Import regime data (change to your local directory with RDS files)
moderation <- readRDS("C:/Users/DATA/moderation.rds")
zirp <- readRDS("C:/Users/DATA/zirp.rds")
# Import libraries
library(xgboost)
library(randomForest)
library(glmnet)
##### Split macroeconomic data by Fed policy regimes #####
# Split data from Great Moderation into training and test sets
train_moderation <- as.data.frame(moderation["/2003"])
test_moderation <- as.data.frame(moderation["2004/"])
# train_moderation <- as.data.frame(data["/2007"])
# test_moderation <- as.data.frame(data["2008/"])
# Split data from Zero Interest Rate Policy regime into training and test sets
train_zirp <- as.data.frame(zirp["/2013"])
test_zirp <- as.data.frame(zirp["2014/"])
# Calculate RMSE for market baselines
RMSE.baseline.moderation <- (mean(test_moderation$UST10Y - test_moderation$FF3Y)^2)^.5
RMSE.baseline.zirp <- (mean(test_zirp$UST10Y - test_zirp$FF3Y)^2)^.5
##### MACHINE LEARNING MODELS #####
baggedTrees <- function(training_data, testing_data){
### BAGGED TREES ###
# Train the bagged model
bag <- randomForest(UST10Y ~ ., data = subset(training_data, select = -c(FF3Y)),
mtry = ncol(training_data)-2, importance = TRUE)
importance(bag)
# Testing the fitted model
yhat.bag <- predict(bag, newdata = subset(testing_data, select = -c(FF3Y)))
# Calculate RMSE
RMSE.bag <- (mean(testing_data$UST10Y - yhat.bag)^2)^.5
return(list(bag, yhat.bag, RMSE.bag))
}
randomForests <- function(training_data, testing_data){
### RANDOM FOREST ###
# Train the bagged model
rf <- randomForest(UST10Y ~ ., data = subset(training_data, select = -c(FF3Y)),
mtry = round(sqrt(ncol(training_data)-2)), importance = TRUE)
importance(rf)
# Testing the fitted model
yhat.rf <- predict(rf, newdata = subset(testing_data, select = -c(FF3Y)))
# Calculate RMSE
RMSE.rf <- (mean(testing_data$UST10Y - yhat.rf)^2)^.5
return(list(rf, yhat.rf, RMSE.rf))
}
boostedTrees <- function(training_data, testing_data){
### BOOSTED TREES ####
# Create training and test matrices
train <- xgb.DMatrix(as.matrix(subset(training_data, select = -c(FF3Y, UST10Y))), label = training_data$UST10Y)
test <- xgb.DMatrix(as.matrix(subset(testing_data, select = -c(FF3Y, UST10Y))), label = testing_data$UST10Y)
# Establish default parameters
params <- list(booster = "gbtree", objective = "reg:linear", eta=0.3, gamma=0, max_depth=10,
min_child_weight=1, subsample=1, colsample_bytree=1)
# Cross-validate for optimal number of rounds
xgbcv <- xgb.cv(params = params, data = train, nrounds = 150, nfold = 10, showsd = T,
stratified = T, print_every_n = 10, early_stopping_rounds = 20, maximize = F)
best_nrounds <- xgbcv[8]$best_iteration
# Fit the boosted model using gradient descent
boost <- xgb.train(params = params, data = train, nrounds = best_nrounds,
watchlist = list(val = test,train = train), print_every_n = 10,
early_stopping_rounds = 20, maximize = F, eval_metric = "rmse")
# Generate predictions from boosted model
yhat.boost <- predict(boost, test)
# Calculate RMSE
RMSE.boost <- (mean(testing_data$UST10Y - yhat.boost)^2)^.5
# Generate feature importance matrix
mat.boost <- xgb.importance(model = boost,
feature_names = colnames(as.matrix(subset(training_data, select = -c(FF3Y, UST10Y)))))
return(list(boost, yhat.boost, RMSE.boost, mat.boost))
}
fitLASSO <- function(training_data, testing_data){
# Convert training and test sets
x.train <- model.matrix(UST10Y ~. - FF3Y, data = training_data)
y.train <- training_data[,"UST10Y"]
x.test <- model.matrix(UST10Y ~. - FF3Y, data = testing_data)
y.test <- testing_data[,'UST10Y']
# Define grid of lambdas to retrieve optimal lambda
grid <- 10^(seq(10, -10, length = 100))
# Fit model, cross-validate for optimal lambda
lasso.mod <- glmnet(x.train, y.train, alpha = 1, lambda = grid)
cv.out <- cv.glmnet(x.train, y.train , alpha = 1)
bestlam <- cv.out$lambda.min
# Predict and record RMSE
lasso.pred <- predict(lasso.mod, s = bestlam , newx = x.test)
RMSE.lasso.mod <- (mean(lasso.pred - y.test)^2)^.5
# Obtain summary of coefficients and clean up
lasso.mod.coef <- predict(lasso.mod ,type = "coefficients", s = bestlam)
lasso.coefs <- data.frame(rownames(as.matrix(lasso.mod.coef))[-2],
as.vector(lasso.mod.coef)[-2])
colnames(lasso.coefs) <- c("variables", "coefficients")
lasso.coefs <- lasso.coefs[order(-abs(lasso.coefs$coefficients)),]
return(list(lasso.mod, lasso.pred, RMSE.lasso.mod, lasso.coefs))
}
##### RUNNING THE MACHINE LEARNING MODELS #####
set.seed(1)
### Fit bagged, random forest, and boosted models on data within the Great Moderation regime ###
# Bagged Model
bagModel.moderation <- baggedTrees(train_moderation, test_moderation)
RMSE.bag.moderation <- bagModel.moderation[[3]]
cat(as.character(bagModel.moderation[[3]]), "is the RMSE for the bagged model over the Great Moderation.")
# Random Forest Model
rfModel.moderation <- randomForests(train_moderation, test_moderation)
RMSE.rf.moderation <- rfModel.moderation[[3]]
cat(as.character(rfModel.moderation[[3]]), "is the RMSE for the random forest model over the Great Moderation.")
# Boosted Model
boostModel.moderation <- boostedTrees(train_moderation, test_moderation)
RMSE.boost.moderation <- boostModel.moderation[[3]]
cat(as.character(boostModel.moderation[[3]]), "is the RMSE for the boosted model over the Great Moderation.")
# Lasso
lassoModel.moderation <- fitLASSO(train_moderation, test_moderation)
RMSE.lasso.moderation <- lassoModel.moderation[[3]]
cat(as.character(lassoModel.moderation[[3]]), "is the RMSE for the boosted model over the Great Moderation.")
### Fit bagged, random forest, and boosted models on data within the Zero Interest Rate Policy regime ###
# Bagged Model
bagModel.zirp <- baggedTrees(train_zirp, test_zirp)
RMSE.bag.zirp <- bagModel.zirp[[3]]
cat(as.character(bagModel.zirp[[3]]), "is the RMSE for the bagged model over the Zero Interest Rate Policy regime.")
# Random Forest Model
rfModel.zirp <- randomForests(train_zirp, test_zirp)
RMSE.rf.zirp <- rfModel.zirp[[3]]
cat(as.character(rfModel.zirp[[3]]), "is the RMSE for the random forest model over the Zero Interest Rate Policy regime.")
# Boosted Model
boostModel.zirp <- boostedTrees(train_zirp, test_zirp)
RMSE.boost.zirp <- boostModel.zirp[[3]]
cat(as.character(boostModel.zirp[[3]]), "is the RMSE for the boosted model over the Zero Interest Rate Policy regime.")
# Lasso
lassoModel.zirp <- fitLASSO(train_zirp, test_zirp)
RMSE.lasso.zirp <- lassoModel.zirp[[3]]
cat(as.character(lassoModel.zirp[[3]]), "is the RMSE for the boosted model over the Zero Interest Rate Policy regime.")
##### Plot forecast curves #####
forecastCurve <- function(testing_data, model){
plot(as.Date(rownames(testing_data)), testing_data$UST10Y,
xaxt = 'n', type = 'l', xlab = "", ylab = "", lwd = 2,
main = "Actual vs. Baseline vs. Forecast")
lines(as.Date(rownames(testing_data)), testing_data$FF3Y, xaxt = 'n', lwd = 2, col = 'brown') # Uncomment to see forward rates
lines(as.Date(rownames(testing_data)), model[[2]], xaxt = 'n', lwd = 2, col = 'orange')
axis(1, as.Date(rownames(testing_data)), format(as.Date(rownames(testing_data)), "%b %Y"), cex.axis = .8)
}
### Plot forecast curves for the Great Moderation ###
forecastCurve(test_moderation, bagModel.moderation)
{legend("bottomleft", c("UST10Y", "Baseline", "Bagged Trees"),
col = c("black", "brown", "orange"), lty=1, cex = .75)}
forecastCurve(test_moderation, rfModel.moderation) # Lowest RMSE: 0.14
{legend("bottomleft", c("UST10Y", "Baseline", "Random Forest"),
col = c("black", "brown", "orange"), lty=1, cex = .75)}
forecastCurve(test_moderation, boostModel.moderation)
{legend("bottomleft", c("UST10Y", "Baseline", "Boosted Trees"),
col = c("black", "brown", "orange"), lty=1, cex = .75)}
forecastCurve(test_moderation, lassoModel.moderation)
{legend("bottomleft", c("UST10Y", "Baseline", "LASSO"),
col = c("black", "brown", "orange"), lty=1, cex = .75)}
### Plot forecast curves for the Zero Interest Rate Policy Regime ###
forecastCurve(test_zirp, bagModel.zirp) # Lowest RMSE: 0.1
{legend("topright", c("UST10Y", "Baseline", "Bagged Trees"),
col = c("black", "brown", "orange"), lty=1, cex = .75)
}
forecastCurve(test_zirp, rfModel.zirp)
{legend("topright", c("UST10Y", "Baseline", "Random Forest"),
col = c("black", "brown", "orange"), lty=1, cex = .75)
}
forecastCurve(test_zirp, boostModel.zirp)
{legend("topright", c("UST10Y", "Baseline", "Boosted Trees"),
col = c("black", "brown", "orange"), lty=1, cex = .75)
}
forecastCurve(test_zirp, lassoModel.zirp)
{legend("topright", c("UST10Y", "Baseline", "LASSO"),
col = c("black", "brown", "orange"), lty=1, cex = .75)
}
##### Plot relative importance plots for the best model in each regime #####
varImpPlot(bagModel.moderation[[1]], main = "Relative Importance: Bagged Trees")
varImpPlot(rfModel.moderation[[1]], main = "Relative Importance: Random Forest")
xgb.plot.importance(boostModel.moderation[[4]])
title(main = "Relative Importance: Boosted Trees")
lassoModel.moderation[[4]]
varImpPlot(bagModel.zirp[[1]], main = "Relative Importance: Bagged Trees")
varImpPlot(rfModel.zirp[[1]], main = "Relative Importance: Random Forest")
xgb.plot.importance(boostModel.zirp[[4]])
title(main = "Relative Importance: Boosted Trees")
lassoModel.zirp[[4]]
|
# cut interval: n groups with equal range
# cut number: n groups with equal observations
# cut width: n groups with width
|
/helpers.R
|
no_license
|
aravindhebbali/ggplot_xplorerr
|
R
| false | false | 123 |
r
|
# cut interval: n groups with equal range
# cut number: n groups with equal observations
# cut width: n groups with width
|
# Defines shiny bindings
shiny_input_bindings <- new.env(parent = emptyenv())
list2env(list(
'shiny.textInput' = list(
binding = "shiny.textInput",
update_function = "shiny::updateTextInput"
),
'shiny.textAreaInput' = list(
binding = "shiny.textareaInput",
update_function = "shiny::updateTextAreaInput"
),
'shiny.passwordInput' = list(
binding = "shiny.passwordInput",
update_function = "shiny::updateTextInput"
),
'shiny.numericInput' = list(
binding = "shiny.numberInput",
update_function = "shiny::updateNumericInput"
),
'shiny.checkboxInput' = list(
binding = "shiny.checkboxInput",
update_function = "shiny::updateCheckboxInput"
),
'shiny.sliderInput' = list(
binding = "shiny.sliderInput",
update_function = "shiny::updateSliderInput"
),
'shiny.dateInput' = list(
binding = "shiny.dateInput",
update_function = "shiny::updateDateInput"
),
'shiny.dateRangeInput' = list(
binding = "shiny.dateRangeInput",
update_function = 'shiny::updateDateRangeInput'
),
'shiny.selectInput' = list(
binding = "shiny.selectInput",
update_function = 'shiny::updateSelectInput'
),
'shiny.selectizeInput' = list(
binding = "shiny.selectInput",
update_function = 'shiny::updateSelectizeInput'
),
'shiny.varSelectInput' = list(
binding = "shiny.selectInput",
update_function = 'shiny::updateVarSelectInput'
),
'shiny.varSelectizeInput' = list(
binding = "shiny.selectInput",
update_function = 'shiny::updateVarSelectizeInput'
),
'shiny.radioButtons' = list(
binding = "shiny.radioInput",
update_function = 'shiny::updateRadioButtons'
),
'shiny.checkboxGroupInput' = list(
binding = "shiny.checkboxGroupInput",
update_function = 'shiny::updateCheckboxGroupInput'
),
'shiny.actionButton' = list(
binding = "shiny.actionButtonInput",
update_function = 'shiny::updateActionButton'
),
'shiny.actionLink' = list(
binding = "shiny.actionButtonInput",
update_function = 'shiny::updateActionButton'
),
'shiny.fileInput' = list(
binding = "shiny.fileInputBinding",
update_function = NULL
),
'dipsaus.compoundInput2' = list(
binding = "dipsaus.compoundInput2",
update_function = 'dipsaus.updateCompoundInput2'
),
'dipsaus.actionButtonStyled' = list(
binding = "shiny.actionButtonInput",
update_function = 'dipsaus.updateActionButtonStyled'
)
), envir = shiny_input_bindings)
#' Register customized input to enable support by compound input
#' @param fname character, function name, such as \code{"textInput"}
#' @param pkg character, package name, like \code{"shiny"}
#' @param shiny_binding character, 'JavaScript' binding name.See examples
#' @param update_function character, update function such as \code{"shiny::textInput"}
#' @return a list of binding functions, one is `JavaScript` object key in
#' \code{Shiny.inputBindings}, the other is `shiny` update function in R end.
#' @examples
#'
#' # register shiny textInput
#' registerInputBinding('textInput', 'shiny',
#' 'shiny.textInput', 'shiny::updateTextInput')
#'
#' # Register shiny actionLink
#' # In "Shiny.inputbindings", the binding name is "shiny.actionButtonInput",
#' # Shiny update function is "shiny::updateActionButton"
#' registerInputBinding('actionLink', 'shiny',
#' 'shiny.actionButtonInput', 'shiny::updateActionButton')
#'
#' @export
registerInputBinding <- function(fname, pkg, shiny_binding, update_function = NULL){
ns <- asNamespace(pkg)
if( !is.function(ns[[fname]]) ){
cat2(sprintf('%s::%s is not a function', pkg, fname), level = 'FATAL')
}
binding <- list(
binding = shiny_binding,
update_function = update_function
)
shiny_input_bindings[[sprintf('%s.%s', pkg, fname)]] <- binding
invisible(binding)
}
#' Obtain registered input bindings
#' @param fname input function name, character or quoted expression
#' such as \code{'shiny::textInput'} or \code{numericInput}.
#' @param pkg (optional), name of package
#' @param envir environment to evaluate \code{fname} if \code{pkg} is not provided
#'
#' @return a list containing: 1. `JavaScript` input binding name; 2. `R` updating function name
#'
#' @examples
#'
#' library(dipsaus)
#'
#' # Most recommended usage
#' getInputBinding('compoundInput2', pkg = 'dipsaus')
#'
#' # Other usages
#' getInputBinding('shiny::textInput')
#'
#'
#' getInputBinding(shiny::textInput)
#'
#' getInputBinding(compoundInput2, pkg = 'dipsaus')
#'
#' # Bad usage, raise errors in some cases
#' \dontrun{
#' ## You need to library(shiny), or set envir=asNamespace('shiny'), or pkg='shiny'
#' getInputBinding('textInput')
#' getInputBinding(textInput) # also fails
#'
#' ## Always fails
#' getInputBinding('dipsaus::compoundInput2', pkg = 'dipsaus')
#' }
#'
#' @export
getInputBinding <- function(fname, pkg = NULL, envir = parent.frame()){
if( length(pkg) != 1 || !is.character(pkg) ){
# need to get package from fname
if(is.character(fname)){
fname <- str2lang(fname)
}
fname_quoted <- substitute(fname)
if( !is.language(fname) ){
fname <- fname_quoted
}
# now we have quoted fname
if(is.call(fname) && all(as.character(fname[[1]]) %in% c('::', ':::'))){
pkg <- deparse(fname[[2]])
fname <- deparse(fname[[3]])
}else{
f <- eval(fname, envir = envir)
fenv <- environment(f)
if( isNamespace(fenv) ){
pkg <- fenv$.__NAMESPACE__.$spec[['name']]
}
fname <- deparse(fname)
}
}else{
fname_quoted <- substitute(fname)
if(!is.character(fname)){
fname <- deparse(fname_quoted)
}
}
# Check whether fname exists
if(is.null(pkg)){
cat2(sprintf('Cannot find function %s in any package loaded from envir. Please provide package name', fname), level = 'FATAL')
}
ns <- asNamespace(pkg)
if(!is.function(ns[[fname]])){
cat2(sprintf('Cannot find function %s in namespace %s', fname, pkg), level = 'FATAL')
}
binding_key <- sprintf('%s.%s', pkg, fname)
binding_re <- shiny_input_bindings[[ binding_key ]]
if(is.null(binding_re)){
cat2(sprintf('Cannot find input binding for %s. Please use\n\tdipsaus::registerInputBinding(%s, %s, shiny_binding, update_function = NULL)\n to register this input type.', binding_key, fname, pkg), level = 'FATAL')
}
binding_re$call_function <- sprintf('%s::%s', pkg, fname)
binding_re
}
#' Detect whether 'Shiny' is running
#' @return logical, true if current shiny context is active
#' @export
shiny_is_running <- function(){
if(requireNamespace('shiny', quietly = TRUE)){
return(isTRUE(!is.null(shiny::getDefaultReactiveDomain())))
}
return(FALSE)
}
#' Store/Get key-value pairs in 'shiny' session
#' @description If key is missing, it'll be created, otherwise ignored or
#' overwritten.
#' @param session 'Shiny' session
#' @param key character, key to store
#' @param val value to store
#' @param override if key exists, whether to overwrite its value
#' @return If session is shiny session, returns current value stored in
#' session, otherwise returns \code{NULL}
#' @export
add_to_session <- function(
session, key = 'rave_id',
val = paste(sample(c(letters, LETTERS, 0:9), 20), collapse = ''),
override = FALSE
){
if(missing(session)){
if(requireNamespace('shiny', quietly = TRUE)){
session <- shiny::getDefaultReactiveDomain()
} else {
stop('Please specify session')
}
}
if(!is.null(session)){
if(override || !exists(key, envir = session$userData)){
assign(key, val, envir = session$userData)
}
return(get(key, envir = session$userData))
}
return(NULL)
}
|
/dipsaus/R/shiny-inputbindings.R
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false | false | 7,708 |
r
|
# Defines shiny bindings
shiny_input_bindings <- new.env(parent = emptyenv())
list2env(list(
'shiny.textInput' = list(
binding = "shiny.textInput",
update_function = "shiny::updateTextInput"
),
'shiny.textAreaInput' = list(
binding = "shiny.textareaInput",
update_function = "shiny::updateTextAreaInput"
),
'shiny.passwordInput' = list(
binding = "shiny.passwordInput",
update_function = "shiny::updateTextInput"
),
'shiny.numericInput' = list(
binding = "shiny.numberInput",
update_function = "shiny::updateNumericInput"
),
'shiny.checkboxInput' = list(
binding = "shiny.checkboxInput",
update_function = "shiny::updateCheckboxInput"
),
'shiny.sliderInput' = list(
binding = "shiny.sliderInput",
update_function = "shiny::updateSliderInput"
),
'shiny.dateInput' = list(
binding = "shiny.dateInput",
update_function = "shiny::updateDateInput"
),
'shiny.dateRangeInput' = list(
binding = "shiny.dateRangeInput",
update_function = 'shiny::updateDateRangeInput'
),
'shiny.selectInput' = list(
binding = "shiny.selectInput",
update_function = 'shiny::updateSelectInput'
),
'shiny.selectizeInput' = list(
binding = "shiny.selectInput",
update_function = 'shiny::updateSelectizeInput'
),
'shiny.varSelectInput' = list(
binding = "shiny.selectInput",
update_function = 'shiny::updateVarSelectInput'
),
'shiny.varSelectizeInput' = list(
binding = "shiny.selectInput",
update_function = 'shiny::updateVarSelectizeInput'
),
'shiny.radioButtons' = list(
binding = "shiny.radioInput",
update_function = 'shiny::updateRadioButtons'
),
'shiny.checkboxGroupInput' = list(
binding = "shiny.checkboxGroupInput",
update_function = 'shiny::updateCheckboxGroupInput'
),
'shiny.actionButton' = list(
binding = "shiny.actionButtonInput",
update_function = 'shiny::updateActionButton'
),
'shiny.actionLink' = list(
binding = "shiny.actionButtonInput",
update_function = 'shiny::updateActionButton'
),
'shiny.fileInput' = list(
binding = "shiny.fileInputBinding",
update_function = NULL
),
'dipsaus.compoundInput2' = list(
binding = "dipsaus.compoundInput2",
update_function = 'dipsaus.updateCompoundInput2'
),
'dipsaus.actionButtonStyled' = list(
binding = "shiny.actionButtonInput",
update_function = 'dipsaus.updateActionButtonStyled'
)
), envir = shiny_input_bindings)
#' Register customized input to enable support by compound input
#' @param fname character, function name, such as \code{"textInput"}
#' @param pkg character, package name, like \code{"shiny"}
#' @param shiny_binding character, 'JavaScript' binding name.See examples
#' @param update_function character, update function such as \code{"shiny::textInput"}
#' @return a list of binding functions, one is `JavaScript` object key in
#' \code{Shiny.inputBindings}, the other is `shiny` update function in R end.
#' @examples
#'
#' # register shiny textInput
#' registerInputBinding('textInput', 'shiny',
#' 'shiny.textInput', 'shiny::updateTextInput')
#'
#' # Register shiny actionLink
#' # In "Shiny.inputbindings", the binding name is "shiny.actionButtonInput",
#' # Shiny update function is "shiny::updateActionButton"
#' registerInputBinding('actionLink', 'shiny',
#' 'shiny.actionButtonInput', 'shiny::updateActionButton')
#'
#' @export
registerInputBinding <- function(fname, pkg, shiny_binding, update_function = NULL){
ns <- asNamespace(pkg)
if( !is.function(ns[[fname]]) ){
cat2(sprintf('%s::%s is not a function', pkg, fname), level = 'FATAL')
}
binding <- list(
binding = shiny_binding,
update_function = update_function
)
shiny_input_bindings[[sprintf('%s.%s', pkg, fname)]] <- binding
invisible(binding)
}
#' Obtain registered input bindings
#' @param fname input function name, character or quoted expression
#' such as \code{'shiny::textInput'} or \code{numericInput}.
#' @param pkg (optional), name of package
#' @param envir environment to evaluate \code{fname} if \code{pkg} is not provided
#'
#' @return a list containing: 1. `JavaScript` input binding name; 2. `R` updating function name
#'
#' @examples
#'
#' library(dipsaus)
#'
#' # Most recommended usage
#' getInputBinding('compoundInput2', pkg = 'dipsaus')
#'
#' # Other usages
#' getInputBinding('shiny::textInput')
#'
#'
#' getInputBinding(shiny::textInput)
#'
#' getInputBinding(compoundInput2, pkg = 'dipsaus')
#'
#' # Bad usage, raise errors in some cases
#' \dontrun{
#' ## You need to library(shiny), or set envir=asNamespace('shiny'), or pkg='shiny'
#' getInputBinding('textInput')
#' getInputBinding(textInput) # also fails
#'
#' ## Always fails
#' getInputBinding('dipsaus::compoundInput2', pkg = 'dipsaus')
#' }
#'
#' @export
getInputBinding <- function(fname, pkg = NULL, envir = parent.frame()){
if( length(pkg) != 1 || !is.character(pkg) ){
# need to get package from fname
if(is.character(fname)){
fname <- str2lang(fname)
}
fname_quoted <- substitute(fname)
if( !is.language(fname) ){
fname <- fname_quoted
}
# now we have quoted fname
if(is.call(fname) && all(as.character(fname[[1]]) %in% c('::', ':::'))){
pkg <- deparse(fname[[2]])
fname <- deparse(fname[[3]])
}else{
f <- eval(fname, envir = envir)
fenv <- environment(f)
if( isNamespace(fenv) ){
pkg <- fenv$.__NAMESPACE__.$spec[['name']]
}
fname <- deparse(fname)
}
}else{
fname_quoted <- substitute(fname)
if(!is.character(fname)){
fname <- deparse(fname_quoted)
}
}
# Check whether fname exists
if(is.null(pkg)){
cat2(sprintf('Cannot find function %s in any package loaded from envir. Please provide package name', fname), level = 'FATAL')
}
ns <- asNamespace(pkg)
if(!is.function(ns[[fname]])){
cat2(sprintf('Cannot find function %s in namespace %s', fname, pkg), level = 'FATAL')
}
binding_key <- sprintf('%s.%s', pkg, fname)
binding_re <- shiny_input_bindings[[ binding_key ]]
if(is.null(binding_re)){
cat2(sprintf('Cannot find input binding for %s. Please use\n\tdipsaus::registerInputBinding(%s, %s, shiny_binding, update_function = NULL)\n to register this input type.', binding_key, fname, pkg), level = 'FATAL')
}
binding_re$call_function <- sprintf('%s::%s', pkg, fname)
binding_re
}
#' Detect whether 'Shiny' is running
#' @return logical, true if current shiny context is active
#' @export
shiny_is_running <- function(){
if(requireNamespace('shiny', quietly = TRUE)){
return(isTRUE(!is.null(shiny::getDefaultReactiveDomain())))
}
return(FALSE)
}
#' Store/Get key-value pairs in 'shiny' session
#' @description If key is missing, it'll be created, otherwise ignored or
#' overwritten.
#' @param session 'Shiny' session
#' @param key character, key to store
#' @param val value to store
#' @param override if key exists, whether to overwrite its value
#' @return If session is shiny session, returns current value stored in
#' session, otherwise returns \code{NULL}
#' @export
add_to_session <- function(
session, key = 'rave_id',
val = paste(sample(c(letters, LETTERS, 0:9), 20), collapse = ''),
override = FALSE
){
if(missing(session)){
if(requireNamespace('shiny', quietly = TRUE)){
session <- shiny::getDefaultReactiveDomain()
} else {
stop('Please specify session')
}
}
if(!is.null(session)){
if(override || !exists(key, envir = session$userData)){
assign(key, val, envir = session$userData)
}
return(get(key, envir = session$userData))
}
return(NULL)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/svm_code.R
\name{plotperf}
\alias{plotperf}
\title{Performance plots for the approximation of an SVM model.}
\usage{
plotperf(mymodel, mydata, indy, mytestdata, type = "all", filename)
}
\arguments{
\item{mymodel}{Element of class \code{ksvm}.}
\item{mydata}{Data on which \code{mymodel} was trained on.}
\item{indy}{Column number of the outcome in \code{mydata}.}
\item{mytestdata}{Data on which to evaluate \code{mymodel}. (Optional)}
\item{type}{Type of performance plot (c="all","lp","probs","outcomes","contributions",
"ROC","corrplot"). See details for more information.}
\item{filename}{Name of the resulting graph.}
}
\description{
Generate performance plots for the approximation of an SVM model.
}
\details{
Different types of plots are possible. When \code{type}="all", all the options are generated. When \code{type}="lp",
the latent variables of the approximation and the original SVM model are plotted against eachother. When \code{type}="probs"
the estimated probabilities of the approximation and the SVM model are plotted against eachother. When \code{type}="outcomes" a bubble
plot indicating the agreement between the approximation and the SVM model is generated. When \code{type}="contributions", the range of
the contributions within the approximation, the range of the rest term and the range of the latent variable of the SVM model are represented
by means of boxplots. All of these are shifted to have a median equal to zero. When \code{type}="ROC", ROC curves for the approximation and
the SVM model are plotted. When mytestdata is non-empty, ROC curves for the test set are also provided.
}
\examples{
#### Support Vector Machine classifier
library(kernlab)
data(iris)
levels(iris$Species)[levels(iris$Species)=="setosa"] <- "other"
levels(iris$Species)[levels(iris$Species)=="virginica"] <- "other"
names(iris)=c("SL","SW","PL","PW","Species")
# good model
model <-ksvm(Species ~ ., data = iris,prob.model=TRUE,kpar=list(0.03),C=10)
# bad model
model2 <-ksvm(Species ~ ., data = iris,prob.model=TRUE,kpar=list(10),C=10)
# plot latent variables of approximation and SVM
plotperf(model,iris,indy=5,type="lp",filename="iris")
plotperf(model2,iris,indy=5,type="lp",filename="iris2")
# plot contributions of approximation and SVM
# good model: rest term is small in comparison with other contributions and lpmodel
# (latent variable of SVM)
plotperf(model,iris,indy=5,type="contributions",filename="iris")
# bad model: rest term is large in comparison with other contributions and lpmodel
# (latent variable of SVM)
plotperf(model2,iris,indy=5,type="contributions",filename="iris2")
# plot latent variables of approximation and SVM
plotperf(model,iris,indy=5,type="outcomes",filename="iris")
plotperf(model2,iris,indy=5,type="outcomes",filename="iris2")
}
\author{
Vanya Van Belle
}
\references{
Van Belle V., Van Calster B., Suykens J.A.K., Van Huffel S. and Lisboa P., \emph{Explaining support vector machines: a color based nomogram}, Internal Report 16-27, ESAT-Stadius, KU Leuven (Leuven, Belgium), 2016
}
|
/man/plotperf.Rd
|
no_license
|
mariakalimeri/VRPM
|
R
| false | true | 3,198 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/svm_code.R
\name{plotperf}
\alias{plotperf}
\title{Performance plots for the approximation of an SVM model.}
\usage{
plotperf(mymodel, mydata, indy, mytestdata, type = "all", filename)
}
\arguments{
\item{mymodel}{Element of class \code{ksvm}.}
\item{mydata}{Data on which \code{mymodel} was trained on.}
\item{indy}{Column number of the outcome in \code{mydata}.}
\item{mytestdata}{Data on which to evaluate \code{mymodel}. (Optional)}
\item{type}{Type of performance plot (c="all","lp","probs","outcomes","contributions",
"ROC","corrplot"). See details for more information.}
\item{filename}{Name of the resulting graph.}
}
\description{
Generate performance plots for the approximation of an SVM model.
}
\details{
Different types of plots are possible. When \code{type}="all", all the options are generated. When \code{type}="lp",
the latent variables of the approximation and the original SVM model are plotted against eachother. When \code{type}="probs"
the estimated probabilities of the approximation and the SVM model are plotted against eachother. When \code{type}="outcomes" a bubble
plot indicating the agreement between the approximation and the SVM model is generated. When \code{type}="contributions", the range of
the contributions within the approximation, the range of the rest term and the range of the latent variable of the SVM model are represented
by means of boxplots. All of these are shifted to have a median equal to zero. When \code{type}="ROC", ROC curves for the approximation and
the SVM model are plotted. When mytestdata is non-empty, ROC curves for the test set are also provided.
}
\examples{
#### Support Vector Machine classifier
library(kernlab)
data(iris)
levels(iris$Species)[levels(iris$Species)=="setosa"] <- "other"
levels(iris$Species)[levels(iris$Species)=="virginica"] <- "other"
names(iris)=c("SL","SW","PL","PW","Species")
# good model
model <-ksvm(Species ~ ., data = iris,prob.model=TRUE,kpar=list(0.03),C=10)
# bad model
model2 <-ksvm(Species ~ ., data = iris,prob.model=TRUE,kpar=list(10),C=10)
# plot latent variables of approximation and SVM
plotperf(model,iris,indy=5,type="lp",filename="iris")
plotperf(model2,iris,indy=5,type="lp",filename="iris2")
# plot contributions of approximation and SVM
# good model: rest term is small in comparison with other contributions and lpmodel
# (latent variable of SVM)
plotperf(model,iris,indy=5,type="contributions",filename="iris")
# bad model: rest term is large in comparison with other contributions and lpmodel
# (latent variable of SVM)
plotperf(model2,iris,indy=5,type="contributions",filename="iris2")
# plot latent variables of approximation and SVM
plotperf(model,iris,indy=5,type="outcomes",filename="iris")
plotperf(model2,iris,indy=5,type="outcomes",filename="iris2")
}
\author{
Vanya Van Belle
}
\references{
Van Belle V., Van Calster B., Suykens J.A.K., Van Huffel S. and Lisboa P., \emph{Explaining support vector machines: a color based nomogram}, Internal Report 16-27, ESAT-Stadius, KU Leuven (Leuven, Belgium), 2016
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parameters.R
\name{parameters}
\alias{parameters}
\title{Parameters}
\usage{
parameters(...)
}
\arguments{
\item{...}{\code{\link{Parameter-class}} objects.}
}
\value{
\code{\link{Parameters-class}} object.
}
\description{
Create a new collection of \code{Parameter} objects.
}
\examples{
# create two Parameter objects
p1 <- binary_parameter("parameter one", 1)
print(p1)
p2 <- numeric_parameter("parameter two", 5)
print(p2)
# store Parameter objects in a Parameters object
p <- parameters(p1, p2)
print(p)
}
\seealso{
\code{\link{array_parameters}}, \code{\link{scalar_parameters}}.
}
|
/man/parameters.Rd
|
no_license
|
prioritizr/prioritizrutils
|
R
| false | true | 670 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parameters.R
\name{parameters}
\alias{parameters}
\title{Parameters}
\usage{
parameters(...)
}
\arguments{
\item{...}{\code{\link{Parameter-class}} objects.}
}
\value{
\code{\link{Parameters-class}} object.
}
\description{
Create a new collection of \code{Parameter} objects.
}
\examples{
# create two Parameter objects
p1 <- binary_parameter("parameter one", 1)
print(p1)
p2 <- numeric_parameter("parameter two", 5)
print(p2)
# store Parameter objects in a Parameters object
p <- parameters(p1, p2)
print(p)
}
\seealso{
\code{\link{array_parameters}}, \code{\link{scalar_parameters}}.
}
|
#' Check species names against spp2exclude and spp2include
#'
#' Check species names in a data frame. First, rows whose species name matches any in spp2exclude
#' are automatically removed from the data frame. Then remaining rows are checked
#' against spp2include: if any species name does not match spp2include, an error
#' will report which these cases are so that they can be revised.
#'
#' @param df Data frame with a column named species.
#' @param wide Logical. Data frame in wide format? Only TRUE for raw cover data (default is FALSE).
#' It wide = TRUE, species are assumed to start at the 5th column (until the end of the data frame).
#'
#' @return a data frame, possibly with some rows removed (those identified as species to exclude).
#' @export
#' @import dplyr
#'
check_spnames <- function(df, wide = FALSE) {
if (wide) {
df.clean <- suppressWarnings(dplyr::select(df, -one_of(spp2exclude)))
spnames <- names(df.clean)[5:ncol(df.clean)]
} else {
df.clean <- dplyr::filter(df, ! species %in% spp2exclude)
spnames <- unique(df.clean$species)
}
if (!all(spnames %in% spp2include)) {
stop("\n\n", paste(spnames[!spnames %in% spp2include], collapse = "\n"),
"\n\ndo not match species names in spp2include.csv.
\nPlease check species names in data frame.")
}
return(df.clean)
}
|
/R/check_spnames.R
|
permissive
|
Pakillo/exclosures-Almoraima
|
R
| false | false | 1,347 |
r
|
#' Check species names against spp2exclude and spp2include
#'
#' Check species names in a data frame. First, rows whose species name matches any in spp2exclude
#' are automatically removed from the data frame. Then remaining rows are checked
#' against spp2include: if any species name does not match spp2include, an error
#' will report which these cases are so that they can be revised.
#'
#' @param df Data frame with a column named species.
#' @param wide Logical. Data frame in wide format? Only TRUE for raw cover data (default is FALSE).
#' It wide = TRUE, species are assumed to start at the 5th column (until the end of the data frame).
#'
#' @return a data frame, possibly with some rows removed (those identified as species to exclude).
#' @export
#' @import dplyr
#'
check_spnames <- function(df, wide = FALSE) {
if (wide) {
df.clean <- suppressWarnings(dplyr::select(df, -one_of(spp2exclude)))
spnames <- names(df.clean)[5:ncol(df.clean)]
} else {
df.clean <- dplyr::filter(df, ! species %in% spp2exclude)
spnames <- unique(df.clean$species)
}
if (!all(spnames %in% spp2include)) {
stop("\n\n", paste(spnames[!spnames %in% spp2include], collapse = "\n"),
"\n\ndo not match species names in spp2include.csv.
\nPlease check species names in data frame.")
}
return(df.clean)
}
|
######################################
# 11.10.2016
# Multiple Linear Regression (MLR) example
# BISC 481
######################################
## Install and initialize packages
install.packages("ggplot2")
install.packages("grid")
library(ggplot2)
library(grid)
## Theme
my.theme <- theme(
plot.margin = unit(c(0.1, 0.5, 0.1, 0.1), "cm"),
axis.text = element_text(colour="black", size=12),
axis.title.x = element_text(colour="black", size=12),
axis.title.y = element_text(colour="black", size=12),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black"),
axis.text = element_text(colour ="black"),
axis.ticks = element_line(colour = "black")
)
## Data preparation
data1 <- c(0.775018, 0.7855332, 0.7778943)
data2 <- c(0.8633669, 0.8643545,0.8552534)
## Ploting
ggplot() +
geom_point(aes(x = data1, y = data2), color = "red", size=1) +
geom_abline(slope=1) + geom_vline(xintercept=0) + geom_hline(yintercept=0) +
coord_fixed(ratio = 1, xlim = c(0,1), ylim = c(0,1)) +
scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand = c(0, 0)) +
my.theme
|
/High Thoroughput in vitro data analysis.R
|
no_license
|
rhyunyp/BISC481-assignment
|
R
| false | false | 1,187 |
r
|
######################################
# 11.10.2016
# Multiple Linear Regression (MLR) example
# BISC 481
######################################
## Install and initialize packages
install.packages("ggplot2")
install.packages("grid")
library(ggplot2)
library(grid)
## Theme
my.theme <- theme(
plot.margin = unit(c(0.1, 0.5, 0.1, 0.1), "cm"),
axis.text = element_text(colour="black", size=12),
axis.title.x = element_text(colour="black", size=12),
axis.title.y = element_text(colour="black", size=12),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black"),
axis.text = element_text(colour ="black"),
axis.ticks = element_line(colour = "black")
)
## Data preparation
data1 <- c(0.775018, 0.7855332, 0.7778943)
data2 <- c(0.8633669, 0.8643545,0.8552534)
## Ploting
ggplot() +
geom_point(aes(x = data1, y = data2), color = "red", size=1) +
geom_abline(slope=1) + geom_vline(xintercept=0) + geom_hline(yintercept=0) +
coord_fixed(ratio = 1, xlim = c(0,1), ylim = c(0,1)) +
scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand = c(0, 0)) +
my.theme
|
library(wavethresh)
### Name: PsiJ
### Title: Compute discrete autocorrelation wavelets.
### Aliases: PsiJ
### Keywords: manip
### ** Examples
#
# Let us create the discrete autocorrelation wavelets for the Haar wavelet.
# We shall create up to scale 4.
#
PsiJ(-4, filter.number=1, family="DaubExPhase")
#Computing PsiJ
#Returning precomputed version
#Took 0.00999999 seconds
#[[1]]:
#[1] -0.5 1.0 -0.5
#
#[[2]]:
#[1] -0.25 -0.50 0.25 1.00 0.25 -0.50 -0.25
#
#[[3]]:
# [1] -0.125 -0.250 -0.375 -0.500 -0.125 0.250 0.625 1.000 0.625 0.250
#[11] -0.125 -0.500 -0.375 -0.250 -0.125
#
#[[4]]:
# [1] -0.0625 -0.1250 -0.1875 -0.2500 -0.3125 -0.3750 -0.4375 -0.5000 -0.3125
#[10] -0.1250 0.0625 0.2500 0.4375 0.6250 0.8125 1.0000 0.8125 0.6250
#[19] 0.4375 0.2500 0.0625 -0.1250 -0.3125 -0.5000 -0.4375 -0.3750 -0.3125
#[28] -0.2500 -0.1875 -0.1250 -0.0625
#
# You can plot the fourth component to get an idea of what the
# autocorrelation wavelet looks like.
#
# Note that the previous call stores the autocorrelation wavelet
# in Psi.4.1.DaubExPhase. This is mainly so that it doesn't have to
# be recomputed.
#
# Note that the x-coordinates in the following are approximate.
#
## Not run:
##D plot(seq(from=-1, to=1, length=length(Psi.4.1.DaubExPhase[[4]])),
##D Psi.4.1.DaubExPhase[[4]], type="l",
##D xlab = "t", ylab = "Haar Autocorrelation Wavelet")
## End(Not run)
#
#
# Now let us repeat the above for the Daubechies Least-Asymmetric wavelet
# with 10 vanishing moments.
# We shall create up to scale 6, a higher resolution version than last
# time.
#
p6 <- PsiJ(-6, filter.number=10, family="DaubLeAsymm", OPLENGTH=5000)
p6
##[[1]]:
# [1] 3.537571e-07 5.699601e-16 -7.512135e-06 -7.705013e-15 7.662378e-05
# [6] 5.637163e-14 -5.010016e-04 -2.419432e-13 2.368371e-03 9.976593e-13
#[11] -8.684028e-03 -1.945435e-12 2.605208e-02 6.245832e-12 -6.773542e-02
#[16] 4.704777e-12 1.693386e-01 2.011086e-10 -6.209080e-01 1.000000e+00
#[21] -6.209080e-01 2.011086e-10 1.693386e-01 4.704777e-12 -6.773542e-02
#[26] 6.245832e-12 2.605208e-02 -1.945435e-12 -8.684028e-03 9.976593e-13
#[31] 2.368371e-03 -2.419432e-13 -5.010016e-04 5.637163e-14 7.662378e-05
#[36] -7.705013e-15 -7.512135e-06 5.699601e-16 3.537571e-07
#
#[[2]]
# scale 2 etc. etc.
#
#[[3]] scale 3 etc. etc.
#
#scales [[4]] and [[5]]...
#
#[[6]]
#...
# remaining scale 6 elements...
#...
#[2371] -1.472225e-31 -1.176478e-31 -4.069848e-32 -2.932736e-41 6.855259e-33
#[2376] 5.540202e-33 2.286296e-33 1.164962e-42 -3.134088e-35 3.427783e-44
#[2381] -1.442993e-34 -2.480298e-44 5.325726e-35 9.346398e-45 -2.699644e-36
#[2386] -4.878634e-46 -4.489527e-36 -4.339365e-46 1.891864e-36 2.452556e-46
#[2391] -3.828924e-37 -4.268733e-47 4.161874e-38 3.157694e-48 -1.959885e-39
##
# Let's now plot the 6th component (6th scale, this is the finest
# resolution, all the other scales will be coarser representations)
#
#
# Note that the x-coordinates in the following are non-existant!
#
## Not run:
##D ts.plot(p6[[6]], xlab = "t",
##D ylab = "Daubechies N=10 least-asymmetric Autocorrelation Wavelet")
## End(Not run)
|
/data/genthat_extracted_code/wavethresh/examples/PsiJ.rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 3,136 |
r
|
library(wavethresh)
### Name: PsiJ
### Title: Compute discrete autocorrelation wavelets.
### Aliases: PsiJ
### Keywords: manip
### ** Examples
#
# Let us create the discrete autocorrelation wavelets for the Haar wavelet.
# We shall create up to scale 4.
#
PsiJ(-4, filter.number=1, family="DaubExPhase")
#Computing PsiJ
#Returning precomputed version
#Took 0.00999999 seconds
#[[1]]:
#[1] -0.5 1.0 -0.5
#
#[[2]]:
#[1] -0.25 -0.50 0.25 1.00 0.25 -0.50 -0.25
#
#[[3]]:
# [1] -0.125 -0.250 -0.375 -0.500 -0.125 0.250 0.625 1.000 0.625 0.250
#[11] -0.125 -0.500 -0.375 -0.250 -0.125
#
#[[4]]:
# [1] -0.0625 -0.1250 -0.1875 -0.2500 -0.3125 -0.3750 -0.4375 -0.5000 -0.3125
#[10] -0.1250 0.0625 0.2500 0.4375 0.6250 0.8125 1.0000 0.8125 0.6250
#[19] 0.4375 0.2500 0.0625 -0.1250 -0.3125 -0.5000 -0.4375 -0.3750 -0.3125
#[28] -0.2500 -0.1875 -0.1250 -0.0625
#
# You can plot the fourth component to get an idea of what the
# autocorrelation wavelet looks like.
#
# Note that the previous call stores the autocorrelation wavelet
# in Psi.4.1.DaubExPhase. This is mainly so that it doesn't have to
# be recomputed.
#
# Note that the x-coordinates in the following are approximate.
#
## Not run:
##D plot(seq(from=-1, to=1, length=length(Psi.4.1.DaubExPhase[[4]])),
##D Psi.4.1.DaubExPhase[[4]], type="l",
##D xlab = "t", ylab = "Haar Autocorrelation Wavelet")
## End(Not run)
#
#
# Now let us repeat the above for the Daubechies Least-Asymmetric wavelet
# with 10 vanishing moments.
# We shall create up to scale 6, a higher resolution version than last
# time.
#
p6 <- PsiJ(-6, filter.number=10, family="DaubLeAsymm", OPLENGTH=5000)
p6
##[[1]]:
# [1] 3.537571e-07 5.699601e-16 -7.512135e-06 -7.705013e-15 7.662378e-05
# [6] 5.637163e-14 -5.010016e-04 -2.419432e-13 2.368371e-03 9.976593e-13
#[11] -8.684028e-03 -1.945435e-12 2.605208e-02 6.245832e-12 -6.773542e-02
#[16] 4.704777e-12 1.693386e-01 2.011086e-10 -6.209080e-01 1.000000e+00
#[21] -6.209080e-01 2.011086e-10 1.693386e-01 4.704777e-12 -6.773542e-02
#[26] 6.245832e-12 2.605208e-02 -1.945435e-12 -8.684028e-03 9.976593e-13
#[31] 2.368371e-03 -2.419432e-13 -5.010016e-04 5.637163e-14 7.662378e-05
#[36] -7.705013e-15 -7.512135e-06 5.699601e-16 3.537571e-07
#
#[[2]]
# scale 2 etc. etc.
#
#[[3]] scale 3 etc. etc.
#
#scales [[4]] and [[5]]...
#
#[[6]]
#...
# remaining scale 6 elements...
#...
#[2371] -1.472225e-31 -1.176478e-31 -4.069848e-32 -2.932736e-41 6.855259e-33
#[2376] 5.540202e-33 2.286296e-33 1.164962e-42 -3.134088e-35 3.427783e-44
#[2381] -1.442993e-34 -2.480298e-44 5.325726e-35 9.346398e-45 -2.699644e-36
#[2386] -4.878634e-46 -4.489527e-36 -4.339365e-46 1.891864e-36 2.452556e-46
#[2391] -3.828924e-37 -4.268733e-47 4.161874e-38 3.157694e-48 -1.959885e-39
##
# Let's now plot the 6th component (6th scale, this is the finest
# resolution, all the other scales will be coarser representations)
#
#
# Note that the x-coordinates in the following are non-existant!
#
## Not run:
##D ts.plot(p6[[6]], xlab = "t",
##D ylab = "Daubechies N=10 least-asymmetric Autocorrelation Wavelet")
## End(Not run)
|
library(dplyr)
library(lubridate)
#Import data
power_data <- read_delim("household_power_consumption.txt", delim = ";") %>%
filter(Date %in% c("2/2/2007", "1/2/2007")) %>%
mutate(DateTime = dmy_hms(paste(Date,as.character(Time))))
png(filename = "plot1.png", width = 504, height = 504)
with(power_data,hist(Global_active_power, col = "red", main = "Global Active Power",
xlab = "Global Active Power (kilowatts"))
dev.off()
|
/plot1.R
|
no_license
|
MottledOne/ExData_Plotting1
|
R
| false | false | 452 |
r
|
library(dplyr)
library(lubridate)
#Import data
power_data <- read_delim("household_power_consumption.txt", delim = ";") %>%
filter(Date %in% c("2/2/2007", "1/2/2007")) %>%
mutate(DateTime = dmy_hms(paste(Date,as.character(Time))))
png(filename = "plot1.png", width = 504, height = 504)
with(power_data,hist(Global_active_power, col = "red", main = "Global Active Power",
xlab = "Global Active Power (kilowatts"))
dev.off()
|
##' @export
id <- function(object,...) UseMethod("id")
##' Extract different 'id' of neuro netCDF files
##'
##' @title Extract different 'id' of neuro netCDF files
##' @param object netCDF filename
##' @param ... Additional low level argument parsed on to lower level functions
##' @author Klaus K. Holst
##' @S3method id neurocdf
##' @method id neurocdf
id.neurocdf <- function(object,...) {
if (!file.exists(object)) stop("netCDF file not found")
nc <- with(neuro.env, openNCDF)(object)
types <- as.vector(with(neuro.env, getvarNCDF)(nc,"SubjectDescription",c(1,1,1,1),c(-1,1,1,-1)))
with(neuro.env, closeNCDF)(nc)
return(types)
}
|
/R/id.R
|
no_license
|
kkholst/neurocdf
|
R
| false | false | 644 |
r
|
##' @export
id <- function(object,...) UseMethod("id")
##' Extract different 'id' of neuro netCDF files
##'
##' @title Extract different 'id' of neuro netCDF files
##' @param object netCDF filename
##' @param ... Additional low level argument parsed on to lower level functions
##' @author Klaus K. Holst
##' @S3method id neurocdf
##' @method id neurocdf
id.neurocdf <- function(object,...) {
if (!file.exists(object)) stop("netCDF file not found")
nc <- with(neuro.env, openNCDF)(object)
types <- as.vector(with(neuro.env, getvarNCDF)(nc,"SubjectDescription",c(1,1,1,1),c(-1,1,1,-1)))
with(neuro.env, closeNCDF)(nc)
return(types)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glue_operations.R
\name{glue_batch_create_partition}
\alias{glue_batch_create_partition}
\title{Creates one or more partitions in a batch operation}
\usage{
glue_batch_create_partition(CatalogId, DatabaseName, TableName,
PartitionInputList)
}
\arguments{
\item{CatalogId}{The ID of the catalog in which the partition is to be created.
Currently, this should be the AWS account ID.}
\item{DatabaseName}{[required] The name of the metadata database in which the partition is to be
created.}
\item{TableName}{[required] The name of the metadata table in which the partition is to be created.}
\item{PartitionInputList}{[required] A list of \code{PartitionInput} structures that define the partitions to be
created.}
}
\description{
Creates one or more partitions in a batch operation.
}
\section{Request syntax}{
\preformatted{svc$batch_create_partition(
CatalogId = "string",
DatabaseName = "string",
TableName = "string",
PartitionInputList = list(
list(
Values = list(
"string"
),
LastAccessTime = as.POSIXct(
"2015-01-01"
),
StorageDescriptor = list(
Columns = list(
list(
Name = "string",
Type = "string",
Comment = "string",
Parameters = list(
"string"
)
)
),
Location = "string",
InputFormat = "string",
OutputFormat = "string",
Compressed = TRUE|FALSE,
NumberOfBuckets = 123,
SerdeInfo = list(
Name = "string",
SerializationLibrary = "string",
Parameters = list(
"string"
)
),
BucketColumns = list(
"string"
),
SortColumns = list(
list(
Column = "string",
SortOrder = 123
)
),
Parameters = list(
"string"
),
SkewedInfo = list(
SkewedColumnNames = list(
"string"
),
SkewedColumnValues = list(
"string"
),
SkewedColumnValueLocationMaps = list(
"string"
)
),
StoredAsSubDirectories = TRUE|FALSE
),
Parameters = list(
"string"
),
LastAnalyzedTime = as.POSIXct(
"2015-01-01"
)
)
)
)
}
}
\keyword{internal}
|
/paws/man/glue_batch_create_partition.Rd
|
permissive
|
johnnytommy/paws
|
R
| false | true | 2,450 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glue_operations.R
\name{glue_batch_create_partition}
\alias{glue_batch_create_partition}
\title{Creates one or more partitions in a batch operation}
\usage{
glue_batch_create_partition(CatalogId, DatabaseName, TableName,
PartitionInputList)
}
\arguments{
\item{CatalogId}{The ID of the catalog in which the partition is to be created.
Currently, this should be the AWS account ID.}
\item{DatabaseName}{[required] The name of the metadata database in which the partition is to be
created.}
\item{TableName}{[required] The name of the metadata table in which the partition is to be created.}
\item{PartitionInputList}{[required] A list of \code{PartitionInput} structures that define the partitions to be
created.}
}
\description{
Creates one or more partitions in a batch operation.
}
\section{Request syntax}{
\preformatted{svc$batch_create_partition(
CatalogId = "string",
DatabaseName = "string",
TableName = "string",
PartitionInputList = list(
list(
Values = list(
"string"
),
LastAccessTime = as.POSIXct(
"2015-01-01"
),
StorageDescriptor = list(
Columns = list(
list(
Name = "string",
Type = "string",
Comment = "string",
Parameters = list(
"string"
)
)
),
Location = "string",
InputFormat = "string",
OutputFormat = "string",
Compressed = TRUE|FALSE,
NumberOfBuckets = 123,
SerdeInfo = list(
Name = "string",
SerializationLibrary = "string",
Parameters = list(
"string"
)
),
BucketColumns = list(
"string"
),
SortColumns = list(
list(
Column = "string",
SortOrder = 123
)
),
Parameters = list(
"string"
),
SkewedInfo = list(
SkewedColumnNames = list(
"string"
),
SkewedColumnValues = list(
"string"
),
SkewedColumnValueLocationMaps = list(
"string"
)
),
StoredAsSubDirectories = TRUE|FALSE
),
Parameters = list(
"string"
),
LastAnalyzedTime = as.POSIXct(
"2015-01-01"
)
)
)
)
}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GOsummer.R
\name{GOsummer}
\alias{GOsummer}
\title{GOsummer is the function that you can use to do summerization on the myGO object.}
\usage{
GOsummer(mygo, Type, Term)
}
\arguments{
\item{mygo, Type, Term}{Type is the ontology type: MF, BP, CC, PC
Term is the GOterms or terms you are interested to see, such as response, binding, et.
MF terms include: "transporter activity", "binding", "catalytic activity", "structural molecular activity", "receptor activity", "antioxidant activity"
BP terms include:"cellular process", "localization", "metabolic process", "response to stimulus", "biological regulation", "cellular process", "cellular component organization or biogenesis"
CC terms include: "cell part", "marcromolecular complex", "membrane", "organelle"
PC terms include:"chaperone", "hydrolase","transporter", "oxidoreductase", "ligase", "enzyme modulator", "isomerase", "nucleic acid binding", "receptor", "transcription factor", "transfer/carrier protein", "transferase", "defense/immunity protein", "ribosomal RNA", "tRNA"
IF inputs are not in Type or Term, it returns a list of terms you can choose from
IF Type input is not in the type vector, and you know the Term, it returns a list of summaries based on the Term
IF Term is unknown, and you know the Type, it returns a list of summaries based on the Type
IF both inputs are in the function, it returns a list of summaries according to the Type and Term}
}
\description{
GOsummer is the function that you can use to do summerization on the myGO object.
}
\examples{
\dontrun{GOsummer(mygo.e.34hr, "MF", "transporter activity")}
\dontrun{GOsummer(mygo.e.34hr, "I don't know", "transporter")}
}
|
/DraOnto.db/man/GOsummer.Rd
|
no_license
|
xpingli/DraOnto.db
|
R
| false | true | 1,747 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GOsummer.R
\name{GOsummer}
\alias{GOsummer}
\title{GOsummer is the function that you can use to do summerization on the myGO object.}
\usage{
GOsummer(mygo, Type, Term)
}
\arguments{
\item{mygo, Type, Term}{Type is the ontology type: MF, BP, CC, PC
Term is the GOterms or terms you are interested to see, such as response, binding, et.
MF terms include: "transporter activity", "binding", "catalytic activity", "structural molecular activity", "receptor activity", "antioxidant activity"
BP terms include:"cellular process", "localization", "metabolic process", "response to stimulus", "biological regulation", "cellular process", "cellular component organization or biogenesis"
CC terms include: "cell part", "marcromolecular complex", "membrane", "organelle"
PC terms include:"chaperone", "hydrolase","transporter", "oxidoreductase", "ligase", "enzyme modulator", "isomerase", "nucleic acid binding", "receptor", "transcription factor", "transfer/carrier protein", "transferase", "defense/immunity protein", "ribosomal RNA", "tRNA"
IF inputs are not in Type or Term, it returns a list of terms you can choose from
IF Type input is not in the type vector, and you know the Term, it returns a list of summaries based on the Term
IF Term is unknown, and you know the Type, it returns a list of summaries based on the Type
IF both inputs are in the function, it returns a list of summaries according to the Type and Term}
}
\description{
GOsummer is the function that you can use to do summerization on the myGO object.
}
\examples{
\dontrun{GOsummer(mygo.e.34hr, "MF", "transporter activity")}
\dontrun{GOsummer(mygo.e.34hr, "I don't know", "transporter")}
}
|
library("tidyverse")
library("tidyr")
library("stringr")
library("dplyr")
library("tibble")
library("readr")
library("ggplot2")
library("dplyr")
x = read_csv("eddypro.csv", skip = 1, na =c("","NA","-9999","-9999.0"), comment=c("["))
x = x[-1,]
x
glimpse(x)
x = select(x, -(roll))
x<-x[,c(-1,-3,-9,-12,-15,-18,-21,-30,-35,-70,-88:-99)]
x
x = x %>% mutate_if(is.character, factor)
names(x) = str_replace_all(names(x), "[!]","_emph_")
names(x) = names(x) %>%
str_replace_all("[!]","_emph_") %>%
str_replace_all("[?]","_quest_") %>%
str_replace_all("[*]","_star_") %>%
str_replace_all("[+]","_plus_") %>%
str_replace_all("[-]","_minus_") %>%
str_replace_all("[@]","_at_") %>%
str_replace_all("[$]","_dollar_") %>%
str_replace_all("[#]","_hash_") %>%
str_replace_all("[/]","_div_") %>%
str_replace_all("[%]","_perc_") %>%
str_replace_all("[&]","_amp_") %>%
str_replace_all("[\\^]","_power_") %>%
str_replace_all("[()]","_")
glimpse(x)
x=x[x$DOY > 152 & x$DOY < 244,]
x
sapply(x,is.numeric)
x_numeric = x[,sapply(x,is.numeric)]
x_numeric
cor_x = cor(x_numeric)
cor_x
cor_x = cor(na.omit(x_numeric))
cor_x
cor_x = cor(na.omit(x_numeric)) %>% as.data.frame %>% select(co2_flux)
cor_x
vars = row.names(cor_x)[cor_x$co2_flux^2 > .7] %>% na.exclude
vars
formula = as.formula(paste("co2_flux~", paste(vars,collapse = "+"), sep = ""))
formula
row_numbers = 1:length(x$date)
teach = sample(row_numbers, floor(length(x$date)*.7))
test = row_numbers[-teach]
teaching_x_unq = x[teach,]
testing_x_unq = x[test,]
mod = lm(formula, data=x)
mod
coef(mod)
resid(mod)
confint(mod)
summary(mod)
anova(mod)
mod1=lm(co2_flux~(DOY+Tau+rand_err_Tau + H+rand_err_H +LE+ rand_err_LE +co2_flux+rand_err_co2_flux+ rand_err_h2o_flux + H_strg + co2_molar_density +h2o_time_lag + sonic_temperature + air_temperature+air_density+air_molar_volume+es+RH+VPD+u_star_+TKE+T_star_+un_H+un_Tau+un_LE+un_co2_flux+un_h2o_flux+flowrate)^2,data=x)
mod1
coef(mod1)
resid(mod1)
confint(mod1)
summary(mod1)
anova(mod1)
|
/test1.R
|
no_license
|
alexissolovev24/MathMod
|
R
| false | false | 2,061 |
r
|
library("tidyverse")
library("tidyr")
library("stringr")
library("dplyr")
library("tibble")
library("readr")
library("ggplot2")
library("dplyr")
x = read_csv("eddypro.csv", skip = 1, na =c("","NA","-9999","-9999.0"), comment=c("["))
x = x[-1,]
x
glimpse(x)
x = select(x, -(roll))
x<-x[,c(-1,-3,-9,-12,-15,-18,-21,-30,-35,-70,-88:-99)]
x
x = x %>% mutate_if(is.character, factor)
names(x) = str_replace_all(names(x), "[!]","_emph_")
names(x) = names(x) %>%
str_replace_all("[!]","_emph_") %>%
str_replace_all("[?]","_quest_") %>%
str_replace_all("[*]","_star_") %>%
str_replace_all("[+]","_plus_") %>%
str_replace_all("[-]","_minus_") %>%
str_replace_all("[@]","_at_") %>%
str_replace_all("[$]","_dollar_") %>%
str_replace_all("[#]","_hash_") %>%
str_replace_all("[/]","_div_") %>%
str_replace_all("[%]","_perc_") %>%
str_replace_all("[&]","_amp_") %>%
str_replace_all("[\\^]","_power_") %>%
str_replace_all("[()]","_")
glimpse(x)
x=x[x$DOY > 152 & x$DOY < 244,]
x
sapply(x,is.numeric)
x_numeric = x[,sapply(x,is.numeric)]
x_numeric
cor_x = cor(x_numeric)
cor_x
cor_x = cor(na.omit(x_numeric))
cor_x
cor_x = cor(na.omit(x_numeric)) %>% as.data.frame %>% select(co2_flux)
cor_x
vars = row.names(cor_x)[cor_x$co2_flux^2 > .7] %>% na.exclude
vars
formula = as.formula(paste("co2_flux~", paste(vars,collapse = "+"), sep = ""))
formula
row_numbers = 1:length(x$date)
teach = sample(row_numbers, floor(length(x$date)*.7))
test = row_numbers[-teach]
teaching_x_unq = x[teach,]
testing_x_unq = x[test,]
mod = lm(formula, data=x)
mod
coef(mod)
resid(mod)
confint(mod)
summary(mod)
anova(mod)
mod1=lm(co2_flux~(DOY+Tau+rand_err_Tau + H+rand_err_H +LE+ rand_err_LE +co2_flux+rand_err_co2_flux+ rand_err_h2o_flux + H_strg + co2_molar_density +h2o_time_lag + sonic_temperature + air_temperature+air_density+air_molar_volume+es+RH+VPD+u_star_+TKE+T_star_+un_H+un_Tau+un_LE+un_co2_flux+un_h2o_flux+flowrate)^2,data=x)
mod1
coef(mod1)
resid(mod1)
confint(mod1)
summary(mod1)
anova(mod1)
|
CMADE <- function(par, fn, ..., lower, upper, control=list()) {
library("ringbuffer")
## Function to check the presence of options in the arguments specified by the user
# @name - argument name
# @default - default value of the argument
# RETURN: value specified by user if the given argument name found, default value otherwise
##
controlParam <- function(name, default) {
v <- control[[name]]
if (is.null(v))
return (default)
else
return (v)
}
N <- length(par) ## Number of dimensions
## Check if user specified box constraints values.
## If not accept the default values.
if (missing(lower))
lower <- rep(-100, N)
else if (length(lower) == 1)
lower <- rep(lower, N)
if (missing(upper))
upper <- rep(100, N)
else if (length(upper) == 1)
upper <- rep(upper, N)
bounceBackBoundary2 <- function(x){
x[is.na(x)] <- .Machine$double.xmax
x[is.infinite(x)] <- .Machine$double.xmax
if(all(x >= cbind(lower)) && all(x <= cbind(upper)))
return (x)
else if(any(x < cbind(lower)))
for(i in which(x < cbind(lower)) )
x[i] <- lower[i] + abs(lower[i] - x[i])%% (upper[i]- lower[i])
else if(any(x > cbind(upper)))
for(i in which(x > cbind(upper)) )
x[i] <- upper[i] - abs(upper[i] - x[i])%% (upper[i]- lower[i])
return (bounceBackBoundary2(x))
}
#############################
## Algorithm parameters: ##
#############################
Ft <- controlParam("Ft", 1) ## Scaling factor of difference vectors (a variable!)
initFt <- controlParam("initFt", 1)
stopfitness <- controlParam("stopfitness", -Inf) ## Fitness value after which the convergence is reached
stopvariance<- controlParam("stopvariance", 1e-12*Ft) ## Genetic diversity minimum value(stop fitness variance)
## Strategy parameter setting:
budget <- controlParam("budget", 10000*N ) ## The maximum number of fitness function calls
initlambda <- controlParam("lambda", floor((4+sqrt(N)/2)*N)) ## Population starting size
lambda <- initlambda ## Population size
mu <- controlParam("mu", floor(lambda/2)) ## Selection size
weights <- controlParam("weights", log(mu+1) - log(1:mu)) ## Weights to calculate mean from selected individuals
#weights <- controlParam("weights", (1:mu)*0+1)
weights <- weights/sum(weights) ## \-> weights are normalized by the sum
weightsSumS <- sum(weights^2) ## weights sum square
mueff <- controlParam("mueff", sum(weights)^2/sum(weights^2)) ## Variance effectiveness factor
cc <- controlParam("cc", 4/(N+4)) ## Evolution Path decay factor
c_pc <- controlParam("cpc", 0.2) ## Covariance deformation factor
cc_mueff <- sqrt(cc*(2 - cc) )#*sqrt( mueff) ## 'cc' and 'mueff' are constant so as this equation
c_cov <- controlParam("c_cov", 1/2) ## Mutation vectors weight constant
pathLength <- controlParam("pathLength", 6) ## Size of evolution path
maxiter <- controlParam("maxit", floor(budget/(lambda+1))) ## Maximum number of iterations after which algorithm stops
c_Ft <- controlParam("c_Ft", 1/((sqrt(2)*gamma((N+1)/2)/gamma(N/2)) )) ## Variance scaling constant
pathRatio <- controlParam("pathRatio",sqrt(pathLength)) ## Path Length Control reference value
checkMiddle <- controlParam("checkMiddle", TRUE) ## Vatiable telling if algorithm should save check middle point in every iteration
histSize <- controlParam("history", 6+ceiling(3*sqrt(N))) ## Size of the window of history - the step length history
#histSize <- controlParam("history", 0.5*N^2) ## Size of the window of history - the step length history
histSize <- ceiling(histSize) ## \-> size should be integer
Ft_scale <- controlParam("Ft_scale", ((mueff+2)/(N+mueff+3))/(1 + 2*max(0, sqrt((mueff-1)/(N+1))-1) + (mueff+2)/(N+mueff+3)))
tol <- controlParam("tol", 10^-18)
## Logging options:
log.all <- controlParam("diag", FALSE)
log.Ft <- controlParam("diag.Ft", log.all)
log.value <- controlParam("diag.value", log.all)
log.mean <- controlParam("diag.mean", log.all)
log.pop <- controlParam("diag.pop", log.all)
log.pathRat <- controlParam("diag.pathRatio", log.all)
## nonLamarckian approach allows individuals to violate boundaries.
## Fitness value is estimeted by fitness of repaired individual.
Lamarckism <- controlParam("Lamarckism", TRUE)
fn_p <- function(P, P_repaired) {
# Lamarckian approach
if(Lamarckism==TRUE){
if(is.matrix(P)){
return ( apply(P, 2, fn) )
}else{
return ( fn(P) )
}
}
# nonLamarckian approach
else{
P[is.na(P)] <- .Machine$double.xmax
P[is.infinite(P)] <- .Machine$double.xmax
if(is.matrix(P) && is.matrix(P_repaired)){
repairedInd <- apply(P!=P_repaired,2,all)
P_fit <- apply(P_repaired, 2, fn)
hammingDist <- colSums((P - P_repaired)^2)
P_fit[which(repairedInd)] <- P_fit[which(repairedInd)] + hammingDist[which(repairedInd)]
return(P_fit)
}else{
P_fit <- fn(P_repaired) + sum(P-P_repaired)^2
return ( P_fit )
}
}
}
## Asserts - safety checks:
stopifnot(length(upper) == N)
stopifnot(length(lower) == N)
stopifnot(all(lower < upper))
stopifnot(length(Ft) == 1)
## Initialize variables for the best solution found so far:
best.fit <- Inf
best.par <- NULL
last.restart <-0
restart.length <-0
restart.number <-0
## According to user specification, preallocate logging structures:
if (log.Ft)
Ft.log <- numeric(maxiter)
if (log.value)
value.log <- matrix(0, nrow=maxiter, ncol=lambda)
if (log.mean)
mean.log <- numeric(maxiter)
if (log.pop)
pop.log <- array(0, c(N, lambda, maxiter))
if (log.pathRat)
pathRatio.log <- numeric(maxiter)
## Allocate buffers:
steps <- ringbuffer(size = pathLength*N) ## Cyclical buffer containing last 'pathLength' steps of algorithm
FtHistory <- array(0, histSize) ## Array buffer containing 'histSize' last values of 'Ft'
## Initialize internal strategy parameters
counteval <- 0 ## Number of function evaluations
msg <- NULL ## Reason for terminating
lambda <- initlambda
pc <- rep(0.0, N)/sqrt(N)
histHead <- 0 ## Pointer to the history buffer head
iter <- 0L ## Number of iterations
history <- array(0, c(N, mu, histSize)) ## Array stores best 'mu' individuals for 'hsize' recent iterations
Ft <- initFt
# Create first population
population <- replicate(lambda, runif(N,0.8*lower,0.8*upper))
cumMean=rowMeans(population)
# Check constraints violations
populationRepaired <- apply(population,2,bounceBackBoundary2)
if(Lamarckism==TRUE){
population <- populationRepaired
}
selection <- rep(0, mu)
selectedPoints <- matrix(0, nrow=N, ncol=mu)
fitness <- fn_p(population, populationRepaired)
counteval <- counteval + lambda
oldMean <- numeric(N)
newMean <- par
pc <- rep(0.0, N)/sqrt(N)
limit <- 0
## Matrices for creating diffs
diffs <- matrix(0, N, lambda)
x1sample <- numeric(lambda)
x2sample <- numeric(lambda)
chiN <- (sqrt(2)*gamma((N+1)/2)/gamma(N/2))
histNorm <- sqrt(mu/(mu+1))/sqrt(2)
counterRepaired <- 0
while (counteval < budget) {
iter <- iter + 1L
histHead <- (histHead %% histSize) + 1
if (log.Ft) Ft.log[iter] <- Ft
if (log.value) value.log[iter,] <- fitness
if (log.mean) mean.log[iter] <- fn_p(bounceBackBoundary2(newMean))
if (log.pop) pop.log[,,iter] <- population
## Select best 'mu' individuals of population
selection <- order(fitness)[1:mu]
selectedPoints <- population[,selection]
# Save selected population in the history buffer
history[,,histHead] <- selectedPoints * histNorm/Ft
## Calculate weighted mean of selected points
oldMean <- newMean
newMean <- drop(selectedPoints %*% weights)
## Write to buffers
step <- (newMean - oldMean) / Ft
steps$write(step)
## Update Ft
FtHistory[histHead] = Ft
oldFt <- Ft
if (iter > pathLength-1 && (sum(step == 0) == 0) && counterRepaired<0.1*lambda) {
#if (iter > pathLength-1 ) {
Ft <- calculateFt(steps, N, lambda, pathLength, Ft, c_Ft, pathRatio, chiN, mueff)
if (log.pathRat) pathRatio.log[iter] <- totalToDirectRatio(steps, N, pathLength)
}else {
if (log.pathRat && iter==1L) pathRatio.log[iter] <- 0
if (log.pathRat && iter!=1L) pathRatio.log[iter] <- pathRatio.log[iter-1]
}
## Update parameters
pc = (1 - cc)* pc + cc* step
## Sample from history with uniform distribution
limit <- ifelse(iter < histSize, histHead, histSize)
historySample <- sample(1:limit,lambda, T)
x1sample <- sample(1:mu, lambda, replace=TRUE)#, weights)
x2sample <- sample(1:mu, lambda, replace=TRUE)#, weights)
## Make diffs
for (i in 1:lambda) {
x1 <- history[, x1sample[i], historySample[i]]
x2 <- history[, x2sample[i], historySample[i]]
diffs[,i] <- (x1 - x2)/sqrt(2) + sqrt(1-c_pc)*rnorm(1)*pc*chiN +
sqrt(c_pc) * (rnorm(1) * pc * chiN +
0*rnorm(N)/chiN*tol )
}
if(counterRepaired>0)
Ft <- FtHistory[histHead] + abs(Ft-FtHistory[histHead])*((lambda-counterRepaired)/lambda)*c_Ft
## New population
population <- newMean + Ft * diffs
# Check constraints violations
# Repair the individual if necessary
populationTemp <- population
populationRepaired <- apply(population,2,bounceBackBoundary2)
counterRepaired=0
for(tt in 1:ncol(populationTemp)){
if(any(populationTemp[,tt] != populationRepaired[,tt]))
counterRepaired = counterRepaired + 1
}
if(Lamarckism==TRUE){
population <- populationRepaired
}
## Evaluation
fitness <- fn_p(population, populationRepaired)
## Break if fit:
wb <- which.min(fitness)
if (fitness[wb] < best.fit) {
best.fit <- fitness[wb]
best.par <- population[,wb]
}
counteval <- counteval + lambda
## Check if the middle point is the best found so far
##cumMean <- 0.8*cumMean+0.2*newMean
cumMean <- newMean
cumMeanRepaired <-bounceBackBoundary2(cumMean)
fn_cum <- fn_p(cumMean, cumMeanRepaired)
if (fn_cum < best.fit) {
best.fit <- drop(fn_cum)
best.par <- cumMean
}
counteval <- counteval + 1
## Escape from flat-land:
if (min(fitness) == sort(fitness,partial=min(1+floor(lambda/2), 2+ceiling(lambda/4)))[min(1+floor(lambda/2), 2+ceiling(lambda/4))]) {
Ft <- Ft * exp(0.2*Ft_scale);
}
if (fitness[1] <= stopfitness) {
msg <- "Stop fitness reached."
break
}
}
# Restart paramaters adaptation
lambda <- round(lambda+initlambda * 0.2)
mu <- floor(lambda/2)
weights <- log(mu+1) - log(1:mu)
#weights <- (1:mu)*0+1
weights <- weights/sum(weights)
cnt <- c(`function`=as.integer(counteval))
log <- list()
## Subset lognostic data to only include those iterations which
## where actually performed.
if (log.Ft) log$Ft <- Ft.log[1:iter]
if (log.value) log$value <- value.log[1:iter,]
if (log.mean) log$mean <- mean.log[1:iter]
if (log.pop) log$pop <- pop.log[,,1:iter]
if (log.pathRat) log$pathRatio <- pathRatio.log[1:iter]
## Drop names from value object
names(best.fit) <- NULL
res <- list(par=best.par,
value=best.fit,
counts=cnt,
convergence=ifelse(iter >= maxiter, 1L, 0L),
message=msg,
diagnostic=log
)
class(res) <- "cmade.result"
return(res)
}
## Norm: function that assigns a strictly positive length to each vector in a vector space.
# @vectorX - vector to norm
# RETURN: euclidean norm of the given vector
##
norm <- function(vectorX)
drop(sqrt(crossprod(vectorX)))
## Function calculate what proportion of the population has a better fitness
# than its center.
# @benchmarkFitness - fitness value of mean individual of population
# @popFitness - actual population fitness array
# RETURN: proportion of better fitted individuals to the whole population
##
p_succ<-function(benchmarkFitness, popFitness) {
return (sum(popFitness < benchmarkFitness) / length(popFitness))
}
## Function to calculate total to direct path ratio.
# @arguments - according to their names
# RETURN: total/direct path
##
totalToDirectRatio <- function (stepsBuffer, N, pathLength) {
steps <- split(stepsBuffer$peek(), ceiling(seq_along(stepsBuffer$peek())/N))
directPath <- rep(0,N)
for (i in 1:pathLength) {
directPath <- directPath + steps[[i]]
}
directPath <- norm(directPath)
totalPath <- 0
for (i in 1:pathLength) {
totalPath <- totalPath + norm(steps[[i]])
}
return (totalPath / directPath)
}
## Function to calculate new scaling factor F(step size).
# @arguments - according to their names
# RETURN: new Ft value
##
calculateFt <- function(stepsBuffer, N, lambda, pathLength, currentFt, c_Ft, pathRatio, chiN, mueff) {
steps <- split(stepsBuffer$peek(), ceiling(seq_along(stepsBuffer$peek())/N))
directPath <- rep(0,N)
for (i in 1:pathLength) {
directPath <- directPath + steps[[i]]
}
directPath <- norm(directPath)
totalPath <- 0
for (i in 1:pathLength) {
totalPath <- totalPath + norm(steps[[i]])
}
g_sd <- currentFt * exp(1/(sqrt(N)+1) *(c_Ft * (chiN / (totalPath / directPath)-1)))
return (rnorm(1,g_sd))
#return (currentFt * exp(c_Ft * (pathRatio / (totalPath / directPath)-1)))
#return (currentFt * exp(1/(sqrt(N)+1) * ((chiN / (totalPath / directPath) - 1)*((mueff+2)/(N+mueff+3))/( 1 + 2*max(0, sqrt((mueff-1)/(N+1))-1) + ((mueff+2)/(N+mueff+3))))))
}
## Function to calculate path length control reference value based on a problem
# dimensions and history buffer size
# @N - number of problem dimensions
# @pathLength - size of evolution path
# RETURN: new path ratio value
##
calculatePathRatio <- function(N, pathLength) {
randomWalk <- function(N) {
m <-matrix(0, 10000, N)
for (i in 2:10000) {
m[i,] = m[i-1,] + rnorm(N)
}
return (m)
}
m <- randomWalk(N)
steps <- matrix(0, 10000, N)
for (i in 1:9999) {
steps[i,] = m[i+1,] - m[i,]
}
a <- 0
FtWeights <- (1-a)^(0:(pathLength-1))
wSteps <- list()
ratio <- rep(NA, 10000-pathLength)
for (i in (pathLength+1):10000) {
for (j in 1:pathLength) {
wSteps[[j]] = steps[i+j-pathLength-1,] * FtWeights[j]
}
directPath <- rep(0,N)
for (j in 1:pathLength) {
directPath <- directPath + wSteps[[j]]
}
directPath <- norm(directPath)
totalPath <- 0
for (j in 1:pathLength) {
totalPath <- totalPath + norm(wSteps[[j]])
}
ratio[i-pathLength] <- totalPath / directPath
}
return (mean(ratio))
}
|
/CEC2017/CMADE-v12/CMADEv12.R
|
no_license
|
Jagorius/EvolutionAlgorithms
|
R
| false | false | 16,221 |
r
|
CMADE <- function(par, fn, ..., lower, upper, control=list()) {
library("ringbuffer")
## Function to check the presence of options in the arguments specified by the user
# @name - argument name
# @default - default value of the argument
# RETURN: value specified by user if the given argument name found, default value otherwise
##
controlParam <- function(name, default) {
v <- control[[name]]
if (is.null(v))
return (default)
else
return (v)
}
N <- length(par) ## Number of dimensions
## Check if user specified box constraints values.
## If not accept the default values.
if (missing(lower))
lower <- rep(-100, N)
else if (length(lower) == 1)
lower <- rep(lower, N)
if (missing(upper))
upper <- rep(100, N)
else if (length(upper) == 1)
upper <- rep(upper, N)
bounceBackBoundary2 <- function(x){
x[is.na(x)] <- .Machine$double.xmax
x[is.infinite(x)] <- .Machine$double.xmax
if(all(x >= cbind(lower)) && all(x <= cbind(upper)))
return (x)
else if(any(x < cbind(lower)))
for(i in which(x < cbind(lower)) )
x[i] <- lower[i] + abs(lower[i] - x[i])%% (upper[i]- lower[i])
else if(any(x > cbind(upper)))
for(i in which(x > cbind(upper)) )
x[i] <- upper[i] - abs(upper[i] - x[i])%% (upper[i]- lower[i])
return (bounceBackBoundary2(x))
}
#############################
## Algorithm parameters: ##
#############################
Ft <- controlParam("Ft", 1) ## Scaling factor of difference vectors (a variable!)
initFt <- controlParam("initFt", 1)
stopfitness <- controlParam("stopfitness", -Inf) ## Fitness value after which the convergence is reached
stopvariance<- controlParam("stopvariance", 1e-12*Ft) ## Genetic diversity minimum value(stop fitness variance)
## Strategy parameter setting:
budget <- controlParam("budget", 10000*N ) ## The maximum number of fitness function calls
initlambda <- controlParam("lambda", floor((4+sqrt(N)/2)*N)) ## Population starting size
lambda <- initlambda ## Population size
mu <- controlParam("mu", floor(lambda/2)) ## Selection size
weights <- controlParam("weights", log(mu+1) - log(1:mu)) ## Weights to calculate mean from selected individuals
#weights <- controlParam("weights", (1:mu)*0+1)
weights <- weights/sum(weights) ## \-> weights are normalized by the sum
weightsSumS <- sum(weights^2) ## weights sum square
mueff <- controlParam("mueff", sum(weights)^2/sum(weights^2)) ## Variance effectiveness factor
cc <- controlParam("cc", 4/(N+4)) ## Evolution Path decay factor
c_pc <- controlParam("cpc", 0.2) ## Covariance deformation factor
cc_mueff <- sqrt(cc*(2 - cc) )#*sqrt( mueff) ## 'cc' and 'mueff' are constant so as this equation
c_cov <- controlParam("c_cov", 1/2) ## Mutation vectors weight constant
pathLength <- controlParam("pathLength", 6) ## Size of evolution path
maxiter <- controlParam("maxit", floor(budget/(lambda+1))) ## Maximum number of iterations after which algorithm stops
c_Ft <- controlParam("c_Ft", 1/((sqrt(2)*gamma((N+1)/2)/gamma(N/2)) )) ## Variance scaling constant
pathRatio <- controlParam("pathRatio",sqrt(pathLength)) ## Path Length Control reference value
checkMiddle <- controlParam("checkMiddle", TRUE) ## Vatiable telling if algorithm should save check middle point in every iteration
histSize <- controlParam("history", 6+ceiling(3*sqrt(N))) ## Size of the window of history - the step length history
#histSize <- controlParam("history", 0.5*N^2) ## Size of the window of history - the step length history
histSize <- ceiling(histSize) ## \-> size should be integer
Ft_scale <- controlParam("Ft_scale", ((mueff+2)/(N+mueff+3))/(1 + 2*max(0, sqrt((mueff-1)/(N+1))-1) + (mueff+2)/(N+mueff+3)))
tol <- controlParam("tol", 10^-18)
## Logging options:
log.all <- controlParam("diag", FALSE)
log.Ft <- controlParam("diag.Ft", log.all)
log.value <- controlParam("diag.value", log.all)
log.mean <- controlParam("diag.mean", log.all)
log.pop <- controlParam("diag.pop", log.all)
log.pathRat <- controlParam("diag.pathRatio", log.all)
## nonLamarckian approach allows individuals to violate boundaries.
## Fitness value is estimeted by fitness of repaired individual.
Lamarckism <- controlParam("Lamarckism", TRUE)
fn_p <- function(P, P_repaired) {
# Lamarckian approach
if(Lamarckism==TRUE){
if(is.matrix(P)){
return ( apply(P, 2, fn) )
}else{
return ( fn(P) )
}
}
# nonLamarckian approach
else{
P[is.na(P)] <- .Machine$double.xmax
P[is.infinite(P)] <- .Machine$double.xmax
if(is.matrix(P) && is.matrix(P_repaired)){
repairedInd <- apply(P!=P_repaired,2,all)
P_fit <- apply(P_repaired, 2, fn)
hammingDist <- colSums((P - P_repaired)^2)
P_fit[which(repairedInd)] <- P_fit[which(repairedInd)] + hammingDist[which(repairedInd)]
return(P_fit)
}else{
P_fit <- fn(P_repaired) + sum(P-P_repaired)^2
return ( P_fit )
}
}
}
## Asserts - safety checks:
stopifnot(length(upper) == N)
stopifnot(length(lower) == N)
stopifnot(all(lower < upper))
stopifnot(length(Ft) == 1)
## Initialize variables for the best solution found so far:
best.fit <- Inf
best.par <- NULL
last.restart <-0
restart.length <-0
restart.number <-0
## According to user specification, preallocate logging structures:
if (log.Ft)
Ft.log <- numeric(maxiter)
if (log.value)
value.log <- matrix(0, nrow=maxiter, ncol=lambda)
if (log.mean)
mean.log <- numeric(maxiter)
if (log.pop)
pop.log <- array(0, c(N, lambda, maxiter))
if (log.pathRat)
pathRatio.log <- numeric(maxiter)
## Allocate buffers:
steps <- ringbuffer(size = pathLength*N) ## Cyclical buffer containing last 'pathLength' steps of algorithm
FtHistory <- array(0, histSize) ## Array buffer containing 'histSize' last values of 'Ft'
## Initialize internal strategy parameters
counteval <- 0 ## Number of function evaluations
msg <- NULL ## Reason for terminating
lambda <- initlambda
pc <- rep(0.0, N)/sqrt(N)
histHead <- 0 ## Pointer to the history buffer head
iter <- 0L ## Number of iterations
history <- array(0, c(N, mu, histSize)) ## Array stores best 'mu' individuals for 'hsize' recent iterations
Ft <- initFt
# Create first population
population <- replicate(lambda, runif(N,0.8*lower,0.8*upper))
cumMean=rowMeans(population)
# Check constraints violations
populationRepaired <- apply(population,2,bounceBackBoundary2)
if(Lamarckism==TRUE){
population <- populationRepaired
}
selection <- rep(0, mu)
selectedPoints <- matrix(0, nrow=N, ncol=mu)
fitness <- fn_p(population, populationRepaired)
counteval <- counteval + lambda
oldMean <- numeric(N)
newMean <- par
pc <- rep(0.0, N)/sqrt(N)
limit <- 0
## Matrices for creating diffs
diffs <- matrix(0, N, lambda)
x1sample <- numeric(lambda)
x2sample <- numeric(lambda)
chiN <- (sqrt(2)*gamma((N+1)/2)/gamma(N/2))
histNorm <- sqrt(mu/(mu+1))/sqrt(2)
counterRepaired <- 0
while (counteval < budget) {
iter <- iter + 1L
histHead <- (histHead %% histSize) + 1
if (log.Ft) Ft.log[iter] <- Ft
if (log.value) value.log[iter,] <- fitness
if (log.mean) mean.log[iter] <- fn_p(bounceBackBoundary2(newMean))
if (log.pop) pop.log[,,iter] <- population
## Select best 'mu' individuals of population
selection <- order(fitness)[1:mu]
selectedPoints <- population[,selection]
# Save selected population in the history buffer
history[,,histHead] <- selectedPoints * histNorm/Ft
## Calculate weighted mean of selected points
oldMean <- newMean
newMean <- drop(selectedPoints %*% weights)
## Write to buffers
step <- (newMean - oldMean) / Ft
steps$write(step)
## Update Ft
FtHistory[histHead] = Ft
oldFt <- Ft
if (iter > pathLength-1 && (sum(step == 0) == 0) && counterRepaired<0.1*lambda) {
#if (iter > pathLength-1 ) {
Ft <- calculateFt(steps, N, lambda, pathLength, Ft, c_Ft, pathRatio, chiN, mueff)
if (log.pathRat) pathRatio.log[iter] <- totalToDirectRatio(steps, N, pathLength)
}else {
if (log.pathRat && iter==1L) pathRatio.log[iter] <- 0
if (log.pathRat && iter!=1L) pathRatio.log[iter] <- pathRatio.log[iter-1]
}
## Update parameters
pc = (1 - cc)* pc + cc* step
## Sample from history with uniform distribution
limit <- ifelse(iter < histSize, histHead, histSize)
historySample <- sample(1:limit,lambda, T)
x1sample <- sample(1:mu, lambda, replace=TRUE)#, weights)
x2sample <- sample(1:mu, lambda, replace=TRUE)#, weights)
## Make diffs
for (i in 1:lambda) {
x1 <- history[, x1sample[i], historySample[i]]
x2 <- history[, x2sample[i], historySample[i]]
diffs[,i] <- (x1 - x2)/sqrt(2) + sqrt(1-c_pc)*rnorm(1)*pc*chiN +
sqrt(c_pc) * (rnorm(1) * pc * chiN +
0*rnorm(N)/chiN*tol )
}
if(counterRepaired>0)
Ft <- FtHistory[histHead] + abs(Ft-FtHistory[histHead])*((lambda-counterRepaired)/lambda)*c_Ft
## New population
population <- newMean + Ft * diffs
# Check constraints violations
# Repair the individual if necessary
populationTemp <- population
populationRepaired <- apply(population,2,bounceBackBoundary2)
counterRepaired=0
for(tt in 1:ncol(populationTemp)){
if(any(populationTemp[,tt] != populationRepaired[,tt]))
counterRepaired = counterRepaired + 1
}
if(Lamarckism==TRUE){
population <- populationRepaired
}
## Evaluation
fitness <- fn_p(population, populationRepaired)
## Break if fit:
wb <- which.min(fitness)
if (fitness[wb] < best.fit) {
best.fit <- fitness[wb]
best.par <- population[,wb]
}
counteval <- counteval + lambda
## Check if the middle point is the best found so far
##cumMean <- 0.8*cumMean+0.2*newMean
cumMean <- newMean
cumMeanRepaired <-bounceBackBoundary2(cumMean)
fn_cum <- fn_p(cumMean, cumMeanRepaired)
if (fn_cum < best.fit) {
best.fit <- drop(fn_cum)
best.par <- cumMean
}
counteval <- counteval + 1
## Escape from flat-land:
if (min(fitness) == sort(fitness,partial=min(1+floor(lambda/2), 2+ceiling(lambda/4)))[min(1+floor(lambda/2), 2+ceiling(lambda/4))]) {
Ft <- Ft * exp(0.2*Ft_scale);
}
if (fitness[1] <= stopfitness) {
msg <- "Stop fitness reached."
break
}
}
# Restart paramaters adaptation
lambda <- round(lambda+initlambda * 0.2)
mu <- floor(lambda/2)
weights <- log(mu+1) - log(1:mu)
#weights <- (1:mu)*0+1
weights <- weights/sum(weights)
cnt <- c(`function`=as.integer(counteval))
log <- list()
## Subset lognostic data to only include those iterations which
## where actually performed.
if (log.Ft) log$Ft <- Ft.log[1:iter]
if (log.value) log$value <- value.log[1:iter,]
if (log.mean) log$mean <- mean.log[1:iter]
if (log.pop) log$pop <- pop.log[,,1:iter]
if (log.pathRat) log$pathRatio <- pathRatio.log[1:iter]
## Drop names from value object
names(best.fit) <- NULL
res <- list(par=best.par,
value=best.fit,
counts=cnt,
convergence=ifelse(iter >= maxiter, 1L, 0L),
message=msg,
diagnostic=log
)
class(res) <- "cmade.result"
return(res)
}
## Norm: function that assigns a strictly positive length to each vector in a vector space.
# @vectorX - vector to norm
# RETURN: euclidean norm of the given vector
##
norm <- function(vectorX)
drop(sqrt(crossprod(vectorX)))
## Function calculate what proportion of the population has a better fitness
# than its center.
# @benchmarkFitness - fitness value of mean individual of population
# @popFitness - actual population fitness array
# RETURN: proportion of better fitted individuals to the whole population
##
p_succ<-function(benchmarkFitness, popFitness) {
return (sum(popFitness < benchmarkFitness) / length(popFitness))
}
## Function to calculate total to direct path ratio.
# @arguments - according to their names
# RETURN: total/direct path
##
totalToDirectRatio <- function (stepsBuffer, N, pathLength) {
steps <- split(stepsBuffer$peek(), ceiling(seq_along(stepsBuffer$peek())/N))
directPath <- rep(0,N)
for (i in 1:pathLength) {
directPath <- directPath + steps[[i]]
}
directPath <- norm(directPath)
totalPath <- 0
for (i in 1:pathLength) {
totalPath <- totalPath + norm(steps[[i]])
}
return (totalPath / directPath)
}
## Function to calculate new scaling factor F(step size).
# @arguments - according to their names
# RETURN: new Ft value
##
calculateFt <- function(stepsBuffer, N, lambda, pathLength, currentFt, c_Ft, pathRatio, chiN, mueff) {
steps <- split(stepsBuffer$peek(), ceiling(seq_along(stepsBuffer$peek())/N))
directPath <- rep(0,N)
for (i in 1:pathLength) {
directPath <- directPath + steps[[i]]
}
directPath <- norm(directPath)
totalPath <- 0
for (i in 1:pathLength) {
totalPath <- totalPath + norm(steps[[i]])
}
g_sd <- currentFt * exp(1/(sqrt(N)+1) *(c_Ft * (chiN / (totalPath / directPath)-1)))
return (rnorm(1,g_sd))
#return (currentFt * exp(c_Ft * (pathRatio / (totalPath / directPath)-1)))
#return (currentFt * exp(1/(sqrt(N)+1) * ((chiN / (totalPath / directPath) - 1)*((mueff+2)/(N+mueff+3))/( 1 + 2*max(0, sqrt((mueff-1)/(N+1))-1) + ((mueff+2)/(N+mueff+3))))))
}
## Function to calculate path length control reference value based on a problem
# dimensions and history buffer size
# @N - number of problem dimensions
# @pathLength - size of evolution path
# RETURN: new path ratio value
##
calculatePathRatio <- function(N, pathLength) {
randomWalk <- function(N) {
m <-matrix(0, 10000, N)
for (i in 2:10000) {
m[i,] = m[i-1,] + rnorm(N)
}
return (m)
}
m <- randomWalk(N)
steps <- matrix(0, 10000, N)
for (i in 1:9999) {
steps[i,] = m[i+1,] - m[i,]
}
a <- 0
FtWeights <- (1-a)^(0:(pathLength-1))
wSteps <- list()
ratio <- rep(NA, 10000-pathLength)
for (i in (pathLength+1):10000) {
for (j in 1:pathLength) {
wSteps[[j]] = steps[i+j-pathLength-1,] * FtWeights[j]
}
directPath <- rep(0,N)
for (j in 1:pathLength) {
directPath <- directPath + wSteps[[j]]
}
directPath <- norm(directPath)
totalPath <- 0
for (j in 1:pathLength) {
totalPath <- totalPath + norm(wSteps[[j]])
}
ratio[i-pathLength] <- totalPath / directPath
}
return (mean(ratio))
}
|
## Put comments here that give an overall description of what your
## functions do
## Creating a list with functions defined in an environment
## where data are saved.
makeCacheMatrix <- function(x = matrix()) {
ivr <- NULL
set <- function(y) {
x <<- y
ivr <<- NULL
}
get <- function() x
inv <- function() ivr
setinv <- function(v) ivr <<- v
list(set = set, get = get, setinv = setinv, inv = inv)
}
## Get the cached inverse or calculate it and chache the result.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
ivr <- x$inv()
if (!is.null(ivr)) {
message("getting cached data.")
return(ivr)
}
ivr <- solve(x$get(), ...)
x$setinv(ivr)
ivr
}
|
/cachematrix.R
|
no_license
|
archelangelo/ProgrammingAssignment2
|
R
| false | false | 742 |
r
|
## Put comments here that give an overall description of what your
## functions do
## Creating a list with functions defined in an environment
## where data are saved.
makeCacheMatrix <- function(x = matrix()) {
ivr <- NULL
set <- function(y) {
x <<- y
ivr <<- NULL
}
get <- function() x
inv <- function() ivr
setinv <- function(v) ivr <<- v
list(set = set, get = get, setinv = setinv, inv = inv)
}
## Get the cached inverse or calculate it and chache the result.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
ivr <- x$inv()
if (!is.null(ivr)) {
message("getting cached data.")
return(ivr)
}
ivr <- solve(x$get(), ...)
x$setinv(ivr)
ivr
}
|
get_council_le <- function(data, input){
reactive({
req(input$select_sex)
req(input$select_year)
data %>%
filter(council_name != "Scotland Wide",
sex %in% input$select_sex,
year == input$select_year,
simd_quintiles == "All") %>%
filter(council_name %in% input$select_council)
})
}
|
/life_ex_tab_functions/4_get_council_le.R
|
no_license
|
hgw2/scotland_health_group_project
|
R
| false | false | 383 |
r
|
get_council_le <- function(data, input){
reactive({
req(input$select_sex)
req(input$select_year)
data %>%
filter(council_name != "Scotland Wide",
sex %in% input$select_sex,
year == input$select_year,
simd_quintiles == "All") %>%
filter(council_name %in% input$select_council)
})
}
|
library(dplyr)
library(ggplot2)
library(ggthemes)
library(rpart)
library(rpart.plot)
dataset <- read.csv(file.choose())
# Conhecendo os dados
dim(dataset)
str(dataset)
summary(dataset)
dataset$userid <- NULL # Remove o ID
# Alterando o nome das colunas
colnames(dataset) <- c('Idade', 'Dia', 'Ano', 'Mês', 'Sexo',
'Tenure', 'Qtd_Amigos', 'Amizades_Iniciadas',
'Curtidas', 'Curtidas_Recebidas', 'Celular_Curtida',
'Celular_Curtidas_Recebidas', 'www_Curtidas', 'www_Curtidas_Recebidas')
dataset$Sexo <- ifelse(dataset$Sexo == 'male', 'Masculino',
ifelse(dataset$Sexo == 'female', 'Feminino', NA))
retornar_sexo <- function(x) {
sexo_na <- x %>%
filter(is.na(Sexo)) %>%
select(Curtidas, Qtd_Amigos, Amizades_Iniciadas,
Curtidas_Recebidas, Celular_Curtida, Celular_Curtidas_Recebidas,
www_Curtidas, www_Curtidas_Recebidas)
sexo <- rpart(Sexo ~ Curtidas + Qtd_Amigos + Amizades_Iniciadas +
Curtidas_Recebidas + Celular_Curtida +
Celular_Curtidas_Recebidas + www_Curtidas + www_Curtidas_Recebidas,
data = x) %>%
predict(newdata = sexo_na)
sexo <- ifelse(sexo >= 0.6, 'Masculino', 'Feminino')
return(sexo)
}
dataset[is.na(dataset$Sexo), 'Sexo'] <- retornar_sexo(dataset)
# Análise exploratória dos dados - Sexo
dataset %>%
filter(!is.na(Sexo)) %>%
group_by(Sexo) %>%
summarise(Quantidade = n()) %>%
select(Sexo, Quantidade) %>%
ggplot(aes(x = Sexo, y = Quantidade, fill = Sexo)) +
geom_bar(stat = 'identity') +
labs(title = 'Quantidade de usuários por gênero',
x = 'Gênero',
y = 'Quantidade')
# Total de amizades por sexo
dataset %>%
group_by(Sexo) %>%
summarise(qtde_amigos = sum(Qtd_Amigos)) %>%
ggplot(aes(x = Sexo, y = qtde_amigos, fill = Sexo)) +
geom_bar(stat = 'identity') +
labs(title = 'Quantidade de amizades por sexo',
x = 'Gênero',
y = 'Quantidade')
# Idades que mais utilizam facebook
dataset %>%
group_by(Idade) %>%
summarise(Qtde = n()) %>%
select(Idade, Qtde) %>%
arrange(-Qtde) %>%
ggplot(aes(x = as.character(Idade), y = Qtde)) +
geom_bar(stat = 'identity')+
geom_text(aes(label = as.character(Idade)),
vjust = -0.6,
color = 'black',
size = 3.0,
position = 'dodge') +
labs(title = 'Quantidade de usuários por idade',
x = 'Idade',
y = 'Quantidade') +
theme_economist_white()
# Sexo que mais possuí amizades no Facebook nas idades outlier
dataset %>%
filter(Idade %in% c(18, 23, 33, 53, 63)) %>%
group_by(Sexo, Idade) %>%
summarise(qtde = sum(Qtd_Amigos)) %>%
arrange(Idade) %>%
ggplot(aes(x = as.character(Idade), y = qtde, fill = Sexo)) +
geom_col() +
theme(legend.position = 'top' ) +
labs(title = 'Quantidade de amizades por gênero',
x = 'Gênero',
y = 'Quantidade')
# Sexo que recebeu maior número de curtidas
dataset %>%
group_by(Sexo) %>%
summarise(qtde = sum(Curtidas)) %>%
ggplot(aes(x = Sexo, y = qtde, fill = Sexo)) +
geom_bar(stat = 'identity') +
labs(title = 'Quantida de curtidas recebidas por gênero',
x = 'Gênero',
y = 'Quantidade')
# Quantidade de curtidas dadas por sexo
dataset %>%
group_by(Sexo) %>%
summarise(qtd_web = sum(www_Curtidas),
qtd_mob = sum(Celular_Curtida),
qtd_rec_www = sum(www_Curtidas_Recebidas),
qtd_rec_mob = sum(Curtidas_Recebidas)) %>%
select(Sexo, qtd_web, qtd_mob, qtd_rec_www, qtd_rec_mob) %>%
ggplot(aes(x = Sexo, y = qtd_web, fill = Sexo, group = 1)) +
geom_bar(stat = 'identity') +
geom_line(aes(x = Sexo, y = qtd_rec_www, colour = 'Qtde' )) +
scale_color_discrete(name = 'Qtd. Curtidas Recebidas')
|
/analise_de_dados_do_facebook/analise_de_dados_facebook.R
|
no_license
|
murilo-cremon/R-Lang
|
R
| false | false | 4,007 |
r
|
library(dplyr)
library(ggplot2)
library(ggthemes)
library(rpart)
library(rpart.plot)
dataset <- read.csv(file.choose())
# Conhecendo os dados
dim(dataset)
str(dataset)
summary(dataset)
dataset$userid <- NULL # Remove o ID
# Alterando o nome das colunas
colnames(dataset) <- c('Idade', 'Dia', 'Ano', 'Mês', 'Sexo',
'Tenure', 'Qtd_Amigos', 'Amizades_Iniciadas',
'Curtidas', 'Curtidas_Recebidas', 'Celular_Curtida',
'Celular_Curtidas_Recebidas', 'www_Curtidas', 'www_Curtidas_Recebidas')
dataset$Sexo <- ifelse(dataset$Sexo == 'male', 'Masculino',
ifelse(dataset$Sexo == 'female', 'Feminino', NA))
retornar_sexo <- function(x) {
sexo_na <- x %>%
filter(is.na(Sexo)) %>%
select(Curtidas, Qtd_Amigos, Amizades_Iniciadas,
Curtidas_Recebidas, Celular_Curtida, Celular_Curtidas_Recebidas,
www_Curtidas, www_Curtidas_Recebidas)
sexo <- rpart(Sexo ~ Curtidas + Qtd_Amigos + Amizades_Iniciadas +
Curtidas_Recebidas + Celular_Curtida +
Celular_Curtidas_Recebidas + www_Curtidas + www_Curtidas_Recebidas,
data = x) %>%
predict(newdata = sexo_na)
sexo <- ifelse(sexo >= 0.6, 'Masculino', 'Feminino')
return(sexo)
}
dataset[is.na(dataset$Sexo), 'Sexo'] <- retornar_sexo(dataset)
# Análise exploratória dos dados - Sexo
dataset %>%
filter(!is.na(Sexo)) %>%
group_by(Sexo) %>%
summarise(Quantidade = n()) %>%
select(Sexo, Quantidade) %>%
ggplot(aes(x = Sexo, y = Quantidade, fill = Sexo)) +
geom_bar(stat = 'identity') +
labs(title = 'Quantidade de usuários por gênero',
x = 'Gênero',
y = 'Quantidade')
# Total de amizades por sexo
dataset %>%
group_by(Sexo) %>%
summarise(qtde_amigos = sum(Qtd_Amigos)) %>%
ggplot(aes(x = Sexo, y = qtde_amigos, fill = Sexo)) +
geom_bar(stat = 'identity') +
labs(title = 'Quantidade de amizades por sexo',
x = 'Gênero',
y = 'Quantidade')
# Idades que mais utilizam facebook
dataset %>%
group_by(Idade) %>%
summarise(Qtde = n()) %>%
select(Idade, Qtde) %>%
arrange(-Qtde) %>%
ggplot(aes(x = as.character(Idade), y = Qtde)) +
geom_bar(stat = 'identity')+
geom_text(aes(label = as.character(Idade)),
vjust = -0.6,
color = 'black',
size = 3.0,
position = 'dodge') +
labs(title = 'Quantidade de usuários por idade',
x = 'Idade',
y = 'Quantidade') +
theme_economist_white()
# Sexo que mais possuí amizades no Facebook nas idades outlier
dataset %>%
filter(Idade %in% c(18, 23, 33, 53, 63)) %>%
group_by(Sexo, Idade) %>%
summarise(qtde = sum(Qtd_Amigos)) %>%
arrange(Idade) %>%
ggplot(aes(x = as.character(Idade), y = qtde, fill = Sexo)) +
geom_col() +
theme(legend.position = 'top' ) +
labs(title = 'Quantidade de amizades por gênero',
x = 'Gênero',
y = 'Quantidade')
# Sexo que recebeu maior número de curtidas
dataset %>%
group_by(Sexo) %>%
summarise(qtde = sum(Curtidas)) %>%
ggplot(aes(x = Sexo, y = qtde, fill = Sexo)) +
geom_bar(stat = 'identity') +
labs(title = 'Quantida de curtidas recebidas por gênero',
x = 'Gênero',
y = 'Quantidade')
# Quantidade de curtidas dadas por sexo
dataset %>%
group_by(Sexo) %>%
summarise(qtd_web = sum(www_Curtidas),
qtd_mob = sum(Celular_Curtida),
qtd_rec_www = sum(www_Curtidas_Recebidas),
qtd_rec_mob = sum(Curtidas_Recebidas)) %>%
select(Sexo, qtd_web, qtd_mob, qtd_rec_www, qtd_rec_mob) %>%
ggplot(aes(x = Sexo, y = qtd_web, fill = Sexo, group = 1)) +
geom_bar(stat = 'identity') +
geom_line(aes(x = Sexo, y = qtd_rec_www, colour = 'Qtde' )) +
scale_color_discrete(name = 'Qtd. Curtidas Recebidas')
|
rm(list = ls())
library(dplyr)
library(leaflet)
library(leaflet.extras)
library(rgdal)
library(mapview)
library(sf)
setwd("C:/Users/REACH/Dropbox (SSD REACH)/REACH South Sudan upscale/34_WFP/11_WFP_IACWG")
coordinates <- read.csv('8. Dashboard/r_dashboard/app_plot/coordinates.csv')
jmmi <- read.csv('7. JMMI Data/new/6_longterm/JMMI_longterm_bylocation.csv', stringsAsFactors = FALSE)
jmmi_2 <- read.csv('12. JMMI COVID Data/new/6_longterm/JMMI_longterm_bylocation.csv', stringsAsFactors = FALSE)
# filter data
jmmi_fil <- jmmi %>%
select(Year, Month, Location, USD) %>%
filter(Year == 2020 & Month == 10) %>%
arrange(USD)
jmmi_fil_2 <- jmmi %>%
select(Year, Month, Location, USD) %>%
filter(Year == 2020 & Month == 9) %>%
arrange(USD)
# add coordinates
jmmi_coord <- left_join(coordinates, jmmi_fil, by = 'Location') %>%
filter(Month == 10)
jmmi_coord_2 <- left_join(coordinates, jmmi_fil_2, by = 'Location') %>%
filter(Month == 9)
func = colorNumeric(c("#A2CD91", "#FFF54C","#ED5758"), domain = jmmi_coord$USD)
func_2 = colorNumeric(c("#A2CD91", "#FFF54C","#ED5758"), domain = jmmi_coord_2$USD)
disputed <- st_read('8. Dashboard/r_dashboard/app_plot/Disputed/SSD_Undetermined.shp')
disputed <- st_transform(disputed,"+init=epsg:4326" )
country <- st_read('8. Dashboard/r_dashboard/app_plot/Country/SSD_Country.shp')
country <- st_transform(country,"+init=epsg:4326" )
states <- st_read('8. Dashboard/r_dashboard/app_plot/States/SSD_States.shp')
states <- st_transform(states,"+init=epsg:4326" )
counties <- st_read('8. Dashboard/r_dashboard/app_plot/Counties/SSD_counties.shp')
counties <- st_transform(counties,"+init=epsg:4326" )
rivers <- st_read('8. Dashboard/r_dashboard/app_plot/Rivers/SSD_Rivers.shp')
rivers <- st_transform(rivers, "+init=epsg:4326")
rivers_primary <- rivers %>% filter(OBJECTID == c(5, 6))
#st_write(rivers_primary, "rivers_primary.shp")
lakes <- st_read('8. Dashboard/r_dashboard/app_plot/Lakes/SSD_Lakes.shp')
lakes <- st_transform(lakes, "+init=epsg:4326")
roads <- st_read('8. Dashboard/r_dashboard/app_plot/Roads/SSD_Roads.shp')
roads <- st_transform(roads, "+init=epsg:4326")
roads_primary <- roads %>% filter(CLASS == "Primary")
#st_write(roads_primary, "roads_primary.shp")
USD_format <- function(x) {paste(sep = " ", round(x), "SSP")}
jmmi_coord <- mutate(jmmi_coord, USD_SSP = USD_format(USD))
jmmi_coord_2 <- mutate(jmmi_coord_2, USD_SSP = USD_format(USD))
m <- leaflet() %>%
setView(lat = 7.7, lng = 30, zoom = 7) %>%
addPolygons(data = lakes, group = "Lakes", fill = TRUE, stroke = FALSE, fillColor = "#D5EAF1", fillOpacity = 0.75) %>%
# addPolygons(data = disputed, group = "Disputed", fill = FALSE, stroke = TRUE, dashArray = c(5, 5), color = "#58585A", weight = 1, opacity = 0.7) %>%
addPolygons(data = counties, group = "Counties", fill = FALSE, stroke = TRUE, color = "#BDBDBD", weight = 0.6, opacity = 0.5) %>%
addPolygons(data = states, group = "States", fill = FALSE, stroke = TRUE, color = "#58585A", weight = 1, opacity = 0.7, label = states$admin1Name,
labelOptions = labelOptions(noHide = T, textOnly = TRUE, direction = "center", style=list("font-size"="14px",
"text-shadow"="10px 10px 10px '#ffffff'",
"color"="rgb(88, 88, 91)",
"font-family" = "Arial Narrow",
"font-weight" = "bold"
))) %>%
addPolylines(data = rivers_primary, group = "Rivers", stroke = TRUE, color = "#94CCDC", weight = 1.3, opacity = 0.7) %>%
addPolylines(data = roads_primary, group = "Roads", stroke = TRUE, color = "#F69E61", weight = 1.5, opacity = 0.4) %>%
addCircleMarkers(data = jmmi_coord,
fillColor = ~func(USD),
color = 'black',
fillOpacity = 1,
popup = ~paste(sep = " ", USD, 'SSP'),
label = ~Location,
radius = 7,
weight = 1,
stroke = NULL,
group = 'September Third Week SSP Rate') %>%
addCircleMarkers(data = jmmi_coord_2,
fillColor = ~func_2(USD),
color = 'black',
fillOpacity = 1,
popup = ~paste(sep = " ", USD, 'SSP'),
label = ~Location,
radius = 7,
weight = 1,
stroke = NULL,
group = 'September SSP Rate') %>%
addLayersControl(baseGroups = c('September SSP Rate', 'October SSP Rate'),
options = layersControlOptions(collapsed = FALSE),
) %>%
hideGroup('September First Week Median SSP Rate') %>%
addLegend("bottomright", pal = func, values = jmmi_coord$USD,
title = "October SSP Rate",
labFormat = labelFormat(prefix = "SSP "),
opacity = 1,
layerId = 'October SSP Rate',
group = 'October SSP Rate',
na.label = 'No USD Available',
bins = 3) %>%
addLegend("bottomright", pal = func_2, values = jmmi_coord_2$USD,
title = "September SSP Rate",
labFormat = labelFormat(prefix = "SSP "),
opacity = 1,
layerId = 'September SSP Rate',
group = 'September SSP Rate',
na.label = 'No USD Available',
bins = 3) %>%
setMapWidgetStyle(list(background = 'transparent'))
m
mapshot(m, url = 'sspratemap.html')
|
/app_plot/4_Practice/Leaflet_CWG.R
|
no_license
|
JonathanBuckleyREACHSSD/SSD-JMMI-Draft
|
R
| false | false | 5,989 |
r
|
rm(list = ls())
library(dplyr)
library(leaflet)
library(leaflet.extras)
library(rgdal)
library(mapview)
library(sf)
setwd("C:/Users/REACH/Dropbox (SSD REACH)/REACH South Sudan upscale/34_WFP/11_WFP_IACWG")
coordinates <- read.csv('8. Dashboard/r_dashboard/app_plot/coordinates.csv')
jmmi <- read.csv('7. JMMI Data/new/6_longterm/JMMI_longterm_bylocation.csv', stringsAsFactors = FALSE)
jmmi_2 <- read.csv('12. JMMI COVID Data/new/6_longterm/JMMI_longterm_bylocation.csv', stringsAsFactors = FALSE)
# filter data
jmmi_fil <- jmmi %>%
select(Year, Month, Location, USD) %>%
filter(Year == 2020 & Month == 10) %>%
arrange(USD)
jmmi_fil_2 <- jmmi %>%
select(Year, Month, Location, USD) %>%
filter(Year == 2020 & Month == 9) %>%
arrange(USD)
# add coordinates
jmmi_coord <- left_join(coordinates, jmmi_fil, by = 'Location') %>%
filter(Month == 10)
jmmi_coord_2 <- left_join(coordinates, jmmi_fil_2, by = 'Location') %>%
filter(Month == 9)
func = colorNumeric(c("#A2CD91", "#FFF54C","#ED5758"), domain = jmmi_coord$USD)
func_2 = colorNumeric(c("#A2CD91", "#FFF54C","#ED5758"), domain = jmmi_coord_2$USD)
disputed <- st_read('8. Dashboard/r_dashboard/app_plot/Disputed/SSD_Undetermined.shp')
disputed <- st_transform(disputed,"+init=epsg:4326" )
country <- st_read('8. Dashboard/r_dashboard/app_plot/Country/SSD_Country.shp')
country <- st_transform(country,"+init=epsg:4326" )
states <- st_read('8. Dashboard/r_dashboard/app_plot/States/SSD_States.shp')
states <- st_transform(states,"+init=epsg:4326" )
counties <- st_read('8. Dashboard/r_dashboard/app_plot/Counties/SSD_counties.shp')
counties <- st_transform(counties,"+init=epsg:4326" )
rivers <- st_read('8. Dashboard/r_dashboard/app_plot/Rivers/SSD_Rivers.shp')
rivers <- st_transform(rivers, "+init=epsg:4326")
rivers_primary <- rivers %>% filter(OBJECTID == c(5, 6))
#st_write(rivers_primary, "rivers_primary.shp")
lakes <- st_read('8. Dashboard/r_dashboard/app_plot/Lakes/SSD_Lakes.shp')
lakes <- st_transform(lakes, "+init=epsg:4326")
roads <- st_read('8. Dashboard/r_dashboard/app_plot/Roads/SSD_Roads.shp')
roads <- st_transform(roads, "+init=epsg:4326")
roads_primary <- roads %>% filter(CLASS == "Primary")
#st_write(roads_primary, "roads_primary.shp")
USD_format <- function(x) {paste(sep = " ", round(x), "SSP")}
jmmi_coord <- mutate(jmmi_coord, USD_SSP = USD_format(USD))
jmmi_coord_2 <- mutate(jmmi_coord_2, USD_SSP = USD_format(USD))
m <- leaflet() %>%
setView(lat = 7.7, lng = 30, zoom = 7) %>%
addPolygons(data = lakes, group = "Lakes", fill = TRUE, stroke = FALSE, fillColor = "#D5EAF1", fillOpacity = 0.75) %>%
# addPolygons(data = disputed, group = "Disputed", fill = FALSE, stroke = TRUE, dashArray = c(5, 5), color = "#58585A", weight = 1, opacity = 0.7) %>%
addPolygons(data = counties, group = "Counties", fill = FALSE, stroke = TRUE, color = "#BDBDBD", weight = 0.6, opacity = 0.5) %>%
addPolygons(data = states, group = "States", fill = FALSE, stroke = TRUE, color = "#58585A", weight = 1, opacity = 0.7, label = states$admin1Name,
labelOptions = labelOptions(noHide = T, textOnly = TRUE, direction = "center", style=list("font-size"="14px",
"text-shadow"="10px 10px 10px '#ffffff'",
"color"="rgb(88, 88, 91)",
"font-family" = "Arial Narrow",
"font-weight" = "bold"
))) %>%
addPolylines(data = rivers_primary, group = "Rivers", stroke = TRUE, color = "#94CCDC", weight = 1.3, opacity = 0.7) %>%
addPolylines(data = roads_primary, group = "Roads", stroke = TRUE, color = "#F69E61", weight = 1.5, opacity = 0.4) %>%
addCircleMarkers(data = jmmi_coord,
fillColor = ~func(USD),
color = 'black',
fillOpacity = 1,
popup = ~paste(sep = " ", USD, 'SSP'),
label = ~Location,
radius = 7,
weight = 1,
stroke = NULL,
group = 'September Third Week SSP Rate') %>%
addCircleMarkers(data = jmmi_coord_2,
fillColor = ~func_2(USD),
color = 'black',
fillOpacity = 1,
popup = ~paste(sep = " ", USD, 'SSP'),
label = ~Location,
radius = 7,
weight = 1,
stroke = NULL,
group = 'September SSP Rate') %>%
addLayersControl(baseGroups = c('September SSP Rate', 'October SSP Rate'),
options = layersControlOptions(collapsed = FALSE),
) %>%
hideGroup('September First Week Median SSP Rate') %>%
addLegend("bottomright", pal = func, values = jmmi_coord$USD,
title = "October SSP Rate",
labFormat = labelFormat(prefix = "SSP "),
opacity = 1,
layerId = 'October SSP Rate',
group = 'October SSP Rate',
na.label = 'No USD Available',
bins = 3) %>%
addLegend("bottomright", pal = func_2, values = jmmi_coord_2$USD,
title = "September SSP Rate",
labFormat = labelFormat(prefix = "SSP "),
opacity = 1,
layerId = 'September SSP Rate',
group = 'September SSP Rate',
na.label = 'No USD Available',
bins = 3) %>%
setMapWidgetStyle(list(background = 'transparent'))
m
mapshot(m, url = 'sspratemap.html')
|
library(dplyr)
library(here)
setwd(paste0(here(), "/LER_inputs/"))
oneday <- filter(manual_buoy_temptst, datetime == "1986-07-03 12:00:00")
oneday$Water_Temperature_celsius <- 4
oneday$datetime <- as.POSIXct("1975-01-01 12:00:00", format = "%Y-%m-%d %H:%M:%S")
write.csv(oneday, row.names = FALSE, file = "ic_historical.csv")
onedayother <- oneday
onedayother$datetime <- as.POSIXct("2006-01-01 12:00:00", format = "%Y-%m-%d %H:%M:%S")
write.csv(onedayother, row.names = FALSE, file = "ic_projections.csv")
ic_historical <- read.csv("ic_historical.csv")
ic_historical$datetime <- as.Date(ic_historical$datetime)
head(ic_historical)
ic_historical$hour <- c("00:00:00")
head(ic_historical)
ic_historical$datetime <- paste0(ic_historical$datetime, " ", ic_historical$hour)
# ic_historical$datetime <- as.POSIXct(ic_historical$datetime, format = "%Y-%m-%d %H:%M:%S")
head(ic_historical)
ic_historical <- select(ic_historical, -hour)
str(ic_historical)
write.csv(ic_historical, "ic_historical_hr0.csv", row.names = FALSE)
setwd(paste0(here(), "/LER_inputs/"))
ic_projections <- read.csv("ic_projections.csv")
ic_projections$datetime <- as.Date(ic_projections$datetime)
head(ic_projections)
ic_projections$hour <- c("00:00:00")
head(ic_projections)
ic_projections$datetime <- paste0(ic_projections$datetime, " ", ic_projections$hour)
# ic_historical$datetime <- as.POSIXct(ic_historical$datetime, format = "%Y-%m-%d %H:%M:%S")
head(ic_projections)
ic_projections <- select(ic_projections, -hour)
str(ic_projections)
write.csv(ic_projections, "ic_projections_hr0.csv", row.names = FALSE)
|
/scripts/stepthrough/step_1_input_data/s1.5_initial_conditions.R
|
no_license
|
jacob8776/sunapee_LER_projections
|
R
| false | false | 1,614 |
r
|
library(dplyr)
library(here)
setwd(paste0(here(), "/LER_inputs/"))
oneday <- filter(manual_buoy_temptst, datetime == "1986-07-03 12:00:00")
oneday$Water_Temperature_celsius <- 4
oneday$datetime <- as.POSIXct("1975-01-01 12:00:00", format = "%Y-%m-%d %H:%M:%S")
write.csv(oneday, row.names = FALSE, file = "ic_historical.csv")
onedayother <- oneday
onedayother$datetime <- as.POSIXct("2006-01-01 12:00:00", format = "%Y-%m-%d %H:%M:%S")
write.csv(onedayother, row.names = FALSE, file = "ic_projections.csv")
ic_historical <- read.csv("ic_historical.csv")
ic_historical$datetime <- as.Date(ic_historical$datetime)
head(ic_historical)
ic_historical$hour <- c("00:00:00")
head(ic_historical)
ic_historical$datetime <- paste0(ic_historical$datetime, " ", ic_historical$hour)
# ic_historical$datetime <- as.POSIXct(ic_historical$datetime, format = "%Y-%m-%d %H:%M:%S")
head(ic_historical)
ic_historical <- select(ic_historical, -hour)
str(ic_historical)
write.csv(ic_historical, "ic_historical_hr0.csv", row.names = FALSE)
setwd(paste0(here(), "/LER_inputs/"))
ic_projections <- read.csv("ic_projections.csv")
ic_projections$datetime <- as.Date(ic_projections$datetime)
head(ic_projections)
ic_projections$hour <- c("00:00:00")
head(ic_projections)
ic_projections$datetime <- paste0(ic_projections$datetime, " ", ic_projections$hour)
# ic_historical$datetime <- as.POSIXct(ic_historical$datetime, format = "%Y-%m-%d %H:%M:%S")
head(ic_projections)
ic_projections <- select(ic_projections, -hour)
str(ic_projections)
write.csv(ic_projections, "ic_projections_hr0.csv", row.names = FALSE)
|
#######################################################This code is for running INLA Spatial models #######################################################
#######################################################set seed to get similar results#######################################################
#######################################################Author: KM Wambui #######################################################
set.seed(1221223)
rm(list = ls())
library(R2WinBUGS)
##Mapping
library(rgeos)
library(maptools)
library("ggplot2")
library(broom) ## for converting a map to a data frame
#library(glm2)
#library(ResourceSelection) ## for hosmer and lemeshow testing
library(dplyr)
library(INLA)
library(spdep)
## coloring the spplot
library(colorspace)
###reading and exporting the shape file
## shape file available upon request
kilifi_sub <- maptools::readShapePoly( "data/kilif_sub_loc_Shape/DSS_subloc_Arc.shp",
IDvar="Adj_ID", proj4string=CRS("+proj=longlat +ellps=clrk66"))
temp <- spdep::poly2nb(kilifi_sub)
nb2INLA("data/kilif_sub_loc_Shape/DSS_subloc_Arc.graph", temp)
klf.adj <- paste(getwd(),"/data/kilif_sub_loc_Shape/DSS_subloc_Arc.graph",sep="")
### load the admissions data
## data available upon request
admData <- read.csv("data/morbidity.csv")
admData$rain_mm <- admData$rain_mm/50
admData$severe_disease <- factor(admData$severe_disease , levels=c(0,1,2,3))
admData <- admData %>% mutate(gender2= ifelse(gender==1 ,0,1 ))
admData$gender2 <- factor(admData$gender2 , levels=c(0,1) )
admData$gender <- admData$gender2
admData2 <- admData %>% dplyr::select(Adj_ID , sublocation,mnth, nagem, gender ,
severe_disease ,
cumulitive_count,cumulitive_time , EVI_VALUE ,count_adm ,rain_mm,
total_admission ,admdays ,nweight ,yr)
###generate othe variables to be used within INLa
admData2$Adj_ID2 <- admData2$Adj_ID
admData2$Adj_ID3 <- admData2$Adj_ID
admData2$count_adm2 <- admData2$count_adm
admData2$count_adm3 <- admData2$count_adm
admData2$count_adm4 <- admData2$count_adm
admData2$count_adm5 <- admData2$count_adm
admData2$count_adm6 <- admData2$count_adm
admData2$count_adm7 <- admData2$count_adm
admData2$EVI_VALUE2 <- admData2$EVI_VALUE
admData2$rain_mm2 <- admData2$rain_mm
admData2$nagem2 <- admData2$nagem
admData2$severe_disease2 <- as.factor(admData2$severe_disease)
admData2$mnth2 <- admData2$mnth
admData2$nweight2 <- admData2$nweight
admData2$admdays2 <- admData2$admdays
###defining the priors
prior.iid = c(1,0.01)
prior.besag = c(1,0.001)
initial.iid = 4
initial.besag = 3
#' \\\_Model_1_\\\ #
###############MOdel 1 #######################################################
## spatial unstructured
formulaUH0 <- cumulitive_count ~ EVI_VALUE + rain_mm + gender + severe_disease +
total_admission + admdays + nweight +
f(Adj_ID, model = "iid",prior="normal",param=c(0, 0.001) , initial = 1)
resultUH0 <- inla(formulaUH0,family="nbinomial",
data=admData2, control.compute=list(dic=TRUE,cpo=TRUE),E=log(nagem) ,
control.predictor = list(compute = TRUE))
##summary in 3 decimal places
summary(resultUH0)
exp(resultUH0$summary.fixed)
write.csv(data.frame(resultUH0$summary.fixed), "results1_14504_36.csv")
pdresultUH0 <- resultUH0$dic$p.eff
#' \\\_Model_1_\\\ #
#' #
#' \\\_Model 1B\\\ #
###############MOdel 1B#######################################################
### spatial model structured and unstrustured without
### to comapare with Winbugs
formulaUHB <- cumulitive_count ~ EVI_VALUE + rain_mm + gender + severe_disease +
total_admission + admdays + nweight +
f(Adj_ID, model = "bym" ,graph=klf.adj , scale.model=TRUE,
hyper=list(prec.unstruct=list(prior="loggamma",param=c(0.0111,0.001)),
prec.spatial=list(prior="loggamma",param=c(0.0011,0.001))))
resultUHB <- inla(formulaUHB,family="nbinomial",
data=admData2, control.compute=list(dic=TRUE,cpo=TRUE),E=log(nagem)
,control.predictor(compute=TRUE))
summary(resultUHB)
pdresultUHB <- resultUHB$dic$p.eff #25.03
exp(resultUHB$summary.fixed)
write.csv(data.frame(resultUHB$summary.fixed), "results2_14498.08.csv")
#write.csv(data.frame(resultUHB$summary.fixed), "results_20.05_under5_10700.63.csv")
####The computation of the posterior mean for the random effects 𝝃 is performed in two
# steps as we have more than one parameter:
# we extract the marginal posterior distribution for each element of the random effect
csi <- resultUHB$marginals.random$Adj_ID[1:40]
## then apply the exponential transformation and calculate the posterior mean for each of them using the lapply function.
zeta <- lapply(csi,function(x) inla.emarginal(exp,x))
##define the cut offs for your risk ratio
zeta.cutoff <- c(0.9, 0.95, 0.999 ,1.0,1.01,1.05, 1.1)
#Transform zeta in categorical variable
cat.zeta <- cut(unlist(zeta),breaks=zeta.cutoff,
include.lowest=TRUE )
#Create a dataframe with all the information needed for the map
maps.cat.zeta <- data.frame(unique(admData2$Adj_ID), cat.zeta=cat.zeta)
#Add the categorized zeta to the kilifi spatial polygon
##
data.kilifi <- attr(kilifi_sub, "data")
attr(kilifi_sub, "data") <- merge(data.kilifi, maps.cat.zeta,
by.x="Adj_ID" , by.y="unique.admData2.Adj_ID.")
## mapping the risk ratio
#spplot(obj=kilifi_sub, zcol= "cat.zeta", col.regions=gray(seq(0.9,0.1,length=4)), asp=1)
spplot(obj=kilifi_sub, zcol= "cat.zeta",col.regions=diverge_hsv(8), scales=list(draw = TRUE), asp=1)
#' \\\_Model 2\\\ #
###############MOdel 2#######################################################
### spatial model structured and unstrustured with the temporal component included
### fitting model 1
admData2$nagem_int <- as.integer(admData2$nagem)
formulaUH <- cumulitive_count ~ EVI_VALUE + rain_mm + gender + severe_disease + total_admission + admdays + nweight +
f(Adj_ID, model = "bym" ,graph=klf.adj , scale.model=TRUE,hyper=list(prec.unstruct=list(prior="loggamma",param=c(0.0111,0.001)),
prec.spatial=list(prior="loggamma",param=c(0.0011,0.001)))) + f(count_adm, model = "ar1")
# f(count_adm, model = "ar1", replicate = Adj_ID3)
resultUH <- inla(formulaUH,family="nbinomial",
data=admData2, control.compute=list(dic=TRUE,cpo=TRUE),E=log(nagem_int)
,control.predictor(compute=TRUE))
summary(resultUH)
pdresultUH <- resultUH$dic$p.eff #35.50
exp(resultUH$summary.fixed)
write.csv(data.frame(resultUH$summary.fixed), "nm_results2_13640.2.csv")
####The computation of the posterior mean for the random effects 𝝃 is performed in two
# steps as we have more than one parameter:
# we extract the marginal posterior distribution for each element of the random effect
csi <- resultUH$marginals.random$Adj_ID[1:40]
## then apply the exponential transformation and calculate the posterior mean for each of them using the lapply function.
zeta <- lapply(csi,function(x) inla.emarginal(exp,x))
##define the cut offs for your risk ratio
zeta.cutoff <- c(0.83,0.9, 0.95, 0.999 ,1.0,1.01,1.05, 1.1 ,1.2)
#Transform zeta in categorical variable
cat.zeta <- cut(unlist(zeta),breaks=zeta.cutoff,
include.lowest=TRUE )
#Create a dataframe with all the information needed for the map
maps.cat.zeta <- data.frame(unique(admData2$Adj_ID), cat.zeta=cat.zeta)
#Add the categorized zeta to the kilifi spatial polygon
##
data.kilifi <- attr(kilifi_sub, "data")
attr(kilifi_sub, "data") <- merge(data.kilifi, maps.cat.zeta,
by.x="Adj_ID" , by.y="unique.admData2.Adj_ID.")
## mapping the risk ratio
png(filename=paste0("figure4A","img.png") , width = 19.45 , height = 22.40 , units = "cm" , res=300)
spplot(obj=kilifi_sub, zcol= "cat.zeta",col.regions=diverge_hsv(8), scales=list(draw = TRUE), asp=1)
dev.off()
### temporal graph
plot( resultUH, plot.fixed.effects = TRUE, constant=FALSE,
plot.lincomb = TRUE,
plot.random.effects = TRUE,
plot.hyperparameters = TRUE,
plot.predictor = TRUE,
plot.q = TRUE,
plot.cpo = TRUE,
single = TRUE)
plot( resultUH, plot.fixed.effects = TRUE , constant=FALSE,plot.cpo = F,single =F)
save.image("stModel.RDA")
#' \\\_Model 3\\\ #
#' ###############MOdel With variables changing over time#######################################################
#### Fitting a SPATIAL Temporal Model
formulaUH2b <- cumulitive_count ~ EVI_VALUE + gender +
severe_disease + total_admission + rain_mm + admdays + nweight +
f(Adj_ID, model = "bym" ,graph=klf.adj , scale.model=TRUE,
hyper=list(prec.unstruct=list(prior="loggamma",param=c(0.001,0.001)),
prec.spatial=list(prior="loggamma",param=c(0.1,0.01))))+
f(EVI_VALUE2 , count_adm2, model = "iid") +
f(rain_mm2 , count_adm3, model = "iid") +
f(nweight2 , count_adm5, model = "iid") +
f(admdays2 , count_adm6, model = "iid") +
f( count_adm7, model = "ar1")
### added due to heissan values errors
##https://groups.google.com/forum/#!topic/r-inla-discussion-group/rTdjAnILdnM
resultUH2b <- inla(formulaUH2b,family="nbinomial",
data=admData2, control.compute=list(dic=TRUE),control.predictor(compute=TRUE) ,
control.inla = list(tolerance = 1e-20, h = 1e-08),E=log(nagem))
pdresultH2 <- resultUH2b$dic$p.eff #447.2864
summary(resultUH2b)
pdresultUH2b <- resultUH2b$dic$p.eff
write.csv(data.frame(resultUH2b$summary.fixed), "resultsST_10296.73.csv")
csi2 <- resultUH2b$marginals.random$Adj_ID[1:40]
## then apply the exponential transformation and calculate the posterior mean for each of them using the lapply function.
zeta2 <- lapply(csi2,function(x) inla.emarginal(exp,x))
##define the cut offs for your risk ratio
zeta.cutoff2 <- c(0.8,0.99, 1.0,1.001,1.1, 1.2)
#Transform zeta in categorical variable
cat.zeta2 <- cut(unlist(zeta2),breaks=zeta.cutoff2,
include.lowest=TRUE)
#Create a dataframe with all the information needed for the map
maps.cat.zeta2 <- data.frame(unique(admData2$Adj_ID), cat.zeta2=cat.zeta2)
#Add the categorized zeta to the kilifi spatial polygon
##
data.kilifi2 <- attr(kilifi_sub, "data")
attr(kilifi_sub, "data") <- merge(data.kilifi2, maps.cat.zeta2,
by.x="Adj_ID" , by.y="unique.admData2.Adj_ID.")
## mapping the risk ratio
spplot(obj=kilifi_sub, zcol= "cat.zeta2",
col.regions=diverge_hsv(8),
scales=list(draw = TRUE), asp=1)
### temporal graph
plot( resultUH2b, plot.fixed.effects = TRUE , constant=FALSE,plot.cpo = F,single =F)
plot( resultUH2b, plot.fixed.effects = TRUE, constant=FALSE,
plot.lincomb = TRUE,
plot.random.effects = TRUE,
plot.hyperparameters = TRUE,
plot.predictor = TRUE,
plot.q = TRUE,
plot.cpo = TRUE,
single = TRUE)
|
/code/inla_spatial_temporal_morbidity.R
|
no_license
|
emilyrfl/SDSS-Datahack-2019
|
R
| false | false | 11,313 |
r
|
#######################################################This code is for running INLA Spatial models #######################################################
#######################################################set seed to get similar results#######################################################
#######################################################Author: KM Wambui #######################################################
set.seed(1221223)
rm(list = ls())
library(R2WinBUGS)
##Mapping
library(rgeos)
library(maptools)
library("ggplot2")
library(broom) ## for converting a map to a data frame
#library(glm2)
#library(ResourceSelection) ## for hosmer and lemeshow testing
library(dplyr)
library(INLA)
library(spdep)
## coloring the spplot
library(colorspace)
###reading and exporting the shape file
## shape file available upon request
kilifi_sub <- maptools::readShapePoly( "data/kilif_sub_loc_Shape/DSS_subloc_Arc.shp",
IDvar="Adj_ID", proj4string=CRS("+proj=longlat +ellps=clrk66"))
temp <- spdep::poly2nb(kilifi_sub)
nb2INLA("data/kilif_sub_loc_Shape/DSS_subloc_Arc.graph", temp)
klf.adj <- paste(getwd(),"/data/kilif_sub_loc_Shape/DSS_subloc_Arc.graph",sep="")
### load the admissions data
## data available upon request
admData <- read.csv("data/morbidity.csv")
admData$rain_mm <- admData$rain_mm/50
admData$severe_disease <- factor(admData$severe_disease , levels=c(0,1,2,3))
admData <- admData %>% mutate(gender2= ifelse(gender==1 ,0,1 ))
admData$gender2 <- factor(admData$gender2 , levels=c(0,1) )
admData$gender <- admData$gender2
admData2 <- admData %>% dplyr::select(Adj_ID , sublocation,mnth, nagem, gender ,
severe_disease ,
cumulitive_count,cumulitive_time , EVI_VALUE ,count_adm ,rain_mm,
total_admission ,admdays ,nweight ,yr)
###generate othe variables to be used within INLa
admData2$Adj_ID2 <- admData2$Adj_ID
admData2$Adj_ID3 <- admData2$Adj_ID
admData2$count_adm2 <- admData2$count_adm
admData2$count_adm3 <- admData2$count_adm
admData2$count_adm4 <- admData2$count_adm
admData2$count_adm5 <- admData2$count_adm
admData2$count_adm6 <- admData2$count_adm
admData2$count_adm7 <- admData2$count_adm
admData2$EVI_VALUE2 <- admData2$EVI_VALUE
admData2$rain_mm2 <- admData2$rain_mm
admData2$nagem2 <- admData2$nagem
admData2$severe_disease2 <- as.factor(admData2$severe_disease)
admData2$mnth2 <- admData2$mnth
admData2$nweight2 <- admData2$nweight
admData2$admdays2 <- admData2$admdays
###defining the priors
prior.iid = c(1,0.01)
prior.besag = c(1,0.001)
initial.iid = 4
initial.besag = 3
#' \\\_Model_1_\\\ #
###############MOdel 1 #######################################################
## spatial unstructured
formulaUH0 <- cumulitive_count ~ EVI_VALUE + rain_mm + gender + severe_disease +
total_admission + admdays + nweight +
f(Adj_ID, model = "iid",prior="normal",param=c(0, 0.001) , initial = 1)
resultUH0 <- inla(formulaUH0,family="nbinomial",
data=admData2, control.compute=list(dic=TRUE,cpo=TRUE),E=log(nagem) ,
control.predictor = list(compute = TRUE))
##summary in 3 decimal places
summary(resultUH0)
exp(resultUH0$summary.fixed)
write.csv(data.frame(resultUH0$summary.fixed), "results1_14504_36.csv")
pdresultUH0 <- resultUH0$dic$p.eff
#' \\\_Model_1_\\\ #
#' #
#' \\\_Model 1B\\\ #
###############MOdel 1B#######################################################
### spatial model structured and unstrustured without
### to comapare with Winbugs
formulaUHB <- cumulitive_count ~ EVI_VALUE + rain_mm + gender + severe_disease +
total_admission + admdays + nweight +
f(Adj_ID, model = "bym" ,graph=klf.adj , scale.model=TRUE,
hyper=list(prec.unstruct=list(prior="loggamma",param=c(0.0111,0.001)),
prec.spatial=list(prior="loggamma",param=c(0.0011,0.001))))
resultUHB <- inla(formulaUHB,family="nbinomial",
data=admData2, control.compute=list(dic=TRUE,cpo=TRUE),E=log(nagem)
,control.predictor(compute=TRUE))
summary(resultUHB)
pdresultUHB <- resultUHB$dic$p.eff #25.03
exp(resultUHB$summary.fixed)
write.csv(data.frame(resultUHB$summary.fixed), "results2_14498.08.csv")
#write.csv(data.frame(resultUHB$summary.fixed), "results_20.05_under5_10700.63.csv")
####The computation of the posterior mean for the random effects 𝝃 is performed in two
# steps as we have more than one parameter:
# we extract the marginal posterior distribution for each element of the random effect
csi <- resultUHB$marginals.random$Adj_ID[1:40]
## then apply the exponential transformation and calculate the posterior mean for each of them using the lapply function.
zeta <- lapply(csi,function(x) inla.emarginal(exp,x))
##define the cut offs for your risk ratio
zeta.cutoff <- c(0.9, 0.95, 0.999 ,1.0,1.01,1.05, 1.1)
#Transform zeta in categorical variable
cat.zeta <- cut(unlist(zeta),breaks=zeta.cutoff,
include.lowest=TRUE )
#Create a dataframe with all the information needed for the map
maps.cat.zeta <- data.frame(unique(admData2$Adj_ID), cat.zeta=cat.zeta)
#Add the categorized zeta to the kilifi spatial polygon
##
data.kilifi <- attr(kilifi_sub, "data")
attr(kilifi_sub, "data") <- merge(data.kilifi, maps.cat.zeta,
by.x="Adj_ID" , by.y="unique.admData2.Adj_ID.")
## mapping the risk ratio
#spplot(obj=kilifi_sub, zcol= "cat.zeta", col.regions=gray(seq(0.9,0.1,length=4)), asp=1)
spplot(obj=kilifi_sub, zcol= "cat.zeta",col.regions=diverge_hsv(8), scales=list(draw = TRUE), asp=1)
#' \\\_Model 2\\\ #
###############MOdel 2#######################################################
### spatial model structured and unstrustured with the temporal component included
### fitting model 1
admData2$nagem_int <- as.integer(admData2$nagem)
formulaUH <- cumulitive_count ~ EVI_VALUE + rain_mm + gender + severe_disease + total_admission + admdays + nweight +
f(Adj_ID, model = "bym" ,graph=klf.adj , scale.model=TRUE,hyper=list(prec.unstruct=list(prior="loggamma",param=c(0.0111,0.001)),
prec.spatial=list(prior="loggamma",param=c(0.0011,0.001)))) + f(count_adm, model = "ar1")
# f(count_adm, model = "ar1", replicate = Adj_ID3)
resultUH <- inla(formulaUH,family="nbinomial",
data=admData2, control.compute=list(dic=TRUE,cpo=TRUE),E=log(nagem_int)
,control.predictor(compute=TRUE))
summary(resultUH)
pdresultUH <- resultUH$dic$p.eff #35.50
exp(resultUH$summary.fixed)
write.csv(data.frame(resultUH$summary.fixed), "nm_results2_13640.2.csv")
####The computation of the posterior mean for the random effects 𝝃 is performed in two
# steps as we have more than one parameter:
# we extract the marginal posterior distribution for each element of the random effect
csi <- resultUH$marginals.random$Adj_ID[1:40]
## then apply the exponential transformation and calculate the posterior mean for each of them using the lapply function.
zeta <- lapply(csi,function(x) inla.emarginal(exp,x))
##define the cut offs for your risk ratio
zeta.cutoff <- c(0.83,0.9, 0.95, 0.999 ,1.0,1.01,1.05, 1.1 ,1.2)
#Transform zeta in categorical variable
cat.zeta <- cut(unlist(zeta),breaks=zeta.cutoff,
include.lowest=TRUE )
#Create a dataframe with all the information needed for the map
maps.cat.zeta <- data.frame(unique(admData2$Adj_ID), cat.zeta=cat.zeta)
#Add the categorized zeta to the kilifi spatial polygon
##
data.kilifi <- attr(kilifi_sub, "data")
attr(kilifi_sub, "data") <- merge(data.kilifi, maps.cat.zeta,
by.x="Adj_ID" , by.y="unique.admData2.Adj_ID.")
## mapping the risk ratio
png(filename=paste0("figure4A","img.png") , width = 19.45 , height = 22.40 , units = "cm" , res=300)
spplot(obj=kilifi_sub, zcol= "cat.zeta",col.regions=diverge_hsv(8), scales=list(draw = TRUE), asp=1)
dev.off()
### temporal graph
plot( resultUH, plot.fixed.effects = TRUE, constant=FALSE,
plot.lincomb = TRUE,
plot.random.effects = TRUE,
plot.hyperparameters = TRUE,
plot.predictor = TRUE,
plot.q = TRUE,
plot.cpo = TRUE,
single = TRUE)
plot( resultUH, plot.fixed.effects = TRUE , constant=FALSE,plot.cpo = F,single =F)
save.image("stModel.RDA")
#' \\\_Model 3\\\ #
#' ###############MOdel With variables changing over time#######################################################
#### Fitting a SPATIAL Temporal Model
formulaUH2b <- cumulitive_count ~ EVI_VALUE + gender +
severe_disease + total_admission + rain_mm + admdays + nweight +
f(Adj_ID, model = "bym" ,graph=klf.adj , scale.model=TRUE,
hyper=list(prec.unstruct=list(prior="loggamma",param=c(0.001,0.001)),
prec.spatial=list(prior="loggamma",param=c(0.1,0.01))))+
f(EVI_VALUE2 , count_adm2, model = "iid") +
f(rain_mm2 , count_adm3, model = "iid") +
f(nweight2 , count_adm5, model = "iid") +
f(admdays2 , count_adm6, model = "iid") +
f( count_adm7, model = "ar1")
### added due to heissan values errors
##https://groups.google.com/forum/#!topic/r-inla-discussion-group/rTdjAnILdnM
resultUH2b <- inla(formulaUH2b,family="nbinomial",
data=admData2, control.compute=list(dic=TRUE),control.predictor(compute=TRUE) ,
control.inla = list(tolerance = 1e-20, h = 1e-08),E=log(nagem))
pdresultH2 <- resultUH2b$dic$p.eff #447.2864
summary(resultUH2b)
pdresultUH2b <- resultUH2b$dic$p.eff
write.csv(data.frame(resultUH2b$summary.fixed), "resultsST_10296.73.csv")
csi2 <- resultUH2b$marginals.random$Adj_ID[1:40]
## then apply the exponential transformation and calculate the posterior mean for each of them using the lapply function.
zeta2 <- lapply(csi2,function(x) inla.emarginal(exp,x))
##define the cut offs for your risk ratio
zeta.cutoff2 <- c(0.8,0.99, 1.0,1.001,1.1, 1.2)
#Transform zeta in categorical variable
cat.zeta2 <- cut(unlist(zeta2),breaks=zeta.cutoff2,
include.lowest=TRUE)
#Create a dataframe with all the information needed for the map
maps.cat.zeta2 <- data.frame(unique(admData2$Adj_ID), cat.zeta2=cat.zeta2)
#Add the categorized zeta to the kilifi spatial polygon
##
data.kilifi2 <- attr(kilifi_sub, "data")
attr(kilifi_sub, "data") <- merge(data.kilifi2, maps.cat.zeta2,
by.x="Adj_ID" , by.y="unique.admData2.Adj_ID.")
## mapping the risk ratio
spplot(obj=kilifi_sub, zcol= "cat.zeta2",
col.regions=diverge_hsv(8),
scales=list(draw = TRUE), asp=1)
### temporal graph
plot( resultUH2b, plot.fixed.effects = TRUE , constant=FALSE,plot.cpo = F,single =F)
plot( resultUH2b, plot.fixed.effects = TRUE, constant=FALSE,
plot.lincomb = TRUE,
plot.random.effects = TRUE,
plot.hyperparameters = TRUE,
plot.predictor = TRUE,
plot.q = TRUE,
plot.cpo = TRUE,
single = TRUE)
|
##############################################################
# Getting and Cleaning Data Course
# Final Programming Assignment
##############################################################
library(dplyr)
library(tidyr)
# 1. Read in traing and test sets
# feature names (note: remove "meanFreq")
feat_names <- read.table('./UCI HAR Dataset/features.txt', col.names = c('Number', 'Features'))
use_cols <- grepl('(mean|std)\\(', feat_names$Features)
train <- read.table('./UCI HAR Dataset/train/X_train.txt', col.names = feat_names$Features)
test <- read.table('./UCI HAR Dataset/test/X_test.txt', col.names = feat_names$Features)
# 2. Extract mean/std variables and merge them
train <- train[, use_cols]
test <- test[, use_cols]
df <- rbind(train, test)
# 3a. label column
activity_labels <- read.table('./UCI HAR Dataset/activity_labels.txt', colClasses = c('numeric', 'character'))
train_label <- read.table('./UCI HAR Dataset/train/y_train.txt')
test_label <- read.table('./UCI HAR Dataset/test/y_test.txt')
combined_label <- rbind(train_label, test_label)
combined_label <- merge(combined_label, activity_labels, by = 'V1')
df$activity <- combined_label$V2
# 3b. subject column
train_subjects <- read.table('./UCI HAR Dataset/train/subject_train.txt')
test_subjects <- read.table('./UCI HAR Dataset/test/subject_test.txt')
subjects <- rbind(train_subjects, test_subjects)
df$subject <- subjects$V1
# 4. Change name of columns
tmp <- gsub('^t', 'Time.', names(df))
tmp <- gsub('^f', 'Freq.', tmp)
tmp <- gsub('Acc', '.Accelerometer', tmp)
tmp <- gsub('Gyro', '.Gyroscope', tmp)
names(df) <- gsub('Mag', '.Magnitude', tmp)
# 5. Tidy data
tidy_data <- df %>%
group_by(activity, subject) %>%
summarize_all(mean)
|
/run_analysis.R
|
no_license
|
frankzyx/Getting_and_cleaning_data__Coursera
|
R
| false | false | 1,731 |
r
|
##############################################################
# Getting and Cleaning Data Course
# Final Programming Assignment
##############################################################
library(dplyr)
library(tidyr)
# 1. Read in traing and test sets
# feature names (note: remove "meanFreq")
feat_names <- read.table('./UCI HAR Dataset/features.txt', col.names = c('Number', 'Features'))
use_cols <- grepl('(mean|std)\\(', feat_names$Features)
train <- read.table('./UCI HAR Dataset/train/X_train.txt', col.names = feat_names$Features)
test <- read.table('./UCI HAR Dataset/test/X_test.txt', col.names = feat_names$Features)
# 2. Extract mean/std variables and merge them
train <- train[, use_cols]
test <- test[, use_cols]
df <- rbind(train, test)
# 3a. label column
activity_labels <- read.table('./UCI HAR Dataset/activity_labels.txt', colClasses = c('numeric', 'character'))
train_label <- read.table('./UCI HAR Dataset/train/y_train.txt')
test_label <- read.table('./UCI HAR Dataset/test/y_test.txt')
combined_label <- rbind(train_label, test_label)
combined_label <- merge(combined_label, activity_labels, by = 'V1')
df$activity <- combined_label$V2
# 3b. subject column
train_subjects <- read.table('./UCI HAR Dataset/train/subject_train.txt')
test_subjects <- read.table('./UCI HAR Dataset/test/subject_test.txt')
subjects <- rbind(train_subjects, test_subjects)
df$subject <- subjects$V1
# 4. Change name of columns
tmp <- gsub('^t', 'Time.', names(df))
tmp <- gsub('^f', 'Freq.', tmp)
tmp <- gsub('Acc', '.Accelerometer', tmp)
tmp <- gsub('Gyro', '.Gyroscope', tmp)
names(df) <- gsub('Mag', '.Magnitude', tmp)
# 5. Tidy data
tidy_data <- df %>%
group_by(activity, subject) %>%
summarize_all(mean)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_facets.R
\name{get_facets}
\alias{get_facets}
\title{Get Dataverse facets}
\usage{
get_facets(dataverse, key = Sys.getenv("DATAVERSE_KEY"),
server = Sys.getenv("DATAVERSE_SERVER"), ...)
}
\arguments{
\item{dataverse}{A character string specifying a Dataverse name or an object of class \dQuote{dataverse}.}
\item{key}{A character string specifying a Dataverse server API key. If one is not specified, functions calling authenticated API endpoints will fail. Keys can be specified atomically or globally using \code{Sys.setenv("DATAVERSE_KEY" = "examplekey")}.}
\item{server}{A character string specifying a Dataverse server. There are multiple Dataverse installations, but the defaults is to use the Harvard Dataverse. This can be modified atomically or globally using \code{Sys.setenv("DATAVERSE_SERVER" = "dataverse.example.com")}.}
\item{...}{Additional arguments passed to an HTTP request function, such as \code{\link[httr]{GET}}, \code{\link[httr]{POST}}, or \code{\link[httr]{DELETE}}.}
}
\value{
A list.
}
\description{
Dataverse metadata facets
}
\details{
Retrieve a list of Dataverse metadata facets.
}
\examples{
\dontrun{
# download file from:
# https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/ARKOTI
monogan <- get_dataverse("monogan")
(monogan_data <- dataverse_contents(monogan))
# get facets
get_facets(monogan)
}
}
\seealso{
To manage Dataverses: \code{\link{create_dataverse}}, \code{\link{delete_dataverse}}, \code{\link{publish_dataverse}}, \code{\link{dataverse_contents}}; to get datasets: \code{\link{get_dataset}}; to search for Dataverses, datasets, or files: \code{\link{dataverse_search}}
}
|
/man/get_facets.Rd
|
permissive
|
wibeasley/dataverse-client-r
|
R
| false | true | 1,734 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_facets.R
\name{get_facets}
\alias{get_facets}
\title{Get Dataverse facets}
\usage{
get_facets(dataverse, key = Sys.getenv("DATAVERSE_KEY"),
server = Sys.getenv("DATAVERSE_SERVER"), ...)
}
\arguments{
\item{dataverse}{A character string specifying a Dataverse name or an object of class \dQuote{dataverse}.}
\item{key}{A character string specifying a Dataverse server API key. If one is not specified, functions calling authenticated API endpoints will fail. Keys can be specified atomically or globally using \code{Sys.setenv("DATAVERSE_KEY" = "examplekey")}.}
\item{server}{A character string specifying a Dataverse server. There are multiple Dataverse installations, but the defaults is to use the Harvard Dataverse. This can be modified atomically or globally using \code{Sys.setenv("DATAVERSE_SERVER" = "dataverse.example.com")}.}
\item{...}{Additional arguments passed to an HTTP request function, such as \code{\link[httr]{GET}}, \code{\link[httr]{POST}}, or \code{\link[httr]{DELETE}}.}
}
\value{
A list.
}
\description{
Dataverse metadata facets
}
\details{
Retrieve a list of Dataverse metadata facets.
}
\examples{
\dontrun{
# download file from:
# https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/ARKOTI
monogan <- get_dataverse("monogan")
(monogan_data <- dataverse_contents(monogan))
# get facets
get_facets(monogan)
}
}
\seealso{
To manage Dataverses: \code{\link{create_dataverse}}, \code{\link{delete_dataverse}}, \code{\link{publish_dataverse}}, \code{\link{dataverse_contents}}; to get datasets: \code{\link{get_dataset}}; to search for Dataverses, datasets, or files: \code{\link{dataverse_search}}
}
|
#' Vector of evaluated parameters in the simulation study that will be summarised
#' using the rsimsum package
#' @return vector of length 12 with the names of the to be evaluated parameters
#' and the name of their monte carlo standerd error
eval_param_rsimsum <- function(){
eval_param_rsimsum <- c("bias",
"mse",
"cover",
"modelse",
"empse",
"nsim")
eval_param_rsimsum <- c(rbind(eval_param_rsimsum,
paste0(eval_param_rsimsum, "_mcse")))
eval_param_rsimsum
}
#' Initialise summary object
#'
#' @param scen_nos scen no.'s to be used in the summary object
#' @param input input of simulation study
#' @return a data.frame with length(scen_nos) rows and length(eval_param_rsimsum())
#' +1 columns. See function eval_param_rsimsum() for evaluated parameters in
#' summary. Additionally, a column for the estimated r-squared is added.
init_summary <- function(scen_nos,
input){
# parameters that will be evaluated
eval_param <- eval_param_rsimsum()
# init dataframe that will hold the results of the sim study
summary_eval_param <- matrix(ncol = length(eval_param) + 1, # + col for r-squared
nrow = length(scen_nos))
summary_eval_param <- data.frame(summary_eval_param)
summary_eval_param$scen_no = scen_nos
colnames(summary_eval_param)[1:length(eval_param)] <- eval_param
colnames(summary_eval_param)[length(eval_param) + 1] <- "r_squared_est"
# for all scen_nos, 3 methods are used
summary <- expand.grid(scen_no = scen_nos,
method = c("uncor", "mecor", "simex"))
summary <- dplyr::left_join(summary,
input,
by = "scen_no")
summary <- dplyr::left_join(summary,
summary_eval_param,
by = "scen_no")
return(summary)
}
#' Summarise simulation study
#'
#' @param scen_nos scen no.'s to be summarised
#' @param use_input input of simulation study
#' @param processed_dir directory where the processed files of the simulation is to be found
#' found
#' @return a filled data.frame that is initiated using the function
#' init_summary(), and additionally a column with the percentage bias in each
#' scenario for each method
#'
#' @export
summarise_sim <- function(scen_nos,
use_input,
processed_dir = "./output/processed/"){
summary <- init_summary(scen_nos,
use_input)
for (i in seq_along(scen_nos)){
summary <- summarise_one_scen_no(summary,
scen_nos[i],
use_input,
processed_dir)
}
summary$perc_bias <- (summary$bias / summary$beta) * 100
return(summary)
}
#' Summarise one scenario no.
#'
#' @param summary data.frame initiated by init_summary() to be filled with
#' summary parameters
#' @param scen_no scen no. to be summarised
#' @param use_input input of the simulation study
#' @param processed_dir directory where processed simulation output is to be
#' found, defaults to "./output/processed/"
#' @return this function will fill out the rows in the summary object of the
#' corresponding scen_no
summarise_one_scen_no <- function(summary,
scen_no,
use_input,
processed_dir = "./output/processed/"){
file <- paste0(processed_dir, "scen_no", scen_no, ".Rds")
processed_output <- data.frame(readRDS(file = file))
simsum <- rsimsum::simsum(data = processed_output,
estvarname = "effect",
true = use_input[use_input$scen_no == scen_no, "beta"],
se = "se",
methodvar = "method",
ci.limits = c("ci.lower", "ci.upper"),
ref = "uncor",
x = TRUE)
# stats contains the params that will be pulled from the simsum object
stats <- eval_param_rsimsum()[- grep("_mcse", eval_param_rsimsum())]
methods <- c("uncor", "mecor", "simex")
for(i in 1:NROW(stats)){
for (j in seq_along(methods)){
summary <- fill_stat_and_mcse(summary,
simsum,
scen_no,
stats[i],
methods[j])
}
}
r_squared <- # r-squared is for every method the same (since its r-squared of the outcome model using just X)
mean(processed_output$r_squared[processed_output$method == "uncor"])
summary[summary$scen_no == scen_no &
summary$method %in% methods, "r_squared_est"] <- r_squared
return(summary)
}
#' Fill summary with the evaluated parameter 'stat' and its mcse for a specific
#' scen_no and method using the simsum object from the package rsimsum
#'
#' @param summary the summary object initialised by init_summary()
#' @param simsum a simsum object created by the package rsimsum for a specific
#' scenario no.
#' @param scen_no scenario no. of the scenario that's evaluated
#' @param stat one of the parameters in eval_param_rsimsum()
#' @param method the method that's evaluated, either "uncor" / "mecor" / "simex"
#' @return this function will fill out the specific cells with 'stat' and its
#' 'stat'_mcse in the summary object (for a given scen_no and method)
fill_stat_and_mcse <- function(summary,
simsum,
scen_no,
stat,
method){
summary[summary$scen_no == scen_no &
summary$method == method, stat] <-
simsum$summ[simsum$summ$stat == stat &
simsum$summ$method == method, "est"]
summary[summary$scen_no == scen_no &
summary$method == method, paste0(stat, ("_mcse"))] <-
simsum$summ[simsum$summ$stat == stat &
simsum$summ$method == method, "mcse"]
return(summary)
}
|
/R/sumsim.R
|
no_license
|
LindaNab/simexvsmecor
|
R
| false | false | 6,218 |
r
|
#' Vector of evaluated parameters in the simulation study that will be summarised
#' using the rsimsum package
#' @return vector of length 12 with the names of the to be evaluated parameters
#' and the name of their monte carlo standerd error
eval_param_rsimsum <- function(){
eval_param_rsimsum <- c("bias",
"mse",
"cover",
"modelse",
"empse",
"nsim")
eval_param_rsimsum <- c(rbind(eval_param_rsimsum,
paste0(eval_param_rsimsum, "_mcse")))
eval_param_rsimsum
}
#' Initialise summary object
#'
#' @param scen_nos scen no.'s to be used in the summary object
#' @param input input of simulation study
#' @return a data.frame with length(scen_nos) rows and length(eval_param_rsimsum())
#' +1 columns. See function eval_param_rsimsum() for evaluated parameters in
#' summary. Additionally, a column for the estimated r-squared is added.
init_summary <- function(scen_nos,
input){
# parameters that will be evaluated
eval_param <- eval_param_rsimsum()
# init dataframe that will hold the results of the sim study
summary_eval_param <- matrix(ncol = length(eval_param) + 1, # + col for r-squared
nrow = length(scen_nos))
summary_eval_param <- data.frame(summary_eval_param)
summary_eval_param$scen_no = scen_nos
colnames(summary_eval_param)[1:length(eval_param)] <- eval_param
colnames(summary_eval_param)[length(eval_param) + 1] <- "r_squared_est"
# for all scen_nos, 3 methods are used
summary <- expand.grid(scen_no = scen_nos,
method = c("uncor", "mecor", "simex"))
summary <- dplyr::left_join(summary,
input,
by = "scen_no")
summary <- dplyr::left_join(summary,
summary_eval_param,
by = "scen_no")
return(summary)
}
#' Summarise simulation study
#'
#' @param scen_nos scen no.'s to be summarised
#' @param use_input input of simulation study
#' @param processed_dir directory where the processed files of the simulation is to be found
#' found
#' @return a filled data.frame that is initiated using the function
#' init_summary(), and additionally a column with the percentage bias in each
#' scenario for each method
#'
#' @export
summarise_sim <- function(scen_nos,
use_input,
processed_dir = "./output/processed/"){
summary <- init_summary(scen_nos,
use_input)
for (i in seq_along(scen_nos)){
summary <- summarise_one_scen_no(summary,
scen_nos[i],
use_input,
processed_dir)
}
summary$perc_bias <- (summary$bias / summary$beta) * 100
return(summary)
}
#' Summarise one scenario no.
#'
#' @param summary data.frame initiated by init_summary() to be filled with
#' summary parameters
#' @param scen_no scen no. to be summarised
#' @param use_input input of the simulation study
#' @param processed_dir directory where processed simulation output is to be
#' found, defaults to "./output/processed/"
#' @return this function will fill out the rows in the summary object of the
#' corresponding scen_no
summarise_one_scen_no <- function(summary,
scen_no,
use_input,
processed_dir = "./output/processed/"){
file <- paste0(processed_dir, "scen_no", scen_no, ".Rds")
processed_output <- data.frame(readRDS(file = file))
simsum <- rsimsum::simsum(data = processed_output,
estvarname = "effect",
true = use_input[use_input$scen_no == scen_no, "beta"],
se = "se",
methodvar = "method",
ci.limits = c("ci.lower", "ci.upper"),
ref = "uncor",
x = TRUE)
# stats contains the params that will be pulled from the simsum object
stats <- eval_param_rsimsum()[- grep("_mcse", eval_param_rsimsum())]
methods <- c("uncor", "mecor", "simex")
for(i in 1:NROW(stats)){
for (j in seq_along(methods)){
summary <- fill_stat_and_mcse(summary,
simsum,
scen_no,
stats[i],
methods[j])
}
}
r_squared <- # r-squared is for every method the same (since its r-squared of the outcome model using just X)
mean(processed_output$r_squared[processed_output$method == "uncor"])
summary[summary$scen_no == scen_no &
summary$method %in% methods, "r_squared_est"] <- r_squared
return(summary)
}
#' Fill summary with the evaluated parameter 'stat' and its mcse for a specific
#' scen_no and method using the simsum object from the package rsimsum
#'
#' @param summary the summary object initialised by init_summary()
#' @param simsum a simsum object created by the package rsimsum for a specific
#' scenario no.
#' @param scen_no scenario no. of the scenario that's evaluated
#' @param stat one of the parameters in eval_param_rsimsum()
#' @param method the method that's evaluated, either "uncor" / "mecor" / "simex"
#' @return this function will fill out the specific cells with 'stat' and its
#' 'stat'_mcse in the summary object (for a given scen_no and method)
fill_stat_and_mcse <- function(summary,
simsum,
scen_no,
stat,
method){
summary[summary$scen_no == scen_no &
summary$method == method, stat] <-
simsum$summ[simsum$summ$stat == stat &
simsum$summ$method == method, "est"]
summary[summary$scen_no == scen_no &
summary$method == method, paste0(stat, ("_mcse"))] <-
simsum$summ[simsum$summ$stat == stat &
simsum$summ$method == method, "mcse"]
return(summary)
}
|
# Emi Tanaka (@statsgen) and Garrick Aden-Buie (@grrrck) and Evangeline Reynolds (@EvaMaeRey)
# have contributed to this code
# how to solve "no visible binding for global variable" note
utils::globalVariables(
c('func', '.', 'raw_code', 'non_seq', 'func', '.','raw_code', '.',
'replacements','line','code','highlight','connector','line','func',
'.','raw_code','open_par','closed_par', 'auto','connector','line1',
'text','open_square','line','token','open_par','closed_par','open_curly',
'closed_curly','closed_square','full_line','num_open_par','num_closed_par',
'num_open_curly','num_closed_curly','num_open_square','num_closed_square',
'balanced_paren','balanced_curly','balanced_square','all_parenteses_balanced',
'raw_code', 'connector','all_parentheses_balanced', 'line', 'auto', 'user',
'non_seq', 'rotate','raw_code', '.')
)
####### Make some test code available as character strings #####
create_code <- function(){ # for testing w/o knitting
"cars %>% # the data #BREAK
filter(speed > 4) %>% # subset
ggplot() + # pipe to ggplot
aes(x = speed) +
aes(y = dist) +
# Describing what follows
geom_point(alpha = .3) + #BREAK
geom_point(alpha = 1) + #BREAK2
geom_jitter(alpha = .5) + #BREAK3
aes(color =
speed > 14
) %+%
cars ->
my_plot #BREAK
NULL #OMIT
1 + 1 #BREAK"
}
create_code_remove <- function(){
"ggplot(data = cars) +
aes(x = speed) +
aes(y = dist) + #BREAK-2
geom_rug()"
}
create_code_rotate_omit <- function(){
'ggplot(data = cars) +
aes(x = speed) +
aes(y = dist) +
geom_point(size = 8,
shape = 21,
alpha = .9,
color = "snow") +
aes(fill = speed) +
scale_fill_viridis_c(option = "magma") + #OMIT
scale_fill_viridis_c(option = "magma") + #ROTATE
scale_fill_viridis_c(option = "cividis") + #ROTATE
scale_fill_viridis_c(option = "plasma") + #ROTATE
NULL'
}
create_injectable_code <- function(){
"for (i in 1:10){
print(i)
} "
}
create_rotate_code <- function(){ # for testing w/o knitting
"cars %>% # the data #BREAK
filter(speed > 4) %>% # subset
ggplot() + # pipe to ggplot
aes(x = speed) +
aes(y = dist) +
# Describing what follows
geom_point(alpha = .3) + #ROTATE
geom_point(color = 'blue') + #ROTATE
geom_point(shape = 'square') -> #ROTATE
my_plot #BREAK
1 + 1 #BREAK"
}
create_short_code <- function(){ # for testing w/o knitting
"cars %>% # the data
filter(speed > 4) %>% # subset #BREAK
ggplot() #BREAK"
}
#' Title
#'
#' @export
#'
#' @examples
#' create_base_pipe_code()
#'
#' create_base_pipe_code() %>%
#' code_parse()
create_base_pipe_code <- function(){ # for testing w/o knitting
"cars |> # the data
filter(speed > 4) |>
ggplot() #BREAK"
}
create_single_line_code <- function(){ # for testing no reveal
"cars"
}
create_ggplot_code <- function(){ # for testing w/o knitting
"ggplot2::ggplot(data = cars) + # initiate ggplot
ggplot2::aes(x = speed) +
ggplot2::aes(y = dist) +
# Describing what follows
ggplot2::geom_point(alpha = .3) "
}
create_python_code <- function(){
"xobject = load_iris()
xobject = pd.DataFrame(xobject.data,
columns=xobject.feature_names)
def evenOdd( x ):
if (x % 2 == 0):
print \"even\"
else:
print \"odd\"
# Driver code
evenOdd(2)
xobject.pipe(remove_units).pipe(length_times_width)"
}
create_sql_code <- function(){
"SELECT *
FROM tbl_hello_world
WHERE "
}
create_python_code_pipeline <- function(){
"student_scores \\\n .melt(id_vars=['student', \"sex\"], \n var_name=\"subject\", \n value_name=\"final_grade\") \\\n .sort_values(by=['final_grade'], ascending=False) \\\n .head(3)"
}
create_data_table_code <- function(){ # for testing w/o knitting
'gapminder::gapminder %>%
data.table() %>%
.[year > 1980] %>%
.[ ,
mean(gdpPercap) ,
by = .(continent, year) ]'
}
create_left_assign_code <- function(){
# for testing w/o knitting
"my_cars <- cars %>% # the data #BREAK
filter(speed > 4) %>% # subset
ggplot() + # pipe to ggplot
aes(x = speed) +
aes(y = dist) +
# Describing what follows
geom_point(alpha = .3)"
}
|
/R/a_create_test_code.R
|
permissive
|
brshallo/flipbookr
|
R
| false | false | 4,314 |
r
|
# Emi Tanaka (@statsgen) and Garrick Aden-Buie (@grrrck) and Evangeline Reynolds (@EvaMaeRey)
# have contributed to this code
# how to solve "no visible binding for global variable" note
utils::globalVariables(
c('func', '.', 'raw_code', 'non_seq', 'func', '.','raw_code', '.',
'replacements','line','code','highlight','connector','line','func',
'.','raw_code','open_par','closed_par', 'auto','connector','line1',
'text','open_square','line','token','open_par','closed_par','open_curly',
'closed_curly','closed_square','full_line','num_open_par','num_closed_par',
'num_open_curly','num_closed_curly','num_open_square','num_closed_square',
'balanced_paren','balanced_curly','balanced_square','all_parenteses_balanced',
'raw_code', 'connector','all_parentheses_balanced', 'line', 'auto', 'user',
'non_seq', 'rotate','raw_code', '.')
)
####### Make some test code available as character strings #####
create_code <- function(){ # for testing w/o knitting
"cars %>% # the data #BREAK
filter(speed > 4) %>% # subset
ggplot() + # pipe to ggplot
aes(x = speed) +
aes(y = dist) +
# Describing what follows
geom_point(alpha = .3) + #BREAK
geom_point(alpha = 1) + #BREAK2
geom_jitter(alpha = .5) + #BREAK3
aes(color =
speed > 14
) %+%
cars ->
my_plot #BREAK
NULL #OMIT
1 + 1 #BREAK"
}
create_code_remove <- function(){
"ggplot(data = cars) +
aes(x = speed) +
aes(y = dist) + #BREAK-2
geom_rug()"
}
create_code_rotate_omit <- function(){
'ggplot(data = cars) +
aes(x = speed) +
aes(y = dist) +
geom_point(size = 8,
shape = 21,
alpha = .9,
color = "snow") +
aes(fill = speed) +
scale_fill_viridis_c(option = "magma") + #OMIT
scale_fill_viridis_c(option = "magma") + #ROTATE
scale_fill_viridis_c(option = "cividis") + #ROTATE
scale_fill_viridis_c(option = "plasma") + #ROTATE
NULL'
}
create_injectable_code <- function(){
"for (i in 1:10){
print(i)
} "
}
create_rotate_code <- function(){ # for testing w/o knitting
"cars %>% # the data #BREAK
filter(speed > 4) %>% # subset
ggplot() + # pipe to ggplot
aes(x = speed) +
aes(y = dist) +
# Describing what follows
geom_point(alpha = .3) + #ROTATE
geom_point(color = 'blue') + #ROTATE
geom_point(shape = 'square') -> #ROTATE
my_plot #BREAK
1 + 1 #BREAK"
}
create_short_code <- function(){ # for testing w/o knitting
"cars %>% # the data
filter(speed > 4) %>% # subset #BREAK
ggplot() #BREAK"
}
#' Title
#'
#' @export
#'
#' @examples
#' create_base_pipe_code()
#'
#' create_base_pipe_code() %>%
#' code_parse()
create_base_pipe_code <- function(){ # for testing w/o knitting
"cars |> # the data
filter(speed > 4) |>
ggplot() #BREAK"
}
create_single_line_code <- function(){ # for testing no reveal
"cars"
}
create_ggplot_code <- function(){ # for testing w/o knitting
"ggplot2::ggplot(data = cars) + # initiate ggplot
ggplot2::aes(x = speed) +
ggplot2::aes(y = dist) +
# Describing what follows
ggplot2::geom_point(alpha = .3) "
}
create_python_code <- function(){
"xobject = load_iris()
xobject = pd.DataFrame(xobject.data,
columns=xobject.feature_names)
def evenOdd( x ):
if (x % 2 == 0):
print \"even\"
else:
print \"odd\"
# Driver code
evenOdd(2)
xobject.pipe(remove_units).pipe(length_times_width)"
}
create_sql_code <- function(){
"SELECT *
FROM tbl_hello_world
WHERE "
}
create_python_code_pipeline <- function(){
"student_scores \\\n .melt(id_vars=['student', \"sex\"], \n var_name=\"subject\", \n value_name=\"final_grade\") \\\n .sort_values(by=['final_grade'], ascending=False) \\\n .head(3)"
}
create_data_table_code <- function(){ # for testing w/o knitting
'gapminder::gapminder %>%
data.table() %>%
.[year > 1980] %>%
.[ ,
mean(gdpPercap) ,
by = .(continent, year) ]'
}
create_left_assign_code <- function(){
# for testing w/o knitting
"my_cars <- cars %>% # the data #BREAK
filter(speed > 4) %>% # subset
ggplot() + # pipe to ggplot
aes(x = speed) +
aes(y = dist) +
# Describing what follows
geom_point(alpha = .3)"
}
|
library(rvest)
res_ptt_get <- read_html('https://www.ptt.cc/bbs/hotboards.html',encoding = "big5")
ptt_nodes <- html_nodes(res_ptt_get, xpath = '//div[@class="board-title"]')
html_text(ptt_nodes) %>% head()
|
/Week2/Simple_Crawer/post_PPT_title.r
|
no_license
|
d336643/2018_CSX_RProject
|
R
| false | false | 206 |
r
|
library(rvest)
res_ptt_get <- read_html('https://www.ptt.cc/bbs/hotboards.html',encoding = "big5")
ptt_nodes <- html_nodes(res_ptt_get, xpath = '//div[@class="board-title"]')
html_text(ptt_nodes) %>% head()
|
# library(R2admb)
setwd("C:/admb/admb101-gcc452-win64/examples/admb/SnapInner2015")
area="nw" # HERE set area="ea" or "nw" or "sw"
plot.out=0 # HERE set =0 or 1 for screen or =2 for tiff output
# ====================================================================================================
dat <- read.csv('InnerSBsnapperAges1997_2013.csv', header = T)
dat
xx=dat[(dat$Area==area) & (as.numeric(as.character(dat$Cohort)))>0,c('Year', 'LCFmm', 'TLmm', 'Cohorts')]
yy=expand.grid(Year=1997:2013, Cohorts=1:31)
IGages=merge(yy,xx,by=c("Year","Cohorts"),all.x=TRUE)
IGages
IGages$weight=0.000000148*IGages$LCFmm^2.6703 # add a coulmn of weights
IGages
sy=aggregate(weight ~ Cohorts,mean,data=IGages)
ss=expand.grid(Cohorts=1:31)
IGweight=merge(ss,sy,by=c("Cohorts"),all.x=TRUE)
IGweight[is.na(IGweight)]=0
IGweight[1,2]=0.1
IGweight
IGweight=IGweight[,'weight']
IGweight
maxwt <- max(IGweight)
corrwt <- ifelse(area=='eg', 6.0, ifelse(area=='nw', 5.5, 5.7))
plot(1:31,IGweight,"o")
IGweight[17]<-corrwt
IGweight[IGweight==0]<-maxwt
plot(1:31,IGweight,"o")
for (i in 8:30) IGweight[i]=(IGweight[i-1]+IGweight[i]+IGweight[i+1])/3
plot(1:31,IGweight,"o")
for (i in 15:30) IGweight[i]=(IGweight[i-1]+IGweight[i]+IGweight[i+1])/3
plot(1:31,IGweight,"o")
# determine age composition
xx$Year<-factor(xx$Year,levels=1997:2013) # add in years with no data
xx$Cohorts<-factor(xx$Cohorts,levels=1:31) # add in ages 1
IGagecomp=tapply(xx[,1],list(xx$Year,xx$Cohorts),length)
IGagecomp[is.na(IGagecomp)]=0
names(IGagecomp)=NULL
Nages=rowSums(IGagecomp)
names(Nages)=NULL
## Write a comment here
nIGagecomp <- matrix('\n', ncol=ncol(IGagecomp)+1, nrow=nrow(IGagecomp))
nIGagecomp[1:nrow(IGagecomp),1:ncol(IGagecomp)] <- IGagecomp
nIGagecomp <- t(nIGagecomp)
# ================================= bring in CATCH data ===========================
options(stringsAsFactors = FALSE)
dat <- read.csv('CA_20150513.csv', header = T)
dat
dat$CAM=as.numeric(as.character(dat$CAM))
catch=subset(dat, Area==area, select=c('Year', 'CAL', 'CAM', 'CAH','CAC','CAcv'))
catch$CAT=catch$CAM+catch$CAC
catch$CAsd=catch$CAM*catch$CAcv
Ocatch=catch[,'CAT']
Ocatchsd=catch[,'CAsd']
Ocatchsd[Ocatchsd==0]=1
# ===================Bring in DEPM data ============================================
options(stringsAsFactors = FALSE)
datt <- read.csv('DEPMdata - Zdaily 0.3 20150513.csv', header = T)
depm=subset(datt, Area==area, select=c('Area','DEPML','DEPMM','DEPMU','DEPMsd','DEPMcv'))
depm$lnDEPM=log(depm$DEPMM)
depm$lnDEPM[depm$DEPMM==0]=0
DEPML=depm[,'DEPML']
DEPMM=depm[,'DEPMM']
DEPMU=depm[,'DEPMU']
DEPMsd=depm[,'DEPMsd']
DEPMcv=depm[,'DEPMcv']
lnDEPMM=depm[,'lnDEPM']
lnDEPMsd=depm[,'DEPMcv']
# Pool Age-Composition data over all years;
options(stringsAsFactors = FALSE)
dat <- read.csv('InnerSBsnapperAges1997_2013.csv', header = T)
dd=dat[(dat$Area==area) & (as.numeric(as.character(dat$Cohorts)))>0,c('TLmm', 'Cohorts')]
dd$TLcm=round((dd$TLmm+5)/10,0)
zz=dd[c('TLcm', 'Cohorts')]
zz
zz$TLcm<-factor(zz$TLcm,levels=1:90) # add in years with no data
zz$Cohorts<-factor(zz$Cohorts,levels=1:31) # add in ages 0,1
AgeLen=tapply(zz[,1],list(zz$TLcm,zz$Cohorts),length)
AgeLen[is.na(AgeLen)]=0
# Determine of TL's in each age class
aa<-colSums(AgeLen)
aa[aa==0]<-0.1
ageprop<-t(round(t(AgeLen)/aa,5))
ageprop
# determine proportions retained pre year 2000
zz=ageprop[1:44,]
zz[1,1:2]=1
ageprop45=1-colSums(zz)
plot(1:31,ageprop45,"o")
ageprop45[2]=0
plot(1:31,ageprop45,"o")
names(ageprop45)=NULL
ageprop45
yy=ageprop[50:70,]
ageprop5070=colSums(yy)
plot(1:31,ageprop5070,"o")
ageprop5070[ageprop5070==1]=0
plot(1:31,ageprop5070,"o")
ageprop5070[14] <- ifelse(area=='nw', 0.4)
plot(1:31,ageprop5070,"o")
for (i in 8:20) ageprop5070[i]=(ageprop5070[i-1]+ageprop5070[i]+ageprop5070[i+1])/3
plot(1:31,ageprop5070,"o")
for (i in 8:20) ageprop5070[i]=(ageprop5070[i-1]+ageprop5070[i]+ageprop5070[i+1])/3
plot(1:31,ageprop5070,"o")
names(ageprop5070)=NULL
# ==================== sexual maturity =======================================
# ==== VonB parameters circa 01/06/2011 supplied by gary 14/05/2014 =========
# ====== maturity parameters supplied by Gary 14/05/2014 ===================
K <- c(0.174, 0.151, 0.169)
t0 <- c(-0.022, -0.084, 0.100)
Linf <- c(755, 728, 770)
LM50 <- c(350,400,420)
LM95 <- c(480,600,570)
LMslope=log(19)/(LM95-LM50)
# =========================================================================
pos <- ifelse(area=='eg', 1, ifelse(area=='nw', 2, 3))
LL=rep(NA, length(1:31))
SexMat=LL
LL = Linf[pos]*(1-exp(-K[pos]*(1:31+0.5-t0[pos])))
SexMat=1/(1+exp(-LMslope[pos]*(LL-LM50[pos])))
SexMat[SexMat>0.99] <- 1
# =======make the ADMB .dat file ==================================
# ===== set future catch level ===================================
for (i in 32:38) Ocatch[i]=10
for (i in 32:38) Ocatchsd[i]=Ocatch[i]*catch$CAcv[i]
# old SexMat
# 0.0058,0.0544,0.2820,0.6637,0.8845,0.96,0.98,0.99,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,
getwd()
fisharea=paste(area,"F.dat",sep="")
sink(fisharea)
cat("#Mode_quota\n",
1,10,
"\n#objective_function_weights\n",
1,1,1,1,1,
"\n#phases_R*_Q_Ho_M_steep_select_dev_AnnualF\n",
1,-1,-2,-2,-4,3,3,1,
"\n#fyear_dyear_lyear_pyear_fage_lage\n",
1983,1997,2013,2020,1,31,
"\n#legSize_PropF_Sdcorrect\n",
3,0.5,0.69,
"\n#Mprior_Mpriorsd_steepprior_steeppriorSD\n",
0.12,0.005,0.75,0.02,
"\n#qgrow\n",
rep.int(1,2013-1983+1),
"\n#age\n",
1:31,
"\n#Sexmat\n",
SexMat,
"\n#Retention_pre2001\n",
ageprop45,
"\n#Retention_post2000\n",
ageprop5070,
"\n#weightF\n",
IGweight,
"\n#weightM\n",
IGweight,
"\n#obsCatch\n",
Ocatch,
"\n#obsCatchsd\n",
Ocatchsd,
"\n#Ndepm\n",
17,
"\n#DEPM_SSB_1998_2013_LO_ME_HI_CV_SD\n",
DEPML,"\n",
DEPMM,"\n",
DEPMU,"\n",
DEPMcv,"\n",
DEPMsd,"\n",
"\n#lnNdepm\n",
lnDEPMM,
"\n#lnNdepm_sd\n",
lnDEPMsd,
"\n#Nage_samples\n",
Nages,
"\n#ages\n",
nIGagecomp,
"\n#checknum\n",
1234)
sink()
fisharea=paste(area,"F.pin",sep="")
sink(fisharea)
cat("#Rstar_Q_F0_M_H_A50_Aslope \n",
17.0,1.0,0.02,0.12,0.75,4.3,1.2,
"\n#dev \n",
-0.08,0.11,-0.04,0.2,0.02,0.53,0.54,0.79,0.52,-0.09,-1.17,-1.08,-0.48,-0.4,-0.43,-0.15,0.31,0.36,0.02,0.35,-0.11,-0.06,0.46,0.61,0.48,-0.44,-0.44,-0.21,-0.09,-0.01,-0.01,0,0,0,0,0,0,0,
"\n#devSD_RdevSD\n",
0.6,0.6,
"\n#F \n",
0.038,0.05,0.06,0.07,0.082,0.096,0.114,0.139,0.162,0.21,0.304,0.418,0.716,0.889,0.848,0.111,0.01,0.012,0.008,0.01,0.083,0.059,0.033,0.067,0.071,0.064,0.076,0.061,0.052,0.046,0.044,0.122,0.143,0.162,0.178,0.187,0.195,0.202,
"\n#EndofFile")
sink()
# =====================================================================================================
# ======================= RUN THE MODEL 8 TIMES TO GET PHI TO CONVERGE =============================
fisharea=paste(area,"F.bat",sep="")
shell(fisharea)
# ======================================================================================================
# ============== GET THE MODEL OUTPUTS SET UP =======================================================
# rm(list=ls())
options(stringsAsFactors=FALSE)
library(zoo)
par(cex=0.8, cex.axis=0.8, cex.lab=0.8, cex.main=1)
allyears=1983:2020
ageyears=1997:2013
depmyears=1998:2013
startage=1
endage=31
model.name=area
predprop=read.table(paste("pred_prop_age_composition.dat",sep=""), header=FALSE)
head(predprop)
obsprop=read.table(paste("prop_age_composition.dat",sep=""), header=FALSE)
head(obsprop)
phi=read.table(paste("value_of_phi.dat",sep=""), header=FALSE)
phi
list.files()
std=read.table(paste(area,"F.std",sep=""), header=TRUE, fill=TRUE)
std[,5]=NULL
head(std)
unique(std$name)
lin=readLines(paste(area, "F.rep",sep=""))
obscatch=strsplit(lin[grep("Observed Catch", lin)+2], " ")[[1]]
obscatch=as.numeric(obscatch[obscatch!=""])
depm=strsplit(lin[grep("DEPM", lin)+1], " ")[[1]]
depm=as.numeric(depm[depm!=""])
depmlow=strsplit(lin[grep("DEPM", lin)+2], " ")[[1]]
depmlow=as.numeric(depmlow[depmlow!=""])
depmupp=strsplit(lin[grep("DEPM", lin)+3], " ")[[1]]
depmupp=as.numeric(depmupp[depmupp!=""])
obscatch=strsplit(lin[grep("Observed Catch", lin)+2], " ")[[1]]
obscatch=as.numeric(obscatch[obscatch!=""])
obscatch=strsplit(lin[grep("Observed Catch", lin)+2], " ")[[1]]
obscatch=as.numeric(obscatch[obscatch!=""])
retain1=strsplit(lin[grep("Retain1", lin)+1], " ")[[1]]
retain1=as.numeric(retain1[retain1!=""])
retain2=strsplit(lin[grep("Retain2", lin)+1], " ")[[1]]
retain2=as.numeric(retain2[retain2!=""])
Nsamp=strsplit(lin[grep("NageSamp", lin)+1], " ")[[1]]
Nsamp=as.numeric(Nsamp[Nsamp!=""])
SSage=strsplit(lin[grep("SSage", lin)+1], " ")[[1]]
SSage=as.numeric(SSage[SSage!=""])[1]
SSageWT=strsplit(lin[grep("Wt_age", lin)+1], " ")[[1]]
SSageWT=as.numeric(SSageWT[SSageWT!=""])[1]
phi=phi*SSageWT/SSage
SRRa=std[which(std$name=="SRRa"), 3]
SRRb=std[which(std$name=="SRRb"), 3]
VEstar=std[which(std$name=="VEstar"), 3]
Estar=std[which(std$name=="Estar"), 3]
Rstar=std[which(std$name=="Rstar"), 3]
SelA50=std[which(std$name=="SelA50"), 3]
SelSlope=std[which(std$name=="SelSlope"), 3]
##spawning biomass
unique(std$name)
ssb=std[which(std$name=="SSB"), 3:4]
ssb$uppCL=ssb$value+1.96*ssb$std
ssb$lowCL=ssb$value-1.96*ssb$std
ssb$year=allyears
##prop SSB
propSSB=std[which(std$name=="propSSB"), 3:4]
propSSB$uppCL=propSSB$value+1.96*propSSB$std
propSSB$lowCL=propSSB$value-1.96*propSSB$std
propSSB$year=allyears
##recruits
recdev=std[which(std$name=="dev" | std$name=="fut_dev"), 3:4]
recdev$uppCL=recdev$value+1.96*recdev$std
recdev$lowCL=recdev$value-1.96*recdev$std
recdev$year=allyears
unique(std$name)
rec=std[which(std$name=="Rec"), 3:4]
rec$uppCL=rec$value+1.96*rec$std
rec$lowCL=rec$value-1.96*rec$std
rec$year=allyears
##fish mort
fmort=std[which(std$name=="Fy"), 3:4]
fmort$uppCL=fmort$value+1.96*fmort$std
fmort$lowCL=fmort$value-1.96*fmort$std
fmort$year=allyears
##eff fish mort
eff.fmort=std[which(std$name=="Fy_eff"), 3:4]
eff.fmort$uppCL=eff.fmort$value+1.96*eff.fmort$std
eff.fmort$lowCL=eff.fmort$value-1.96*eff.fmort$std
eff.fmort$year=allyears
##catch
predcatch=std[which(std$name=="CA"), 3:4]
predcatch$uppCL=predcatch$value+1.96*predcatch$std
predcatch$lowCL=predcatch$value-1.96*predcatch$std
predcatch$year=allyears
##age comp
# =============================================================================
# ====== now plot model fit and save plots =================================
# =============================================================================
# save to single pdf file or individual tiff files
# if (plot.out==1){
# ##plot to single pdf file
# pdf(file=paste("Model Output ", model.name, ".pdf", sep=""), width=7, height=11, onefile=TRUE, paper="A4")
# }
if (plot.out==1 | plot.out==0){ par(mfrow=c(4,2), xaxs="i", yaxs="i", mar=c(4,4,2,2), mgp=c(2.5,1,0))
} else par(mfrow=c(1,1), xaxs="i", yaxs="i", mar=c(4,4,2,2), mgp=c(2.5,1,0))
# =============================================================================
## (1) plot SRR
# =============================================================================
SS=0:600
plot(SS, SS/(SRRa+SRRb*SS), ylim=c(0,1.05*max(SS/(SRRa+SRRb*SS))), type="l", xlab="Spawning Stock", ylab="Recruitment",main="Stock Recruitment Relationship", las=1)
points(VEstar, Rstar, col=2, cex=2, pch=16)
points(ssb$value[1], ssb$value[1]/(SRRa+SRRb*ssb$value[1]), col=4, cex=2, pch=4)
if (plot.out==2){dev.print(device=tiff, paste(model.name, " 1 Stock Recruitment Relationship", ".tiff", sep=""), width=170, height=120, units="mm",
res=300, pointsize = 14, compression ='lzw')}
# =============================================================================
## (2) plot SB
# =============================================================================
plot(allyears, ssb$value, type="l", ylim=c(0,1.05*max(ssb$uppCL, VEstar)), las=1,
xlab="Year", ylab="Spawning Biomass", main="Spawning Biomass")
polygon(c(allyears, rev(allyears)), c(ssb$lowCL, rev(ssb$uppCL)), col=8, border=NA)
lines(allyears, ssb$value, type="l", lwd=2)
lines(depmyears[depm>0], depm[depm>0], "p", pch=16)
arrows(depmyears[depm>0], depmlow[depm>0], depmyears[depm>0], depmupp[depm>0], code=3, angle=90, length=0.1, col=1, lty=1)
abline(h=c(0.2, 0.3, 0.4, 1)*VEstar, col=2, lwd=c(1,1,2, 1), lty=c(1, 1, 1, 2))
# mtext(paste("q=",round(depm_catchability, 2), sep=""), side=3, line=-1.5, adj=0.99, cex=0.8)
box()
# mtext(paste("H0=",round(initial_harvest_fraction, 2), sep=""), side=3, line=-2.8, adj=0.99, cex=0.8)
if (plot.out==2){dev.print(device=tiff, paste(model.name, " 2 Spawning Biomass", ".tiff", sep=""), width=170, height=120, units="mm",
res=300, pointsize = 14, compression ='lzw')}
plot(allyears, propSSB$value, type="l", ylim=c(0,100), las=1,
xlab="Year", ylab="Spawning Biomass (% Unfished)", main="Spawning Biomass (% Unfished)")
polygon(c(allyears, rev(allyears)), c(propSSB$lowCL, rev(propSSB$uppCL)), col=8, border=NA)
lines(allyears, propSSB$value, type="l", lwd=2)
lines(depmyears[depm>0], depm[depm>0]/VEstar*100, "p", pch=16)
arrows(depmyears[depm>0], depmlow[depm>0]/VEstar*100, depmyears[depm>0], depmupp[depm>0]/VEstar*100, code=3, angle=90, length=0.1, col=1, lty=1)
abline(h=c(0.2, 0.3, 0.4)*100, col=2, lwd=c(1,1,2), lty=c(1, 1, 1))
# mtext(paste("q=",round(depm_catchability, 2), sep=""), side=3, line=-1.5, adj=0.99, cex=0.8)
box()
# mtext(paste("H0=",round(initial_harvest_fraction, 2), sep=""), side=3, line=-2.8, adj=0.99, cex=0.8)
if (plot.out==2){dev.print(device=tiff, paste(model.name, " 2a Spawning Biomass Prop", ".tiff", sep=""), width=170, height=120, units="mm",
res=300, pointsize = 14, compression ='lzw')}
# =============================================================================
## (3) catch
# =============================================================================
plot(allyears, predcatch$value, type="l", ylim=c(0,ceiling(max(40/10))*10), las=1, xlab="Year", ylab="Annual Yield", main="Annual Yield")
polygon(c(allyears, rev(allyears)), c(predcatch$lowCL, rev(predcatch$uppCL)), col=8, border=NA)
lines(allyears, predcatch$value, type="l", lwd=2)
points(allyears, obscatch, pch=16, col=2)
box()
if (plot.out==2){dev.print(device=tiff, paste(model.name, " 3 Annual Yield", ".tiff", sep=""), width=170, height=120, units="mm",
res=300, pointsize = 14, compression ='lzw')}
## =============================================================================
## (5) Selectivity and Retention
## =============================================================================
ages=startage:endage
plot(ages, 1/ (1.0 + exp(-SelSlope * (ages - SelA50) )), type="l", lwd=2, xlim=c(0,endage), ylim=c(0,1), las=1,
xlab="Age", ylab="Probability", main="Selectivity")
lines(ages, retain1[1:length(ages)], type="l", col=4)
lines(ages, retain2[1:length(ages)], type="l", col=2)
legend("right", c("Selectivity", "Retention to 2000", "Retention from 2001"), lty=1, lwd=c(2,1,1), col=c(1,4,2), bty="n", y.intersp=1.5)
if (plot.out==2){dev.print(device=tiff, paste(model.name, " 4 Selectivity and Retention", ".tiff", sep=""), width=170, height=120, units="mm",
res=300, pointsize = 14, compression ='lzw')}
# =============================================================================
## (5) fishing mortality
# =============================================================================
plot(allyears, fmort$value, type="l", ylim=c(0,1.0*max(fmort$uppCL)), las=1, xlab="Year", ylab="Fishing Mortality", main="Fully Selected Fishing Mortality")
polygon(c(allyears, rev(allyears)), c(fmort$lowCL, rev(fmort$uppCL)), col=8, border=NA)
lines(allyears, fmort$value, type="l", lwd=2)
abline(h=c(2/3,1,1.5)*0.12, col=2, lty=c(2,1,2))
box()
# mtext(paste("Q=",round(depm_catchability, 2), sep=""), side=3, line=-1, adj=0.99)
if (plot.out==2){dev.print(device=tiff, paste(model.name, " 5 Fishing Mortality", ".tiff", sep=""), width=170, height=120, units="mm",
res=300, pointsize = 14, compression ='lzw')}
# =============================================================================
## (6) effective fishing mortality
# =============================================================================
plot(allyears, eff.fmort$value, type="l", ylim=c(0,1.05*max(eff.fmort$uppCL)), las=1,
xlab="Year", ylab="Fishing Mortality", main="Effective Fishing Mortality")
polygon(c(allyears, rev(allyears)), c(eff.fmort$lowCL, rev(eff.fmort$uppCL)), col=8, border=NA)
lines(allyears, eff.fmort$value, type="l", lwd=2)
abline(h=c(2/3,1,1.5)*0.12, col=2, lty=c(2,1,2))
box()
# mtext(paste("Q=",round(depm_catchability, 2), sep=""), side=3, line=-1, adj=0.99)
if (plot.out==2){dev.print(device=tiff, paste(model.name, " 5a Effective Fishing Mortality", ".tiff", sep=""), width=170, height=120, units="mm",
res=300, pointsize = 14, compression ='lzw')}
## =============================================================================
## (5) recruitment
## =============================================================================
plot(allyears, recdev$value, type="l", ylim=c(-2.6,2.6), las=1, xlab="Year", ylab="Recruitment", main="Recruitment Deviation")
polygon(c(allyears, rev(allyears)), c(recdev$lowCL, rev(recdev$uppCL)), col=8, border=NA)
lines(allyears, recdev$value, type="l", lwd=2)
abline(h=0, col=1, lty=2)
abline(h=mean(recdev$value[recdev$year<=2013]), col=2)
box()
if (plot.out==2){dev.print(device=tiff, paste(model.name, " 7 Recruitment Deviation", ".tiff", sep=""), width=170, height=120, units="mm",
res=300, pointsize = 14, compression ='lzw')}
plot(allyears, rec$value, type="l", ylim=c(0, max(rec$uppCL)), las=1, xlab="Year", ylab="Recruitment", main="Recruitment")
polygon(c(allyears, rev(allyears)), c(rec$lowCL, rev(rec$uppCL)), col=8, border=NA)
lines(allyears, rec$value, type="l", lwd=2)
box()
if (plot.out==2){dev.print(device=tiff, paste(model.name, " 7a Recruitment", ".tiff", sep=""), width=170, height=120, units="mm",
res=300, pointsize = 14, compression ='lzw')}
## =============================================================================
## (5) plot age composition data
## =============================================================================
##data
predprop
obsprop
par(mfrow=c(4,3), mar=c(4,3,2,2), xaxs="i", yaxs="i", mgp=c(2.5,1,0))
yr <- 2011
for (yr in ageyears){
if (sum(obsprop[yr-min(ageyears)+1, ])>0){
age_comp_index=which(ageyears==yr)
data_index=which(allyears==yr)
plot(startage:endage,obsprop[age_comp_index,], type="o", xlim=c(0,endage),
ylim=c(0,max(predprop[age_comp_index,], obsprop[age_comp_index,])*1.05),
lty=2, main="", xlab="Age", ylab="", las=1)
lines(startage:endage,predprop[age_comp_index,],col="red")
mtext(yr, side=3, line=0.2, adj=0.5, cex=0.6, font=2)
mtext(paste("N=",Nsamp[age_comp_index], sep=""), side=3, line=-1, adj=0.99, cex=0.5)
mtext(paste("Neff=",round(phi*Nsamp[age_comp_index],0), sep=""), side=3, line=-2, adj=0.99, cex=0.5)
legend("right", c("obs", "exp"), lty=c(NA,1), col=1:2, pch=c(1, NA), cex=0.8, bty="n")
}
}
if (plot.out==2){dev.print(device=tiff, paste(model.name, " 8 Age Composition", ".tiff", sep=""), width=170, height=200, units="mm",
res=300, pointsize = 14, compression ='lzw')}
if (plot.out==1) dev.off()
|
/InnGulfFdata.R
|
no_license
|
peterfish55/InnerBaysnapper
|
R
| false | false | 19,903 |
r
|
# library(R2admb)
setwd("C:/admb/admb101-gcc452-win64/examples/admb/SnapInner2015")
area="nw" # HERE set area="ea" or "nw" or "sw"
plot.out=0 # HERE set =0 or 1 for screen or =2 for tiff output
# ====================================================================================================
dat <- read.csv('InnerSBsnapperAges1997_2013.csv', header = T)
dat
xx=dat[(dat$Area==area) & (as.numeric(as.character(dat$Cohort)))>0,c('Year', 'LCFmm', 'TLmm', 'Cohorts')]
yy=expand.grid(Year=1997:2013, Cohorts=1:31)
IGages=merge(yy,xx,by=c("Year","Cohorts"),all.x=TRUE)
IGages
IGages$weight=0.000000148*IGages$LCFmm^2.6703 # add a coulmn of weights
IGages
sy=aggregate(weight ~ Cohorts,mean,data=IGages)
ss=expand.grid(Cohorts=1:31)
IGweight=merge(ss,sy,by=c("Cohorts"),all.x=TRUE)
IGweight[is.na(IGweight)]=0
IGweight[1,2]=0.1
IGweight
IGweight=IGweight[,'weight']
IGweight
maxwt <- max(IGweight)
corrwt <- ifelse(area=='eg', 6.0, ifelse(area=='nw', 5.5, 5.7))
plot(1:31,IGweight,"o")
IGweight[17]<-corrwt
IGweight[IGweight==0]<-maxwt
plot(1:31,IGweight,"o")
for (i in 8:30) IGweight[i]=(IGweight[i-1]+IGweight[i]+IGweight[i+1])/3
plot(1:31,IGweight,"o")
for (i in 15:30) IGweight[i]=(IGweight[i-1]+IGweight[i]+IGweight[i+1])/3
plot(1:31,IGweight,"o")
# determine age composition
xx$Year<-factor(xx$Year,levels=1997:2013) # add in years with no data
xx$Cohorts<-factor(xx$Cohorts,levels=1:31) # add in ages 1
IGagecomp=tapply(xx[,1],list(xx$Year,xx$Cohorts),length)
IGagecomp[is.na(IGagecomp)]=0
names(IGagecomp)=NULL
Nages=rowSums(IGagecomp)
names(Nages)=NULL
## Write a comment here
nIGagecomp <- matrix('\n', ncol=ncol(IGagecomp)+1, nrow=nrow(IGagecomp))
nIGagecomp[1:nrow(IGagecomp),1:ncol(IGagecomp)] <- IGagecomp
nIGagecomp <- t(nIGagecomp)
# ================================= bring in CATCH data ===========================
options(stringsAsFactors = FALSE)
dat <- read.csv('CA_20150513.csv', header = T)
dat
dat$CAM=as.numeric(as.character(dat$CAM))
catch=subset(dat, Area==area, select=c('Year', 'CAL', 'CAM', 'CAH','CAC','CAcv'))
catch$CAT=catch$CAM+catch$CAC
catch$CAsd=catch$CAM*catch$CAcv
Ocatch=catch[,'CAT']
Ocatchsd=catch[,'CAsd']
Ocatchsd[Ocatchsd==0]=1
# ===================Bring in DEPM data ============================================
options(stringsAsFactors = FALSE)
datt <- read.csv('DEPMdata - Zdaily 0.3 20150513.csv', header = T)
depm=subset(datt, Area==area, select=c('Area','DEPML','DEPMM','DEPMU','DEPMsd','DEPMcv'))
depm$lnDEPM=log(depm$DEPMM)
depm$lnDEPM[depm$DEPMM==0]=0
DEPML=depm[,'DEPML']
DEPMM=depm[,'DEPMM']
DEPMU=depm[,'DEPMU']
DEPMsd=depm[,'DEPMsd']
DEPMcv=depm[,'DEPMcv']
lnDEPMM=depm[,'lnDEPM']
lnDEPMsd=depm[,'DEPMcv']
# Pool Age-Composition data over all years;
options(stringsAsFactors = FALSE)
dat <- read.csv('InnerSBsnapperAges1997_2013.csv', header = T)
dd=dat[(dat$Area==area) & (as.numeric(as.character(dat$Cohorts)))>0,c('TLmm', 'Cohorts')]
dd$TLcm=round((dd$TLmm+5)/10,0)
zz=dd[c('TLcm', 'Cohorts')]
zz
zz$TLcm<-factor(zz$TLcm,levels=1:90) # add in years with no data
zz$Cohorts<-factor(zz$Cohorts,levels=1:31) # add in ages 0,1
AgeLen=tapply(zz[,1],list(zz$TLcm,zz$Cohorts),length)
AgeLen[is.na(AgeLen)]=0
# Determine of TL's in each age class
aa<-colSums(AgeLen)
aa[aa==0]<-0.1
ageprop<-t(round(t(AgeLen)/aa,5))
ageprop
# determine proportions retained pre year 2000
zz=ageprop[1:44,]
zz[1,1:2]=1
ageprop45=1-colSums(zz)
plot(1:31,ageprop45,"o")
ageprop45[2]=0
plot(1:31,ageprop45,"o")
names(ageprop45)=NULL
ageprop45
yy=ageprop[50:70,]
ageprop5070=colSums(yy)
plot(1:31,ageprop5070,"o")
ageprop5070[ageprop5070==1]=0
plot(1:31,ageprop5070,"o")
ageprop5070[14] <- ifelse(area=='nw', 0.4)
plot(1:31,ageprop5070,"o")
for (i in 8:20) ageprop5070[i]=(ageprop5070[i-1]+ageprop5070[i]+ageprop5070[i+1])/3
plot(1:31,ageprop5070,"o")
for (i in 8:20) ageprop5070[i]=(ageprop5070[i-1]+ageprop5070[i]+ageprop5070[i+1])/3
plot(1:31,ageprop5070,"o")
names(ageprop5070)=NULL
# ==================== sexual maturity =======================================
# ==== VonB parameters circa 01/06/2011 supplied by gary 14/05/2014 =========
# ====== maturity parameters supplied by Gary 14/05/2014 ===================
K <- c(0.174, 0.151, 0.169)
t0 <- c(-0.022, -0.084, 0.100)
Linf <- c(755, 728, 770)
LM50 <- c(350,400,420)
LM95 <- c(480,600,570)
LMslope=log(19)/(LM95-LM50)
# =========================================================================
pos <- ifelse(area=='eg', 1, ifelse(area=='nw', 2, 3))
LL=rep(NA, length(1:31))
SexMat=LL
LL = Linf[pos]*(1-exp(-K[pos]*(1:31+0.5-t0[pos])))
SexMat=1/(1+exp(-LMslope[pos]*(LL-LM50[pos])))
SexMat[SexMat>0.99] <- 1
# =======make the ADMB .dat file ==================================
# ===== set future catch level ===================================
for (i in 32:38) Ocatch[i]=10
for (i in 32:38) Ocatchsd[i]=Ocatch[i]*catch$CAcv[i]
# old SexMat
# 0.0058,0.0544,0.2820,0.6637,0.8845,0.96,0.98,0.99,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,1.00,
getwd()
fisharea=paste(area,"F.dat",sep="")
sink(fisharea)
cat("#Mode_quota\n",
1,10,
"\n#objective_function_weights\n",
1,1,1,1,1,
"\n#phases_R*_Q_Ho_M_steep_select_dev_AnnualF\n",
1,-1,-2,-2,-4,3,3,1,
"\n#fyear_dyear_lyear_pyear_fage_lage\n",
1983,1997,2013,2020,1,31,
"\n#legSize_PropF_Sdcorrect\n",
3,0.5,0.69,
"\n#Mprior_Mpriorsd_steepprior_steeppriorSD\n",
0.12,0.005,0.75,0.02,
"\n#qgrow\n",
rep.int(1,2013-1983+1),
"\n#age\n",
1:31,
"\n#Sexmat\n",
SexMat,
"\n#Retention_pre2001\n",
ageprop45,
"\n#Retention_post2000\n",
ageprop5070,
"\n#weightF\n",
IGweight,
"\n#weightM\n",
IGweight,
"\n#obsCatch\n",
Ocatch,
"\n#obsCatchsd\n",
Ocatchsd,
"\n#Ndepm\n",
17,
"\n#DEPM_SSB_1998_2013_LO_ME_HI_CV_SD\n",
DEPML,"\n",
DEPMM,"\n",
DEPMU,"\n",
DEPMcv,"\n",
DEPMsd,"\n",
"\n#lnNdepm\n",
lnDEPMM,
"\n#lnNdepm_sd\n",
lnDEPMsd,
"\n#Nage_samples\n",
Nages,
"\n#ages\n",
nIGagecomp,
"\n#checknum\n",
1234)
sink()
fisharea=paste(area,"F.pin",sep="")
sink(fisharea)
cat("#Rstar_Q_F0_M_H_A50_Aslope \n",
17.0,1.0,0.02,0.12,0.75,4.3,1.2,
"\n#dev \n",
-0.08,0.11,-0.04,0.2,0.02,0.53,0.54,0.79,0.52,-0.09,-1.17,-1.08,-0.48,-0.4,-0.43,-0.15,0.31,0.36,0.02,0.35,-0.11,-0.06,0.46,0.61,0.48,-0.44,-0.44,-0.21,-0.09,-0.01,-0.01,0,0,0,0,0,0,0,
"\n#devSD_RdevSD\n",
0.6,0.6,
"\n#F \n",
0.038,0.05,0.06,0.07,0.082,0.096,0.114,0.139,0.162,0.21,0.304,0.418,0.716,0.889,0.848,0.111,0.01,0.012,0.008,0.01,0.083,0.059,0.033,0.067,0.071,0.064,0.076,0.061,0.052,0.046,0.044,0.122,0.143,0.162,0.178,0.187,0.195,0.202,
"\n#EndofFile")
sink()
# =====================================================================================================
# ======================= RUN THE MODEL 8 TIMES TO GET PHI TO CONVERGE =============================
fisharea=paste(area,"F.bat",sep="")
shell(fisharea)
# ======================================================================================================
# ============== GET THE MODEL OUTPUTS SET UP =======================================================
# rm(list=ls())
options(stringsAsFactors=FALSE)
library(zoo)
par(cex=0.8, cex.axis=0.8, cex.lab=0.8, cex.main=1)
allyears=1983:2020
ageyears=1997:2013
depmyears=1998:2013
startage=1
endage=31
model.name=area
predprop=read.table(paste("pred_prop_age_composition.dat",sep=""), header=FALSE)
head(predprop)
obsprop=read.table(paste("prop_age_composition.dat",sep=""), header=FALSE)
head(obsprop)
phi=read.table(paste("value_of_phi.dat",sep=""), header=FALSE)
phi
list.files()
std=read.table(paste(area,"F.std",sep=""), header=TRUE, fill=TRUE)
std[,5]=NULL
head(std)
unique(std$name)
lin=readLines(paste(area, "F.rep",sep=""))
obscatch=strsplit(lin[grep("Observed Catch", lin)+2], " ")[[1]]
obscatch=as.numeric(obscatch[obscatch!=""])
depm=strsplit(lin[grep("DEPM", lin)+1], " ")[[1]]
depm=as.numeric(depm[depm!=""])
depmlow=strsplit(lin[grep("DEPM", lin)+2], " ")[[1]]
depmlow=as.numeric(depmlow[depmlow!=""])
depmupp=strsplit(lin[grep("DEPM", lin)+3], " ")[[1]]
depmupp=as.numeric(depmupp[depmupp!=""])
obscatch=strsplit(lin[grep("Observed Catch", lin)+2], " ")[[1]]
obscatch=as.numeric(obscatch[obscatch!=""])
obscatch=strsplit(lin[grep("Observed Catch", lin)+2], " ")[[1]]
obscatch=as.numeric(obscatch[obscatch!=""])
retain1=strsplit(lin[grep("Retain1", lin)+1], " ")[[1]]
retain1=as.numeric(retain1[retain1!=""])
retain2=strsplit(lin[grep("Retain2", lin)+1], " ")[[1]]
retain2=as.numeric(retain2[retain2!=""])
Nsamp=strsplit(lin[grep("NageSamp", lin)+1], " ")[[1]]
Nsamp=as.numeric(Nsamp[Nsamp!=""])
SSage=strsplit(lin[grep("SSage", lin)+1], " ")[[1]]
SSage=as.numeric(SSage[SSage!=""])[1]
SSageWT=strsplit(lin[grep("Wt_age", lin)+1], " ")[[1]]
SSageWT=as.numeric(SSageWT[SSageWT!=""])[1]
phi=phi*SSageWT/SSage
SRRa=std[which(std$name=="SRRa"), 3]
SRRb=std[which(std$name=="SRRb"), 3]
VEstar=std[which(std$name=="VEstar"), 3]
Estar=std[which(std$name=="Estar"), 3]
Rstar=std[which(std$name=="Rstar"), 3]
SelA50=std[which(std$name=="SelA50"), 3]
SelSlope=std[which(std$name=="SelSlope"), 3]
##spawning biomass
unique(std$name)
ssb=std[which(std$name=="SSB"), 3:4]
ssb$uppCL=ssb$value+1.96*ssb$std
ssb$lowCL=ssb$value-1.96*ssb$std
ssb$year=allyears
##prop SSB
propSSB=std[which(std$name=="propSSB"), 3:4]
propSSB$uppCL=propSSB$value+1.96*propSSB$std
propSSB$lowCL=propSSB$value-1.96*propSSB$std
propSSB$year=allyears
##recruits
recdev=std[which(std$name=="dev" | std$name=="fut_dev"), 3:4]
recdev$uppCL=recdev$value+1.96*recdev$std
recdev$lowCL=recdev$value-1.96*recdev$std
recdev$year=allyears
unique(std$name)
rec=std[which(std$name=="Rec"), 3:4]
rec$uppCL=rec$value+1.96*rec$std
rec$lowCL=rec$value-1.96*rec$std
rec$year=allyears
##fish mort
fmort=std[which(std$name=="Fy"), 3:4]
fmort$uppCL=fmort$value+1.96*fmort$std
fmort$lowCL=fmort$value-1.96*fmort$std
fmort$year=allyears
##eff fish mort
eff.fmort=std[which(std$name=="Fy_eff"), 3:4]
eff.fmort$uppCL=eff.fmort$value+1.96*eff.fmort$std
eff.fmort$lowCL=eff.fmort$value-1.96*eff.fmort$std
eff.fmort$year=allyears
##catch
predcatch=std[which(std$name=="CA"), 3:4]
predcatch$uppCL=predcatch$value+1.96*predcatch$std
predcatch$lowCL=predcatch$value-1.96*predcatch$std
predcatch$year=allyears
##age comp
# =============================================================================
# ====== now plot model fit and save plots =================================
# =============================================================================
# save to single pdf file or individual tiff files
# if (plot.out==1){
# ##plot to single pdf file
# pdf(file=paste("Model Output ", model.name, ".pdf", sep=""), width=7, height=11, onefile=TRUE, paper="A4")
# }
if (plot.out==1 | plot.out==0){ par(mfrow=c(4,2), xaxs="i", yaxs="i", mar=c(4,4,2,2), mgp=c(2.5,1,0))
} else par(mfrow=c(1,1), xaxs="i", yaxs="i", mar=c(4,4,2,2), mgp=c(2.5,1,0))
# =============================================================================
## (1) plot SRR
# =============================================================================
SS=0:600
plot(SS, SS/(SRRa+SRRb*SS), ylim=c(0,1.05*max(SS/(SRRa+SRRb*SS))), type="l", xlab="Spawning Stock", ylab="Recruitment",main="Stock Recruitment Relationship", las=1)
points(VEstar, Rstar, col=2, cex=2, pch=16)
points(ssb$value[1], ssb$value[1]/(SRRa+SRRb*ssb$value[1]), col=4, cex=2, pch=4)
if (plot.out==2){dev.print(device=tiff, paste(model.name, " 1 Stock Recruitment Relationship", ".tiff", sep=""), width=170, height=120, units="mm",
res=300, pointsize = 14, compression ='lzw')}
# =============================================================================
## (2) plot SB
# =============================================================================
plot(allyears, ssb$value, type="l", ylim=c(0,1.05*max(ssb$uppCL, VEstar)), las=1,
xlab="Year", ylab="Spawning Biomass", main="Spawning Biomass")
polygon(c(allyears, rev(allyears)), c(ssb$lowCL, rev(ssb$uppCL)), col=8, border=NA)
lines(allyears, ssb$value, type="l", lwd=2)
lines(depmyears[depm>0], depm[depm>0], "p", pch=16)
arrows(depmyears[depm>0], depmlow[depm>0], depmyears[depm>0], depmupp[depm>0], code=3, angle=90, length=0.1, col=1, lty=1)
abline(h=c(0.2, 0.3, 0.4, 1)*VEstar, col=2, lwd=c(1,1,2, 1), lty=c(1, 1, 1, 2))
# mtext(paste("q=",round(depm_catchability, 2), sep=""), side=3, line=-1.5, adj=0.99, cex=0.8)
box()
# mtext(paste("H0=",round(initial_harvest_fraction, 2), sep=""), side=3, line=-2.8, adj=0.99, cex=0.8)
if (plot.out==2){dev.print(device=tiff, paste(model.name, " 2 Spawning Biomass", ".tiff", sep=""), width=170, height=120, units="mm",
res=300, pointsize = 14, compression ='lzw')}
plot(allyears, propSSB$value, type="l", ylim=c(0,100), las=1,
xlab="Year", ylab="Spawning Biomass (% Unfished)", main="Spawning Biomass (% Unfished)")
polygon(c(allyears, rev(allyears)), c(propSSB$lowCL, rev(propSSB$uppCL)), col=8, border=NA)
lines(allyears, propSSB$value, type="l", lwd=2)
lines(depmyears[depm>0], depm[depm>0]/VEstar*100, "p", pch=16)
arrows(depmyears[depm>0], depmlow[depm>0]/VEstar*100, depmyears[depm>0], depmupp[depm>0]/VEstar*100, code=3, angle=90, length=0.1, col=1, lty=1)
abline(h=c(0.2, 0.3, 0.4)*100, col=2, lwd=c(1,1,2), lty=c(1, 1, 1))
# mtext(paste("q=",round(depm_catchability, 2), sep=""), side=3, line=-1.5, adj=0.99, cex=0.8)
box()
# mtext(paste("H0=",round(initial_harvest_fraction, 2), sep=""), side=3, line=-2.8, adj=0.99, cex=0.8)
if (plot.out==2){dev.print(device=tiff, paste(model.name, " 2a Spawning Biomass Prop", ".tiff", sep=""), width=170, height=120, units="mm",
res=300, pointsize = 14, compression ='lzw')}
# =============================================================================
## (3) catch
# =============================================================================
plot(allyears, predcatch$value, type="l", ylim=c(0,ceiling(max(40/10))*10), las=1, xlab="Year", ylab="Annual Yield", main="Annual Yield")
polygon(c(allyears, rev(allyears)), c(predcatch$lowCL, rev(predcatch$uppCL)), col=8, border=NA)
lines(allyears, predcatch$value, type="l", lwd=2)
points(allyears, obscatch, pch=16, col=2)
box()
if (plot.out==2){dev.print(device=tiff, paste(model.name, " 3 Annual Yield", ".tiff", sep=""), width=170, height=120, units="mm",
res=300, pointsize = 14, compression ='lzw')}
## =============================================================================
## (5) Selectivity and Retention
## =============================================================================
ages=startage:endage
plot(ages, 1/ (1.0 + exp(-SelSlope * (ages - SelA50) )), type="l", lwd=2, xlim=c(0,endage), ylim=c(0,1), las=1,
xlab="Age", ylab="Probability", main="Selectivity")
lines(ages, retain1[1:length(ages)], type="l", col=4)
lines(ages, retain2[1:length(ages)], type="l", col=2)
legend("right", c("Selectivity", "Retention to 2000", "Retention from 2001"), lty=1, lwd=c(2,1,1), col=c(1,4,2), bty="n", y.intersp=1.5)
if (plot.out==2){dev.print(device=tiff, paste(model.name, " 4 Selectivity and Retention", ".tiff", sep=""), width=170, height=120, units="mm",
res=300, pointsize = 14, compression ='lzw')}
# =============================================================================
## (5) fishing mortality
# =============================================================================
plot(allyears, fmort$value, type="l", ylim=c(0,1.0*max(fmort$uppCL)), las=1, xlab="Year", ylab="Fishing Mortality", main="Fully Selected Fishing Mortality")
polygon(c(allyears, rev(allyears)), c(fmort$lowCL, rev(fmort$uppCL)), col=8, border=NA)
lines(allyears, fmort$value, type="l", lwd=2)
abline(h=c(2/3,1,1.5)*0.12, col=2, lty=c(2,1,2))
box()
# mtext(paste("Q=",round(depm_catchability, 2), sep=""), side=3, line=-1, adj=0.99)
if (plot.out==2){dev.print(device=tiff, paste(model.name, " 5 Fishing Mortality", ".tiff", sep=""), width=170, height=120, units="mm",
res=300, pointsize = 14, compression ='lzw')}
# =============================================================================
## (6) effective fishing mortality
# =============================================================================
plot(allyears, eff.fmort$value, type="l", ylim=c(0,1.05*max(eff.fmort$uppCL)), las=1,
xlab="Year", ylab="Fishing Mortality", main="Effective Fishing Mortality")
polygon(c(allyears, rev(allyears)), c(eff.fmort$lowCL, rev(eff.fmort$uppCL)), col=8, border=NA)
lines(allyears, eff.fmort$value, type="l", lwd=2)
abline(h=c(2/3,1,1.5)*0.12, col=2, lty=c(2,1,2))
box()
# mtext(paste("Q=",round(depm_catchability, 2), sep=""), side=3, line=-1, adj=0.99)
if (plot.out==2){dev.print(device=tiff, paste(model.name, " 5a Effective Fishing Mortality", ".tiff", sep=""), width=170, height=120, units="mm",
res=300, pointsize = 14, compression ='lzw')}
## =============================================================================
## (5) recruitment
## =============================================================================
plot(allyears, recdev$value, type="l", ylim=c(-2.6,2.6), las=1, xlab="Year", ylab="Recruitment", main="Recruitment Deviation")
polygon(c(allyears, rev(allyears)), c(recdev$lowCL, rev(recdev$uppCL)), col=8, border=NA)
lines(allyears, recdev$value, type="l", lwd=2)
abline(h=0, col=1, lty=2)
abline(h=mean(recdev$value[recdev$year<=2013]), col=2)
box()
if (plot.out==2){dev.print(device=tiff, paste(model.name, " 7 Recruitment Deviation", ".tiff", sep=""), width=170, height=120, units="mm",
res=300, pointsize = 14, compression ='lzw')}
plot(allyears, rec$value, type="l", ylim=c(0, max(rec$uppCL)), las=1, xlab="Year", ylab="Recruitment", main="Recruitment")
polygon(c(allyears, rev(allyears)), c(rec$lowCL, rev(rec$uppCL)), col=8, border=NA)
lines(allyears, rec$value, type="l", lwd=2)
box()
if (plot.out==2){dev.print(device=tiff, paste(model.name, " 7a Recruitment", ".tiff", sep=""), width=170, height=120, units="mm",
res=300, pointsize = 14, compression ='lzw')}
## =============================================================================
## (5) plot age composition data
## =============================================================================
##data
predprop
obsprop
par(mfrow=c(4,3), mar=c(4,3,2,2), xaxs="i", yaxs="i", mgp=c(2.5,1,0))
yr <- 2011
for (yr in ageyears){
if (sum(obsprop[yr-min(ageyears)+1, ])>0){
age_comp_index=which(ageyears==yr)
data_index=which(allyears==yr)
plot(startage:endage,obsprop[age_comp_index,], type="o", xlim=c(0,endage),
ylim=c(0,max(predprop[age_comp_index,], obsprop[age_comp_index,])*1.05),
lty=2, main="", xlab="Age", ylab="", las=1)
lines(startage:endage,predprop[age_comp_index,],col="red")
mtext(yr, side=3, line=0.2, adj=0.5, cex=0.6, font=2)
mtext(paste("N=",Nsamp[age_comp_index], sep=""), side=3, line=-1, adj=0.99, cex=0.5)
mtext(paste("Neff=",round(phi*Nsamp[age_comp_index],0), sep=""), side=3, line=-2, adj=0.99, cex=0.5)
legend("right", c("obs", "exp"), lty=c(NA,1), col=1:2, pch=c(1, NA), cex=0.8, bty="n")
}
}
if (plot.out==2){dev.print(device=tiff, paste(model.name, " 8 Age Composition", ".tiff", sep=""), width=170, height=200, units="mm",
res=300, pointsize = 14, compression ='lzw')}
if (plot.out==1) dev.off()
|
######
##### Mothur output files and resulting modified files:
# - alpha diversity measures --> xanthan_name_summary.txt
# - input file is a summary file of alpha diversity measures created in mothur
# - output file combines these with metadata
# - xanthan_name.final.0.03.cons.taxonomy --> xanthan_name.taxonomy.names.txt
# - input file lists the taxonomic classifications for each of the OTUs at 0.03 cutoff
# - output file was modified to reflect new OTU names that includes the taxonomy, as well as cleaner classifications
# - xanthan_name.final.0.03.pick.0.03.filter.summary --> xanthan_name.betasummary.txt
# - input file represents pairwise distances (beta diversity) generated in mothur, filtered to include only relevant samples
# - output file adds sample meta data to sampleIDs
# - xanthan_name.final.0.03.pick.0.03.filter.shared, xanthan_name_metadata.txt --> xanthan_name_otus.w.meta.txt
# - input .shared file was previously filtered using the specified measures
# - output file combines metadata with OTU counts
# - combining all data (metadata, alpha metrics, relative abundance of OTUs, and metabolites) --> xanthan_name_allmeasures.txt
# - all data input files
# - output file has all variables, combined in one file
# - --> xanthan_name_genfrac2p.all_w.meta.txt
# - input file is a file produced in mothur classifying sequences directly to the RDP database
# - output file is a file with phylotype information (relative abundance of genus-level sequence assignments)
###### alpha summaries --> xanthan_name_summary.txt
# - xanthan_name.final.0.03.pick.0.03.pick.groups.summary
# - xanthan_name.final.0.03.pick.0.03.pick.thetayc.0.03.lt.pcoa.axes
# - xanthan_name.final.0.03.pick.0.03.pick.thetayc.0.03.lt.nmds.axes
# - xanthan_name_metadata.txt
#####
setwd("C:/Users/mksch/Box Sync/working_folder_schnizlein/collaborations/xanthan_gum_community_martens/experiment_1_20171127_youngmice/mothur_analysis/")
substrRight <- function(x, n){
substr(x, nchar(x)-n+1, nchar(x))
}
substrLeft <- function(x, n){
substr(x, 1, n)
}
#####
## read in files and merge together:
meta<-read.table(file="r_analysis/xanthan_metadata.tsv", header=TRUE)
meta$timepoint<-as.character(meta$sampleID)
meta$timepoint<-substring(meta$timepoint,6)
meta$cecum_result<-substrRight(meta$timepoint,2)
meta$cecum_result<-gsub("cr","cecum",meta$cecum_result)
meta$cecum_result<-gsub("0c","cecum",meta$cecum_result)
meta$cecum_result<-gsub("4c","cecum",meta$cecum_result)
meta$cecum_result<-gsub("[^cecum]+", "feces", meta$cecum_result)
meta$timepoint<-substrLeft(meta$timepoint, 3)
# One sample was mislabelled as d-11 when it was actually d-12
meta$timepoint<-gsub("n11","n12",meta$timepoint)
meta$timepoint<-gsub("p", "", meta$timepoint)
meta$timepoint<-gsub("d", "", meta$timepoint)
meta$timepoint<-gsub("n","-", meta$timepoint)
meta$timepoint<-as.numeric(meta$timepoint)
meta$timepoint<-gsub("-14","a",meta$timepoint)
meta$timepoint<-gsub("-12","b",meta$timepoint)
meta$timepoint<-gsub("-3","l",meta$timepoint)
meta$timepoint<-gsub("3","e",meta$timepoint)
meta$timepoint<-gsub("4","f",meta$timepoint)
meta$timepoint<-gsub("6","g",meta$timepoint)
meta$timepoint<-gsub("7","h",meta$timepoint)
meta$timepoint<-gsub("8","i",meta$timepoint)
meta$timepoint<-gsub("9","j",meta$timepoint)
meta$timepoint<-gsub("10","k",meta$timepoint)
meta$timepoint<-gsub("1","d",meta$timepoint)
meta$timepoint<-gsub("0","c",meta$timepoint)
meta$timepoint<-gsub("a","0",meta$timepoint)
meta$timepoint<-gsub("b","2",meta$timepoint)
meta$timepoint<-gsub("c","14",meta$timepoint)
meta$timepoint<-gsub("d","15",meta$timepoint)
meta$timepoint<-gsub("e","17",meta$timepoint)
meta$timepoint<-gsub("f","18",meta$timepoint)
meta$timepoint<-gsub("g","20",meta$timepoint)
meta$timepoint<-gsub("h","21",meta$timepoint)
meta$timepoint<-gsub("i","22",meta$timepoint)
meta$timepoint<-gsub("j","23",meta$timepoint)
meta$timepoint<-gsub("k","24",meta$timepoint)
meta$timepoint<-gsub("l","-3",meta$timepoint)
meta$group2<-meta$group
meta$group2<-as.character(meta$group2)
meta$group2<-as.factor(meta$group2)
pcoa<-read.table(file="xanthan_name.final.0.03.pick.0.03.filter.thetayc.0.03.lt.pcoa.axes", header=TRUE)
pcoa<-pcoa[,1:4]
colnames(pcoa)[2:4] <- paste("pcoa03", colnames(pcoa)[2:4], sep = "_")
colnames(pcoa)[1]<-"sampleID"
nmds<-read.table(file="xanthan_name.final.0.03.pick.0.03.filter.thetayc.0.03.lt.nmds.axes", header=TRUE)
nmds<-nmds[1:4]
colnames(nmds)[2:4] <- paste("nmds03", colnames(nmds)[2:4], sep = "_")
colnames(nmds)[1]<-"sampleID"
sum<-read.table(file="xanthan_name.final.0.03.pick.groups.summary", header=TRUE)
sum<-subset(sum, select=-c(label))
colnames(sum)[2:16] <- paste(colnames(sum)[2:16], "03", sep = "_")
colnames(sum)[1]<-"sampleID"
combined.pcoa<-merge(meta, pcoa, by.x=c("seqID"), by.y=c("sampleID"))
combined.nmds<-merge(combined.pcoa, nmds, by.x=c("seqID"), by.y=c("sampleID"))
combined.sum<-merge(combined.nmds, sum, by.x=c("seqID"), by.y=c("sampleID"))
write.table(combined.sum, 'xanthan_summary.txt',quote=FALSE,sep="\t", col.names=TRUE, row.names=FALSE)
#####
## xanthan_name.final.0.03.cons.taxonomy --> xanthan_name.taxonomy.names.txt
taxonomy_file<-read.table(file="xanthan_name.final.0.03.cons.taxonomy", header=TRUE)
# taxname to OTU:
tax <- taxonomy_file$Taxonomy
tax <- gsub("\\(\\d*\\)", "", tax)
tax <- gsub(";unclassified", "", tax)
tax <- gsub("_1", "", tax)
tax <- gsub(";$", "", tax)
tax <- gsub("/.*", "", tax)
tax <- gsub(".*;", "", tax)
tax.names <-paste(taxonomy_file$OTU, tax, sep="_")
tax.names <-gsub("000", "", tax.names)
taxonomy_file$taxname<-tax.names
# phylum variable:
phylum <- taxonomy_file$Taxonomy
phylum <- gsub("\\(\\d*\\)", "", phylum)
phylum <- gsub("Bacteria;", "", phylum)
phylum <- gsub(";$", "", phylum)
phylum <- gsub(";.*", "", phylum)
taxonomy_file$phylum<-phylum
# family variable:
fam <- taxonomy_file$Taxonomy
fam <- gsub("\\(\\d*\\)", "", tax)
fam <- gsub(";unclassified", "", tax)
fam <- gsub("_1", "", tax)
fam <- gsub(";$", "", tax)
fam <- gsub("/.*", "", tax)
fam <- gsub(".*les;", "", tax)
fam <- gsub(";.*", "", tax)
taxonomy_file$family<-fam
write.table(taxonomy_file, file="xanthan.taxonomy.names.txt", sep="\t", quote=FALSE, col.names=TRUE, row.names=FALSE)
# note: some family level marks may still be messed up due to naming scheme
# these can be addressed when necessary
#####
## Subsetting pairwise distances specific to human files
# - xanthan_name.final.0.03.pick.0.03.filter.0.03.pick.summary, xanthan_metadata.txt --> xanthan.betasummary.txt
mdist<-read.table(file="xanthan_name.final.0.03.pick.0.03.filter.summary", header=TRUE, sep="\t", row.names = NULL)
mdist<-mdist[,-c(1,4)]
# now add metadata for each sample comparison:
var<-read.table(file='xanthan_summary.txt', header=TRUE)
var.sm<-var[,c(1,4,6)]
var.comp<-var.sm
colnames(var.comp)<-paste0(colnames(var.comp),"_comp")
# merge files:
m1<-merge(var.sm, mdist, by.x=c("seqID"), by.y=c("label")) #this merges the data based on the sampleID/group1 match
m2<-merge(var.comp, m1, by.x=c("seqID_comp"), by.y=c("comparison")) #this merges the data based on the sampleID/group1 match
write.table(m2, file="xanthan.betasummary.txt", sep="\t", quote=FALSE, col.names=TRUE, row.names=FALSE)
#####
# Creating an OTU count file:
## heatmap with metadata and taxonomy, and clustering:
# read in files:
meta<-read.table(file="xanthan_summary.txt", header=TRUE)
shared<-read.table(file="xanthan_name.final.0.03.pick.0.03.filter.shared", header=TRUE, row.names=2)
dim(shared)
shared$seqID<-rownames(shared)
# merge with meta:
sum.shared<-merge(meta, shared, by.x="seqID", by.y="seqID")
sum.shared<-subset(sum.shared, select =-c(label, numOtus) )
sum.shared<-droplevels(sum.shared)
# Output gives raw OTU sequence counts
write.table(sum.shared, file="xanthan_otus.w.meta.txt", sep="\t", quote=FALSE, col.names=TRUE, row.names=FALSE)
#####
# Creating OTU rel abund file:
meta<-read.table(file="xanthan_summary.txt", header=TRUE)
shared<-read.table(file="xanthan_name.final.0.03.pick.0.03.filter.shared", header=TRUE, row.names=2)
otu<-subset(shared, select =-c(label, numOtus) )
otu.rel<-otu/rowSums(otu)
otu.rel$sampleID<-rownames(otu.rel)
combo2<-merge(meta, otu.rel, by="sampleID", all.x=TRUE)
write.table(combo2, file="xanthan.allmeasures.txt", sep="\t", quote=FALSE, col.names=TRUE, row.names=FALSE)
#####
###### xanthan_name_genfrac2p.all_w.meta.txt
# - xanthan_name.trim.contigs.good.unique.good.filter.unique.precluster.pick.rdp.wang.tax.summary
# - xanthan_name_all.genera.txt #created previously with mouse data)
# - xanthan_name_summary.txt (as metadata)
# step 1: create a 'phylotype' file with phylum levels
# read in mothur file; get genus-level assignments and assign phyla
tax<-read.table(file="xanthan_name.trim.contigs.good.unique.good.filter.unique.precluster.pick.seed_v128.wang.tax.summary", header=TRUE)
# get phylum designations for level 6 (genera) rows, and curate levels for graphing (later):
tax3<-tax[which(tax$taxlevel==3), ]
tax3[, c("rankID", "taxon")]
tax6<-tax[which(tax$taxlevel==6), ]
tax6$rankID<-gsub("^0.1.1.*", "20_Euryarchaeota", tax6$rankID)
tax6$rankID<-gsub("^0.2.1\\..*", "04_Actinobacteria", tax6$rankID)
tax6$rankID<-gsub("^0.2.2\\..*", "11_Bacteria_Unclassified", tax6$rankID)
tax6$rankID<-gsub("^0.2.3\\..*", "01_Bacteroidetes", tax6$rankID)
tax6$rankID<-gsub("^0.2.4\\..*", "20_Cyanobacteria", tax6$rankID)
tax6$rankID<-gsub("^0.2.5\\..*", "20_Firmicutes", tax6$rankID)
tax6$rankID<-gsub("^0.2.6\\..*", "20_Fusobacteria", tax6$rankID)
tax6$rankID<-gsub("^0.2.7\\..*", "20_Proteobacteria", tax6$rankID)
tax6$rankID<-gsub("^0.2.8\\..*", "20_Saccharibacteria", tax6$rankID)
tax6$rankID<-gsub("^0.2.9\\..*", "20_Tenericutes", tax6$rankID)
tax6$rankID<-gsub("^0.2.10..*", "20_Verrucomicrobia", tax6$rankID)
colnames(tax6)[2]<-"phylum"
# remove samples w/ <5000:
subtax6<-subset(tax6, select=-c(taxlevel, daughterlevels))
subtax6<-subtax6[order(subtax6$phylum, -subtax6$total), ]
taxmatrix<-subtax6[, c(4:ncol(subtax6))]
duplicated(subtax6$taxon) #identify any duplicated taxon names
subtax6$taxon<-as.character(subtax6$taxon)
subtax6$taxon[15]<-"Actinobacteria_unclassified2"
subtax6$taxon[30]<-"Cyanobacteria_unclassified2"
subtax6$taxon<-as.factor(subtax6$taxon)
rownames(taxmatrix)<-make.names(subtax6$taxon)
genera<- taxmatrix[, colSums(taxmatrix)>5000,]
genera<-genera[,1:143]
# get rel. abund fraction:
genmatrix<-as.data.frame(t(genera))
genera.fr<-genmatrix/rowSums(genmatrix)*100
genus.fr<-t(genera.fr)
all.genera<-cbind(subtax6[1:3], genus.fr)
write.table(all.genera, file="allxanthan_all.genera.txt", sep="\t", quote=FALSE, col.names=TRUE, row.names=FALSE)
### step 2: combine with metadata and filter out data relevant to human stuff:
# read in files
combined<-read.table(file="xanthan.allmeasures.txt", header = TRUE)
meta<-combined[, 1:9]
all.genera<-read.table(file="allxanthan_all.genera.txt", header = TRUE)
genbar<-all.genera
rownames(genbar)<-make.names(genbar$taxon, unique=TRUE)
mice<-genbar
phyla<-subset(genbar, select=c(phylum,taxon,total))
mice<-subset(mice, select=-c(phylum,taxon,total))
# now filter to 1 or 2%:
mice[] <- lapply(mice[,1:143],as.numeric)
genus1<- mice[rowSums(mice>=1)>=1,]
# mice[,1:3] <- lapply(mice[,1:3],as.factor)
# namelist1p<-as.character(rownames(genus1))
# phyla1p<-phyla[phyla$taxon %in% namelist1p, ]
# genera1<-cbind(phyla, genus1)
# get top 2%
genus2<- mice[rowSums(mice>=2)>=2,]
namelist2p<-as.character(rownames(genus2))
phyla2p<-phyla[phyla$taxon %in% namelist2p, ]
genera2<-genus2
#write.table(genera2, file="xanthan_name_genfrac2p.txt", sep="\t", quote=FALSE, col.names=TRUE, row.names=FALSE)
# read in file and combine with meta:
barg<-NULL
genbar<-genera2
# rm_g<-subset(genbar, select =-c(phylum, total) )
# barg<-as.data.frame(t(rm_g))
barg<-as.data.frame(t(genbar))
# barg[4:148,] <- sapply(barg[4:148,],as.numeric)
# barg$other<-100-rowSums(barg[4:148,])
# barg<-barg[-1,]
barg[]<-lapply(barg[], as.character)
barg[]<-lapply(barg[], as.numeric)
barg$other<-100-rowSums(barg)
barg$sampleID<-rownames(barg)
# col.gen<-c(as.character(genbar$color), "grey47")
barg$sampleID<-gsub("X", "", barg$sampleID)
bar<-merge(combined, barg, by.x=c("sampleID"), by.y=c("sampleID"), all.y = TRUE, all.x = TRUE)
write.table(bar, 'xanthan_genfrac2p.all_w.meta.txt',quote=FALSE,sep="\t", col.names=NA)
######
# if you want all genera (including the rarer guys), do this:
meta<-meta
genbar<-read.table(file="allxanthan_all.genera.txt", header=TRUE, row.names=NULL)
#genbar5<- genbar[rowSums(genbar[ ,3:ncol(genbar)]>=5)>=5,]
rm_g<-subset(genbar, select =-c(phylum, total) )
barg<-as.data.frame(t(rm_g))
taxon.name<-barg[1,]
taxon.name<-as.data.frame(t(taxon.name))
colnames(barg)<-taxon.name$taxon
barg$sampleID<-rownames(barg)
bar2<-merge(meta, barg, by.x="sampleID", by.y="sampleID", all.y = TRUE)
write.table(bar2, 'allxanthan_allgenera_w.meta.txt',quote=FALSE,sep="\t", col.names=NA)
|
/Code/mothur_preanalysis_file_processing_cefoperazone.R
|
no_license
|
mschnizlein/xg_microbiota
|
R
| false | false | 13,268 |
r
|
######
##### Mothur output files and resulting modified files:
# - alpha diversity measures --> xanthan_name_summary.txt
# - input file is a summary file of alpha diversity measures created in mothur
# - output file combines these with metadata
# - xanthan_name.final.0.03.cons.taxonomy --> xanthan_name.taxonomy.names.txt
# - input file lists the taxonomic classifications for each of the OTUs at 0.03 cutoff
# - output file was modified to reflect new OTU names that includes the taxonomy, as well as cleaner classifications
# - xanthan_name.final.0.03.pick.0.03.filter.summary --> xanthan_name.betasummary.txt
# - input file represents pairwise distances (beta diversity) generated in mothur, filtered to include only relevant samples
# - output file adds sample meta data to sampleIDs
# - xanthan_name.final.0.03.pick.0.03.filter.shared, xanthan_name_metadata.txt --> xanthan_name_otus.w.meta.txt
# - input .shared file was previously filtered using the specified measures
# - output file combines metadata with OTU counts
# - combining all data (metadata, alpha metrics, relative abundance of OTUs, and metabolites) --> xanthan_name_allmeasures.txt
# - all data input files
# - output file has all variables, combined in one file
# - --> xanthan_name_genfrac2p.all_w.meta.txt
# - input file is a file produced in mothur classifying sequences directly to the RDP database
# - output file is a file with phylotype information (relative abundance of genus-level sequence assignments)
###### alpha summaries --> xanthan_name_summary.txt
# - xanthan_name.final.0.03.pick.0.03.pick.groups.summary
# - xanthan_name.final.0.03.pick.0.03.pick.thetayc.0.03.lt.pcoa.axes
# - xanthan_name.final.0.03.pick.0.03.pick.thetayc.0.03.lt.nmds.axes
# - xanthan_name_metadata.txt
#####
setwd("C:/Users/mksch/Box Sync/working_folder_schnizlein/collaborations/xanthan_gum_community_martens/experiment_1_20171127_youngmice/mothur_analysis/")
substrRight <- function(x, n){
substr(x, nchar(x)-n+1, nchar(x))
}
substrLeft <- function(x, n){
substr(x, 1, n)
}
#####
## read in files and merge together:
meta<-read.table(file="r_analysis/xanthan_metadata.tsv", header=TRUE)
meta$timepoint<-as.character(meta$sampleID)
meta$timepoint<-substring(meta$timepoint,6)
meta$cecum_result<-substrRight(meta$timepoint,2)
meta$cecum_result<-gsub("cr","cecum",meta$cecum_result)
meta$cecum_result<-gsub("0c","cecum",meta$cecum_result)
meta$cecum_result<-gsub("4c","cecum",meta$cecum_result)
meta$cecum_result<-gsub("[^cecum]+", "feces", meta$cecum_result)
meta$timepoint<-substrLeft(meta$timepoint, 3)
# One sample was mislabelled as d-11 when it was actually d-12
meta$timepoint<-gsub("n11","n12",meta$timepoint)
meta$timepoint<-gsub("p", "", meta$timepoint)
meta$timepoint<-gsub("d", "", meta$timepoint)
meta$timepoint<-gsub("n","-", meta$timepoint)
meta$timepoint<-as.numeric(meta$timepoint)
meta$timepoint<-gsub("-14","a",meta$timepoint)
meta$timepoint<-gsub("-12","b",meta$timepoint)
meta$timepoint<-gsub("-3","l",meta$timepoint)
meta$timepoint<-gsub("3","e",meta$timepoint)
meta$timepoint<-gsub("4","f",meta$timepoint)
meta$timepoint<-gsub("6","g",meta$timepoint)
meta$timepoint<-gsub("7","h",meta$timepoint)
meta$timepoint<-gsub("8","i",meta$timepoint)
meta$timepoint<-gsub("9","j",meta$timepoint)
meta$timepoint<-gsub("10","k",meta$timepoint)
meta$timepoint<-gsub("1","d",meta$timepoint)
meta$timepoint<-gsub("0","c",meta$timepoint)
meta$timepoint<-gsub("a","0",meta$timepoint)
meta$timepoint<-gsub("b","2",meta$timepoint)
meta$timepoint<-gsub("c","14",meta$timepoint)
meta$timepoint<-gsub("d","15",meta$timepoint)
meta$timepoint<-gsub("e","17",meta$timepoint)
meta$timepoint<-gsub("f","18",meta$timepoint)
meta$timepoint<-gsub("g","20",meta$timepoint)
meta$timepoint<-gsub("h","21",meta$timepoint)
meta$timepoint<-gsub("i","22",meta$timepoint)
meta$timepoint<-gsub("j","23",meta$timepoint)
meta$timepoint<-gsub("k","24",meta$timepoint)
meta$timepoint<-gsub("l","-3",meta$timepoint)
meta$group2<-meta$group
meta$group2<-as.character(meta$group2)
meta$group2<-as.factor(meta$group2)
pcoa<-read.table(file="xanthan_name.final.0.03.pick.0.03.filter.thetayc.0.03.lt.pcoa.axes", header=TRUE)
pcoa<-pcoa[,1:4]
colnames(pcoa)[2:4] <- paste("pcoa03", colnames(pcoa)[2:4], sep = "_")
colnames(pcoa)[1]<-"sampleID"
nmds<-read.table(file="xanthan_name.final.0.03.pick.0.03.filter.thetayc.0.03.lt.nmds.axes", header=TRUE)
nmds<-nmds[1:4]
colnames(nmds)[2:4] <- paste("nmds03", colnames(nmds)[2:4], sep = "_")
colnames(nmds)[1]<-"sampleID"
sum<-read.table(file="xanthan_name.final.0.03.pick.groups.summary", header=TRUE)
sum<-subset(sum, select=-c(label))
colnames(sum)[2:16] <- paste(colnames(sum)[2:16], "03", sep = "_")
colnames(sum)[1]<-"sampleID"
combined.pcoa<-merge(meta, pcoa, by.x=c("seqID"), by.y=c("sampleID"))
combined.nmds<-merge(combined.pcoa, nmds, by.x=c("seqID"), by.y=c("sampleID"))
combined.sum<-merge(combined.nmds, sum, by.x=c("seqID"), by.y=c("sampleID"))
write.table(combined.sum, 'xanthan_summary.txt',quote=FALSE,sep="\t", col.names=TRUE, row.names=FALSE)
#####
## xanthan_name.final.0.03.cons.taxonomy --> xanthan_name.taxonomy.names.txt
taxonomy_file<-read.table(file="xanthan_name.final.0.03.cons.taxonomy", header=TRUE)
# taxname to OTU:
tax <- taxonomy_file$Taxonomy
tax <- gsub("\\(\\d*\\)", "", tax)
tax <- gsub(";unclassified", "", tax)
tax <- gsub("_1", "", tax)
tax <- gsub(";$", "", tax)
tax <- gsub("/.*", "", tax)
tax <- gsub(".*;", "", tax)
tax.names <-paste(taxonomy_file$OTU, tax, sep="_")
tax.names <-gsub("000", "", tax.names)
taxonomy_file$taxname<-tax.names
# phylum variable:
phylum <- taxonomy_file$Taxonomy
phylum <- gsub("\\(\\d*\\)", "", phylum)
phylum <- gsub("Bacteria;", "", phylum)
phylum <- gsub(";$", "", phylum)
phylum <- gsub(";.*", "", phylum)
taxonomy_file$phylum<-phylum
# family variable:
fam <- taxonomy_file$Taxonomy
fam <- gsub("\\(\\d*\\)", "", tax)
fam <- gsub(";unclassified", "", tax)
fam <- gsub("_1", "", tax)
fam <- gsub(";$", "", tax)
fam <- gsub("/.*", "", tax)
fam <- gsub(".*les;", "", tax)
fam <- gsub(";.*", "", tax)
taxonomy_file$family<-fam
write.table(taxonomy_file, file="xanthan.taxonomy.names.txt", sep="\t", quote=FALSE, col.names=TRUE, row.names=FALSE)
# note: some family level marks may still be messed up due to naming scheme
# these can be addressed when necessary
#####
## Subsetting pairwise distances specific to human files
# - xanthan_name.final.0.03.pick.0.03.filter.0.03.pick.summary, xanthan_metadata.txt --> xanthan.betasummary.txt
mdist<-read.table(file="xanthan_name.final.0.03.pick.0.03.filter.summary", header=TRUE, sep="\t", row.names = NULL)
mdist<-mdist[,-c(1,4)]
# now add metadata for each sample comparison:
var<-read.table(file='xanthan_summary.txt', header=TRUE)
var.sm<-var[,c(1,4,6)]
var.comp<-var.sm
colnames(var.comp)<-paste0(colnames(var.comp),"_comp")
# merge files:
m1<-merge(var.sm, mdist, by.x=c("seqID"), by.y=c("label")) #this merges the data based on the sampleID/group1 match
m2<-merge(var.comp, m1, by.x=c("seqID_comp"), by.y=c("comparison")) #this merges the data based on the sampleID/group1 match
write.table(m2, file="xanthan.betasummary.txt", sep="\t", quote=FALSE, col.names=TRUE, row.names=FALSE)
#####
# Creating an OTU count file:
## heatmap with metadata and taxonomy, and clustering:
# read in files:
meta<-read.table(file="xanthan_summary.txt", header=TRUE)
shared<-read.table(file="xanthan_name.final.0.03.pick.0.03.filter.shared", header=TRUE, row.names=2)
dim(shared)
shared$seqID<-rownames(shared)
# merge with meta:
sum.shared<-merge(meta, shared, by.x="seqID", by.y="seqID")
sum.shared<-subset(sum.shared, select =-c(label, numOtus) )
sum.shared<-droplevels(sum.shared)
# Output gives raw OTU sequence counts
write.table(sum.shared, file="xanthan_otus.w.meta.txt", sep="\t", quote=FALSE, col.names=TRUE, row.names=FALSE)
#####
# Creating OTU rel abund file:
meta<-read.table(file="xanthan_summary.txt", header=TRUE)
shared<-read.table(file="xanthan_name.final.0.03.pick.0.03.filter.shared", header=TRUE, row.names=2)
otu<-subset(shared, select =-c(label, numOtus) )
otu.rel<-otu/rowSums(otu)
otu.rel$sampleID<-rownames(otu.rel)
combo2<-merge(meta, otu.rel, by="sampleID", all.x=TRUE)
write.table(combo2, file="xanthan.allmeasures.txt", sep="\t", quote=FALSE, col.names=TRUE, row.names=FALSE)
#####
###### xanthan_name_genfrac2p.all_w.meta.txt
# - xanthan_name.trim.contigs.good.unique.good.filter.unique.precluster.pick.rdp.wang.tax.summary
# - xanthan_name_all.genera.txt #created previously with mouse data)
# - xanthan_name_summary.txt (as metadata)
# step 1: create a 'phylotype' file with phylum levels
# read in mothur file; get genus-level assignments and assign phyla
tax<-read.table(file="xanthan_name.trim.contigs.good.unique.good.filter.unique.precluster.pick.seed_v128.wang.tax.summary", header=TRUE)
# get phylum designations for level 6 (genera) rows, and curate levels for graphing (later):
tax3<-tax[which(tax$taxlevel==3), ]
tax3[, c("rankID", "taxon")]
tax6<-tax[which(tax$taxlevel==6), ]
tax6$rankID<-gsub("^0.1.1.*", "20_Euryarchaeota", tax6$rankID)
tax6$rankID<-gsub("^0.2.1\\..*", "04_Actinobacteria", tax6$rankID)
tax6$rankID<-gsub("^0.2.2\\..*", "11_Bacteria_Unclassified", tax6$rankID)
tax6$rankID<-gsub("^0.2.3\\..*", "01_Bacteroidetes", tax6$rankID)
tax6$rankID<-gsub("^0.2.4\\..*", "20_Cyanobacteria", tax6$rankID)
tax6$rankID<-gsub("^0.2.5\\..*", "20_Firmicutes", tax6$rankID)
tax6$rankID<-gsub("^0.2.6\\..*", "20_Fusobacteria", tax6$rankID)
tax6$rankID<-gsub("^0.2.7\\..*", "20_Proteobacteria", tax6$rankID)
tax6$rankID<-gsub("^0.2.8\\..*", "20_Saccharibacteria", tax6$rankID)
tax6$rankID<-gsub("^0.2.9\\..*", "20_Tenericutes", tax6$rankID)
tax6$rankID<-gsub("^0.2.10..*", "20_Verrucomicrobia", tax6$rankID)
colnames(tax6)[2]<-"phylum"
# remove samples w/ <5000:
subtax6<-subset(tax6, select=-c(taxlevel, daughterlevels))
subtax6<-subtax6[order(subtax6$phylum, -subtax6$total), ]
taxmatrix<-subtax6[, c(4:ncol(subtax6))]
duplicated(subtax6$taxon) #identify any duplicated taxon names
subtax6$taxon<-as.character(subtax6$taxon)
subtax6$taxon[15]<-"Actinobacteria_unclassified2"
subtax6$taxon[30]<-"Cyanobacteria_unclassified2"
subtax6$taxon<-as.factor(subtax6$taxon)
rownames(taxmatrix)<-make.names(subtax6$taxon)
genera<- taxmatrix[, colSums(taxmatrix)>5000,]
genera<-genera[,1:143]
# get rel. abund fraction:
genmatrix<-as.data.frame(t(genera))
genera.fr<-genmatrix/rowSums(genmatrix)*100
genus.fr<-t(genera.fr)
all.genera<-cbind(subtax6[1:3], genus.fr)
write.table(all.genera, file="allxanthan_all.genera.txt", sep="\t", quote=FALSE, col.names=TRUE, row.names=FALSE)
### step 2: combine with metadata and filter out data relevant to human stuff:
# read in files
combined<-read.table(file="xanthan.allmeasures.txt", header = TRUE)
meta<-combined[, 1:9]
all.genera<-read.table(file="allxanthan_all.genera.txt", header = TRUE)
genbar<-all.genera
rownames(genbar)<-make.names(genbar$taxon, unique=TRUE)
mice<-genbar
phyla<-subset(genbar, select=c(phylum,taxon,total))
mice<-subset(mice, select=-c(phylum,taxon,total))
# now filter to 1 or 2%:
mice[] <- lapply(mice[,1:143],as.numeric)
genus1<- mice[rowSums(mice>=1)>=1,]
# mice[,1:3] <- lapply(mice[,1:3],as.factor)
# namelist1p<-as.character(rownames(genus1))
# phyla1p<-phyla[phyla$taxon %in% namelist1p, ]
# genera1<-cbind(phyla, genus1)
# get top 2%
genus2<- mice[rowSums(mice>=2)>=2,]
namelist2p<-as.character(rownames(genus2))
phyla2p<-phyla[phyla$taxon %in% namelist2p, ]
genera2<-genus2
#write.table(genera2, file="xanthan_name_genfrac2p.txt", sep="\t", quote=FALSE, col.names=TRUE, row.names=FALSE)
# read in file and combine with meta:
barg<-NULL
genbar<-genera2
# rm_g<-subset(genbar, select =-c(phylum, total) )
# barg<-as.data.frame(t(rm_g))
barg<-as.data.frame(t(genbar))
# barg[4:148,] <- sapply(barg[4:148,],as.numeric)
# barg$other<-100-rowSums(barg[4:148,])
# barg<-barg[-1,]
barg[]<-lapply(barg[], as.character)
barg[]<-lapply(barg[], as.numeric)
barg$other<-100-rowSums(barg)
barg$sampleID<-rownames(barg)
# col.gen<-c(as.character(genbar$color), "grey47")
barg$sampleID<-gsub("X", "", barg$sampleID)
bar<-merge(combined, barg, by.x=c("sampleID"), by.y=c("sampleID"), all.y = TRUE, all.x = TRUE)
write.table(bar, 'xanthan_genfrac2p.all_w.meta.txt',quote=FALSE,sep="\t", col.names=NA)
######
# if you want all genera (including the rarer guys), do this:
meta<-meta
genbar<-read.table(file="allxanthan_all.genera.txt", header=TRUE, row.names=NULL)
#genbar5<- genbar[rowSums(genbar[ ,3:ncol(genbar)]>=5)>=5,]
rm_g<-subset(genbar, select =-c(phylum, total) )
barg<-as.data.frame(t(rm_g))
taxon.name<-barg[1,]
taxon.name<-as.data.frame(t(taxon.name))
colnames(barg)<-taxon.name$taxon
barg$sampleID<-rownames(barg)
bar2<-merge(meta, barg, by.x="sampleID", by.y="sampleID", all.y = TRUE)
write.table(bar2, 'allxanthan_allgenera_w.meta.txt',quote=FALSE,sep="\t", col.names=NA)
|
setwd("/Users/chiewluanl/RProject/ExData_Plotting1")
library(httr)
if(!file.exists("./data")){
dir.create("./data")
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl,destfile="./data/household_power_consumption.zip",method="curl")
unzip(zipfile="./data/household_power_consumption.zip",exdir="./data")
path_rf <- file.path("./data")
files<-list.files(path_rf, recursive=TRUE)
files
}
dataFile <- "./data/household_power_consumption.txt"
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subSetData$Global_active_power)
png("plot2.png", width=480, height=480)
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
/Plot2.R
|
no_license
|
chiewluanl/ExData_Plotting1
|
R
| false | false | 972 |
r
|
setwd("/Users/chiewluanl/RProject/ExData_Plotting1")
library(httr)
if(!file.exists("./data")){
dir.create("./data")
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl,destfile="./data/household_power_consumption.zip",method="curl")
unzip(zipfile="./data/household_power_consumption.zip",exdir="./data")
path_rf <- file.path("./data")
files<-list.files(path_rf, recursive=TRUE)
files
}
dataFile <- "./data/household_power_consumption.txt"
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subSetData$Global_active_power)
png("plot2.png", width=480, height=480)
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
# Problem: Does a employee level 6.5 earn 160 k?
# Importing the dataset
dataset = read.csv('Position_Salaries.csv')
dataset = dataset[2:3]
# Splitting the dataset into the Training set and Test set
# # install.packages('caTools')
# library(caTools)
# set.seed(123)
# split = sample.split(dataset$Salary, SplitRatio = 2/3)
# training_set = subset(dataset, split == TRUE)
# test_set = subset(dataset, split == FALSE)
# Feature Scaling
# training_set = scale(training_set)
# test_set = scale(test_set)
# Fitting the Random Forest Regressionl to the dataset
#install.packages('randomForest')
library('randomForest')
set.seed = (1234)
regressor = randomForest(x = dataset[1],
y = dataset$Salary,
ntree = 500)
# Predicting a new result
y_pred = predict(regressor, data.frame(Level = 6.5))
# Visualising the Regression Model results (for higher resolution and smoother curve)
# install.packages('ggplot2')
library(ggplot2)
x_grid = seq(min(dataset$Level), max(dataset$Level), 0.01)
ggplot() +
geom_point(aes(x = dataset$Level, y = dataset$Salary),
colour = 'red') +
geom_line(aes(x = x_grid, y = predict(regressor, newdata = data.frame(Level = x_grid))),
colour = 'blue') +
ggtitle('Truth or Bluff (Random Forest Regression)') +
xlab('Level') +
ylab('Salary')
|
/Part 2 - Regression/Section 9 - Random Forest Regression/forest_regression.R
|
no_license
|
Kostevski/ML-scripts
|
R
| false | false | 1,345 |
r
|
# Problem: Does a employee level 6.5 earn 160 k?
# Importing the dataset
dataset = read.csv('Position_Salaries.csv')
dataset = dataset[2:3]
# Splitting the dataset into the Training set and Test set
# # install.packages('caTools')
# library(caTools)
# set.seed(123)
# split = sample.split(dataset$Salary, SplitRatio = 2/3)
# training_set = subset(dataset, split == TRUE)
# test_set = subset(dataset, split == FALSE)
# Feature Scaling
# training_set = scale(training_set)
# test_set = scale(test_set)
# Fitting the Random Forest Regressionl to the dataset
#install.packages('randomForest')
library('randomForest')
set.seed = (1234)
regressor = randomForest(x = dataset[1],
y = dataset$Salary,
ntree = 500)
# Predicting a new result
y_pred = predict(regressor, data.frame(Level = 6.5))
# Visualising the Regression Model results (for higher resolution and smoother curve)
# install.packages('ggplot2')
library(ggplot2)
x_grid = seq(min(dataset$Level), max(dataset$Level), 0.01)
ggplot() +
geom_point(aes(x = dataset$Level, y = dataset$Salary),
colour = 'red') +
geom_line(aes(x = x_grid, y = predict(regressor, newdata = data.frame(Level = x_grid))),
colour = 'blue') +
ggtitle('Truth or Bluff (Random Forest Regression)') +
xlab('Level') +
ylab('Salary')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mosek-solver.R
\docType{methods}
\name{status_map,MOSEK-method}
\alias{status_map,MOSEK-method}
\title{MOSEK Status Map}
\usage{
\S4method{status_map}{MOSEK}(solver, status)
}
\arguments{
\item{solver}{A \linkS4class{MOSEK} object.}
\item{status}{An exit code returned by MOSEK. See the \href{http://docs.mosek.com/8.0/dotnetfusion/solution_status.html}{MOSEK documentation} for details.}
}
\value{
A string indicating the status, either "optimal", "infeasible", "unbounded", "optimal_inaccurate", "infeasible_inaccurate", "unbounded_inaccurate", or "solver_error".
}
\description{
Map of MOSEK status to CVXR status.
}
|
/man/MOSEK-status_map.Rd
|
permissive
|
aszekMosek/CVXR
|
R
| false | true | 699 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mosek-solver.R
\docType{methods}
\name{status_map,MOSEK-method}
\alias{status_map,MOSEK-method}
\title{MOSEK Status Map}
\usage{
\S4method{status_map}{MOSEK}(solver, status)
}
\arguments{
\item{solver}{A \linkS4class{MOSEK} object.}
\item{status}{An exit code returned by MOSEK. See the \href{http://docs.mosek.com/8.0/dotnetfusion/solution_status.html}{MOSEK documentation} for details.}
}
\value{
A string indicating the status, either "optimal", "infeasible", "unbounded", "optimal_inaccurate", "infeasible_inaccurate", "unbounded_inaccurate", or "solver_error".
}
\description{
Map of MOSEK status to CVXR status.
}
|
{
set.seed(42782)
tic()
# DC implementation
source("20200705-DCDR-Functions.R")
# getRES function is now relocated to a separate file (20200720-Algos-code.R)
source("20200720-Algos-code.R")
# regression models for GLM / AIPW
if (estimateWithMore == T){
out_path <- "/Users/garethalex/Desktop/HuangGroup/cvtmle_plasmode/Data/"
plas_org <- haven::read_dta(paste0(out_path,"plas_data.dta"))
vars <- names(plas_org[3:333])[1:p.est]
p <- p.est
}
# idx.handpick <- c(1,2,5,18, 217, sample(2:40,10))
# if (isHandPick == T){
# var.idx <- idx.handpick
# plas_org <- haven::read_dta(paste0("./plas_data.dta"))
# vars <- names(plas_org[3:333])[var.idx]
# }
reg.formulas <- make.formula("Y", "A",ver = data.ver,sims.ver = sims.ver, vars=vars,
interact = T,
interact.w.exp = F)
expForm <- reg.formulas$expForm
outForm <- reg.formulas$outForm
print("Estimation: ")
print(paste0("OR form: ",outForm))
print(paste0("PS form: ",expForm))
# set .errorhandling="remove" if want to discard
# set .errorhandling="stop" by default
# boot1 <- foreach(i = 1:N_sims,.errorhandling="stop") %dopar% {
boot1 <- foreach(i = 1:N_sims,.errorhandling=errorhandling) %dopar% {
require(tidyverse)
require(tmle3)
require(sl3)
require(SuperLearner)
# Initialize dataset
if (sims.ver == "plas" | sims.ver =="5var.then.plas"){
# i <- 1
plas_data <- cbind(id = plas_sims$Sim_Data[i],
A = plas_sims$Sim_Data[i + (2*plas_sim_N)],
Y = plas_sims$Sim_Data[i + plas_sim_N])
colnames(plas_data) <- c("id", "A", "Y")
set1 <- suppressMessages(left_join(as_tibble(plas_data), as_tibble(plas))) #dplyr::select(as_tibble(plas), -Y5, -A1))) # add covariates
tset <- set1 %>% mutate(YT = (Y-min(set1$Y))/(max(set1$Y)- min(set1$Y)))
}else{
# Initialize dataset
# i<-2
ss <- 600
set1 <- as_tibble(cbind(C1 = sim_boots[[i]]$C1[1:ss],
C2 = sim_boots[[i]]$C2[1:ss],
C3 = sim_boots[[i]]$C3[1:ss],
C4 = sim_boots[[i]]$C4[1:ss],
C5 = sim_boots[[i]]$C5[1:ss],
A = sim_boots[[i]]$A[1:ss],
Y = sim_boots[[i]]$Y[1:ss]))
tset <- set1 %>% mutate(YT = (Y-min(set1$Y))/(max(set1$Y)- min(set1$Y))) # generate a bounded Y for TMLE
}
getRES(set1, tset, aipw_lib, tmle_lib, short_tmle_lib,
doIPW = doIPW, doLASSO=doLASSO,
doAIPW=doAIPW,
doDCAIPW=doDCAIPW,
doManuTMLE=doManuTMLE, doShortTMLE = doShortTMLE,
doDCTMLE=doDCTMLE,doGComp=doGComp,
num_cf=num_cf,
control=control,
parallel=parallel.for.DC
)
}
toc()
}
|
/Code/20200904-run-sim-code.R
|
no_license
|
mengeks/drml-plasmode
|
R
| false | false | 2,925 |
r
|
{
set.seed(42782)
tic()
# DC implementation
source("20200705-DCDR-Functions.R")
# getRES function is now relocated to a separate file (20200720-Algos-code.R)
source("20200720-Algos-code.R")
# regression models for GLM / AIPW
if (estimateWithMore == T){
out_path <- "/Users/garethalex/Desktop/HuangGroup/cvtmle_plasmode/Data/"
plas_org <- haven::read_dta(paste0(out_path,"plas_data.dta"))
vars <- names(plas_org[3:333])[1:p.est]
p <- p.est
}
# idx.handpick <- c(1,2,5,18, 217, sample(2:40,10))
# if (isHandPick == T){
# var.idx <- idx.handpick
# plas_org <- haven::read_dta(paste0("./plas_data.dta"))
# vars <- names(plas_org[3:333])[var.idx]
# }
reg.formulas <- make.formula("Y", "A",ver = data.ver,sims.ver = sims.ver, vars=vars,
interact = T,
interact.w.exp = F)
expForm <- reg.formulas$expForm
outForm <- reg.formulas$outForm
print("Estimation: ")
print(paste0("OR form: ",outForm))
print(paste0("PS form: ",expForm))
# set .errorhandling="remove" if want to discard
# set .errorhandling="stop" by default
# boot1 <- foreach(i = 1:N_sims,.errorhandling="stop") %dopar% {
boot1 <- foreach(i = 1:N_sims,.errorhandling=errorhandling) %dopar% {
require(tidyverse)
require(tmle3)
require(sl3)
require(SuperLearner)
# Initialize dataset
if (sims.ver == "plas" | sims.ver =="5var.then.plas"){
# i <- 1
plas_data <- cbind(id = plas_sims$Sim_Data[i],
A = plas_sims$Sim_Data[i + (2*plas_sim_N)],
Y = plas_sims$Sim_Data[i + plas_sim_N])
colnames(plas_data) <- c("id", "A", "Y")
set1 <- suppressMessages(left_join(as_tibble(plas_data), as_tibble(plas))) #dplyr::select(as_tibble(plas), -Y5, -A1))) # add covariates
tset <- set1 %>% mutate(YT = (Y-min(set1$Y))/(max(set1$Y)- min(set1$Y)))
}else{
# Initialize dataset
# i<-2
ss <- 600
set1 <- as_tibble(cbind(C1 = sim_boots[[i]]$C1[1:ss],
C2 = sim_boots[[i]]$C2[1:ss],
C3 = sim_boots[[i]]$C3[1:ss],
C4 = sim_boots[[i]]$C4[1:ss],
C5 = sim_boots[[i]]$C5[1:ss],
A = sim_boots[[i]]$A[1:ss],
Y = sim_boots[[i]]$Y[1:ss]))
tset <- set1 %>% mutate(YT = (Y-min(set1$Y))/(max(set1$Y)- min(set1$Y))) # generate a bounded Y for TMLE
}
getRES(set1, tset, aipw_lib, tmle_lib, short_tmle_lib,
doIPW = doIPW, doLASSO=doLASSO,
doAIPW=doAIPW,
doDCAIPW=doDCAIPW,
doManuTMLE=doManuTMLE, doShortTMLE = doShortTMLE,
doDCTMLE=doDCTMLE,doGComp=doGComp,
num_cf=num_cf,
control=control,
parallel=parallel.for.DC
)
}
toc()
}
|
#' Country names
#'
#' Convert country names to echarts format.
#'
#' @param data Data.frame in which to find column names.
#' @param input,output Input and output columns.
#' @param type Passed to \link[countrycode]{countrycode} \code{origin} parameter.
#' @param ... Any other parameter to pass to \link[countrycode]{countrycode}.
#'
#' @details Taiwan and Hong Kong cannot be plotted.
#'
#' @examples
#' cns <- data.frame(country = c("US", "BE"))
#'
#' # replace
#' e_country_names(cns, country)
#'
#' # specify output
#' e_country_names(cns, country, country_name)
#' @rdname e_country_names
#' @export
e_country_names <- function(data, input, output, type = "iso2c", ...) {
if (missing(data) || missing(input)) {
stop("must pass data and input", call. = FALSE)
}
if (missing(output)) {
output <- NULL
} else {
output <- deparse(substitute(output))
}
e_country_names_(data, deparse(substitute(input)), output, type, ...)
}
#' @rdname e_country_names
#' @export
e_country_names_ <- function(data, input, output = NULL, type = "iso2c", ...) {
if (missing(data) || missing(input)) {
stop("must pass data and input", call. = FALSE)
}
src <- input
cn <- countrycode::countrycode(data[[src]], origin = type, destination = "country.name", ...)
if (is.null(output)) {
output <- src
}
data[[output]] <- .correct_countries(cn)
data
}
#' Color range
#'
#' Build manual color range
#'
#' @param data Data.frame in which to find column names.
#' @param input,output Input and output columns.
#' @param colors Colors to pass to \code{\link{colorRampPalette}}.
#' @param ... Any other argument to pass to \code{\link{colorRampPalette}}.
#'
#' @examples
#' df <- data.frame(val = 1:10)
#'
#' e_color_range(df, val, colors)
#' @rdname e_color_range
#' @export
e_color_range <- function(data, input, output, colors = c("#bf444c", "#d88273", "#f6efa6"), ...) {
if (missing(data) || missing(input) || missing(output)) {
stop("must pass data, input and output", call. = FALSE)
}
e_color_range_(data, deparse(substitute(input)), deparse(substitute(output)), colors, ...)
}
#' @rdname e_color_range
#' @export
e_color_range_ <- function(data, input, output, colors = c("#bf444c", "#d88273", "#f6efa6"), ...) {
if (missing(data) || missing(input) || missing(output)) {
stop("must pass data, input and output", call. = FALSE)
}
serie <- data[[input]]
if (inherits(serie, "factor") || inherits(serie, "character")) {
col <- scales::col_numeric(colors, domain = range(serie))(serie)
} else {
col <- scales::col_numeric(colors, domain = range(serie))(serie)
}
data[[output]] <- col
data
}
#' Get data
#'
#' Get data passed to \code{\link{e_charts}}.
#'
#' @inheritParams e_bar
#'
#' @return A list of data.frames, one for each group.
#'
#' @examples
#' echart <- cars |>
#' e_charts(speed) |>
#' e_scatter(dist) |>
#' e_lm(dist ~ speed)
#'
#' echart
#'
#' e_get_data(echart)[[1]]
#' @export
e_get_data <- function(e) {
e$x$data
}
#' Formatters
#'
#' Simple formatters as helpers.
#'
#' @inheritParams e_bar
#' @param axis Axis to apply formatter to.
#' @param suffix,prefix Suffix and prefix of label.
#' @param ... Any other arguments to pass to \code{\link{e_axis}}.
#'
#' @examples
#' # Y = %
#' df <- data.frame(
#' x = 1:10,
#' y = round(
#' runif(10, 1, 100),
#' 2
#' )
#' )
#'
#' df |>
#' e_charts(x) |>
#' e_line(y) |>
#' e_format_y_axis(suffix = "%") |>
#' e_format_x_axis(prefix = "A")
#' @rdname formatters
#' @export
e_format_axis <- function(e, axis = "y", suffix = NULL, prefix = NULL, ...) {
if (missing(e)) {
stop("must pass e", call. = FALSE)
}
if (is.null(suffix) && is.null(prefix)) {
stop("missing formatting")
}
fmt <- paste(prefix, "{value}", suffix)
e <- e |>
e_axis(
axis = axis,
axisLabel = list(formatter = fmt),
...
)
e
}
#' @rdname formatters
#' @export
e_format_x_axis <- function(e, suffix = NULL, prefix = NULL, ...) {
e_format_axis(e, "x", suffix, prefix, ...)
}
#' @rdname formatters
#' @export
e_format_y_axis <- function(e, suffix = NULL, prefix = NULL, ...) {
e_format_axis(e, "y", suffix, prefix, ...)
}
#' Format labels
#'
#' @inheritParams e_bar
#' @param show Set to \code{TRUE} to show the labels.
#' @param position Position of labels, see
#' \href{https://echarts.apache.org/en/option.html#series-line.label.position}{official documentation}
#' for the full list of options.
#' @param ... Any other options see
#' \href{https://echarts.apache.org/en/option.html#series-line.label}{documentation} for other options.
#'
#' @examples
#' mtcars |>
#' e_chart(wt) |>
#' e_scatter(qsec, cyl) |>
#' e_labels(fontSize = 9)
#'
#' mtcars |>
#' group_by(cyl) |>
#' e_chart(wt) |>
#' e_scatter(qsec, mpg) |>
#' e_labels(fontSize = 9)
#'
#' # timeline
#' mtcars |>
#' group_by(cyl) |>
#' e_chart(wt) |>
#' e_scatter(qsec, mpg) |>
#' e_labels(fontSize = 9)
#' @export
e_labels <- function(e, show = TRUE, position = "top", ...) {
if (missing(e)) {
stop("must pass e", call. = FALSE)
}
opts <- list(
show = show,
position = position,
...
)
if (!e$x$tl) {
for (i in seq_along(e$x$opts$series)) {
e$x$opts$series[[i]]$label <- opts
}
} else {
for (i in seq_along(e$x$opts$baseOption$series)) {
e$x$opts$baseOption$series[[i]]$label <- opts
}
}
return(e)
}
#' List
#'
#' simply pass a list of options, similar to a \code{JSON}.
#'
#' @inheritParams e_bar
#' @param list A \code{list} of options passed to \code{setOptions}.
#' @param append if \code{TRUE} the \code{list} is appended to the options,
#' otherwise it \emph{overwrites} everything.
#'
#' @examples
#' N <- 20 # data points
#'
#' opts <- list(
#' xAxis = list(
#' type = "category",
#' data = LETTERS[1:N]
#' ),
#' yAxis = list(
#' type = "value"
#' ),
#' series = list(
#' list(
#' type = "line",
#' data = round(runif(N, 5, 20))
#' )
#' )
#' )
#'
#' e_charts() |>
#' e_list(opts)
#' @export
e_list <- function(e, list, append = FALSE) {
if (missing(list)) {
stop("missing list", call. = FALSE)
}
if (isTRUE(append)) {
e$x$opts <- append(e$x$opts, list)
} else {
e$x$opts <- list
}
e
}
#' Aria
#'
#' W3C defined the Accessible Rich Internet Applications Suite (WAI-ARIA)
#' to make Web content and Web applications more accessible to the disabled.
#' From ECharts 4.0, echarts4r supports ARIA by generating description for
#' charts automatically.
#'
#' @details There should be an aria-label attribute on the chart DOM, which
#' can help the disabled understand the content of charts with the help of certain devices.
#'
#' @inheritParams e_bar
#' @param enabled Whether to enable aria helper text.
#'
#' @seealso \href{https://echarts.apache.org/en/option.html#aria}{official documentation}
#'
#' @export
e_aria <- function(e, enabled = TRUE, ...) {
e$x$opts$aria <- list(
enabled = enabled,
...
)
return(e)
}
#' To & From JSON
#'
#' Get JSON options from an echarts4r object and build one from JSON.
#'
#' @inheritParams e_bar
#' @param json Whether to return the JSON, otherwise returns a \code{list}.
#' @param txt JSON character string, url, or file.
#' @param ... Additional options to pass to \link[=jsonlite]{toJSON}.
#'
#' @details \code{txt} should contain the full list of options required to build a chart.
#' This is subsequently passed to the \code{setOption} ECharts (JavaScript) function.
#'
#' @examples
#' p <- cars |>
#' e_charts(dist) |>
#' e_scatter(speed, symbol_size = 10)
#'
#' p # plot
#'
#' # extract the JSON
#' json <- p |>
#' e_inspect(
#' json = TRUE,
#' pretty = TRUE
#' )
#'
#' # print json
#' json
#'
#' # rebuild plot
#' echarts_from_json(json) |>
#' e_theme("dark") # modify
#' @return \code{e_inspect} Returns a \code{list} if \code{json} is \code{FALSE} and a
#' JSON string otherwise. \code{echarts_from_json} returns an object of class \code{echarts4r}.
#'
#' @note Must be passed as last option.
#'
#' @rdname echartsNJSON
#' @export
e_inspect <- function(e, json = FALSE, ...) {
opts <- e$x$opts
if (isTRUE(json)) {
opts <- jsonlite::toJSON(opts, force = TRUE, auto_unbox = TRUE, null = "null", ...)
}
return(opts)
}
#' @rdname echartsNJSON
#' @export
echarts_from_json <- function(txt) {
json <- jsonlite::fromJSON(txt, simplifyVector = FALSE)
e_charts() -> e
e$x$opts <- json
e
}
|
/R/helpers.R
|
no_license
|
cran/echarts4r
|
R
| false | false | 8,496 |
r
|
#' Country names
#'
#' Convert country names to echarts format.
#'
#' @param data Data.frame in which to find column names.
#' @param input,output Input and output columns.
#' @param type Passed to \link[countrycode]{countrycode} \code{origin} parameter.
#' @param ... Any other parameter to pass to \link[countrycode]{countrycode}.
#'
#' @details Taiwan and Hong Kong cannot be plotted.
#'
#' @examples
#' cns <- data.frame(country = c("US", "BE"))
#'
#' # replace
#' e_country_names(cns, country)
#'
#' # specify output
#' e_country_names(cns, country, country_name)
#' @rdname e_country_names
#' @export
e_country_names <- function(data, input, output, type = "iso2c", ...) {
if (missing(data) || missing(input)) {
stop("must pass data and input", call. = FALSE)
}
if (missing(output)) {
output <- NULL
} else {
output <- deparse(substitute(output))
}
e_country_names_(data, deparse(substitute(input)), output, type, ...)
}
#' @rdname e_country_names
#' @export
e_country_names_ <- function(data, input, output = NULL, type = "iso2c", ...) {
if (missing(data) || missing(input)) {
stop("must pass data and input", call. = FALSE)
}
src <- input
cn <- countrycode::countrycode(data[[src]], origin = type, destination = "country.name", ...)
if (is.null(output)) {
output <- src
}
data[[output]] <- .correct_countries(cn)
data
}
#' Color range
#'
#' Build manual color range
#'
#' @param data Data.frame in which to find column names.
#' @param input,output Input and output columns.
#' @param colors Colors to pass to \code{\link{colorRampPalette}}.
#' @param ... Any other argument to pass to \code{\link{colorRampPalette}}.
#'
#' @examples
#' df <- data.frame(val = 1:10)
#'
#' e_color_range(df, val, colors)
#' @rdname e_color_range
#' @export
e_color_range <- function(data, input, output, colors = c("#bf444c", "#d88273", "#f6efa6"), ...) {
if (missing(data) || missing(input) || missing(output)) {
stop("must pass data, input and output", call. = FALSE)
}
e_color_range_(data, deparse(substitute(input)), deparse(substitute(output)), colors, ...)
}
#' @rdname e_color_range
#' @export
e_color_range_ <- function(data, input, output, colors = c("#bf444c", "#d88273", "#f6efa6"), ...) {
if (missing(data) || missing(input) || missing(output)) {
stop("must pass data, input and output", call. = FALSE)
}
serie <- data[[input]]
if (inherits(serie, "factor") || inherits(serie, "character")) {
col <- scales::col_numeric(colors, domain = range(serie))(serie)
} else {
col <- scales::col_numeric(colors, domain = range(serie))(serie)
}
data[[output]] <- col
data
}
#' Get data
#'
#' Get data passed to \code{\link{e_charts}}.
#'
#' @inheritParams e_bar
#'
#' @return A list of data.frames, one for each group.
#'
#' @examples
#' echart <- cars |>
#' e_charts(speed) |>
#' e_scatter(dist) |>
#' e_lm(dist ~ speed)
#'
#' echart
#'
#' e_get_data(echart)[[1]]
#' @export
e_get_data <- function(e) {
e$x$data
}
#' Formatters
#'
#' Simple formatters as helpers.
#'
#' @inheritParams e_bar
#' @param axis Axis to apply formatter to.
#' @param suffix,prefix Suffix and prefix of label.
#' @param ... Any other arguments to pass to \code{\link{e_axis}}.
#'
#' @examples
#' # Y = %
#' df <- data.frame(
#' x = 1:10,
#' y = round(
#' runif(10, 1, 100),
#' 2
#' )
#' )
#'
#' df |>
#' e_charts(x) |>
#' e_line(y) |>
#' e_format_y_axis(suffix = "%") |>
#' e_format_x_axis(prefix = "A")
#' @rdname formatters
#' @export
e_format_axis <- function(e, axis = "y", suffix = NULL, prefix = NULL, ...) {
if (missing(e)) {
stop("must pass e", call. = FALSE)
}
if (is.null(suffix) && is.null(prefix)) {
stop("missing formatting")
}
fmt <- paste(prefix, "{value}", suffix)
e <- e |>
e_axis(
axis = axis,
axisLabel = list(formatter = fmt),
...
)
e
}
#' @rdname formatters
#' @export
e_format_x_axis <- function(e, suffix = NULL, prefix = NULL, ...) {
e_format_axis(e, "x", suffix, prefix, ...)
}
#' @rdname formatters
#' @export
e_format_y_axis <- function(e, suffix = NULL, prefix = NULL, ...) {
e_format_axis(e, "y", suffix, prefix, ...)
}
#' Format labels
#'
#' @inheritParams e_bar
#' @param show Set to \code{TRUE} to show the labels.
#' @param position Position of labels, see
#' \href{https://echarts.apache.org/en/option.html#series-line.label.position}{official documentation}
#' for the full list of options.
#' @param ... Any other options see
#' \href{https://echarts.apache.org/en/option.html#series-line.label}{documentation} for other options.
#'
#' @examples
#' mtcars |>
#' e_chart(wt) |>
#' e_scatter(qsec, cyl) |>
#' e_labels(fontSize = 9)
#'
#' mtcars |>
#' group_by(cyl) |>
#' e_chart(wt) |>
#' e_scatter(qsec, mpg) |>
#' e_labels(fontSize = 9)
#'
#' # timeline
#' mtcars |>
#' group_by(cyl) |>
#' e_chart(wt) |>
#' e_scatter(qsec, mpg) |>
#' e_labels(fontSize = 9)
#' @export
e_labels <- function(e, show = TRUE, position = "top", ...) {
if (missing(e)) {
stop("must pass e", call. = FALSE)
}
opts <- list(
show = show,
position = position,
...
)
if (!e$x$tl) {
for (i in seq_along(e$x$opts$series)) {
e$x$opts$series[[i]]$label <- opts
}
} else {
for (i in seq_along(e$x$opts$baseOption$series)) {
e$x$opts$baseOption$series[[i]]$label <- opts
}
}
return(e)
}
#' List
#'
#' simply pass a list of options, similar to a \code{JSON}.
#'
#' @inheritParams e_bar
#' @param list A \code{list} of options passed to \code{setOptions}.
#' @param append if \code{TRUE} the \code{list} is appended to the options,
#' otherwise it \emph{overwrites} everything.
#'
#' @examples
#' N <- 20 # data points
#'
#' opts <- list(
#' xAxis = list(
#' type = "category",
#' data = LETTERS[1:N]
#' ),
#' yAxis = list(
#' type = "value"
#' ),
#' series = list(
#' list(
#' type = "line",
#' data = round(runif(N, 5, 20))
#' )
#' )
#' )
#'
#' e_charts() |>
#' e_list(opts)
#' @export
e_list <- function(e, list, append = FALSE) {
if (missing(list)) {
stop("missing list", call. = FALSE)
}
if (isTRUE(append)) {
e$x$opts <- append(e$x$opts, list)
} else {
e$x$opts <- list
}
e
}
#' Aria
#'
#' W3C defined the Accessible Rich Internet Applications Suite (WAI-ARIA)
#' to make Web content and Web applications more accessible to the disabled.
#' From ECharts 4.0, echarts4r supports ARIA by generating description for
#' charts automatically.
#'
#' @details There should be an aria-label attribute on the chart DOM, which
#' can help the disabled understand the content of charts with the help of certain devices.
#'
#' @inheritParams e_bar
#' @param enabled Whether to enable aria helper text.
#'
#' @seealso \href{https://echarts.apache.org/en/option.html#aria}{official documentation}
#'
#' @export
e_aria <- function(e, enabled = TRUE, ...) {
e$x$opts$aria <- list(
enabled = enabled,
...
)
return(e)
}
#' To & From JSON
#'
#' Get JSON options from an echarts4r object and build one from JSON.
#'
#' @inheritParams e_bar
#' @param json Whether to return the JSON, otherwise returns a \code{list}.
#' @param txt JSON character string, url, or file.
#' @param ... Additional options to pass to \link[=jsonlite]{toJSON}.
#'
#' @details \code{txt} should contain the full list of options required to build a chart.
#' This is subsequently passed to the \code{setOption} ECharts (JavaScript) function.
#'
#' @examples
#' p <- cars |>
#' e_charts(dist) |>
#' e_scatter(speed, symbol_size = 10)
#'
#' p # plot
#'
#' # extract the JSON
#' json <- p |>
#' e_inspect(
#' json = TRUE,
#' pretty = TRUE
#' )
#'
#' # print json
#' json
#'
#' # rebuild plot
#' echarts_from_json(json) |>
#' e_theme("dark") # modify
#' @return \code{e_inspect} Returns a \code{list} if \code{json} is \code{FALSE} and a
#' JSON string otherwise. \code{echarts_from_json} returns an object of class \code{echarts4r}.
#'
#' @note Must be passed as last option.
#'
#' @rdname echartsNJSON
#' @export
e_inspect <- function(e, json = FALSE, ...) {
opts <- e$x$opts
if (isTRUE(json)) {
opts <- jsonlite::toJSON(opts, force = TRUE, auto_unbox = TRUE, null = "null", ...)
}
return(opts)
}
#' @rdname echartsNJSON
#' @export
echarts_from_json <- function(txt) {
json <- jsonlite::fromJSON(txt, simplifyVector = FALSE)
e_charts() -> e
e$x$opts <- json
e
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rcax_escdata.R
\name{rcax_escdata}
\alias{rcax_escdata}
\title{Get EscData table}
\usage{
rcax_escdata(
tablename = "EscData",
flist = NULL,
qlist = NULL,
cols = NULL,
sortcols = c("countdate", "refid"),
type = c("data.frame", "colnames"),
GETargs = list(table_id = NULL, recordloc = "records", key = NULL, parse = TRUE),
...
)
}
\arguments{
\item{tablename}{The name of the table in the CAX API. See \code{vignette("CAX_Tables", package = "rCAX")}}
\item{flist}{A filter for the query. See details and \code{rcax_filter()}}
\item{qlist}{Additional query parameters. See \code{rcax_table_query()} for details.}
\item{cols}{column names to return. Use cols=NULL if you want all columns. Names are case insensitive.}
\item{sortcols}{The columns to sort on for the returned table. The order of sortcols indicates the order of sorting. Start with the smallest group and work outwards, e.g. \code{c("spawningyear", "popid", "MPG")}. Names are case insensitive.}
\item{type}{whether to return the table ("data.frame") or colnames with definitions ("colnames"). Default is to return the table as a data frame.}
\item{GETargs}{A list of arguments for the \code{rcax_GET()} call. These do not need to be specified by the user unless the user wants to change the default values. \code{table_id} This is the CAX table id. It is looked up using \code{tablename} and \code{rCAX:::caxpops} (an internal dataset). If \code{table_id} is passed in via \code{GETargs}, it will override the default table id lookup. \code{recordloc} This is the name of the record we want in the list returned from the GET call. \code{key} is the API key.}
\item{...}{Curl options passed to \code{\link[crul]{HttpClient}}}
}
\value{
data frame if \code{type="data.frame"} (default) and the colnames if \code{type="colnames"}
}
\description{
Returns EscData table sorted by refid and countdate.
The EscData table_id is set automatically using a saved data frame from a \code{rcax_datasets()} call. The table is saved in \code{R/sysdata.rda}.
\code{rcax_escdata()} will download 1000 records.
}
\examples{
# First 5 columns of first 5 results
rcax_escdata(cols = NULL, qlist=list(limit=5))[,1:5]
# to print the first column names and definitions
head(rcax_escdata(type="colnames"))
}
|
/man/rcax_escdata.Rd
|
permissive
|
nwfsc-math-bio/rCAX
|
R
| false | true | 2,350 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rcax_escdata.R
\name{rcax_escdata}
\alias{rcax_escdata}
\title{Get EscData table}
\usage{
rcax_escdata(
tablename = "EscData",
flist = NULL,
qlist = NULL,
cols = NULL,
sortcols = c("countdate", "refid"),
type = c("data.frame", "colnames"),
GETargs = list(table_id = NULL, recordloc = "records", key = NULL, parse = TRUE),
...
)
}
\arguments{
\item{tablename}{The name of the table in the CAX API. See \code{vignette("CAX_Tables", package = "rCAX")}}
\item{flist}{A filter for the query. See details and \code{rcax_filter()}}
\item{qlist}{Additional query parameters. See \code{rcax_table_query()} for details.}
\item{cols}{column names to return. Use cols=NULL if you want all columns. Names are case insensitive.}
\item{sortcols}{The columns to sort on for the returned table. The order of sortcols indicates the order of sorting. Start with the smallest group and work outwards, e.g. \code{c("spawningyear", "popid", "MPG")}. Names are case insensitive.}
\item{type}{whether to return the table ("data.frame") or colnames with definitions ("colnames"). Default is to return the table as a data frame.}
\item{GETargs}{A list of arguments for the \code{rcax_GET()} call. These do not need to be specified by the user unless the user wants to change the default values. \code{table_id} This is the CAX table id. It is looked up using \code{tablename} and \code{rCAX:::caxpops} (an internal dataset). If \code{table_id} is passed in via \code{GETargs}, it will override the default table id lookup. \code{recordloc} This is the name of the record we want in the list returned from the GET call. \code{key} is the API key.}
\item{...}{Curl options passed to \code{\link[crul]{HttpClient}}}
}
\value{
data frame if \code{type="data.frame"} (default) and the colnames if \code{type="colnames"}
}
\description{
Returns EscData table sorted by refid and countdate.
The EscData table_id is set automatically using a saved data frame from a \code{rcax_datasets()} call. The table is saved in \code{R/sysdata.rda}.
\code{rcax_escdata()} will download 1000 records.
}
\examples{
# First 5 columns of first 5 results
rcax_escdata(cols = NULL, qlist=list(limit=5))[,1:5]
# to print the first column names and definitions
head(rcax_escdata(type="colnames"))
}
|
# Stream residence time of salmon
# 2015-07-10 CJS Update with split; ggplot2; lsmemans; contrasts etc
# The stream residence of time was measured for individually tagged fish in
# a number of YearFs.
# Example of a two factor CRD analysis of variance with unbalanced data
options(useFancyQuotes=FALSE) # renders summary output corrects
library(car)
library(ggplot2)
library(lsmeans)
library(plyr)
source("http://www.stat.sfu.ca/~cschwarz/Stat-650/Notes/MyPrograms/schwarz.functions.r")
# Read in the actual data
sink("residence-R-001.txt", split=TRUE)
##***part001b;
cat(" Effect of Sex and YearF on residence time levels in Fish \n\n")
restime <- read.csv("residence.csv", header=TRUE, as.is=TRUE, strip.white=TRUE)
restime$SexF <- factor(restime$Sex)
restime$YearF <- as.factor(restime$Year)
restime$trtF <- interaction(restime$Sex, restime$Year)
cat("Listing of part of the raw data \n")
head(restime)
##***part001e;
sink()
str(restime)
# Preliminary plot
# Get side-by-side dot plots
# Get side-by-side dot plots
##***part010b;
plot1 <- ggplot(data=restime, aes(x=trtF, y=ResidenceTime))+
ggtitle("Residence Time levels in different sex/year combinations")+
ylab("Residence Time")+xlab("Sex and year")+
geom_point( position=position_jitter(width=0.2), size=4)+
geom_boxplot(alpha=0.2, width=0.5, notch=TRUE)
plot1
##***part010e;
ggsave(plot=plot1, file="residence-R-010.png", height=4, width=6, units="in", dpi=300)
# Get some simple summary statistics
sink('residence-R-020.txt', split=TRUE)
##***part020b;
report <- ddply(restime, c("SexF","YearF"),sf.simple.summary, variable="ResidenceTime", crd=TRUE)
cat("\n\n Summary report \n")
report
##***part020e;
sink()
# Draw a profile plot
##***part030b;
profile.plot <- ggplot(data=report, aes(x=SexF, y=mean, group=YearF, shape=YearF))+
ggtitle("Profile plot of mean ResidenceTime scores")+
ylab("Mean ResidenceTime with 95% ci")+xlab('year')+
geom_point(size=4, position=position_dodge(.1))+
geom_errorbar(aes(ymin=lcl, ymax=ucl), width=0.1, position=position_dodge(.1))+
geom_line(position=position_dodge(.1), aes(linetype=YearF))
profile.plot
##***part030e;
ggsave(plot=profile.plot, file="residence-R-030.png", height=4, width=6, unit="in", dpi=300) # send the plot to a png file
# Fit the linear model, get the anova table, and the usual stuff
# CAUTION!!! Because the design is unbalance, the default model
# fit by aov gives the WRONG sum of squares and F-tests.
# The default tests are "sequential tests" where terms are added
# in the order specified. You want the marginal tests
# (which are reported in JMP or SAS)
#
# Read the entry at
# http://r-eco-evo.blogspot.com/2007/10/infamous-type-iii-ss-before-i-started_20.html
#
# You can also use the Anova() function from the car package.
sink('residence-R-100.txt', split=TRUE)
##***part100b;
cat("The sum of squares and F-tests from the anova() below are INCORRECT in unbalanced data\n")
cat("because they are sequential and only adjust for effect\n")
cat("that enter the model prior to the term in question.")
result.lm <- lm( ResidenceTime ~ SexF + YearF + SexF*YearF, data=restime)
cat("\n\n Analysis of variance -- this is NOT CORRECT because design is unbalanced \n")
anova(result.lm)
cat("\n\nUse the Type III tests from the Anova() function from the car package")
cat( "\nbut you need to set the treatment contrasts to sum rather than treatment")
cat(" \nSee http://r.789695.n4.nabble.com/Type-I-v-s-Type-III-Sum-Of-Squares-in-ANOVA-td1573657.html")
result.lm2 <- lm( ResidenceTime ~ SexF + YearF + SexF*YearF, data=restime,
contrasts=list(SexF="contr.sum", YearF="contr.sum"))
Anova(result.lm2,type=3)
##***part100e;
sink()
##***part100diagnosticb;
diagplot <- sf.autoplot.lm(result.lm2)
diagplot
diagplot
##***part100diagnostice;
ggsave(plot=diagplot, file='residence-R-100-diagnostic.png', h=6, w=6, units='in', dpi=300)
# LSmeans for sex after a lm() fit
sink('residence-R-100LSM-sex.txt', split=-TRUE)
##***part100LSM-sexb;
result.sex.lsmo <- lsmeans::lsmeans(result.lm2, ~SexF)
cat("\n\n Estimated marginal means \n\n")
summary(result.sex.lsmo, infer=TRUE)
##***part100LSM-sexe;
sink()
sink('residence-R-100LSM-year.txt', split=TRUE)
##***part100LSM-yearb;
cat("\n\n Estimated marginal means \n\n")
result.year.lsmo <- lsmeans::lsmeans(result.lm, ~YearF)
cat("\n\n Estimated marginal means \n\n")
summary(result.year.lsmo, infer=TRUE)
##***part100LSM-yeare;
sink()
sink('residence-R-100LSM-int.txt', split=TRUE)
##***part100LSM-intb;
result.year.sex.lsmo <- lsmeans::lsmeans(result.lm, ~YearF:SexF)
cat("\n\n Estimated marginal means \n\n")
summary(result.year.sex.lsmo, infer=TRUE)
##***part100LSM-inte;
sink()
# Do the multiple comparisons using the compact letter display from the lsmeans package
sink('residence-R-100multcomp-sex.txt', split=TRUE)
##***part100multcomp-sexb;
cat("\n\n Multiple comparison for Sex effect \n")
summary(pairs(result.sex.lsmo), infer=TRUE)
result.sex.cld <- cld(result.sex.lsmo)
result.sex.cld
sink()
sex.cld.plot <- sf.cld.plot.bar(result.sex.cld, "SexF")+
ggtitle("Comparing mean ResidenceTime over sex levels")+
xlab("Sex\nNumbers indicated CLD display")+ylab("Mean ResidenceTime")
sex.cld.plot
##***part100multcomp-sexe;
ggsave(plot=sex.cld.plot, file='residence-R-100multcomp-sex.png', h=4, w=6, units="in", dpi=300)
sink('residence-R-100multcomp-year.txt', split=TRUE)
##***part100multcomp-yearb;
cat("\n\n Multiple comparison for year effect \n")
summary(pairs(result.year.lsmo), infer=TRUE)
result.year.cld <- cld(result.year.lsmo)
result.year.cld
sink()
year.cld.plot <- sf.cld.plot.bar(result.year.cld, "YearF")+
ggtitle("Comparing mean ResidenceTime over year levels")+
xlab("year\nNumbers indicated CLD display")+ylab("Mean ResidenceTime")
year.cld.plot
##***part100multcomp-yeare;
ggsave(plot=year.cld.plot, file='residence-R-100multcomp-year.png', h=4, w=6, units="in", dpi=300)
sink('residence-R-100multcomp-year-sex.txt', split=TRUE)
##***part100multcomp-sexyearb;
# Sex x year terms.
cat("\n\n Multiple comparison for year-Sex combinations \n")
summary(pairs(result.year.sex.lsmo), infer=TRUE)
result.year.sex.cld <- cld(result.year.sex.lsmo)
result.year.sex.cld
sink()
result.year.sex.cld$trt <- interaction(result.year.sex.cld$YearF, result.year.sex.cld$SexF)
year.sex.cld.plot <- sf.cld.plot.bar(result.year.sex.cld, "trt")+
ggtitle("Comparing mean ResidenceTime over year/Sex levels")+
xlab("year-Sex\nNumbers indicated CLD display")+ylab("Mean ResidenceTime")
year.sex.cld.plot
##***part100multcomp-sexyeare;
ggsave(plot=year.cld.plot, file='residence-R-100multcomp-year-sex.png', h=4, w=6, units="in", dpi=300)
|
/Sampling_Regression_Experiment_Design_and_Analysis/residence.r
|
no_license
|
burakbayramli/books
|
R
| false | false | 6,918 |
r
|
# Stream residence time of salmon
# 2015-07-10 CJS Update with split; ggplot2; lsmemans; contrasts etc
# The stream residence of time was measured for individually tagged fish in
# a number of YearFs.
# Example of a two factor CRD analysis of variance with unbalanced data
options(useFancyQuotes=FALSE) # renders summary output corrects
library(car)
library(ggplot2)
library(lsmeans)
library(plyr)
source("http://www.stat.sfu.ca/~cschwarz/Stat-650/Notes/MyPrograms/schwarz.functions.r")
# Read in the actual data
sink("residence-R-001.txt", split=TRUE)
##***part001b;
cat(" Effect of Sex and YearF on residence time levels in Fish \n\n")
restime <- read.csv("residence.csv", header=TRUE, as.is=TRUE, strip.white=TRUE)
restime$SexF <- factor(restime$Sex)
restime$YearF <- as.factor(restime$Year)
restime$trtF <- interaction(restime$Sex, restime$Year)
cat("Listing of part of the raw data \n")
head(restime)
##***part001e;
sink()
str(restime)
# Preliminary plot
# Get side-by-side dot plots
# Get side-by-side dot plots
##***part010b;
plot1 <- ggplot(data=restime, aes(x=trtF, y=ResidenceTime))+
ggtitle("Residence Time levels in different sex/year combinations")+
ylab("Residence Time")+xlab("Sex and year")+
geom_point( position=position_jitter(width=0.2), size=4)+
geom_boxplot(alpha=0.2, width=0.5, notch=TRUE)
plot1
##***part010e;
ggsave(plot=plot1, file="residence-R-010.png", height=4, width=6, units="in", dpi=300)
# Get some simple summary statistics
sink('residence-R-020.txt', split=TRUE)
##***part020b;
report <- ddply(restime, c("SexF","YearF"),sf.simple.summary, variable="ResidenceTime", crd=TRUE)
cat("\n\n Summary report \n")
report
##***part020e;
sink()
# Draw a profile plot
##***part030b;
profile.plot <- ggplot(data=report, aes(x=SexF, y=mean, group=YearF, shape=YearF))+
ggtitle("Profile plot of mean ResidenceTime scores")+
ylab("Mean ResidenceTime with 95% ci")+xlab('year')+
geom_point(size=4, position=position_dodge(.1))+
geom_errorbar(aes(ymin=lcl, ymax=ucl), width=0.1, position=position_dodge(.1))+
geom_line(position=position_dodge(.1), aes(linetype=YearF))
profile.plot
##***part030e;
ggsave(plot=profile.plot, file="residence-R-030.png", height=4, width=6, unit="in", dpi=300) # send the plot to a png file
# Fit the linear model, get the anova table, and the usual stuff
# CAUTION!!! Because the design is unbalance, the default model
# fit by aov gives the WRONG sum of squares and F-tests.
# The default tests are "sequential tests" where terms are added
# in the order specified. You want the marginal tests
# (which are reported in JMP or SAS)
#
# Read the entry at
# http://r-eco-evo.blogspot.com/2007/10/infamous-type-iii-ss-before-i-started_20.html
#
# You can also use the Anova() function from the car package.
sink('residence-R-100.txt', split=TRUE)
##***part100b;
cat("The sum of squares and F-tests from the anova() below are INCORRECT in unbalanced data\n")
cat("because they are sequential and only adjust for effect\n")
cat("that enter the model prior to the term in question.")
result.lm <- lm( ResidenceTime ~ SexF + YearF + SexF*YearF, data=restime)
cat("\n\n Analysis of variance -- this is NOT CORRECT because design is unbalanced \n")
anova(result.lm)
cat("\n\nUse the Type III tests from the Anova() function from the car package")
cat( "\nbut you need to set the treatment contrasts to sum rather than treatment")
cat(" \nSee http://r.789695.n4.nabble.com/Type-I-v-s-Type-III-Sum-Of-Squares-in-ANOVA-td1573657.html")
result.lm2 <- lm( ResidenceTime ~ SexF + YearF + SexF*YearF, data=restime,
contrasts=list(SexF="contr.sum", YearF="contr.sum"))
Anova(result.lm2,type=3)
##***part100e;
sink()
##***part100diagnosticb;
diagplot <- sf.autoplot.lm(result.lm2)
diagplot
diagplot
##***part100diagnostice;
ggsave(plot=diagplot, file='residence-R-100-diagnostic.png', h=6, w=6, units='in', dpi=300)
# LSmeans for sex after a lm() fit
sink('residence-R-100LSM-sex.txt', split=-TRUE)
##***part100LSM-sexb;
result.sex.lsmo <- lsmeans::lsmeans(result.lm2, ~SexF)
cat("\n\n Estimated marginal means \n\n")
summary(result.sex.lsmo, infer=TRUE)
##***part100LSM-sexe;
sink()
sink('residence-R-100LSM-year.txt', split=TRUE)
##***part100LSM-yearb;
cat("\n\n Estimated marginal means \n\n")
result.year.lsmo <- lsmeans::lsmeans(result.lm, ~YearF)
cat("\n\n Estimated marginal means \n\n")
summary(result.year.lsmo, infer=TRUE)
##***part100LSM-yeare;
sink()
sink('residence-R-100LSM-int.txt', split=TRUE)
##***part100LSM-intb;
result.year.sex.lsmo <- lsmeans::lsmeans(result.lm, ~YearF:SexF)
cat("\n\n Estimated marginal means \n\n")
summary(result.year.sex.lsmo, infer=TRUE)
##***part100LSM-inte;
sink()
# Do the multiple comparisons using the compact letter display from the lsmeans package
sink('residence-R-100multcomp-sex.txt', split=TRUE)
##***part100multcomp-sexb;
cat("\n\n Multiple comparison for Sex effect \n")
summary(pairs(result.sex.lsmo), infer=TRUE)
result.sex.cld <- cld(result.sex.lsmo)
result.sex.cld
sink()
sex.cld.plot <- sf.cld.plot.bar(result.sex.cld, "SexF")+
ggtitle("Comparing mean ResidenceTime over sex levels")+
xlab("Sex\nNumbers indicated CLD display")+ylab("Mean ResidenceTime")
sex.cld.plot
##***part100multcomp-sexe;
ggsave(plot=sex.cld.plot, file='residence-R-100multcomp-sex.png', h=4, w=6, units="in", dpi=300)
sink('residence-R-100multcomp-year.txt', split=TRUE)
##***part100multcomp-yearb;
cat("\n\n Multiple comparison for year effect \n")
summary(pairs(result.year.lsmo), infer=TRUE)
result.year.cld <- cld(result.year.lsmo)
result.year.cld
sink()
year.cld.plot <- sf.cld.plot.bar(result.year.cld, "YearF")+
ggtitle("Comparing mean ResidenceTime over year levels")+
xlab("year\nNumbers indicated CLD display")+ylab("Mean ResidenceTime")
year.cld.plot
##***part100multcomp-yeare;
ggsave(plot=year.cld.plot, file='residence-R-100multcomp-year.png', h=4, w=6, units="in", dpi=300)
sink('residence-R-100multcomp-year-sex.txt', split=TRUE)
##***part100multcomp-sexyearb;
# Sex x year terms.
cat("\n\n Multiple comparison for year-Sex combinations \n")
summary(pairs(result.year.sex.lsmo), infer=TRUE)
result.year.sex.cld <- cld(result.year.sex.lsmo)
result.year.sex.cld
sink()
result.year.sex.cld$trt <- interaction(result.year.sex.cld$YearF, result.year.sex.cld$SexF)
year.sex.cld.plot <- sf.cld.plot.bar(result.year.sex.cld, "trt")+
ggtitle("Comparing mean ResidenceTime over year/Sex levels")+
xlab("year-Sex\nNumbers indicated CLD display")+ylab("Mean ResidenceTime")
year.sex.cld.plot
##***part100multcomp-sexyeare;
ggsave(plot=year.cld.plot, file='residence-R-100multcomp-year-sex.png', h=4, w=6, units="in", dpi=300)
|
## Caching the inverse of a Matrix
## Matrix inversion is computationally expensive to be performed repeatedly.
## Therefore the functions in this program calculates the inverse and stores in
## the cache. This data is accessed whenever the inverse is called.
## Creates a matrix object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
mat1 <- NULL
set <- function(y){
x<<-y
mat1 <<- NULL
}
get <- function() x
setInverse <- function(mat1) mat1 <<- inverse
getInverse <- function() mat1
list(set=set,
get=get,
setInverse=setInverse,
getInverse=getInverse)
}
## The output from this function is the inverse of the matrix. Two steps are
## followed here, the first step checks if the inverse already exists, if it
## does, it returns the value, else it will calculate the value using setinverse
## function
cacheSolve <- function(x, ...) {
mat1 <- x$getInverse() ## Check the cache
if (!is.null(mat1)) { ## If cache exists
message("obtaining cache data") ## Message indicating status
return(mat1) ## Returns the cache data
}
mat <- x$get() ## Obtaining data fro mother function
mat1 <- solve(mat, ...) ## Calcuating inverse
x$setInverse(mat1) ## Store data of new matrix
mat1 ## Return a matrix that is the inverse of 'x'
}
|
/cachematrix.R
|
no_license
|
chidam181/ProgrammingAssignment2
|
R
| false | false | 1,619 |
r
|
## Caching the inverse of a Matrix
## Matrix inversion is computationally expensive to be performed repeatedly.
## Therefore the functions in this program calculates the inverse and stores in
## the cache. This data is accessed whenever the inverse is called.
## Creates a matrix object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
mat1 <- NULL
set <- function(y){
x<<-y
mat1 <<- NULL
}
get <- function() x
setInverse <- function(mat1) mat1 <<- inverse
getInverse <- function() mat1
list(set=set,
get=get,
setInverse=setInverse,
getInverse=getInverse)
}
## The output from this function is the inverse of the matrix. Two steps are
## followed here, the first step checks if the inverse already exists, if it
## does, it returns the value, else it will calculate the value using setinverse
## function
cacheSolve <- function(x, ...) {
mat1 <- x$getInverse() ## Check the cache
if (!is.null(mat1)) { ## If cache exists
message("obtaining cache data") ## Message indicating status
return(mat1) ## Returns the cache data
}
mat <- x$get() ## Obtaining data fro mother function
mat1 <- solve(mat, ...) ## Calcuating inverse
x$setInverse(mat1) ## Store data of new matrix
mat1 ## Return a matrix that is the inverse of 'x'
}
|
#!/usr/bin/Rscript
# seqstats_density.R
# Density plots for sequencing stats.
#
# Author: Daniel A Cuevas (dcuevas08.at.gmail.com)
# Created on 23 Nov 2016
# Updated on 20 Mar 2017
# Import necessary packages
# These may need to be installed first
if ("getopt" %in% rownames(installed.packages()) == F) {
install.packages("getopt")
}
if ("ggplot2" %in% rownames(installed.packages()) == F) {
install.packages("ggplot2")
}
suppressMessages(require("getopt"))
suppressMessages(require("ggplot2"))
# Suppress warning messages
options(warn=-1)
#################################################################
# UTILITY FUNCTIONS
#################################################################
# Set theme
my.theme <-
theme(axis.text=element_text(colour="black", size=12),
axis.title=element_text(face="bold", size=15),
axis.title.x=element_text(margin=margin(t=10, b=5)),
axis.ticks=element_blank(),
axis.line.x=element_line(colour="black"),
axis.line.y=element_line(colour="black"),
legend.key=element_rect(fill=NA),
legend.text=element_text(size=12),
plot.title=element_text(size=16, face="bold"),
panel.background=element_blank(),
panel.margin=unit(3, "mm"),
panel.grid.major.x=element_blank(),
panel.grid.major.y=element_blank(),
panel.grid.minor=element_blank())
#################################################################
# ARGUMENT PARSING
#################################################################
spec <- matrix(c(
"datafile", "i", 1, "character", "Data file (required)",
"out_dir", "d", 1, "character", "Output directory (required)",
"header", "e", 0, "logical", "Data file contains header info",
"metric", "m", 1, "character", "Metric name if no header (Default: 'metric')",
"suffix", "s", 1, "character", "Suffix for output files",
"title", "t", 1, "character", "Title for plot",
"help", "h", 0, "logical", "This help message"
), ncol=5, byrow=T)
opt <- getopt(spec)
# Check if help flag was given
if (!is.null(opt$help)) {
cat(paste(getopt(spec, usage=T), "\n"))
q(status=1)
}
# Check for data file
if (is.null(opt$datafile)) {
cat("\nDatafile not supplied. Use the '-i' option.\n\n")
cat(paste(getopt(spec, usage=T), "\n"))
q(status=1)
} else {
fp <- opt$datafile
}
# Check for output directory
if (is.null(opt$out_dir)) {
cat("\nOutput directory not supplied. Use the '-d' option.\n\n")
cat(paste(getopt(spec, usage=T), "\n"))
q(status=1)
} else {
outdir <- opt$out_dir
}
# Check if header flag was given
if (is.null(opt$header)) {
headerFlag <- F
} else {
headerFlag <- opt$header
}
# Check metric name -- only used if header was not given
if (is.null(opt$metric) && !headerFlag) {
metricName <- "metric"
} else {
metricName <- opt$metric
}
# Check suffix
if (is.null(opt$suffix)) {
suffix <- ""
} else {
suffix <- paste(opt$suffix, "_", sep="")
}
# Check title
if (is.null(opt$title)) {
title <- ""
file_title <- ""
} else {
title <- opt$title
file_title <- paste(title, "_", suffix, sep="")
}
#################################################################
# DATA PROCESSING
#################################################################
# Load data
data <- read.delim(fp, header=headerFlag)
# Get column name from header, if given
if (headerFlag) {
# Replace underscores "_" with space " "
metricName <- gsub("_", " ", colnames(data)[2])
colnames(data) <- c("V1", "V2", "V3")
}
# Set position order
data$V1 <- factor(data$V1, levels=c("0-9", "10-19", "20-29", "30-39", "40-49",
"50-59", "60-69", "70-79", "80-89", "90-99",
"100-109", "110-119", "120-129", "130-139", "140-149",
"150-159", "160-169", "170-179", "180-189", "190-199",
"200-209", "210-219", "220-229", "230-239", "240-249",
"250+"))
# Set breaks based on special names
if (metricName == "GC ratio") {
plot.breaks <- seq(0.0, 1.0, 0.2)
lim <- c(0, 1)
} else {
plot.breaks <- seq(0, 40, 10)
lim <- c(0, 40)
}
density.data <- aggregate(list(count=data$V3), by=list(metric=as.character(data$V2)), FUN=sum)
density.data$metric <- as.numeric(density.data$metric)
# Plot density
mean.x <- weighted.mean(density.data$metric, density.data$count)
pl <- ggplot(density.data, aes(x=metric, y=count))
if (metricName == "GC ratio") {
pl <- pl + geom_histogram(stat="identity", fill=NA, colour="#a7a7a7")
} else {
pl <- pl + geom_histogram(stat="identity", fill=NA, colour="black")
}
pl <- pl + geom_vline(aes(xintercept=mean.x), colour="#d62728") +
my.theme +
scale_y_continuous(expand=c(0,0)) +
scale_x_continuous(breaks=c(plot.breaks, mean.x), labels=c(plot.breaks, round(mean.x, digits=2)), limits=lim)
pl <- pl + xlab(metricName) + ylab("Count") + ggtitle(title)
ggsave(paste(outdir, "/", file_title ,"density.png", sep=""),
plot=pl,
width=30,
height=15,
units="cm",
dpi=300)
# Box plots
pl <- ggplot(data, aes(x=V1, y=V2, weight=V3)) +
geom_boxplot() +
my.theme +
theme(panel.grid.major.y=element_line(colour="#d7d7d7")) +
scale_y_continuous(breaks=plot.breaks, limits=lim) +
xlab("Read Position (bp)") + ylab(metricName) + ggtitle(title)
ggsave(paste(outdir, "/", file_title ,"boxplots.png", sep=""),
plot=pl,
width=30,
height=15,
units="cm",
dpi=300)
|
/scripts/seqstats/seqstats_density.R
|
no_license
|
Adrian-Cantu/cf_pipeline
|
R
| false | false | 5,686 |
r
|
#!/usr/bin/Rscript
# seqstats_density.R
# Density plots for sequencing stats.
#
# Author: Daniel A Cuevas (dcuevas08.at.gmail.com)
# Created on 23 Nov 2016
# Updated on 20 Mar 2017
# Import necessary packages
# These may need to be installed first
if ("getopt" %in% rownames(installed.packages()) == F) {
install.packages("getopt")
}
if ("ggplot2" %in% rownames(installed.packages()) == F) {
install.packages("ggplot2")
}
suppressMessages(require("getopt"))
suppressMessages(require("ggplot2"))
# Suppress warning messages
options(warn=-1)
#################################################################
# UTILITY FUNCTIONS
#################################################################
# Set theme
my.theme <-
theme(axis.text=element_text(colour="black", size=12),
axis.title=element_text(face="bold", size=15),
axis.title.x=element_text(margin=margin(t=10, b=5)),
axis.ticks=element_blank(),
axis.line.x=element_line(colour="black"),
axis.line.y=element_line(colour="black"),
legend.key=element_rect(fill=NA),
legend.text=element_text(size=12),
plot.title=element_text(size=16, face="bold"),
panel.background=element_blank(),
panel.margin=unit(3, "mm"),
panel.grid.major.x=element_blank(),
panel.grid.major.y=element_blank(),
panel.grid.minor=element_blank())
#################################################################
# ARGUMENT PARSING
#################################################################
spec <- matrix(c(
"datafile", "i", 1, "character", "Data file (required)",
"out_dir", "d", 1, "character", "Output directory (required)",
"header", "e", 0, "logical", "Data file contains header info",
"metric", "m", 1, "character", "Metric name if no header (Default: 'metric')",
"suffix", "s", 1, "character", "Suffix for output files",
"title", "t", 1, "character", "Title for plot",
"help", "h", 0, "logical", "This help message"
), ncol=5, byrow=T)
opt <- getopt(spec)
# Check if help flag was given
if (!is.null(opt$help)) {
cat(paste(getopt(spec, usage=T), "\n"))
q(status=1)
}
# Check for data file
if (is.null(opt$datafile)) {
cat("\nDatafile not supplied. Use the '-i' option.\n\n")
cat(paste(getopt(spec, usage=T), "\n"))
q(status=1)
} else {
fp <- opt$datafile
}
# Check for output directory
if (is.null(opt$out_dir)) {
cat("\nOutput directory not supplied. Use the '-d' option.\n\n")
cat(paste(getopt(spec, usage=T), "\n"))
q(status=1)
} else {
outdir <- opt$out_dir
}
# Check if header flag was given
if (is.null(opt$header)) {
headerFlag <- F
} else {
headerFlag <- opt$header
}
# Check metric name -- only used if header was not given
if (is.null(opt$metric) && !headerFlag) {
metricName <- "metric"
} else {
metricName <- opt$metric
}
# Check suffix
if (is.null(opt$suffix)) {
suffix <- ""
} else {
suffix <- paste(opt$suffix, "_", sep="")
}
# Check title
if (is.null(opt$title)) {
title <- ""
file_title <- ""
} else {
title <- opt$title
file_title <- paste(title, "_", suffix, sep="")
}
#################################################################
# DATA PROCESSING
#################################################################
# Load data
data <- read.delim(fp, header=headerFlag)
# Get column name from header, if given
if (headerFlag) {
# Replace underscores "_" with space " "
metricName <- gsub("_", " ", colnames(data)[2])
colnames(data) <- c("V1", "V2", "V3")
}
# Set position order
data$V1 <- factor(data$V1, levels=c("0-9", "10-19", "20-29", "30-39", "40-49",
"50-59", "60-69", "70-79", "80-89", "90-99",
"100-109", "110-119", "120-129", "130-139", "140-149",
"150-159", "160-169", "170-179", "180-189", "190-199",
"200-209", "210-219", "220-229", "230-239", "240-249",
"250+"))
# Set breaks based on special names
if (metricName == "GC ratio") {
plot.breaks <- seq(0.0, 1.0, 0.2)
lim <- c(0, 1)
} else {
plot.breaks <- seq(0, 40, 10)
lim <- c(0, 40)
}
density.data <- aggregate(list(count=data$V3), by=list(metric=as.character(data$V2)), FUN=sum)
density.data$metric <- as.numeric(density.data$metric)
# Plot density
mean.x <- weighted.mean(density.data$metric, density.data$count)
pl <- ggplot(density.data, aes(x=metric, y=count))
if (metricName == "GC ratio") {
pl <- pl + geom_histogram(stat="identity", fill=NA, colour="#a7a7a7")
} else {
pl <- pl + geom_histogram(stat="identity", fill=NA, colour="black")
}
pl <- pl + geom_vline(aes(xintercept=mean.x), colour="#d62728") +
my.theme +
scale_y_continuous(expand=c(0,0)) +
scale_x_continuous(breaks=c(plot.breaks, mean.x), labels=c(plot.breaks, round(mean.x, digits=2)), limits=lim)
pl <- pl + xlab(metricName) + ylab("Count") + ggtitle(title)
ggsave(paste(outdir, "/", file_title ,"density.png", sep=""),
plot=pl,
width=30,
height=15,
units="cm",
dpi=300)
# Box plots
pl <- ggplot(data, aes(x=V1, y=V2, weight=V3)) +
geom_boxplot() +
my.theme +
theme(panel.grid.major.y=element_line(colour="#d7d7d7")) +
scale_y_continuous(breaks=plot.breaks, limits=lim) +
xlab("Read Position (bp)") + ylab(metricName) + ggtitle(title)
ggsave(paste(outdir, "/", file_title ,"boxplots.png", sep=""),
plot=pl,
width=30,
height=15,
units="cm",
dpi=300)
|
testlist <- list(data = structure(c(-3.879448322712e+260, NaN), .Dim = 1:2), q = 6.95335580945396e-310)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result)
|
/biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610556855-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 178 |
r
|
testlist <- list(data = structure(c(-3.879448322712e+260, NaN), .Dim = 1:2), q = 6.95335580945396e-310)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result)
|
library(photobiologyWavebands)
### Name: Orange
### Title: Constructor of orange waveband
### Aliases: Orange
### ** Examples
Orange()
Orange("ISO")
|
/data/genthat_extracted_code/photobiologyWavebands/examples/Orange.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 157 |
r
|
library(photobiologyWavebands)
### Name: Orange
### Title: Constructor of orange waveband
### Aliases: Orange
### ** Examples
Orange()
Orange("ISO")
|
# old rasters from Greg 2015 I think.
# rastersOLD <- prepInputs(
# url = "https://drive.google.com/file/d/1DN31xcXh97u6v8NaVcy0O3vzKpLpld69/view?usp=sharing",
# fun = "raster::stack",
# #rasterToMatch = masterRaster, useGDAL = FALSE) # this was Eliot's
# destinationPath = 'inputs')
#
# stackIan <- prepInputs(url = "https://drive.google.com/file/d/1keh-3KIvgy8G5nBBxolH_LsoC53w3qqN/view?usp=sharing",
# fun = "raster::stack",
# destinationPath = 'inputs')
# # current masterRaster
# fireRetMasterRaster <- spadesCBMout$masterRaster
#
# # trying things
# rastersOLDt1 <- prepInputs(
# url = "https://drive.google.com/file/d/1DN31xcXh97u6v8NaVcy0O3vzKpLpld69/view?usp=sharing",
# fun = "raster::stack",
# rasterToMatch = spadesCBMout$masterRaster,
# useGDAL = FALSE,
# destinationPath = 'inputs')
#
# age <- postProcess(x = stackIan$RIAlandscape.5,
# rasterToMatch = spadesCBMout$masterRaster,
# filename2 = "inputs/pixelAges.tif")
#
# aValues <- age[]
# table(aValues, useNA = 'ifany')
# mValues <- spadesCBMout$masterRaster[]
# table(mValues)
# # there are this many more NAs in the age raster provided by Ian
# # 5435395 - 5605119
# # [1] -169724
# and here they are
# agesDT <- data.table(spadesCBMout$allPixDT[,.(ages, pixelIndex)],aValues)
# setnames(agesDT,c("ages","aValues"), c("ages2020", "ages2015"))
# agesNA <- agesDT[is.na(ages2015) & !is.na(ages2020),]
#
# # need to check if these pixels are disturbed
# # are they burned?
# presentDayBurns <- prepInputs(url = 'https://drive.google.com/file/d/1MjQ5y9Txr1ezv0BatG4b_6FpM6wti1b5/view?usp=sharing',
# destinationPath = 'inputs',
# overwrite = TRUE,
# fun = 'readRDS')
# # there are 3426/169724 that get burned between 1985-2015
# agesNA[pixelIndex %in% presentDayBurns$pixelID,]
#
# # are they harvested?
# presentDayHarvest <- prepInputs(url = 'https://drive.google.com/file/d/1Ca-kPun7_VF2xV8s40IJ9hri3Ok-6qx5/view?usp=sharing',
# destinationPath = 'inputs',
# overwrite = TRUE,
# fun = 'readRDS')
# # there are 14969/169724 that get burned between 1985-2015
# agesNA[pixelIndex %in% presentDayHarvest$pixelID,]
#
# (14969 + 3426)/169724
# #only 11%
#read-in the VRI2015
# this is an ESRI file
#https://pub.data.gov.bc.ca/datasets/02dba161-fdb7-48ae-a4bb-bd6ef017c36d/2015/VEG_COMP_LYR_L1_POLY_2015.gdb.zip
# # back-tracking VRI2015 age raster to 1985 based on the disturbance info
# presentDayBurns[, events := 1L]
# presentDayHarvest[, events := 2L]
# allDist <- rbind(presentDayBurns, presentDayHarvest)
#
# # there needs to be a part here to make the right age raster
# RIArtm <- prepInputs(url = "https://drive.google.com/file/d/1h7gK44g64dwcoqhij24F2K54hs5e35Ci/view?usp=sharing",
# destinationPath = 'inputs')
# VRI2015ageRaster <- Cache(prepInputsVRIage, VRIurl = "https://drive.google.com/file/d/1spcJq_4r4TRA2jfjclQcwBcIfnbniEXZ",
# dPath = "C:/Celine/github/spadesCBM_RIA/modules/CBM_dataPrep_RIA/data",
# rasterToMatch = RIArtm,
# cacheRepo = cacheDir)
# # make a data table from the raster
#
#
# # this is the wrong raster but waiting for the VRI2015
# ages2015dt <- agesDT[,.(pixelIndex, ages2015)]
#
#
#
#
# prepInputsVRIage <- function(VRIurl, dPath, rasterToMatch, field = "PROJ_AGE_1"){
# VRIin <- prepInputs(url = VRIurl,
# fun = "sf::st_read",
# destinationPath = dPath)
# RIA_VRI <- st_transform(VRIin, crs = st_crs(rasterToMatch))
# #gcIDRaster <- fasterize::fasterize(RIA_VRI, rasterToMatch, field = "curve2")
# ageRaster <- fasterize::fasterize(RIA_VRI, rasterToMatch, field = "PROJ_AGE_1")
# #gcIDRaster[] <- as.integer(gcIDRaster[])
# ageRaster[] <- as.integer(ageRaster[])
# VRIageRaster <- raster::raster(ageRaster)
# return(VRIageRaster)
# }
#
# # Eliot modified reproducible so prepInputs can deal with .gbd files
# remotes::install_github("PredictiveEcology/reproducible@gdb_archiveNA")
# # I am using "dummy" masterRaster --> use your real one
# masterRaster <- raster(extent(-1963750, -1321250, 7407500, 8239000),
# crs = "+proj=lcc +lat_0=0 +lon_0=-95 +lat_1=49 +lat_2=77 +x_0=0 +y_0=0 +ellps=GRS80 +units=m", res = 250)
# # Make a studyArea --> use your real one
# sa <- as(extent(masterRaster), "SpatialPolygons")
# crs(sa) <- crs(masterRaster)
# loadAge <- function(x, field = "PROJ_AGE_1") {
# # a <- Cache(sf::st_read, x) # I used Cache during my development because this takes 37 minutes to run -- I was sick of running it again and again
# a <- sf::st_read(x)
# a1 <- a[, field]
# return(a1)
# }
# a <- prepInputs(url =
# "https://pub.data.gov.bc.ca/datasets/02dba161-fdb7-48ae-a4bb-bd6ef017c36d/2015/VEG_COMP_LYR_L1_POLY_2015.gdb.zip",
# fun = quote(loadAge(x = targetFilePath,
# field = "PROJ_AGE_1")),
# targetFile = "VEG_COMP_LYR_L1_POLY_2015.gdb.zip", archive = NA,
# studyArea = sa
# #rasterTomatch = masterRaster
# )
# extra tries that failed
# asked for help but got "do it another way" answer
# pickAges <- function(gcID, pickN, allPixDT){
# ageDist <- allPixDT[growth_curve_id == as.numeric(gcID) & ages >80,]$ages
# return(ageDist[round(runif(
# n = as.numeric(pickN),
# min = 1,
# max = length(ageDist)),digits = 0)])
# }
#
# newVector <- inDT[, lapply(.SD, FUN = pickAges(gcID = growth_curve_id,
# pickN = N,
# allPixDT = egPixDT)),
# by = 'growth_curve_id']
#
#
# newAgesDistPix <- rndNumBygcID[, lapply(.SD, FUN = pickAges(gcID = growth_curve_id,
# pickN = N,
# allPixDT = sim$allPixDT)),
# by = 'growth_curve_id']
#newPixDT <- egPixDT[inDT, on = "growth_curve_id", nomatch = 0] # join the 2 objects on "growth_curve_id"
# ageList <- as.list(vector(mode = "list", length = nrow(rndNumBygcID)))
# for(i in 1:length(rndNumBygcID$growth_curve_id)){
# ageDist <- matchPixDT[growth_curve_id == as.numeric(rndNumBygcID[i,1]),]
# ageList[[i]] <- sample(ageDist$ages, size = as.numeric(rndNumBygcID[i,2]))
# }
#
# ageNew <- rbindlist(ageList)
#
## another try
############ From the browser post VRI2015 reading-in###########################
# starting from the read-in age raster in 2015 and keeping the same masterRaster
# as the fireReturnInterval runs.
age2015 <- ageRaster2015[]
age2020 <- ageRaster[]
# NAs 2015 5539822
# NAs 2020 5435395
## figure out what ages the NAs have in 2020.
## Note that some ages in the 2020 that have no ages in the 2015 raster AND that
## are disturbed (so in the sim$disturbanceRasters data table) are old...older
## then the age from the dist...but most are below 50.
ageDT <- data.table(pixelIndex = sim$allPixDT$pixelIndex, age2015, age2020)
ageNoMatch <- ageDT[is.na(age2015) & !is.na(age2020),]
## NOTEs: there are 104427 more pixels with NAs in age2015 than in age2020.
ageDT[!is.na(age2015) & is.na(age2020),]
## There are no pixels that have ages in 2015 and are NAs-age in 2020.
# ageNA1985, what is their year of disturbance?
setkeyv(ageNoMatch, "pixelIndex")
setkeyv(allDist,"pixelID")
# do any pixels burn twice?
length(unique(presentDayBurns$pixelID)) == dim(presentDayBurns)[1]
#TRUE
# harvested twice?
length(unique(presentDayHarvest$pixelID)) == dim(presentDayHarvest)[1]
#TRUE
# get only the burnt/harvested pixels in the noMatch
ageNAburns <- presentDayBurns[pixelID %in% ageNoMatch$pixelIndex, ]
setnames(ageNAburns,"pixelID", "pixelIndex")
ageNAharvest <- presentDayHarvest[pixelID %in% ageNoMatch$pixelIndex, ]
setnames(ageNAharvest,"pixelID", "pixelIndex")
# Merge them with ageNoMatch
#dt_a[dt_b, on = .(b = y)]
ageNA1985 <- merge.data.table(ageNoMatch, ageNAburns, by = "pixelIndex", all.x = TRUE)
setnames(ageNA1985,"year", "burnYear")
ageNA1985[, events := NULL]
ageNA1985 <- merge.data.table(ageNA1985, ageNAharvest, by = "pixelIndex", all.x = TRUE)
setnames(ageNA1985,"year", "harvestYear")
ageNA1985[, events := NULL]
ageNA1985$noDist <- 0
ageNA1985[which(is.na(burnYear) & is.na(harvestYear)),]$noDist <- 1
# make a column of "straight substraction"
ageNA1985[, substract := (age2020 - 35)]
## pixels that have no disturbance and are >0 in 1985 get this age ##################
ageNA1985$PixSubtract1985 <- 999
ageNA1985[noDist == 1 & substract >= 0,]$PixSubtract1985 <- ageNA1985[noDist == 1 & substract >= 0,]$substract
ageNA1985[PixSubtract1985 == 999,]
# still 23177 that are not dealt with.
## the ones that have a non-negative age at the time of disturbance will get the
## age in the year prior to disturbance
ageNA1985[, PixFireYrAge := age2020 - (2020-burnYear)]
ageNA1985[, PixCutYrAge := age2020 - (2020-harvestYear)]
## this gives the ages of the burnt pixels in 1985 (values>0)
ageNA1985[, firePix1985age := PixFireYrAge - (burnYear - 1985)]
ageNA1985[, cutPix1985age := PixCutYrAge - (harvestYear - 1985)]
## how many left?
ageNA1 <- ageNA1985[PixSubtract1985 != 999] #81250
ageNA2 <- ageNA1985[ PixSubtract1985 == 999 & (firePix1985age > 0 | cutPix1985age > 0),] #3835
#104427-85085
ageNA1985[pixelIndex %in% ageNA1$pixelIndex | pixelIndex %in% ageNA2$pixelIndex] #85085
probPix <- ageNA1985[!(pixelIndex %in% ageNA1$pixelIndex | pixelIndex %in% ageNA2$pixelIndex)] #19342
# remove these
#colsRemove <- c("substract", "PixSubtract1985", "PixFireYrAge")
#probPix[, c("substract", "PixSubtract1985", "PixFireYrAge") := list(NULL, NULL, NULL)]
# how many of those are disturbed?
table(probPix$noDist)
# 0 1
# 11183 8159
# look at disturbed pixels ages in 1985
# probCuts1985 <- qplot(probPix[noDist == 0,]$cutPix1985age, geom = "histogram")
# probBurns1985 <- qplot(probPix[noDist == 0,]$firePix1985age, geom = "histogram")
## decision: all the disturbed pixels (noDist == 0) that were cut or burnt will
## have mature ages at the year of disturbance. Those will be selected from the
## age distribution of pixels with the same gcID with ages >80 in age2020.
distPixToMatch <- probPix[noDist == 0,]
length(unique(distPixToMatch$pixelIndex)) # no duplicats
# figure out the gcID for each pixelIndex
# get all the ages>80 at ages2020 in each gcID
# randomly select one for each pixelIndex
#gcIDdistPixNoMatch <- unique(sim$allPixDT[pixelIndex %in% distPixToMatch$pixelIndex,]$growth_curve_id)
#84
rndNumBygcID <- sim$allPixDT[pixelIndex %in% distPixToMatch$pixelIndex, .(pixelIndex, growth_curve_id)][,.N, by = "growth_curve_id"]
matchPixDT <- sim$allPixDT[growth_curve_id %in% rndNumBygcID$growth_curve_id & ages>80,.(growth_curve_id, ages)]
newAgedistPix1 <- matchPixDT[rndNumBygcID, on = "growth_curve_id", nomatch = 0]
#newPixDT <- newPixDT[, .SD[sample(.N, N)], by = "growth_curve_id"] # take a sample of each growth_curve_id, length N
#newAgedistPix <- newAgedistPix1[, lapply(.SD, sample(ages, size = N[1])), by = growth_curve_id]
newAgedistPix <- newAgedistPix1[, .SD[sample(ages,size = N[1])], by = "growth_curve_id"]
# This is just a test -- does each growth curve id have the same # rows as N says
newAgedistPix[, .N == N[1],by = "growth_curve_id"]
# re-attach a pixelIndex
pixIndDistPixToMatch <- sim$allPixDT[pixelIndex %in% distPixToMatch$pixelIndex, .(pixelIndex, growth_curve_id)]
setorder(pixIndDistPixToMatch, growth_curve_id)
setorder(newAgedistPix, growth_curve_id)
set(pixIndDistPixToMatch, NULL, "distPixNegAge1985", newAgedistPix$ages)
pixIndDistPixToMatch[, growth_curve_id := NULL]
ageNA1985 <- merge.data.table(ageNA1985, pixIndDistPixToMatch, on = "pixelIndex", all.x = TRUE)
##
noDistPixToMatch <- probPix[noDist == 1,]
negAgeHist <- hist(noDistPixToMatch$substract, plot = FALSE)
Plot(negAgeHist)
## trying to find the closest pixel with a disturbance in the 1985-2015
## disturbance data.table.
# are there pixels that are disturbed twice in this data?
countDist <- allDist[, .N, by = pixelID]
# yes
# are those in the proPix?
probPix[pixelIndex %in% countDist[N>1,]$pixelID,] #no
# create a raster with all the disturbances
allDistRaster <- raster::raster(masterRaster)
allDistRaster[] <- NA
allDistRaster[countDist$pixelID] <- 1
# trying focal. This takes a raster and gives me back a raster
f3 <- function(x){
theNAs <- is.na(x)
if (all(theNAs))
NA
else
x[sample(which(!theNAs),1)]
}
# maybe too small?
f9 <- focal(allDistRaster,
w=matrix(1,nrow=3,ncol=3),
fun = f3)
# agg that column to the all pixels DT
set(ageDT, NULL, "f9", f9[])
# do any of the 3X3 windows cover the last 8159 pixels?
checkF9 <- ageDT[pixelIndex %in% noDistPixToMatch$pixelIndex,]
table(checkF9$f9, useNA = "ifany")
# 1 NaN
# 4410 3749
# figure out what year and what disturbances
# some pixels are disturbed twice but they are not in my probPix
#
# make a raster with the dist year
yrDistRaster <- raster::raster(masterRaster)
setnames(countDist, "pixelID", "pixelIndex")
setnames(allDist, "pixelID", "pixelIndex")
yrDist <- unique(countDist[allDist, on = "pixelIndex", nomatch = 0][,.(pixelIndex, year)])
yrDistRaster[] <- NA
yrDistRaster[yrDist$pixelIndex] <- yrDist$year
yrf9 <- focal(yrDistRaster,
w=matrix(1,nrow=3,ncol=3),
fun = f3)
set(ageDT, NULL, "yrf9", yrf9[])
# make a raster for the dist type
eventRaster <- raster::raster(masterRaster)
eventDist <- unique(countDist[allDist, on = "pixelIndex", nomatch = 0][,.(pixelIndex, events)])
eventRaster[] <- NA
eventRaster[eventDist$pixelIndex] <- eventDist$events
eventf9 <- focal(eventRaster,
w=matrix(1,nrow=3,ncol=3),
fun = f3)
set(ageDT, NULL, "eventf9", eventf9[])
# create a new "distPixels" DT adding the dist to the f9==1
f9pix <- ageDT[f9==1]
f9dist <- merge.data.table(allDist, f9pix, all = TRUE)
# add the growth_curve_id to help the match??
f9dist <- f9dist[sim$allPixDT, on = 'pixelIndex', nomatch = 0][,.(pixelIndex, year, events,
age2015, age2020, f9, yrf9,
eventf9, growth_curve_id)]
f9dist[, targetPix := 0L]
f9dist[pixelIndex %in% noDistPixToMatch$pixelIndex]$targetPix <- 1
# are there any pixels with targetPix == 1 and yrf9 !is.na()?
f9dist[targetPix >0 & !is.na(yrf9)]#4217
f9FirstHist <- hist(f9dist[targetPix >0 & !is.na(yrf9)]$age2020, plot = FALSE)
## create "new" dist to add to the allDist table
newF9dist <- f9dist[targetPix >0 & !is.na(yrf9), .(pixelIndex, yrf9, eventf9, growth_curve_id)]
## assign age at time of dist
## calculate age1985
rndF9BygcID <- newF9dist[,.N, by = "growth_curve_id"]
#sim$allPixDT[pixelIndex %in% distPixToMatch$pixelIndex, .(pixelIndex, growth_curve_id)][,.N, by = "growth_curve_id"]
f9agesDistDT <- sim$allPixDT[growth_curve_id %in% rndF9BygcID$growth_curve_id & ages>80,.(growth_curve_id, ages)]
newAgeF9atDist <- f9agesDistDT[rndF9BygcID, on = "growth_curve_id", nomatch = 0]
#newPixDT <- newPixDT[, .SD[sample(.N, N)], by = "growth_curve_id"] # take a sample of each growth_curve_id, length N
#newAgedistPix <- newAgedistPix1[, lapply(.SD, sample(ages, size = N[1])), by = growth_curve_id]
newAgeF9atDistPix <- newAgeF9atDist[, .SD[sample(ages,size = N[1])], by = "growth_curve_id"]
# This is just a test -- does each growth curve id have the same # rows as N says
newAgeF9atDistPix[, .N == N[1],by = "growth_curve_id"]
# merge with newF9dist
setorder(newF9dist, growth_curve_id)
setorder(newAgeF9atDistPix, growth_curve_id)
set(newF9dist, NULL, "newAgeF9atDist", newAgeF9atDistPix$ages)
newF9dist[, ageF91985 := newAgeF9atDist - (yrf9 - 1985)]
## add the "new" dist to allDist but make sure there are no NAs
table(newF9dist$yrf9, useNA = "ifany") # none
table(newF9dist$eventf9, useNA = "ifany")
# 1 2 NaN
# 88 4080 49
# need to replace those NAs
distVec <- sample(1:2,49, replace = TRUE)
newF9dist[is.na(eventf9)]$eventf9 <- distVec
f9AddDist <- newF9dist[,.(pixelIndex, yrf9, eventf9)]
setnames(f9AddDist,c("yrf9", "eventf9"), c("year", "events"))
dim(allDist)
allDist <- rbind(allDist, f9AddDist)
## add this age column to the ageNA1985 DT
f9ages <- newF9dist[,.(pixelIndex, ageF91985)]
ageNA1985 <- merge.data.table(ageNA1985, f9ages, on = "pixelIndex", all.x = TRUE)
## try again with a bigger window
# what are the pixels left to match?
toMatch5 <- noDistPixToMatch[!(pixelIndex %in% newF9dist$pixelIndex)]
## bigger focal
# start with yrDistRaster
yrf25 <- focal(yrDistRaster,
w=matrix(1,nrow=7,ncol=7),
fun = f3)
set(ageDT, NULL, "yrf25", yrf25[])
f25pix <- ageDT[!is.na(yrf25), .(pixelIndex, age2015, age2020, yrf25)]
#f25dist <- merge.data.table(allDist, f25pix, all = TRUE)
# add the growth_curve_id to help the match??
f25dist <- f25pix[sim$allPixDT, on = 'pixelIndex', nomatch = 0][,.(pixelIndex,age2015, age2020, yrf25,
growth_curve_id)]
f25dist[, targetPix := 0L]
f25dist[pixelIndex %in% toMatch5$pixelIndex]$targetPix <- 1
# are there any pixels with targetPix == 1 and yrf9 !is.na()?
f25dist[targetPix >0 & !is.na(yrf25)]#4217
eventf25 <- focal(eventRaster,
w=matrix(1,nrow=7,ncol=7),
fun = f3)
set(ageDT, NULL, "eventf25", eventf25[])
f25pix <- ageDT[!is.na(yrf25), .(pixelIndex, age2015, age2020, yrf25, eventf25)]
## this is new###################################################
## create "new" dist to add to the allDist table
newF25dist <- f25dist[targetPix >0 & !is.na(yrf25), .(pixelIndex, yrf25, eventf25, growth_curve_id)]
## assign age at time of dist
## calculate age1985
rndF25BygcID <- newF25dist[,.N, by = "growth_curve_id"]
#sim$allPixDT[pixelIndex %in% distPixToMatch$pixelIndex, .(pixelIndex, growth_curve_id)][,.N, by = "growth_curve_id"]
f25agesDistDT <- sim$allPixDT[growth_curve_id %in% rndF25BygcID$growth_curve_id & ages>80,.(growth_curve_id, ages)]
newAgeF25atDist <- f25agesDistDT[rndF25BygcID, on = "growth_curve_id", nomatch = 0]
#newPixDT <- newPixDT[, .SD[sample(.N, N)], by = "growth_curve_id"] # take a sample of each growth_curve_id, length N
#newAgedistPix <- newAgedistPix1[, lapply(.SD, sample(ages, size = N[1])), by = growth_curve_id]
newAgeF25atDistPix <- newAgeF25atDist[, .SD[sample(ages,size = N[1])], by = "growth_curve_id"]
# This is just a test -- does each growth curve id have the same # rows as N says
newAgeF25atDistPix[, .N == N[1],by = "growth_curve_id"]
# merge with newF9dist
setorder(newF25dist, growth_curve_id)
setorder(newAgeF25atDistPix, growth_curve_id)
set(newF25dist, NULL, "newAgeF25atDist", newAgeF25atDistPix$ages)
newF25dist[, ageF251985 := newAgeF25atDist - (yrf25 - 1985)]
## add the "new" dist to allDist but make sure there are no NAs
table(newF25dist$yrf25, useNA = "ifany") # none
table(newF25dist$eventf25, useNA = "ifany")
# 1 2 NaN
# 88 4080 49
# need to replace those NAs
distVec <- sample(1:2,length(which(is.na(newF25dist$eventf25))), replace = TRUE)
newF25dist[is.na(eventf25)]$eventf25 <- distVec
f25AddDist <- newF25dist[,.(pixelIndex, yrf25, eventf25)]
setnames(f25AddDist,c("yrf25", "eventf25"), c("year", "events"))
dim(allDist)
allDist <- rbind(allDist, f25AddDist)
## add this age column to the ageNA1985 DT
f25ages <- newF25dist[,.(pixelIndex, ageF251985)]
ageNA1985 <- merge.data.table(ageNA1985, f25ages, on = "pixelIndex", all.x = TRUE)
## try again with a bigger window
# what are the pixels left to match?
toMatch5 <- noDistPixToMatch[!(pixelIndex %in% newF9dist$pixelIndex)]
## create "new" dist to add to the allDist table
newF9dist <- f9dist[targetPix >0 & !is.na(yrf9), .(pixelIndex, yrf9, eventf9, growth_curve_id)]
## assign age at time of dist
## calculate age1985
rndF9BygcID <- newF9dist[,.N, by = "growth_curve_id"]
#sim$allPixDT[pixelIndex %in% distPixToMatch$pixelIndex, .(pixelIndex, growth_curve_id)][,.N, by = "growth_curve_id"]
f9agesDistDT <- sim$allPixDT[growth_curve_id %in% rndF9BygcID$growth_curve_id & ages>80,.(growth_curve_id, ages)]
newAgeF9atDist <- f9agesDistDT[rndF9BygcID, on = "growth_curve_id", nomatch = 0]
#newPixDT <- newPixDT[, .SD[sample(.N, N)], by = "growth_curve_id"] # take a sample of each growth_curve_id, length N
#newAgedistPix <- newAgedistPix1[, lapply(.SD, sample(ages, size = N[1])), by = growth_curve_id]
newAgeF9atDistPix <- newAgeF9atDist[, .SD[sample(ages,size = N[1])], by = "growth_curve_id"]
# This is just a test -- does each growth curve id have the same # rows as N says
newAgeF9atDistPix[, .N == N[1],by = "growth_curve_id"]
# merge with newF9dist
setorder(newF9dist, growth_curve_id)
setorder(newAgeF9atDistPix, growth_curve_id)
set(newF9dist, NULL, "newAgeF9atDist", newAgeF9atDistPix$ages)
newF9dist[, ageF91985 := newAgeF9atDist - (yrf9 - 1985)]
## add the "new" dist to allDist but make sure there are no NAs
table(newF9dist$yrf9, useNA = "ifany") # none
table(newF9dist$eventf9, useNA = "ifany")
# 1 2 NaN
# 88 4080 49
# need to replace those NAs
distVec <- sample(1:2,49, replace = TRUE)
newF9dist[is.na(eventf9)]$eventf9 <- distVec
f9AddDist <- newF9dist[,.(pixelIndex, yrf9, eventf9)]
setnames(f9AddDist,c("yrf9", "eventf9"), c("year", "events"))
dim(allDist)
allDist <- rbind(allDist, f9AddDist)
## add this age column to the ageNA1985 DT
f9ages <- newF9dist[,.(pixelIndex, ageF91985)]
ageNA1985 <- merge.data.table(ageNA1985, f9ages, on = "pixelIndex", all.x = TRUE)
## try again with a bigger window
# what are the pixels left to match?
toMatch5 <- noDistPixToMatch[!(pixelIndex %in% newF9dist$pixelIndex)]
############OLD
## now check if the lag1 adds years to the
# lagNames <- c("yearLag", "eventLag", "growth_curve_idLag")
# f9dist[,.(lagNames) := list(shift(year,1,type="lag"), shift(events,1,type="lag"),shift(year,1,growth_curve_id="lag"))]
f9dist[, yearLag := shift(year,1,type="lag")]
f9dist[!is.na(yearLag) & targetPix>0]
#1886
# would I get more with a "lead"
f9dist[, yearLead := shift(year,1,type="lead")]
f9dist[!is.na(yearLead) & targetPix>0] #1897
# 2lag?
f9dist[, yearLag := shift(year,1,type="lag")]
f9dist[!is.na(yearLag) & targetPix>0]
ageAllDist <- merge.data.table(ageDT, allDist, by = "pixelIndex", all.x = TRUE)
# add a column to indicate the pixels I am looking to fill ages
ageAllDist[, targetPix := 0L]
ageAllDist[pixelIndex %in% noDistPixToMatch$pixelIndex]$targetPix <- 1
f9Pix <- ageAllDist[!is.na(f9)]
thesePix4 <- checkF9[!is.na(f9)]
### HERE
qplot(sim$allPixDT[growth_curve_id == rndNumBygcID[1,1] & ages >80,]$ages, geom = "histogram")
library(ggplot2)
distPixWageIn2020 <- qplot(ageNoMatch[pixelIndex %in% allDist$pixelID,]$age2020, geom="histogram")
## check the age distribution of the ageNoMatch for the remaining 89409 pixels
## (8046.81 ha over 280118.2)
noAge2015 <- ageNoMathc[!(pixelIndex %in% allDist$pixelID)]
noAge2015hist <- qplot(noAge2015$ageDT2020, geom="histogram")
## have to bring those to 1985...how many are over 35 (2020-1985)?
noAge2015[ageDT2020>=35,]
#81250. These I can do -5.
# look at the others? Can I buffer around them??
probPix <- noAge2015[ageDT2020<35,]
# double checking if any of these are in the disturbed pixels
probPix[pixelIndex %in% allDist$pixelID,]
# none
# trying to see if I can find pixels around those pixels that are disturbed.
# need to create a raster of these pixels and give them a value
probPixRaster <- raster::raster(masterRaster)
probPixRaster[] <- NA
probPixRaster[probPix$pixelIndex] <- 1
## trying to find the closest pixel with a disturbance in the 1985-2015
## disturbance data.table.
# are there pixels that are disturbed twice in this data?
countDist <- allDist[, .N, by = pixelID]
# yes
# are those in the proPix?
probPix[pixelIndex %in% countDist[N>1,]$pixelID,]
# no
# create a raster with all the disturbances
allDistRaster <- raster::raster(masterRaster)
allDistRaster[] <- NA
allDistRaster[countDist$pixelID] <- 1
# trying focal. This takes a raster and gives me back a raster
f3 <- function(x){
theNAs <- is.na(x)
if (all(theNAs))
NA
else
x[sample(which(!theNAs),1)]
}
f1 <- focal(allDistRaster,
w=matrix(1,nrow=5,ncol=5),
fun = f3)
set(ageDTboth, NULL, "f25", f1[])
checkF25 <- ageDTboth[pixelIndex %in% probPix$pixelIndex,]
checkF25[!is.na(f25),]
#ageDT2015 pixelIndex ageDT2020 pixelIndex f25
# 1: NA 1073507 15 1 1
# 2: NA 1078649 15 1 1
# 3: NA 1081227 15 1 1
# 4: NA 1083788 15 2 1
# 5: NA 1083795 15 1 1
# ---
# 6159: NA 8453863 31 1 1
# 6160: NA 8456433 31 1 1
# 6161: NA 8456435 31 1 1
# 6162: NA 8466726 33 2 1
# 6163: NA 8469298 33 2 1
## trying a little bigger
f49 <- focal(allDistRaster,
w=matrix(1,nrow=7,ncol=7),
fun = f3)
set(ageDTboth, NULL, "f49", f49[])
checkf49 <- ageDTboth[pixelIndex %in% probPix$pixelIndex,]
checkf49[!is.na(f49),]
checkf49[is.na(f49),]
## next to do repeat this but with one disturbance at a time since I have to
## know which disturbance to attribute (1 =fire and 2 = harvest)
## what is up with these last pixels??
lastPix <- checkf49[is.na(f49),]$pixelIndex
sim$allPixDT[pixelIndex %in% lastPix,]
range(sim$allPixDT[pixelIndex %in% lastPix,]$pixelIndex)
lastBad <- NA
allPix <- data.table(sim$allPixDT,lastBad)
allPix$lastBad[which(allPix$pixelIndex %in% lastPix)] <- 1
# Now what? take the values of the ones in the row close by??
|
/buildingAgeRaster1985.R
|
no_license
|
cboisvenue/CBM_dataPrep_RIApresentDayTempError
|
R
| false | false | 26,811 |
r
|
# old rasters from Greg 2015 I think.
# rastersOLD <- prepInputs(
# url = "https://drive.google.com/file/d/1DN31xcXh97u6v8NaVcy0O3vzKpLpld69/view?usp=sharing",
# fun = "raster::stack",
# #rasterToMatch = masterRaster, useGDAL = FALSE) # this was Eliot's
# destinationPath = 'inputs')
#
# stackIan <- prepInputs(url = "https://drive.google.com/file/d/1keh-3KIvgy8G5nBBxolH_LsoC53w3qqN/view?usp=sharing",
# fun = "raster::stack",
# destinationPath = 'inputs')
# # current masterRaster
# fireRetMasterRaster <- spadesCBMout$masterRaster
#
# # trying things
# rastersOLDt1 <- prepInputs(
# url = "https://drive.google.com/file/d/1DN31xcXh97u6v8NaVcy0O3vzKpLpld69/view?usp=sharing",
# fun = "raster::stack",
# rasterToMatch = spadesCBMout$masterRaster,
# useGDAL = FALSE,
# destinationPath = 'inputs')
#
# age <- postProcess(x = stackIan$RIAlandscape.5,
# rasterToMatch = spadesCBMout$masterRaster,
# filename2 = "inputs/pixelAges.tif")
#
# aValues <- age[]
# table(aValues, useNA = 'ifany')
# mValues <- spadesCBMout$masterRaster[]
# table(mValues)
# # there are this many more NAs in the age raster provided by Ian
# # 5435395 - 5605119
# # [1] -169724
# and here they are
# agesDT <- data.table(spadesCBMout$allPixDT[,.(ages, pixelIndex)],aValues)
# setnames(agesDT,c("ages","aValues"), c("ages2020", "ages2015"))
# agesNA <- agesDT[is.na(ages2015) & !is.na(ages2020),]
#
# # need to check if these pixels are disturbed
# # are they burned?
# presentDayBurns <- prepInputs(url = 'https://drive.google.com/file/d/1MjQ5y9Txr1ezv0BatG4b_6FpM6wti1b5/view?usp=sharing',
# destinationPath = 'inputs',
# overwrite = TRUE,
# fun = 'readRDS')
# # there are 3426/169724 that get burned between 1985-2015
# agesNA[pixelIndex %in% presentDayBurns$pixelID,]
#
# # are they harvested?
# presentDayHarvest <- prepInputs(url = 'https://drive.google.com/file/d/1Ca-kPun7_VF2xV8s40IJ9hri3Ok-6qx5/view?usp=sharing',
# destinationPath = 'inputs',
# overwrite = TRUE,
# fun = 'readRDS')
# # there are 14969/169724 that get burned between 1985-2015
# agesNA[pixelIndex %in% presentDayHarvest$pixelID,]
#
# (14969 + 3426)/169724
# #only 11%
#read-in the VRI2015
# this is an ESRI file
#https://pub.data.gov.bc.ca/datasets/02dba161-fdb7-48ae-a4bb-bd6ef017c36d/2015/VEG_COMP_LYR_L1_POLY_2015.gdb.zip
# # back-tracking VRI2015 age raster to 1985 based on the disturbance info
# presentDayBurns[, events := 1L]
# presentDayHarvest[, events := 2L]
# allDist <- rbind(presentDayBurns, presentDayHarvest)
#
# # there needs to be a part here to make the right age raster
# RIArtm <- prepInputs(url = "https://drive.google.com/file/d/1h7gK44g64dwcoqhij24F2K54hs5e35Ci/view?usp=sharing",
# destinationPath = 'inputs')
# VRI2015ageRaster <- Cache(prepInputsVRIage, VRIurl = "https://drive.google.com/file/d/1spcJq_4r4TRA2jfjclQcwBcIfnbniEXZ",
# dPath = "C:/Celine/github/spadesCBM_RIA/modules/CBM_dataPrep_RIA/data",
# rasterToMatch = RIArtm,
# cacheRepo = cacheDir)
# # make a data table from the raster
#
#
# # this is the wrong raster but waiting for the VRI2015
# ages2015dt <- agesDT[,.(pixelIndex, ages2015)]
#
#
#
#
# prepInputsVRIage <- function(VRIurl, dPath, rasterToMatch, field = "PROJ_AGE_1"){
# VRIin <- prepInputs(url = VRIurl,
# fun = "sf::st_read",
# destinationPath = dPath)
# RIA_VRI <- st_transform(VRIin, crs = st_crs(rasterToMatch))
# #gcIDRaster <- fasterize::fasterize(RIA_VRI, rasterToMatch, field = "curve2")
# ageRaster <- fasterize::fasterize(RIA_VRI, rasterToMatch, field = "PROJ_AGE_1")
# #gcIDRaster[] <- as.integer(gcIDRaster[])
# ageRaster[] <- as.integer(ageRaster[])
# VRIageRaster <- raster::raster(ageRaster)
# return(VRIageRaster)
# }
#
# # Eliot modified reproducible so prepInputs can deal with .gbd files
# remotes::install_github("PredictiveEcology/reproducible@gdb_archiveNA")
# # I am using "dummy" masterRaster --> use your real one
# masterRaster <- raster(extent(-1963750, -1321250, 7407500, 8239000),
# crs = "+proj=lcc +lat_0=0 +lon_0=-95 +lat_1=49 +lat_2=77 +x_0=0 +y_0=0 +ellps=GRS80 +units=m", res = 250)
# # Make a studyArea --> use your real one
# sa <- as(extent(masterRaster), "SpatialPolygons")
# crs(sa) <- crs(masterRaster)
# loadAge <- function(x, field = "PROJ_AGE_1") {
# # a <- Cache(sf::st_read, x) # I used Cache during my development because this takes 37 minutes to run -- I was sick of running it again and again
# a <- sf::st_read(x)
# a1 <- a[, field]
# return(a1)
# }
# a <- prepInputs(url =
# "https://pub.data.gov.bc.ca/datasets/02dba161-fdb7-48ae-a4bb-bd6ef017c36d/2015/VEG_COMP_LYR_L1_POLY_2015.gdb.zip",
# fun = quote(loadAge(x = targetFilePath,
# field = "PROJ_AGE_1")),
# targetFile = "VEG_COMP_LYR_L1_POLY_2015.gdb.zip", archive = NA,
# studyArea = sa
# #rasterTomatch = masterRaster
# )
# extra tries that failed
# asked for help but got "do it another way" answer
# pickAges <- function(gcID, pickN, allPixDT){
# ageDist <- allPixDT[growth_curve_id == as.numeric(gcID) & ages >80,]$ages
# return(ageDist[round(runif(
# n = as.numeric(pickN),
# min = 1,
# max = length(ageDist)),digits = 0)])
# }
#
# newVector <- inDT[, lapply(.SD, FUN = pickAges(gcID = growth_curve_id,
# pickN = N,
# allPixDT = egPixDT)),
# by = 'growth_curve_id']
#
#
# newAgesDistPix <- rndNumBygcID[, lapply(.SD, FUN = pickAges(gcID = growth_curve_id,
# pickN = N,
# allPixDT = sim$allPixDT)),
# by = 'growth_curve_id']
#newPixDT <- egPixDT[inDT, on = "growth_curve_id", nomatch = 0] # join the 2 objects on "growth_curve_id"
# ageList <- as.list(vector(mode = "list", length = nrow(rndNumBygcID)))
# for(i in 1:length(rndNumBygcID$growth_curve_id)){
# ageDist <- matchPixDT[growth_curve_id == as.numeric(rndNumBygcID[i,1]),]
# ageList[[i]] <- sample(ageDist$ages, size = as.numeric(rndNumBygcID[i,2]))
# }
#
# ageNew <- rbindlist(ageList)
#
## another try
############ From the browser post VRI2015 reading-in###########################
# starting from the read-in age raster in 2015 and keeping the same masterRaster
# as the fireReturnInterval runs.
age2015 <- ageRaster2015[]
age2020 <- ageRaster[]
# NAs 2015 5539822
# NAs 2020 5435395
## figure out what ages the NAs have in 2020.
## Note that some ages in the 2020 that have no ages in the 2015 raster AND that
## are disturbed (so in the sim$disturbanceRasters data table) are old...older
## then the age from the dist...but most are below 50.
ageDT <- data.table(pixelIndex = sim$allPixDT$pixelIndex, age2015, age2020)
ageNoMatch <- ageDT[is.na(age2015) & !is.na(age2020),]
## NOTEs: there are 104427 more pixels with NAs in age2015 than in age2020.
ageDT[!is.na(age2015) & is.na(age2020),]
## There are no pixels that have ages in 2015 and are NAs-age in 2020.
# ageNA1985, what is their year of disturbance?
setkeyv(ageNoMatch, "pixelIndex")
setkeyv(allDist,"pixelID")
# do any pixels burn twice?
length(unique(presentDayBurns$pixelID)) == dim(presentDayBurns)[1]
#TRUE
# harvested twice?
length(unique(presentDayHarvest$pixelID)) == dim(presentDayHarvest)[1]
#TRUE
# get only the burnt/harvested pixels in the noMatch
ageNAburns <- presentDayBurns[pixelID %in% ageNoMatch$pixelIndex, ]
setnames(ageNAburns,"pixelID", "pixelIndex")
ageNAharvest <- presentDayHarvest[pixelID %in% ageNoMatch$pixelIndex, ]
setnames(ageNAharvest,"pixelID", "pixelIndex")
# Merge them with ageNoMatch
#dt_a[dt_b, on = .(b = y)]
ageNA1985 <- merge.data.table(ageNoMatch, ageNAburns, by = "pixelIndex", all.x = TRUE)
setnames(ageNA1985,"year", "burnYear")
ageNA1985[, events := NULL]
ageNA1985 <- merge.data.table(ageNA1985, ageNAharvest, by = "pixelIndex", all.x = TRUE)
setnames(ageNA1985,"year", "harvestYear")
ageNA1985[, events := NULL]
ageNA1985$noDist <- 0
ageNA1985[which(is.na(burnYear) & is.na(harvestYear)),]$noDist <- 1
# make a column of "straight substraction"
ageNA1985[, substract := (age2020 - 35)]
## pixels that have no disturbance and are >0 in 1985 get this age ##################
ageNA1985$PixSubtract1985 <- 999
ageNA1985[noDist == 1 & substract >= 0,]$PixSubtract1985 <- ageNA1985[noDist == 1 & substract >= 0,]$substract
ageNA1985[PixSubtract1985 == 999,]
# still 23177 that are not dealt with.
## the ones that have a non-negative age at the time of disturbance will get the
## age in the year prior to disturbance
ageNA1985[, PixFireYrAge := age2020 - (2020-burnYear)]
ageNA1985[, PixCutYrAge := age2020 - (2020-harvestYear)]
## this gives the ages of the burnt pixels in 1985 (values>0)
ageNA1985[, firePix1985age := PixFireYrAge - (burnYear - 1985)]
ageNA1985[, cutPix1985age := PixCutYrAge - (harvestYear - 1985)]
## how many left?
ageNA1 <- ageNA1985[PixSubtract1985 != 999] #81250
ageNA2 <- ageNA1985[ PixSubtract1985 == 999 & (firePix1985age > 0 | cutPix1985age > 0),] #3835
#104427-85085
ageNA1985[pixelIndex %in% ageNA1$pixelIndex | pixelIndex %in% ageNA2$pixelIndex] #85085
probPix <- ageNA1985[!(pixelIndex %in% ageNA1$pixelIndex | pixelIndex %in% ageNA2$pixelIndex)] #19342
# remove these
#colsRemove <- c("substract", "PixSubtract1985", "PixFireYrAge")
#probPix[, c("substract", "PixSubtract1985", "PixFireYrAge") := list(NULL, NULL, NULL)]
# how many of those are disturbed?
table(probPix$noDist)
# 0 1
# 11183 8159
# look at disturbed pixels ages in 1985
# probCuts1985 <- qplot(probPix[noDist == 0,]$cutPix1985age, geom = "histogram")
# probBurns1985 <- qplot(probPix[noDist == 0,]$firePix1985age, geom = "histogram")
## decision: all the disturbed pixels (noDist == 0) that were cut or burnt will
## have mature ages at the year of disturbance. Those will be selected from the
## age distribution of pixels with the same gcID with ages >80 in age2020.
distPixToMatch <- probPix[noDist == 0,]
length(unique(distPixToMatch$pixelIndex)) # no duplicats
# figure out the gcID for each pixelIndex
# get all the ages>80 at ages2020 in each gcID
# randomly select one for each pixelIndex
#gcIDdistPixNoMatch <- unique(sim$allPixDT[pixelIndex %in% distPixToMatch$pixelIndex,]$growth_curve_id)
#84
rndNumBygcID <- sim$allPixDT[pixelIndex %in% distPixToMatch$pixelIndex, .(pixelIndex, growth_curve_id)][,.N, by = "growth_curve_id"]
matchPixDT <- sim$allPixDT[growth_curve_id %in% rndNumBygcID$growth_curve_id & ages>80,.(growth_curve_id, ages)]
newAgedistPix1 <- matchPixDT[rndNumBygcID, on = "growth_curve_id", nomatch = 0]
#newPixDT <- newPixDT[, .SD[sample(.N, N)], by = "growth_curve_id"] # take a sample of each growth_curve_id, length N
#newAgedistPix <- newAgedistPix1[, lapply(.SD, sample(ages, size = N[1])), by = growth_curve_id]
newAgedistPix <- newAgedistPix1[, .SD[sample(ages,size = N[1])], by = "growth_curve_id"]
# This is just a test -- does each growth curve id have the same # rows as N says
newAgedistPix[, .N == N[1],by = "growth_curve_id"]
# re-attach a pixelIndex
pixIndDistPixToMatch <- sim$allPixDT[pixelIndex %in% distPixToMatch$pixelIndex, .(pixelIndex, growth_curve_id)]
setorder(pixIndDistPixToMatch, growth_curve_id)
setorder(newAgedistPix, growth_curve_id)
set(pixIndDistPixToMatch, NULL, "distPixNegAge1985", newAgedistPix$ages)
pixIndDistPixToMatch[, growth_curve_id := NULL]
ageNA1985 <- merge.data.table(ageNA1985, pixIndDistPixToMatch, on = "pixelIndex", all.x = TRUE)
##
noDistPixToMatch <- probPix[noDist == 1,]
negAgeHist <- hist(noDistPixToMatch$substract, plot = FALSE)
Plot(negAgeHist)
## trying to find the closest pixel with a disturbance in the 1985-2015
## disturbance data.table.
# are there pixels that are disturbed twice in this data?
countDist <- allDist[, .N, by = pixelID]
# yes
# are those in the proPix?
probPix[pixelIndex %in% countDist[N>1,]$pixelID,] #no
# create a raster with all the disturbances
allDistRaster <- raster::raster(masterRaster)
allDistRaster[] <- NA
allDistRaster[countDist$pixelID] <- 1
# trying focal. This takes a raster and gives me back a raster
f3 <- function(x){
theNAs <- is.na(x)
if (all(theNAs))
NA
else
x[sample(which(!theNAs),1)]
}
# maybe too small?
f9 <- focal(allDistRaster,
w=matrix(1,nrow=3,ncol=3),
fun = f3)
# agg that column to the all pixels DT
set(ageDT, NULL, "f9", f9[])
# do any of the 3X3 windows cover the last 8159 pixels?
checkF9 <- ageDT[pixelIndex %in% noDistPixToMatch$pixelIndex,]
table(checkF9$f9, useNA = "ifany")
# 1 NaN
# 4410 3749
# figure out what year and what disturbances
# some pixels are disturbed twice but they are not in my probPix
#
# make a raster with the dist year
yrDistRaster <- raster::raster(masterRaster)
setnames(countDist, "pixelID", "pixelIndex")
setnames(allDist, "pixelID", "pixelIndex")
yrDist <- unique(countDist[allDist, on = "pixelIndex", nomatch = 0][,.(pixelIndex, year)])
yrDistRaster[] <- NA
yrDistRaster[yrDist$pixelIndex] <- yrDist$year
yrf9 <- focal(yrDistRaster,
w=matrix(1,nrow=3,ncol=3),
fun = f3)
set(ageDT, NULL, "yrf9", yrf9[])
# make a raster for the dist type
eventRaster <- raster::raster(masterRaster)
eventDist <- unique(countDist[allDist, on = "pixelIndex", nomatch = 0][,.(pixelIndex, events)])
eventRaster[] <- NA
eventRaster[eventDist$pixelIndex] <- eventDist$events
eventf9 <- focal(eventRaster,
w=matrix(1,nrow=3,ncol=3),
fun = f3)
set(ageDT, NULL, "eventf9", eventf9[])
# create a new "distPixels" DT adding the dist to the f9==1
f9pix <- ageDT[f9==1]
f9dist <- merge.data.table(allDist, f9pix, all = TRUE)
# add the growth_curve_id to help the match??
f9dist <- f9dist[sim$allPixDT, on = 'pixelIndex', nomatch = 0][,.(pixelIndex, year, events,
age2015, age2020, f9, yrf9,
eventf9, growth_curve_id)]
f9dist[, targetPix := 0L]
f9dist[pixelIndex %in% noDistPixToMatch$pixelIndex]$targetPix <- 1
# are there any pixels with targetPix == 1 and yrf9 !is.na()?
f9dist[targetPix >0 & !is.na(yrf9)]#4217
f9FirstHist <- hist(f9dist[targetPix >0 & !is.na(yrf9)]$age2020, plot = FALSE)
## create "new" dist to add to the allDist table
newF9dist <- f9dist[targetPix >0 & !is.na(yrf9), .(pixelIndex, yrf9, eventf9, growth_curve_id)]
## assign age at time of dist
## calculate age1985
rndF9BygcID <- newF9dist[,.N, by = "growth_curve_id"]
#sim$allPixDT[pixelIndex %in% distPixToMatch$pixelIndex, .(pixelIndex, growth_curve_id)][,.N, by = "growth_curve_id"]
f9agesDistDT <- sim$allPixDT[growth_curve_id %in% rndF9BygcID$growth_curve_id & ages>80,.(growth_curve_id, ages)]
newAgeF9atDist <- f9agesDistDT[rndF9BygcID, on = "growth_curve_id", nomatch = 0]
#newPixDT <- newPixDT[, .SD[sample(.N, N)], by = "growth_curve_id"] # take a sample of each growth_curve_id, length N
#newAgedistPix <- newAgedistPix1[, lapply(.SD, sample(ages, size = N[1])), by = growth_curve_id]
newAgeF9atDistPix <- newAgeF9atDist[, .SD[sample(ages,size = N[1])], by = "growth_curve_id"]
# This is just a test -- does each growth curve id have the same # rows as N says
newAgeF9atDistPix[, .N == N[1],by = "growth_curve_id"]
# merge with newF9dist
setorder(newF9dist, growth_curve_id)
setorder(newAgeF9atDistPix, growth_curve_id)
set(newF9dist, NULL, "newAgeF9atDist", newAgeF9atDistPix$ages)
newF9dist[, ageF91985 := newAgeF9atDist - (yrf9 - 1985)]
## add the "new" dist to allDist but make sure there are no NAs
table(newF9dist$yrf9, useNA = "ifany") # none
table(newF9dist$eventf9, useNA = "ifany")
# 1 2 NaN
# 88 4080 49
# need to replace those NAs
distVec <- sample(1:2,49, replace = TRUE)
newF9dist[is.na(eventf9)]$eventf9 <- distVec
f9AddDist <- newF9dist[,.(pixelIndex, yrf9, eventf9)]
setnames(f9AddDist,c("yrf9", "eventf9"), c("year", "events"))
dim(allDist)
allDist <- rbind(allDist, f9AddDist)
## add this age column to the ageNA1985 DT
f9ages <- newF9dist[,.(pixelIndex, ageF91985)]
ageNA1985 <- merge.data.table(ageNA1985, f9ages, on = "pixelIndex", all.x = TRUE)
## try again with a bigger window
# what are the pixels left to match?
toMatch5 <- noDistPixToMatch[!(pixelIndex %in% newF9dist$pixelIndex)]
## bigger focal
# start with yrDistRaster
yrf25 <- focal(yrDistRaster,
w=matrix(1,nrow=7,ncol=7),
fun = f3)
set(ageDT, NULL, "yrf25", yrf25[])
f25pix <- ageDT[!is.na(yrf25), .(pixelIndex, age2015, age2020, yrf25)]
#f25dist <- merge.data.table(allDist, f25pix, all = TRUE)
# add the growth_curve_id to help the match??
f25dist <- f25pix[sim$allPixDT, on = 'pixelIndex', nomatch = 0][,.(pixelIndex,age2015, age2020, yrf25,
growth_curve_id)]
f25dist[, targetPix := 0L]
f25dist[pixelIndex %in% toMatch5$pixelIndex]$targetPix <- 1
# are there any pixels with targetPix == 1 and yrf9 !is.na()?
f25dist[targetPix >0 & !is.na(yrf25)]#4217
eventf25 <- focal(eventRaster,
w=matrix(1,nrow=7,ncol=7),
fun = f3)
set(ageDT, NULL, "eventf25", eventf25[])
f25pix <- ageDT[!is.na(yrf25), .(pixelIndex, age2015, age2020, yrf25, eventf25)]
## this is new###################################################
## create "new" dist to add to the allDist table
newF25dist <- f25dist[targetPix >0 & !is.na(yrf25), .(pixelIndex, yrf25, eventf25, growth_curve_id)]
## assign age at time of dist
## calculate age1985
rndF25BygcID <- newF25dist[,.N, by = "growth_curve_id"]
#sim$allPixDT[pixelIndex %in% distPixToMatch$pixelIndex, .(pixelIndex, growth_curve_id)][,.N, by = "growth_curve_id"]
f25agesDistDT <- sim$allPixDT[growth_curve_id %in% rndF25BygcID$growth_curve_id & ages>80,.(growth_curve_id, ages)]
newAgeF25atDist <- f25agesDistDT[rndF25BygcID, on = "growth_curve_id", nomatch = 0]
#newPixDT <- newPixDT[, .SD[sample(.N, N)], by = "growth_curve_id"] # take a sample of each growth_curve_id, length N
#newAgedistPix <- newAgedistPix1[, lapply(.SD, sample(ages, size = N[1])), by = growth_curve_id]
newAgeF25atDistPix <- newAgeF25atDist[, .SD[sample(ages,size = N[1])], by = "growth_curve_id"]
# This is just a test -- does each growth curve id have the same # rows as N says
newAgeF25atDistPix[, .N == N[1],by = "growth_curve_id"]
# merge with newF9dist
setorder(newF25dist, growth_curve_id)
setorder(newAgeF25atDistPix, growth_curve_id)
set(newF25dist, NULL, "newAgeF25atDist", newAgeF25atDistPix$ages)
newF25dist[, ageF251985 := newAgeF25atDist - (yrf25 - 1985)]
## add the "new" dist to allDist but make sure there are no NAs
table(newF25dist$yrf25, useNA = "ifany") # none
table(newF25dist$eventf25, useNA = "ifany")
# 1 2 NaN
# 88 4080 49
# need to replace those NAs
distVec <- sample(1:2,length(which(is.na(newF25dist$eventf25))), replace = TRUE)
newF25dist[is.na(eventf25)]$eventf25 <- distVec
f25AddDist <- newF25dist[,.(pixelIndex, yrf25, eventf25)]
setnames(f25AddDist,c("yrf25", "eventf25"), c("year", "events"))
dim(allDist)
allDist <- rbind(allDist, f25AddDist)
## add this age column to the ageNA1985 DT
f25ages <- newF25dist[,.(pixelIndex, ageF251985)]
ageNA1985 <- merge.data.table(ageNA1985, f25ages, on = "pixelIndex", all.x = TRUE)
## try again with a bigger window
# what are the pixels left to match?
toMatch5 <- noDistPixToMatch[!(pixelIndex %in% newF9dist$pixelIndex)]
## create "new" dist to add to the allDist table
newF9dist <- f9dist[targetPix >0 & !is.na(yrf9), .(pixelIndex, yrf9, eventf9, growth_curve_id)]
## assign age at time of dist
## calculate age1985
rndF9BygcID <- newF9dist[,.N, by = "growth_curve_id"]
#sim$allPixDT[pixelIndex %in% distPixToMatch$pixelIndex, .(pixelIndex, growth_curve_id)][,.N, by = "growth_curve_id"]
f9agesDistDT <- sim$allPixDT[growth_curve_id %in% rndF9BygcID$growth_curve_id & ages>80,.(growth_curve_id, ages)]
newAgeF9atDist <- f9agesDistDT[rndF9BygcID, on = "growth_curve_id", nomatch = 0]
#newPixDT <- newPixDT[, .SD[sample(.N, N)], by = "growth_curve_id"] # take a sample of each growth_curve_id, length N
#newAgedistPix <- newAgedistPix1[, lapply(.SD, sample(ages, size = N[1])), by = growth_curve_id]
newAgeF9atDistPix <- newAgeF9atDist[, .SD[sample(ages,size = N[1])], by = "growth_curve_id"]
# This is just a test -- does each growth curve id have the same # rows as N says
newAgeF9atDistPix[, .N == N[1],by = "growth_curve_id"]
# merge with newF9dist
setorder(newF9dist, growth_curve_id)
setorder(newAgeF9atDistPix, growth_curve_id)
set(newF9dist, NULL, "newAgeF9atDist", newAgeF9atDistPix$ages)
newF9dist[, ageF91985 := newAgeF9atDist - (yrf9 - 1985)]
## add the "new" dist to allDist but make sure there are no NAs
table(newF9dist$yrf9, useNA = "ifany") # none
table(newF9dist$eventf9, useNA = "ifany")
# 1 2 NaN
# 88 4080 49
# need to replace those NAs
distVec <- sample(1:2,49, replace = TRUE)
newF9dist[is.na(eventf9)]$eventf9 <- distVec
f9AddDist <- newF9dist[,.(pixelIndex, yrf9, eventf9)]
setnames(f9AddDist,c("yrf9", "eventf9"), c("year", "events"))
dim(allDist)
allDist <- rbind(allDist, f9AddDist)
## add this age column to the ageNA1985 DT
f9ages <- newF9dist[,.(pixelIndex, ageF91985)]
ageNA1985 <- merge.data.table(ageNA1985, f9ages, on = "pixelIndex", all.x = TRUE)
## try again with a bigger window
# what are the pixels left to match?
toMatch5 <- noDistPixToMatch[!(pixelIndex %in% newF9dist$pixelIndex)]
############OLD
## now check if the lag1 adds years to the
# lagNames <- c("yearLag", "eventLag", "growth_curve_idLag")
# f9dist[,.(lagNames) := list(shift(year,1,type="lag"), shift(events,1,type="lag"),shift(year,1,growth_curve_id="lag"))]
f9dist[, yearLag := shift(year,1,type="lag")]
f9dist[!is.na(yearLag) & targetPix>0]
#1886
# would I get more with a "lead"
f9dist[, yearLead := shift(year,1,type="lead")]
f9dist[!is.na(yearLead) & targetPix>0] #1897
# 2lag?
f9dist[, yearLag := shift(year,1,type="lag")]
f9dist[!is.na(yearLag) & targetPix>0]
ageAllDist <- merge.data.table(ageDT, allDist, by = "pixelIndex", all.x = TRUE)
# add a column to indicate the pixels I am looking to fill ages
ageAllDist[, targetPix := 0L]
ageAllDist[pixelIndex %in% noDistPixToMatch$pixelIndex]$targetPix <- 1
f9Pix <- ageAllDist[!is.na(f9)]
thesePix4 <- checkF9[!is.na(f9)]
### HERE
qplot(sim$allPixDT[growth_curve_id == rndNumBygcID[1,1] & ages >80,]$ages, geom = "histogram")
library(ggplot2)
distPixWageIn2020 <- qplot(ageNoMatch[pixelIndex %in% allDist$pixelID,]$age2020, geom="histogram")
## check the age distribution of the ageNoMatch for the remaining 89409 pixels
## (8046.81 ha over 280118.2)
noAge2015 <- ageNoMathc[!(pixelIndex %in% allDist$pixelID)]
noAge2015hist <- qplot(noAge2015$ageDT2020, geom="histogram")
## have to bring those to 1985...how many are over 35 (2020-1985)?
noAge2015[ageDT2020>=35,]
#81250. These I can do -5.
# look at the others? Can I buffer around them??
probPix <- noAge2015[ageDT2020<35,]
# double checking if any of these are in the disturbed pixels
probPix[pixelIndex %in% allDist$pixelID,]
# none
# trying to see if I can find pixels around those pixels that are disturbed.
# need to create a raster of these pixels and give them a value
probPixRaster <- raster::raster(masterRaster)
probPixRaster[] <- NA
probPixRaster[probPix$pixelIndex] <- 1
## trying to find the closest pixel with a disturbance in the 1985-2015
## disturbance data.table.
# are there pixels that are disturbed twice in this data?
countDist <- allDist[, .N, by = pixelID]
# yes
# are those in the proPix?
probPix[pixelIndex %in% countDist[N>1,]$pixelID,]
# no
# create a raster with all the disturbances
allDistRaster <- raster::raster(masterRaster)
allDistRaster[] <- NA
allDistRaster[countDist$pixelID] <- 1
# trying focal. This takes a raster and gives me back a raster
f3 <- function(x){
theNAs <- is.na(x)
if (all(theNAs))
NA
else
x[sample(which(!theNAs),1)]
}
f1 <- focal(allDistRaster,
w=matrix(1,nrow=5,ncol=5),
fun = f3)
set(ageDTboth, NULL, "f25", f1[])
checkF25 <- ageDTboth[pixelIndex %in% probPix$pixelIndex,]
checkF25[!is.na(f25),]
#ageDT2015 pixelIndex ageDT2020 pixelIndex f25
# 1: NA 1073507 15 1 1
# 2: NA 1078649 15 1 1
# 3: NA 1081227 15 1 1
# 4: NA 1083788 15 2 1
# 5: NA 1083795 15 1 1
# ---
# 6159: NA 8453863 31 1 1
# 6160: NA 8456433 31 1 1
# 6161: NA 8456435 31 1 1
# 6162: NA 8466726 33 2 1
# 6163: NA 8469298 33 2 1
## trying a little bigger
f49 <- focal(allDistRaster,
w=matrix(1,nrow=7,ncol=7),
fun = f3)
set(ageDTboth, NULL, "f49", f49[])
checkf49 <- ageDTboth[pixelIndex %in% probPix$pixelIndex,]
checkf49[!is.na(f49),]
checkf49[is.na(f49),]
## next to do repeat this but with one disturbance at a time since I have to
## know which disturbance to attribute (1 =fire and 2 = harvest)
## what is up with these last pixels??
lastPix <- checkf49[is.na(f49),]$pixelIndex
sim$allPixDT[pixelIndex %in% lastPix,]
range(sim$allPixDT[pixelIndex %in% lastPix,]$pixelIndex)
lastBad <- NA
allPix <- data.table(sim$allPixDT,lastBad)
allPix$lastBad[which(allPix$pixelIndex %in% lastPix)] <- 1
# Now what? take the values of the ones in the row close by??
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reaction.R
\name{reaction}
\alias{reaction}
\title{Define a reaction}
\usage{
reaction(propensity, effect, name = NA_character_)
}
\arguments{
\item{propensity}{\verb{[character/formula]} A character or formula representation of the propensity function, written in C++.}
\item{effect}{\verb{[named integer vector]} The change in state caused by this reaction.}
\item{name}{\verb{[character]} A name for this reaction (Optional). May only contain characters matching \verb{[A-Za-z0-9_]}.}
}
\value{
\verb{[SSA_reaction]} This object describes a single reaction as part of an SSA simulation. It contains the following member values:
\itemize{
\item \code{r[["propensity"]]}: The propensity function as a character.
\item \code{r[["effect"]]}: The change in state caused by this reaction.
\item \code{r[["name"]]}: The name of the reaction, \code{NA_character_} if no name was provided.
}
}
\description{
During an SSA simulation, at any infinitesimal time interval,
a reaction will occur with a probability defined according to its
propensity. If it does, then it will change the state vector according
to its effects.
}
\details{
It is possible to use 'buffer' values in order to speed up the computation
of the propensity functions. For instance, instead of \code{"(c3 * s1) / (1 + c3 * c1)"},
it is possible to write \code{"buf = c3 * s1; buf / (buf + 1)"} instead.
}
\examples{
# propensity effect
reaction(~ c1 * s1, c(s1 = -1))
reaction("c2 * s1 * s1", c(s1 = -2, s2 = +1))
reaction("buf = c3 * s1; buf / (buf + 1)", c(s1 = +2))
}
|
/man/reaction.Rd
|
no_license
|
rcannood/GillespieSSA2
|
R
| false | true | 1,692 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reaction.R
\name{reaction}
\alias{reaction}
\title{Define a reaction}
\usage{
reaction(propensity, effect, name = NA_character_)
}
\arguments{
\item{propensity}{\verb{[character/formula]} A character or formula representation of the propensity function, written in C++.}
\item{effect}{\verb{[named integer vector]} The change in state caused by this reaction.}
\item{name}{\verb{[character]} A name for this reaction (Optional). May only contain characters matching \verb{[A-Za-z0-9_]}.}
}
\value{
\verb{[SSA_reaction]} This object describes a single reaction as part of an SSA simulation. It contains the following member values:
\itemize{
\item \code{r[["propensity"]]}: The propensity function as a character.
\item \code{r[["effect"]]}: The change in state caused by this reaction.
\item \code{r[["name"]]}: The name of the reaction, \code{NA_character_} if no name was provided.
}
}
\description{
During an SSA simulation, at any infinitesimal time interval,
a reaction will occur with a probability defined according to its
propensity. If it does, then it will change the state vector according
to its effects.
}
\details{
It is possible to use 'buffer' values in order to speed up the computation
of the propensity functions. For instance, instead of \code{"(c3 * s1) / (1 + c3 * c1)"},
it is possible to write \code{"buf = c3 * s1; buf / (buf + 1)"} instead.
}
\examples{
# propensity effect
reaction(~ c1 * s1, c(s1 = -1))
reaction("c2 * s1 * s1", c(s1 = -2, s2 = +1))
reaction("buf = c3 * s1; buf / (buf + 1)", c(s1 = +2))
}
|
# 10.27 In Class Assignment
library(ISLR)
library(MASS)
library(boot)
set.seed(1)
help("sample")
train = sample(392,196)
lm.fit <- lm(mpg~horsepower, data = Auto, subset = train)
attach(Auto)
mean((mpg-predict(lm.fit,Auto))[-train]^2)
lm.fit2 <- lm(mpg~poly(horsepower,2), data = Auto, subset = train) # Quadratic
mean((mpg-predict(lm.fit2,Auto))[-train]^2)
lm.fit3 <- lm(mpg~poly(horsepower,3), data = Auto, subset = train) # Cubic
mean((mpg-predict(lm.fit3,Auto))[-train]^2)
set.seed(2)
train = sample(392,196)
lm.fit <- lm(mpg~horsepower, data = Auto, subset = train)
mean((mpg-predict(lm.fit,Auto))[-train]^2)
# the error rate is 23.29
lm.fit2 <- lm(mpg~poly(horsepower,2), data = Auto, subset = train) # Quadratic
mean((mpg-predict(lm.fit2,Auto))[-train]^2)
# the error rate is 18.90
lm.fit3 <- lm(mpg~poly(horsepower,3), data = Auto, subset = train) # Cubic
mean((mpg-predict(lm.fit3,Auto))[-train]^2)
set.seed(17)
cv.error.10 = rep(0,10) # help("rep")
for(i in 1:10){
glm.fit = glm(mpg ~ poly(horsepower, i), data = Auto)
cv.error.10[i] = cv.glm(Auto,glm.fit, K=10) $delta[1]
}
#Random Forest
install.packages("randomForest")
library(randomForest)
data1<read.csv(file.choose(),header = TRUE)
head(data1)
View(data1)
colnames(data1)<-c("BuyingPrice","Maintenance","NumDoors","NumPersons",
"BootSpace","Safety","Condition")
head(data1)
str(data1)
levels(data1$Condition)
summary(data1)
set.seed(100)
train<-sample(nrow(data1),0.7*nrow(data1),replace=FALSE)
TrainSet<-data1[train,]
ValidSet<-data1[-train,]
summary(TrainSet)
summary(ValidSet)
model1<-randomForest(Condition~.,data = TrainSet,importance = TRUE)
model1
model2<-randomForest(Condition~., data = TrainSet, ntree = 500, mtry = 6, importance=TRUE )
model2
predTrain<-predict(model2,TrainSet,type="class")
table(predTrain, TrainSet$Condition)
predValid<-predict(model2,ValidSet,Type="class")
table(predValid,ValidSet$Condition)
importance(model2)
varImPlot(model2)
a=c()
i=5
for (i in 3:8) {
model3<-randomForest(Condition~., data = TrainSet, ntree = 500, mtry = 6, importance=TRUE)
predValid<-predict(model3, ValidSet,type="class")
a[i-2]=mean(predValid == ValidSet$Condition)
}
a
plot(3:8,a)
library(rpart)
library(caret)
library(e1071)
model_dt <- train(Condition~., data=TrainSet,method = "rpart")
model_dt_1=predict(model_dt,data=TrainSet)
table(model_dt_vs,TrainSet$Contion)
mean(model_dt_vs==TrainSet$Condition)
model_dt_vs = predict(model_dt,newdata = ValidSet)
table(model_dt_vs,ValidSet$Contion)
mean(model_dt_vs==ValidSet$Condition)
|
/group3lab1_randomforest1.R
|
no_license
|
ArnoZhang47/DataAnalytics2020_YuxiangZhang
|
R
| false | false | 2,668 |
r
|
# 10.27 In Class Assignment
library(ISLR)
library(MASS)
library(boot)
set.seed(1)
help("sample")
train = sample(392,196)
lm.fit <- lm(mpg~horsepower, data = Auto, subset = train)
attach(Auto)
mean((mpg-predict(lm.fit,Auto))[-train]^2)
lm.fit2 <- lm(mpg~poly(horsepower,2), data = Auto, subset = train) # Quadratic
mean((mpg-predict(lm.fit2,Auto))[-train]^2)
lm.fit3 <- lm(mpg~poly(horsepower,3), data = Auto, subset = train) # Cubic
mean((mpg-predict(lm.fit3,Auto))[-train]^2)
set.seed(2)
train = sample(392,196)
lm.fit <- lm(mpg~horsepower, data = Auto, subset = train)
mean((mpg-predict(lm.fit,Auto))[-train]^2)
# the error rate is 23.29
lm.fit2 <- lm(mpg~poly(horsepower,2), data = Auto, subset = train) # Quadratic
mean((mpg-predict(lm.fit2,Auto))[-train]^2)
# the error rate is 18.90
lm.fit3 <- lm(mpg~poly(horsepower,3), data = Auto, subset = train) # Cubic
mean((mpg-predict(lm.fit3,Auto))[-train]^2)
set.seed(17)
cv.error.10 = rep(0,10) # help("rep")
for(i in 1:10){
glm.fit = glm(mpg ~ poly(horsepower, i), data = Auto)
cv.error.10[i] = cv.glm(Auto,glm.fit, K=10) $delta[1]
}
#Random Forest
install.packages("randomForest")
library(randomForest)
data1<read.csv(file.choose(),header = TRUE)
head(data1)
View(data1)
colnames(data1)<-c("BuyingPrice","Maintenance","NumDoors","NumPersons",
"BootSpace","Safety","Condition")
head(data1)
str(data1)
levels(data1$Condition)
summary(data1)
set.seed(100)
train<-sample(nrow(data1),0.7*nrow(data1),replace=FALSE)
TrainSet<-data1[train,]
ValidSet<-data1[-train,]
summary(TrainSet)
summary(ValidSet)
model1<-randomForest(Condition~.,data = TrainSet,importance = TRUE)
model1
model2<-randomForest(Condition~., data = TrainSet, ntree = 500, mtry = 6, importance=TRUE )
model2
predTrain<-predict(model2,TrainSet,type="class")
table(predTrain, TrainSet$Condition)
predValid<-predict(model2,ValidSet,Type="class")
table(predValid,ValidSet$Condition)
importance(model2)
varImPlot(model2)
a=c()
i=5
for (i in 3:8) {
model3<-randomForest(Condition~., data = TrainSet, ntree = 500, mtry = 6, importance=TRUE)
predValid<-predict(model3, ValidSet,type="class")
a[i-2]=mean(predValid == ValidSet$Condition)
}
a
plot(3:8,a)
library(rpart)
library(caret)
library(e1071)
model_dt <- train(Condition~., data=TrainSet,method = "rpart")
model_dt_1=predict(model_dt,data=TrainSet)
table(model_dt_vs,TrainSet$Contion)
mean(model_dt_vs==TrainSet$Condition)
model_dt_vs = predict(model_dt,newdata = ValidSet)
table(model_dt_vs,ValidSet$Contion)
mean(model_dt_vs==ValidSet$Condition)
|
# Plot model outputs and fit
library(postjags)
library(ggplot2)
library(dplyr)
library(cowplot)
# logit and antilogit functions
logit <- function(x) {
log(x/(1-x))
}
ilogit <- function(x){
exp(x) / (1 + exp(x))
}
# read in data
load("../../../../cleaned_data/cover_mono.Rdata") # cover_mono
# convert to proportions, relevel species based on fig. 6b from Porensky et al. 2018
dat <- cover_mono %>%
mutate(BRTE = BRTE/100,
intro_forbs = intro_forbs/100,
native_grass = native_grass/100,
native_forbs = native_forbs/100,
forbs = native_forbs + intro_forbs,
species = factor(species, levels = c("ELTR", "POSE", "POFE", "VUMI", "ELEL")))
str(dat)
# Load coda and coda.rep
load(file = "coda/coda.Rdata") # coda.out
load(file = "coda/coda_rep.Rdata") # coda.rep
# summarize
sum.out <- coda.fast(coda.out, OpenBUGS = FALSE)
sum.out$var <- row.names(sum.out)
sum.out$sig <- ifelse(sum.out$pc2.5*sum.out$pc97.5 > 0, TRUE, FALSE)
sum.out$dir <- ifelse(sum.out$sig == FALSE, NA,
ifelse(sum.out$sig == TRUE & sum.out$mean > 0, "pos", "neg"))
#### Create output figures
# All betas
beta.labs <- c("POSE", "POFE", "VUMI", "ELEL", "high", "fall", "spring", "coated",
"POSE:high", "POFE:high", "VUMI:high", "ELEL:high",
"POSE:fall", "POFE:fall", "VUMI:fall", "ELEL:fall",
"POSE:spring", "POFE:spring", "VUMI:spring", "ELEL:spring",
"POSE:coated", "POFE:coated", "VUMI:coated", "ELEL:coated",
"high:fall", "high:spring", "high:coated", "fall:coated", "spring:coated")
beta.ind <- grep("beta", row.names(sum.out))
betas <- sum.out[beta.ind,]
betas$var <- factor(betas$var, levels = row.names(betas))
str(betas)
fig1 <- ggplot() +
geom_pointrange(data = betas,
aes(x = var, y = mean, ymin = pc2.5, ymax = pc97.5),
size = 0.5) +
# geom_point(data = subset(betas, sig == TRUE),
# aes(x = var, y = min(pc2.5) - 0.1, col = dir),
# shape = 8) +
geom_hline(yintercept = 0, lty = 2) +
scale_y_continuous(expression(paste(beta))) +
scale_x_discrete(labels = beta.labs) +
# scale_color_manual(values = c("forestgreen", "purple")) +
theme_bw() +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1),
axis.title.x = element_blank()) +
guides(color = "none")
jpeg(filename = "plots/fig1_betas2.jpg",
width = 6,
height = 3,
units = "in",
res = 600)
print(fig1)
dev.off()
# Random effects
labs <- c("Block 1", "Block 2", "Block 3")
prob.eps <- grep("eps.star", row.names(sum.out))
ggplot(sum.out[prob.eps,], aes(x = var, y = mean)) +
geom_pointrange(aes(ymin = pc2.5, ymax = pc97.5)) +
geom_hline(yintercept = 0, color = "red", lty = 2) +
scale_x_discrete(labels = labs)
# Only main effect betas
beta.labs2 <- c("POSE", "POFE", "VUMI", "ELEL", "high", "fall", "spring", "coated")
beta.ind <- grep("beta", row.names(sum.out))
betas <- sum.out[beta.ind[1:length(beta.labs2)],]
betas$var <- factor(betas$var, levels = row.names(betas))
str(betas)
fig_1a <- ggplot() +
geom_pointrange(data = betas,
aes(x = var, y = mean, ymin = pc2.5, ymax = pc97.5),
size = 0.5) +
geom_point(data = betas,
aes(x = var, y = min(pc2.5) - 0.1, col = dir),
shape = 8) +
geom_hline(yintercept = 0, lty = 2) +
scale_y_continuous(expression(paste(beta))) +
scale_x_discrete(limits = rev(levels(betas$var)), labels = rev(beta.labs2)) +
scale_color_manual(values = c("forestgreen"),
na.value = "transparent") +
coord_flip() +
theme_bw(base_size = 14) +
theme(axis.title.y = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
guides(color = "none")
fig_1a
# Calculate interactions
beta.labs.ints <- c("POSE:high", "POFE:high", "VUMI:high", "ELEL:high",
"POSE:fall", "POFE:fall", "VUMI:fall", "ELEL:fall",
"POSE:spring", "POFE:spring", "VUMI:spring", "ELEL:spring",
"POSE:coated", "POFE:coated", "VUMI:coated", "ELEL:coated",
"high:fall", "high:spring", "high:coated", "fall:coated", "spring:coated")
beta.int.ind <- grep("int_Beta", row.names(sum.out))
beta.ints <- sum.out[beta.int.ind,]
beta.ints$var <- factor(beta.ints$var, levels = row.names(beta.ints))
str(beta.ints)
fig_1b <- ggplot() +
geom_pointrange(data = beta.ints,
aes(x = var, y = mean, ymin = pc2.5, ymax = pc97.5),
size = 0.5) +
geom_point(data = beta.ints,
aes(x = var, y = min(pc2.5) - 0.1, col = dir),
shape = 8) +
geom_hline(yintercept = 0, lty = 2) +
scale_y_continuous(expression(sum(beta))) +
scale_x_discrete(limits = rev(levels(beta.ints$var)), labels = rev(beta.labs.ints)) +
scale_color_manual(values = c("forestgreen"),
na.value = "transparent") +
coord_flip() +
theme_bw(base_size = 14) +
theme(axis.title.y = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
guides(color = "none")
fig_1b
jpeg(filename = "plots/fig1_betas.jpg",
width = 6,
height = 4,
units = "in",
res = 600)
plot_grid(fig_1a, fig_1b, ncol = 2, rel_widths = c(4, 5), labels = "auto")
dev.off()
# Convert to % cover differences (main and interaction effects)
alph <- sum.out[grep("alpha.star", row.names(sum.out)),]
ilogit(alph[,1:5])
base <- dat %>%
filter(grazing == "ungrazed",
species == "ELTR",
seed_rate == "low",
seed_coat == "UC")
hist(base$forbs, breaks = 30)
beta.labs2 <- c("POSE", "POFE", "VUMI", "ELEL", "high", "fall", "spring", "coated")
beta.ind <- grep("Diff_Beta", row.names(sum.out))
betas <- sum.out[beta.ind[1:length(beta.labs2)],]
betas$var <- factor(betas$var, levels = row.names(betas))
str(betas)
fig_2a <- ggplot() +
geom_pointrange(data = betas,
aes(x = var, y = mean, ymin = pc2.5, ymax = pc97.5),
size = 0.5) +
geom_point(data = betas,
aes(x = var, y = min(pc2.5) - 0.01, col = as.factor(dir)),
shape = 8) +
geom_hline(yintercept = 0, lty = 2) +
scale_y_continuous(expression(paste(Delta, " forbs prop. cover"))) +
scale_x_discrete(limits = rev(levels(betas$var)), labels = rev(beta.labs2)) +
scale_color_manual(values = c("forestgreen"),
na.value = "transparent") +
coord_flip() +
theme_bw(base_size = 14) +
theme(axis.title.y = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
guides(color = "none")
fig_2a
beta.labs.ints <- c("POSE:high", "POFE:high", "VUMI:high", "ELEL:high",
"POSE:fall", "POFE:fall", "VUMI:fall", "ELEL:fall",
"POSE:spring", "POFE:spring", "VUMI:spring", "ELEL:spring",
"POSE:coated", "POFE:coated", "VUMI:coated", "ELEL:coated",
"high:fall", "high:spring", "high:coated", "fall:coated", "spring:coated")
beta.int.ind <- grep("diff_Beta", row.names(sum.out))
beta.ints <- sum.out[beta.int.ind,]
beta.ints$var <- factor(beta.ints$var, levels = row.names(beta.ints))
str(beta.ints)
fig_2b <- ggplot() +
geom_pointrange(data = beta.ints,
aes(x = var, y = mean, ymin = pc2.5, ymax = pc97.5),
size = 0.5) +
geom_point(data = beta.ints,
aes(x = var, y = min(pc2.5) - .01, col = as.factor(dir)),
shape = 8) +
geom_hline(yintercept = 0, lty = 2) +
scale_y_continuous(expression(paste(Delta, " forbs prop. cover"))) +
scale_x_discrete(limits = rev(levels(beta.ints$var)), labels = rev(beta.labs.ints)) +
scale_color_manual(values = c("forestgreen"),
na.value = "transparent") +
coord_flip() +
theme_bw(base_size = 14) +
theme(axis.title.y = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
guides(color = "none")
fig_2b
jpeg(filename = "plots/fig2_betas.jpg",
width = 8,
height = 6,
units = "in",
res = 600)
plot_grid(fig_2a, fig_2b, ncol = 2, rel_widths = c(4, 5), labels = "auto")
dev.off()
# Replicated data summary and fit
sum.rep <- coda.fast(coda.rep, OpenBUGS = FALSE)
z.rep <- sum.rep[grep("y.0.rep", row.names(sum.rep)),]
cont.rep <- sum.rep[grep("y.c.rep", row.names(sum.rep)),]
#align
y.temp <- with(dat, ifelse(forbs == 1 | forbs == 0, forbs, NA))
y.0 <- ifelse(is.na(y.temp), 0, 1)
which.0 <- which(y.0 == 1)
which.cont <- which(y.0 == 0)
fit <- rbind.data.frame(cbind(dat[which.0, ],
mean = z.rep$mean[which.0],
median = z.rep$median[which.0],
lower = z.rep$pc2.5[which.0],
upper = z.rep$pc97.5[which.0]),
cbind(dat[which.cont, ],
mean = cont.rep$mean,
median = cont.rep$median,
lower = cont.rep$pc2.5,
upper = cont.rep$pc97.5)
)
fit.model <- lm(mean ~ forbs, data = fit)
summary(fit.model)
ggplot(fit, aes(x = forbs)) +
geom_abline(slope = 1, intercept = 0, col = "red", lty = 2) +
# geom_errorbar(aes(ymin = lower, ymax = upper)) +
geom_point(aes(y = mean))
|
/models/cover/forbs/mono/Forbs_plots.R
|
permissive
|
cct-datascience/rangeland-restore
|
R
| false | false | 9,462 |
r
|
# Plot model outputs and fit
library(postjags)
library(ggplot2)
library(dplyr)
library(cowplot)
# logit and antilogit functions
logit <- function(x) {
log(x/(1-x))
}
ilogit <- function(x){
exp(x) / (1 + exp(x))
}
# read in data
load("../../../../cleaned_data/cover_mono.Rdata") # cover_mono
# convert to proportions, relevel species based on fig. 6b from Porensky et al. 2018
dat <- cover_mono %>%
mutate(BRTE = BRTE/100,
intro_forbs = intro_forbs/100,
native_grass = native_grass/100,
native_forbs = native_forbs/100,
forbs = native_forbs + intro_forbs,
species = factor(species, levels = c("ELTR", "POSE", "POFE", "VUMI", "ELEL")))
str(dat)
# Load coda and coda.rep
load(file = "coda/coda.Rdata") # coda.out
load(file = "coda/coda_rep.Rdata") # coda.rep
# summarize
sum.out <- coda.fast(coda.out, OpenBUGS = FALSE)
sum.out$var <- row.names(sum.out)
sum.out$sig <- ifelse(sum.out$pc2.5*sum.out$pc97.5 > 0, TRUE, FALSE)
sum.out$dir <- ifelse(sum.out$sig == FALSE, NA,
ifelse(sum.out$sig == TRUE & sum.out$mean > 0, "pos", "neg"))
#### Create output figures
# All betas
beta.labs <- c("POSE", "POFE", "VUMI", "ELEL", "high", "fall", "spring", "coated",
"POSE:high", "POFE:high", "VUMI:high", "ELEL:high",
"POSE:fall", "POFE:fall", "VUMI:fall", "ELEL:fall",
"POSE:spring", "POFE:spring", "VUMI:spring", "ELEL:spring",
"POSE:coated", "POFE:coated", "VUMI:coated", "ELEL:coated",
"high:fall", "high:spring", "high:coated", "fall:coated", "spring:coated")
beta.ind <- grep("beta", row.names(sum.out))
betas <- sum.out[beta.ind,]
betas$var <- factor(betas$var, levels = row.names(betas))
str(betas)
fig1 <- ggplot() +
geom_pointrange(data = betas,
aes(x = var, y = mean, ymin = pc2.5, ymax = pc97.5),
size = 0.5) +
# geom_point(data = subset(betas, sig == TRUE),
# aes(x = var, y = min(pc2.5) - 0.1, col = dir),
# shape = 8) +
geom_hline(yintercept = 0, lty = 2) +
scale_y_continuous(expression(paste(beta))) +
scale_x_discrete(labels = beta.labs) +
# scale_color_manual(values = c("forestgreen", "purple")) +
theme_bw() +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1),
axis.title.x = element_blank()) +
guides(color = "none")
jpeg(filename = "plots/fig1_betas2.jpg",
width = 6,
height = 3,
units = "in",
res = 600)
print(fig1)
dev.off()
# Random effects
labs <- c("Block 1", "Block 2", "Block 3")
prob.eps <- grep("eps.star", row.names(sum.out))
ggplot(sum.out[prob.eps,], aes(x = var, y = mean)) +
geom_pointrange(aes(ymin = pc2.5, ymax = pc97.5)) +
geom_hline(yintercept = 0, color = "red", lty = 2) +
scale_x_discrete(labels = labs)
# Only main effect betas
beta.labs2 <- c("POSE", "POFE", "VUMI", "ELEL", "high", "fall", "spring", "coated")
beta.ind <- grep("beta", row.names(sum.out))
betas <- sum.out[beta.ind[1:length(beta.labs2)],]
betas$var <- factor(betas$var, levels = row.names(betas))
str(betas)
fig_1a <- ggplot() +
geom_pointrange(data = betas,
aes(x = var, y = mean, ymin = pc2.5, ymax = pc97.5),
size = 0.5) +
geom_point(data = betas,
aes(x = var, y = min(pc2.5) - 0.1, col = dir),
shape = 8) +
geom_hline(yintercept = 0, lty = 2) +
scale_y_continuous(expression(paste(beta))) +
scale_x_discrete(limits = rev(levels(betas$var)), labels = rev(beta.labs2)) +
scale_color_manual(values = c("forestgreen"),
na.value = "transparent") +
coord_flip() +
theme_bw(base_size = 14) +
theme(axis.title.y = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
guides(color = "none")
fig_1a
# Calculate interactions
beta.labs.ints <- c("POSE:high", "POFE:high", "VUMI:high", "ELEL:high",
"POSE:fall", "POFE:fall", "VUMI:fall", "ELEL:fall",
"POSE:spring", "POFE:spring", "VUMI:spring", "ELEL:spring",
"POSE:coated", "POFE:coated", "VUMI:coated", "ELEL:coated",
"high:fall", "high:spring", "high:coated", "fall:coated", "spring:coated")
beta.int.ind <- grep("int_Beta", row.names(sum.out))
beta.ints <- sum.out[beta.int.ind,]
beta.ints$var <- factor(beta.ints$var, levels = row.names(beta.ints))
str(beta.ints)
fig_1b <- ggplot() +
geom_pointrange(data = beta.ints,
aes(x = var, y = mean, ymin = pc2.5, ymax = pc97.5),
size = 0.5) +
geom_point(data = beta.ints,
aes(x = var, y = min(pc2.5) - 0.1, col = dir),
shape = 8) +
geom_hline(yintercept = 0, lty = 2) +
scale_y_continuous(expression(sum(beta))) +
scale_x_discrete(limits = rev(levels(beta.ints$var)), labels = rev(beta.labs.ints)) +
scale_color_manual(values = c("forestgreen"),
na.value = "transparent") +
coord_flip() +
theme_bw(base_size = 14) +
theme(axis.title.y = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
guides(color = "none")
fig_1b
jpeg(filename = "plots/fig1_betas.jpg",
width = 6,
height = 4,
units = "in",
res = 600)
plot_grid(fig_1a, fig_1b, ncol = 2, rel_widths = c(4, 5), labels = "auto")
dev.off()
# Convert to % cover differences (main and interaction effects)
alph <- sum.out[grep("alpha.star", row.names(sum.out)),]
ilogit(alph[,1:5])
base <- dat %>%
filter(grazing == "ungrazed",
species == "ELTR",
seed_rate == "low",
seed_coat == "UC")
hist(base$forbs, breaks = 30)
beta.labs2 <- c("POSE", "POFE", "VUMI", "ELEL", "high", "fall", "spring", "coated")
beta.ind <- grep("Diff_Beta", row.names(sum.out))
betas <- sum.out[beta.ind[1:length(beta.labs2)],]
betas$var <- factor(betas$var, levels = row.names(betas))
str(betas)
fig_2a <- ggplot() +
geom_pointrange(data = betas,
aes(x = var, y = mean, ymin = pc2.5, ymax = pc97.5),
size = 0.5) +
geom_point(data = betas,
aes(x = var, y = min(pc2.5) - 0.01, col = as.factor(dir)),
shape = 8) +
geom_hline(yintercept = 0, lty = 2) +
scale_y_continuous(expression(paste(Delta, " forbs prop. cover"))) +
scale_x_discrete(limits = rev(levels(betas$var)), labels = rev(beta.labs2)) +
scale_color_manual(values = c("forestgreen"),
na.value = "transparent") +
coord_flip() +
theme_bw(base_size = 14) +
theme(axis.title.y = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
guides(color = "none")
fig_2a
beta.labs.ints <- c("POSE:high", "POFE:high", "VUMI:high", "ELEL:high",
"POSE:fall", "POFE:fall", "VUMI:fall", "ELEL:fall",
"POSE:spring", "POFE:spring", "VUMI:spring", "ELEL:spring",
"POSE:coated", "POFE:coated", "VUMI:coated", "ELEL:coated",
"high:fall", "high:spring", "high:coated", "fall:coated", "spring:coated")
beta.int.ind <- grep("diff_Beta", row.names(sum.out))
beta.ints <- sum.out[beta.int.ind,]
beta.ints$var <- factor(beta.ints$var, levels = row.names(beta.ints))
str(beta.ints)
fig_2b <- ggplot() +
geom_pointrange(data = beta.ints,
aes(x = var, y = mean, ymin = pc2.5, ymax = pc97.5),
size = 0.5) +
geom_point(data = beta.ints,
aes(x = var, y = min(pc2.5) - .01, col = as.factor(dir)),
shape = 8) +
geom_hline(yintercept = 0, lty = 2) +
scale_y_continuous(expression(paste(Delta, " forbs prop. cover"))) +
scale_x_discrete(limits = rev(levels(beta.ints$var)), labels = rev(beta.labs.ints)) +
scale_color_manual(values = c("forestgreen"),
na.value = "transparent") +
coord_flip() +
theme_bw(base_size = 14) +
theme(axis.title.y = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
guides(color = "none")
fig_2b
jpeg(filename = "plots/fig2_betas.jpg",
width = 8,
height = 6,
units = "in",
res = 600)
plot_grid(fig_2a, fig_2b, ncol = 2, rel_widths = c(4, 5), labels = "auto")
dev.off()
# Replicated data summary and fit
sum.rep <- coda.fast(coda.rep, OpenBUGS = FALSE)
z.rep <- sum.rep[grep("y.0.rep", row.names(sum.rep)),]
cont.rep <- sum.rep[grep("y.c.rep", row.names(sum.rep)),]
#align
y.temp <- with(dat, ifelse(forbs == 1 | forbs == 0, forbs, NA))
y.0 <- ifelse(is.na(y.temp), 0, 1)
which.0 <- which(y.0 == 1)
which.cont <- which(y.0 == 0)
fit <- rbind.data.frame(cbind(dat[which.0, ],
mean = z.rep$mean[which.0],
median = z.rep$median[which.0],
lower = z.rep$pc2.5[which.0],
upper = z.rep$pc97.5[which.0]),
cbind(dat[which.cont, ],
mean = cont.rep$mean,
median = cont.rep$median,
lower = cont.rep$pc2.5,
upper = cont.rep$pc97.5)
)
fit.model <- lm(mean ~ forbs, data = fit)
summary(fit.model)
ggplot(fit, aes(x = forbs)) +
geom_abline(slope = 1, intercept = 0, col = "red", lty = 2) +
# geom_errorbar(aes(ymin = lower, ymax = upper)) +
geom_point(aes(y = mean))
|
#==============Load the packages================
library(tseries)
library(vars)
library(tidyverse)
library(e1071)
#==============Load the data====================
setwd('D:/Fall Semester/SYS6018/sysproject/Data')
corn<-read.csv('Corn.csv')
source('Metrics.R')
#==============Data Transformations==============
corn2<-corn
corn2$name<-NULL
corn2$date<-NULL
corn2$ind_manager_spread<-NULL
corn2$ind_other_report_spread<-NULL
corn2$ind_swap_spread<-as.factor(corn2$ind_swap_spread)
#==================ADF Tests=====================
adf.test(corn2$OI, alternative="stationary")
tOI<-diff(log10(corn2$OI))
adf.test(corn2$merchant_long_OI, alternative="stationary")
adf.test(corn2$merchant_short_OI, alternative="stationary")
adf.test(corn2$swap_long_OI, alternative="stationary")
tswap_long_OI<-diff(log10(corn2$swap_long_OI))
adf.test(corn2$swap_short_OI, alternative="stationary")
corn2$swap_short_OI<-NULL
adf.test(corn2$swap_spread_OI, alternative="stationary")
tswap_spread_OI<-diff(log10(corn2$swap_spread_OI))
adf.test(corn2$manager_long_OI, alternative="stationary")
adf.test(corn2$manager_short_OI, alternative="stationary")
adf.test(corn2$manager_spread_OI, alternative="stationary")
adf.test(corn2$otherreportable_long_OI, alternative="stationary")
totherreportable_long_OI<-diff(log10(corn2$otherreportable_long_OI))
adf.test(corn2$otherreportable_short_OI, alternative="stationary")
adf.test(corn2$low_volume_traders_long, alternative="stationary")
tlow_volume_traders_long<-diff(log10(corn2$low_volume_traders_long))
adf.test(corn2$low_volume_traders_short, alternative="stationary")
tlow_volume_traders_short<-diff(log10(corn2$low_volume_traders_short))
adf.test(corn2$big8_long, alternative="stationary")
adf.test(corn2$big8_short, alternative="stationary")
adf.test(corn2$weekly_change, alternative="stationary")
#=======Transforming Variables==================
corn2<-corn2[-c(1),]
corn2$OI<-tOI
corn2$swap_long_OI<-tswap_long_OI
corn2$swap_spread_OI<-tswap_spread_OI
corn2$otherreportable_long_OI<-totherreportable_long_OI
corn2$low_volume_traders_short<-tlow_volume_traders_short
corn2$low_volume_traders_long<-tlow_volume_traders_long
#==============Predictions==================
train<-corn2[seq(105:591),]
test<-corn2[seq(1:104),]
svm_corn <- svm(weekly_change~., train)
summary(svm_corn)
pred_corn <- predict(svm_corn, test)
plot(test$weekly_change,pch=16) #Plot the dataset
points(test$weekly_change, pred_corn, col = "blue", pch=4)
act<-corn[1:104,]$weekly_change
metrics(pred_corn, act)
|
/SYS6018 Final Project/Individual Product Models/Group 1 products (time series models also performed)/SVM/cornsvm.R
|
no_license
|
alizaidia/SysProject
|
R
| false | false | 2,569 |
r
|
#==============Load the packages================
library(tseries)
library(vars)
library(tidyverse)
library(e1071)
#==============Load the data====================
setwd('D:/Fall Semester/SYS6018/sysproject/Data')
corn<-read.csv('Corn.csv')
source('Metrics.R')
#==============Data Transformations==============
corn2<-corn
corn2$name<-NULL
corn2$date<-NULL
corn2$ind_manager_spread<-NULL
corn2$ind_other_report_spread<-NULL
corn2$ind_swap_spread<-as.factor(corn2$ind_swap_spread)
#==================ADF Tests=====================
adf.test(corn2$OI, alternative="stationary")
tOI<-diff(log10(corn2$OI))
adf.test(corn2$merchant_long_OI, alternative="stationary")
adf.test(corn2$merchant_short_OI, alternative="stationary")
adf.test(corn2$swap_long_OI, alternative="stationary")
tswap_long_OI<-diff(log10(corn2$swap_long_OI))
adf.test(corn2$swap_short_OI, alternative="stationary")
corn2$swap_short_OI<-NULL
adf.test(corn2$swap_spread_OI, alternative="stationary")
tswap_spread_OI<-diff(log10(corn2$swap_spread_OI))
adf.test(corn2$manager_long_OI, alternative="stationary")
adf.test(corn2$manager_short_OI, alternative="stationary")
adf.test(corn2$manager_spread_OI, alternative="stationary")
adf.test(corn2$otherreportable_long_OI, alternative="stationary")
totherreportable_long_OI<-diff(log10(corn2$otherreportable_long_OI))
adf.test(corn2$otherreportable_short_OI, alternative="stationary")
adf.test(corn2$low_volume_traders_long, alternative="stationary")
tlow_volume_traders_long<-diff(log10(corn2$low_volume_traders_long))
adf.test(corn2$low_volume_traders_short, alternative="stationary")
tlow_volume_traders_short<-diff(log10(corn2$low_volume_traders_short))
adf.test(corn2$big8_long, alternative="stationary")
adf.test(corn2$big8_short, alternative="stationary")
adf.test(corn2$weekly_change, alternative="stationary")
#=======Transforming Variables==================
corn2<-corn2[-c(1),]
corn2$OI<-tOI
corn2$swap_long_OI<-tswap_long_OI
corn2$swap_spread_OI<-tswap_spread_OI
corn2$otherreportable_long_OI<-totherreportable_long_OI
corn2$low_volume_traders_short<-tlow_volume_traders_short
corn2$low_volume_traders_long<-tlow_volume_traders_long
#==============Predictions==================
train<-corn2[seq(105:591),]
test<-corn2[seq(1:104),]
svm_corn <- svm(weekly_change~., train)
summary(svm_corn)
pred_corn <- predict(svm_corn, test)
plot(test$weekly_change,pch=16) #Plot the dataset
points(test$weekly_change, pred_corn, col = "blue", pch=4)
act<-corn[1:104,]$weekly_change
metrics(pred_corn, act)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funcs_add_new_data_series.R
\name{rbinddatasetNMR}
\alias{rbinddatasetNMR}
\title{Row-bind two datasets for NMR with slight changes, check duplicated keys, set
order}
\usage{
rbinddatasetNMR(dt_master, dt_new)
}
\arguments{
\item{dt_master}{master dataset}
\item{dt_new}{new entries}
}
\value{
`dt1` as `rbind(dt_master, dt_new)`
}
\description{
Row-bind two datasets for NMR with slight changes, check duplicated keys, set
order
}
|
/man/rbinddatasetNMR.Rd
|
permissive
|
unicef-drp/CME.assistant
|
R
| false | true | 511 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funcs_add_new_data_series.R
\name{rbinddatasetNMR}
\alias{rbinddatasetNMR}
\title{Row-bind two datasets for NMR with slight changes, check duplicated keys, set
order}
\usage{
rbinddatasetNMR(dt_master, dt_new)
}
\arguments{
\item{dt_master}{master dataset}
\item{dt_new}{new entries}
}
\value{
`dt1` as `rbind(dt_master, dt_new)`
}
\description{
Row-bind two datasets for NMR with slight changes, check duplicated keys, set
order
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/has_intercept.R
\name{has_intercept}
\alias{has_intercept}
\title{Find out whether a model includes an intercept}
\usage{
has_intercept(model)
}
\arguments{
\item{model}{a model object.}
}
\value{
logical. \code{TRUE} if the intercept is present,
\code{FALSE} otherwise.
}
\description{
The function tests whether an intercept is present in a model.
}
\keyword{internal}
|
/man/has_intercept.Rd
|
no_license
|
RymerLab/remef
|
R
| false | true | 457 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/has_intercept.R
\name{has_intercept}
\alias{has_intercept}
\title{Find out whether a model includes an intercept}
\usage{
has_intercept(model)
}
\arguments{
\item{model}{a model object.}
}
\value{
logical. \code{TRUE} if the intercept is present,
\code{FALSE} otherwise.
}
\description{
The function tests whether an intercept is present in a model.
}
\keyword{internal}
|
getComposingPrimes <- function (primes, vec.primes)
{
if(is.null(primes))
return(NULL)
a <- strsplit(primes, split = " & ")
b <- strsplit(vec.primes, split = " & ")
d <- numeric(length(vec.primes))
for (i in 1:length(primes)){
d <- d + sapply(b, function (x, y) ifelse(
all(x %in% y) && (length(x) != length(y)),
1, 0), y = a[[i]])
}
if(sum(d) > 0)
out <- vec.primes[d > 0]
else
out <- NULL
out
}
|
/R/getComposingPrimes.R
|
no_license
|
holgerschw/logicFS
|
R
| false | false | 443 |
r
|
getComposingPrimes <- function (primes, vec.primes)
{
if(is.null(primes))
return(NULL)
a <- strsplit(primes, split = " & ")
b <- strsplit(vec.primes, split = " & ")
d <- numeric(length(vec.primes))
for (i in 1:length(primes)){
d <- d + sapply(b, function (x, y) ifelse(
all(x %in% y) && (length(x) != length(y)),
1, 0), y = a[[i]])
}
if(sum(d) > 0)
out <- vec.primes[d > 0]
else
out <- NULL
out
}
|
### Code for reading data and plot1
library(dplyr)
library(pryr)
library(data.table)
library(tidyr)
library(lubridate)
# Download of data and keep under the directory-----------------------------------------------
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
f <- file.path(getwd(), "2Fhousehold_power_consumption.zip")
download.file(url, f)
unzip(f)
# Fetcing the power consumption data------------------------------------
pwrconsp <- read.table("household_power_consumption.txt",header = TRUE,sep = ";", stringsAsFactors = FALSE)
object_size(pwrconsp)
pwrconsp <- pwrconsp %>% mutate (Date_Time = as.POSIXct(dmy_hms(as.character(paste(Date,Time)))))
pwrconsp$Date <- dmy(pwrconsp$Date)
pwrconsp$Date <- as.Date(as.POSIXct(pwrconsp$Date, origin = "01-01-2000", tz = "GMT"))
#pwrconsp$Time <- hms(pwrconsp$Time)
pwrconsp$Global_active_power <- gsub("?","",pwrconsp$Global_active_power)
pwrconsp$Global_reactive_power <- gsub("?","",pwrconsp$Global_reactive_power)
pwrconsp$Global_intensity <- gsub("?","",pwrconsp$Global_intensity)
pwrconsp$Sub_metering_1 <- gsub("?","",pwrconsp$Sub_metering_1)
pwrconsp$Sub_metering_2 <- gsub("?","",pwrconsp$Sub_metering_2)
pwrconsp$Sub_metering_3 <- gsub("?","",pwrconsp$Sub_metering_3)
pwrconsp$Global_active_power <- as.numeric(pwrconsp$Global_active_power,na.rm = TRUE)
pwrconsp$Global_reactive_power <- as.numeric(pwrconsp$Global_reactive_power,na.rm = TRUE)
pwrconsp$Global_intensity <- as.numeric(pwrconsp$Global_intensity,na.rm = TRUE)
pwrconsp$Sub_metering_1 <- as.numeric(pwrconsp$Sub_metering_1,na.rm = TRUE)
pwrconsp$Sub_metering_2 <- as.numeric(pwrconsp$Sub_metering_2,na.rm = TRUE)
pwrconsp$Sub_metering_3 <- as.numeric(pwrconsp$Sub_metering_3,na.rm = TRUE)
pwrconspsdate <- pwrconsp %>% filter(Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02"))
# plotting plot3.png
png(filename = "plot3.png",width = 480, height = 480)
with(pwrconspsdate, plot(Date_Time,Sub_metering_1,col = 'black', type="l", xlab = "",ylab = "Energy sub metering"))
with(pwrconspsdate, points(Date_Time,Sub_metering_2,col = 'red', type="l"))
with(pwrconspsdate, points(Date_Time,Sub_metering_3,col = 'blue', type="l"))
legend("topright", lty=1, col = c("black", "red", "blue"),
legend = c("Sub_Metering_1", "Sub_Metering_2", "Sub_Metering_3"))
dev.off()
|
/plot3.r
|
no_license
|
karthyram/ExData_Plotting1
|
R
| false | false | 2,389 |
r
|
### Code for reading data and plot1
library(dplyr)
library(pryr)
library(data.table)
library(tidyr)
library(lubridate)
# Download of data and keep under the directory-----------------------------------------------
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
f <- file.path(getwd(), "2Fhousehold_power_consumption.zip")
download.file(url, f)
unzip(f)
# Fetcing the power consumption data------------------------------------
pwrconsp <- read.table("household_power_consumption.txt",header = TRUE,sep = ";", stringsAsFactors = FALSE)
object_size(pwrconsp)
pwrconsp <- pwrconsp %>% mutate (Date_Time = as.POSIXct(dmy_hms(as.character(paste(Date,Time)))))
pwrconsp$Date <- dmy(pwrconsp$Date)
pwrconsp$Date <- as.Date(as.POSIXct(pwrconsp$Date, origin = "01-01-2000", tz = "GMT"))
#pwrconsp$Time <- hms(pwrconsp$Time)
pwrconsp$Global_active_power <- gsub("?","",pwrconsp$Global_active_power)
pwrconsp$Global_reactive_power <- gsub("?","",pwrconsp$Global_reactive_power)
pwrconsp$Global_intensity <- gsub("?","",pwrconsp$Global_intensity)
pwrconsp$Sub_metering_1 <- gsub("?","",pwrconsp$Sub_metering_1)
pwrconsp$Sub_metering_2 <- gsub("?","",pwrconsp$Sub_metering_2)
pwrconsp$Sub_metering_3 <- gsub("?","",pwrconsp$Sub_metering_3)
pwrconsp$Global_active_power <- as.numeric(pwrconsp$Global_active_power,na.rm = TRUE)
pwrconsp$Global_reactive_power <- as.numeric(pwrconsp$Global_reactive_power,na.rm = TRUE)
pwrconsp$Global_intensity <- as.numeric(pwrconsp$Global_intensity,na.rm = TRUE)
pwrconsp$Sub_metering_1 <- as.numeric(pwrconsp$Sub_metering_1,na.rm = TRUE)
pwrconsp$Sub_metering_2 <- as.numeric(pwrconsp$Sub_metering_2,na.rm = TRUE)
pwrconsp$Sub_metering_3 <- as.numeric(pwrconsp$Sub_metering_3,na.rm = TRUE)
pwrconspsdate <- pwrconsp %>% filter(Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02"))
# plotting plot3.png
png(filename = "plot3.png",width = 480, height = 480)
with(pwrconspsdate, plot(Date_Time,Sub_metering_1,col = 'black', type="l", xlab = "",ylab = "Energy sub metering"))
with(pwrconspsdate, points(Date_Time,Sub_metering_2,col = 'red', type="l"))
with(pwrconspsdate, points(Date_Time,Sub_metering_3,col = 'blue', type="l"))
legend("topright", lty=1, col = c("black", "red", "blue"),
legend = c("Sub_Metering_1", "Sub_Metering_2", "Sub_Metering_3"))
dev.off()
|
library("nleqslv")
# Dennis & Schnabel,1996,"Numerical methods for unconstrained optimization and nonlinear equations", SIAM
# example 6.5.1 page 149
dslnex <- function(x) {
y <- numeric(2)
y[1] <- x[1]^2 + x[2]^2 - 2
y[2] <- exp(x[1]-1) + x[2]^3 - 2
y
}
xstart <- c(2,0.5)
fstart <- dslnex(xstart)
xstart
fstart
do.print.xf <- TRUE
print.result <- function(z) {
if( do.print.xf ) {
print(z$x)
print(z$fvec)
}
print(z$message)
print(all(abs(z$fvec)<=1e-8))
}
sink("dslnexCN-num.txt")
for( z in c("dbldog","pwldog") ) { # double dogleg, Powell (single) dogleg
for( delta in c(-1.0, -2.0) ) { # Cauchy step , Newton step
znlq <- nleqslv(xstart, dslnex, global=z, control=list(btol=.01,delta=delta, trace=1))
print.result(znlq)
}
}
sink()
sink("dslnexCN-char.txt")
for( z in c("dbldog","pwldog") ) { # double dogleg, Powell (single) dogleg
for( delta in c("cauchy", "newton") ) { # Cauchy step , Newton step
znlq <- nleqslv(xstart, dslnex, global=z, control=list(btol=.01,delta=delta,trace=1))
print.result(znlq)
}
}
sink()
z1 <- readLines(con="dslnexCN-num.txt")
z2 <- readLines(con="dslnexCN-char.txt")
all.equal(z1,z2)
|
/tests/dslnexCN.R
|
no_license
|
cran/nleqslv
|
R
| false | false | 1,228 |
r
|
library("nleqslv")
# Dennis & Schnabel,1996,"Numerical methods for unconstrained optimization and nonlinear equations", SIAM
# example 6.5.1 page 149
dslnex <- function(x) {
y <- numeric(2)
y[1] <- x[1]^2 + x[2]^2 - 2
y[2] <- exp(x[1]-1) + x[2]^3 - 2
y
}
xstart <- c(2,0.5)
fstart <- dslnex(xstart)
xstart
fstart
do.print.xf <- TRUE
print.result <- function(z) {
if( do.print.xf ) {
print(z$x)
print(z$fvec)
}
print(z$message)
print(all(abs(z$fvec)<=1e-8))
}
sink("dslnexCN-num.txt")
for( z in c("dbldog","pwldog") ) { # double dogleg, Powell (single) dogleg
for( delta in c(-1.0, -2.0) ) { # Cauchy step , Newton step
znlq <- nleqslv(xstart, dslnex, global=z, control=list(btol=.01,delta=delta, trace=1))
print.result(znlq)
}
}
sink()
sink("dslnexCN-char.txt")
for( z in c("dbldog","pwldog") ) { # double dogleg, Powell (single) dogleg
for( delta in c("cauchy", "newton") ) { # Cauchy step , Newton step
znlq <- nleqslv(xstart, dslnex, global=z, control=list(btol=.01,delta=delta,trace=1))
print.result(znlq)
}
}
sink()
z1 <- readLines(con="dslnexCN-num.txt")
z2 <- readLines(con="dslnexCN-char.txt")
all.equal(z1,z2)
|
#' Create PNG file from plottable object
#'
#' \code{to_pdf} plots an object directly into one ore multiple PDF files.
#'
#' @param x object to plot.
#' @param name name used for output file(s), might be extended if multiple plots
#' are generated and no ending is needed (e.g. 'my_file').
#' @param ... further options for specific methods.
#'
#' @export
to_png <- function(x, name, ...) {
UseMethod("to_png", x)
}
#' For a \code{\link{describr}} or \code{describrGtable} object,
#' the object it first turned into a
#' plottable object respecting the given size constraints (potential split-up)
#' and then plotted to pmg using \code{grid.draw()}.
#'
#' @inheritParams to_png
#' @param maxwidth maximal plotting width in inches (plain numeric).
#' @param maxheight maximal plotting heigth in inches (plain numeric).
#' @param dpi dots per inch
#'
#' @name to_png
#' @export
to_png.describr <- function(x, name, maxwidth = Inf, maxheight = Inf, dpi = 96, ...) {
as_gtable(x) %>%
optimize_columnwidths(maxwidth = unit(maxwidth, "in")) %>%
split_pages(maxheight = unit(maxheight, "in")) ->
grob_list
for (i in 1:length(grob_list)) {
png(
sprintf("%s_%i.png", name, i),
width = ceiling(dpi * convertUnit(sum(grob_list[[i]]$widths), "in", valueOnly = TRUE)),
height = ceiling(dpi * convertUnit(sum(grob_list[[i]]$heights), "in", valueOnly = TRUE))
)
grid.draw(grob_list[[i]])
dev.off()
}
}
#' @inheritParams to_png.describr
#'
#' @name to_png
#' @export
to_png.describrGtable <- function(x, name, maxwidth = Inf, maxheight = Inf, dpi = 96, ...) {
width <- convertUnit(x, "in", valueOnly = TRUE)
if (width <= maxwidth) {
x %>%
split_pages(maxheight = unit(maxheight, "in")) ->
grob_list
} else {
x %>%
optimize_columnwidths(maxwidth = unit(maxwidth, "in")) %>%
split_pages(maxheight = unit(maxheight, "in")) ->
grob_list
}
for (i in 1:length(grob_list)) {
pdf(
sprintf("%s_%i.png", name, i),
width = ceiling(dpi * convertUnit(sum(grob_list[[i]]$widths), "in", valueOnly = TRUE)),
height = ceiling(dpi * convertUnit(sum(grob_list[[i]]$heights), "in", valueOnly = TRUE))
)
grid.draw(grob_list[[i]])
dev.off()
}
}
#' For a list of \code{describrGtable} objects they are plotted directly to
#' png files of suitable size.
#'
#' @inheritParams to_png
#'
#' @name to_png
#' @export
to_png.list <- function(x, name, dpi = 96, ...) {
for (i in 1:length(x)) {
png(
sprintf("%s_%i.png", name, i),
width = ceiling(dpi * convertUnit(sum(grob_list[[i]]$widths), "in", valueOnly = TRUE)),
height = ceiling(dpi * convertUnit(sum(grob_list[[i]]$heights), "in", valueOnly = TRUE))
)
grid.draw(x[[i]])
dev.off()
}
}
|
/R/to_png.R
|
permissive
|
kkmann/describr
|
R
| false | false | 2,919 |
r
|
#' Create PNG file from plottable object
#'
#' \code{to_pdf} plots an object directly into one ore multiple PDF files.
#'
#' @param x object to plot.
#' @param name name used for output file(s), might be extended if multiple plots
#' are generated and no ending is needed (e.g. 'my_file').
#' @param ... further options for specific methods.
#'
#' @export
to_png <- function(x, name, ...) {
UseMethod("to_png", x)
}
#' For a \code{\link{describr}} or \code{describrGtable} object,
#' the object it first turned into a
#' plottable object respecting the given size constraints (potential split-up)
#' and then plotted to pmg using \code{grid.draw()}.
#'
#' @inheritParams to_png
#' @param maxwidth maximal plotting width in inches (plain numeric).
#' @param maxheight maximal plotting heigth in inches (plain numeric).
#' @param dpi dots per inch
#'
#' @name to_png
#' @export
to_png.describr <- function(x, name, maxwidth = Inf, maxheight = Inf, dpi = 96, ...) {
as_gtable(x) %>%
optimize_columnwidths(maxwidth = unit(maxwidth, "in")) %>%
split_pages(maxheight = unit(maxheight, "in")) ->
grob_list
for (i in 1:length(grob_list)) {
png(
sprintf("%s_%i.png", name, i),
width = ceiling(dpi * convertUnit(sum(grob_list[[i]]$widths), "in", valueOnly = TRUE)),
height = ceiling(dpi * convertUnit(sum(grob_list[[i]]$heights), "in", valueOnly = TRUE))
)
grid.draw(grob_list[[i]])
dev.off()
}
}
#' @inheritParams to_png.describr
#'
#' @name to_png
#' @export
to_png.describrGtable <- function(x, name, maxwidth = Inf, maxheight = Inf, dpi = 96, ...) {
width <- convertUnit(x, "in", valueOnly = TRUE)
if (width <= maxwidth) {
x %>%
split_pages(maxheight = unit(maxheight, "in")) ->
grob_list
} else {
x %>%
optimize_columnwidths(maxwidth = unit(maxwidth, "in")) %>%
split_pages(maxheight = unit(maxheight, "in")) ->
grob_list
}
for (i in 1:length(grob_list)) {
pdf(
sprintf("%s_%i.png", name, i),
width = ceiling(dpi * convertUnit(sum(grob_list[[i]]$widths), "in", valueOnly = TRUE)),
height = ceiling(dpi * convertUnit(sum(grob_list[[i]]$heights), "in", valueOnly = TRUE))
)
grid.draw(grob_list[[i]])
dev.off()
}
}
#' For a list of \code{describrGtable} objects they are plotted directly to
#' png files of suitable size.
#'
#' @inheritParams to_png
#'
#' @name to_png
#' @export
to_png.list <- function(x, name, dpi = 96, ...) {
for (i in 1:length(x)) {
png(
sprintf("%s_%i.png", name, i),
width = ceiling(dpi * convertUnit(sum(grob_list[[i]]$widths), "in", valueOnly = TRUE)),
height = ceiling(dpi * convertUnit(sum(grob_list[[i]]$heights), "in", valueOnly = TRUE))
)
grid.draw(x[[i]])
dev.off()
}
}
|
##
## Write the result of a Redshift sql query to a dataframe
##
library(RPostgreSQL)
library(uuid)
ReadSqlResults <- function(con, sql.query, aws.access.key, aws.secret.key, s3.bucket, redshift.iam.role) {
path <- paste0("data/tmp/", UUIDgenerate())
print("Running SQL query")
# Escape single quotes in the query
escaped.sql.query <- gsub("[\r\n]", "", sql.query)
escaped.sql.query <- gsub("'", "\\\\'", escaped.sql.query)
# Unload query result to s3
unload.query <- paste0("UNLOAD ('", escaped.sql.query, "') TO 's3://", s3.bucket, "/", path, "' iam_role '", redshift.iam.role, "' HEADER DELIMITER AS ',' GZIP ALLOWOVERWRITE ADDQUOTES;")
data <- dbGetQuery(con, unload.query)
# Download dumps from s3 using aws cli
cp.cmd <- paste0("AWS_ACCESS_KEY_ID=", aws.access.key, " AWS_SECRET_ACCESS_KEY=", aws.secret.key," aws s3 cp s3://", s3.bucket,"/ . --recursive --exclude '*' --include '", path, "*'")
system(cp.cmd)
# Merge the dumps in a single dataframe
print("Reading csv dumps")
files <- list.files(path=dirname(path), full.names=TRUE, pattern = paste0(basename(path), ".*.gz"))
datalist <- lapply(files, function(x){read.csv(file=x, sep = ",", quote = '"', header=TRUE)})
print("Merging csv dumps in a single dataframe")
dataset <- do.call(rbind, datalist)
unlink(files)
return(dataset)
}
|
/db.R
|
no_license
|
abrenaut/redshift-r-quick-read
|
R
| false | false | 1,340 |
r
|
##
## Write the result of a Redshift sql query to a dataframe
##
library(RPostgreSQL)
library(uuid)
ReadSqlResults <- function(con, sql.query, aws.access.key, aws.secret.key, s3.bucket, redshift.iam.role) {
path <- paste0("data/tmp/", UUIDgenerate())
print("Running SQL query")
# Escape single quotes in the query
escaped.sql.query <- gsub("[\r\n]", "", sql.query)
escaped.sql.query <- gsub("'", "\\\\'", escaped.sql.query)
# Unload query result to s3
unload.query <- paste0("UNLOAD ('", escaped.sql.query, "') TO 's3://", s3.bucket, "/", path, "' iam_role '", redshift.iam.role, "' HEADER DELIMITER AS ',' GZIP ALLOWOVERWRITE ADDQUOTES;")
data <- dbGetQuery(con, unload.query)
# Download dumps from s3 using aws cli
cp.cmd <- paste0("AWS_ACCESS_KEY_ID=", aws.access.key, " AWS_SECRET_ACCESS_KEY=", aws.secret.key," aws s3 cp s3://", s3.bucket,"/ . --recursive --exclude '*' --include '", path, "*'")
system(cp.cmd)
# Merge the dumps in a single dataframe
print("Reading csv dumps")
files <- list.files(path=dirname(path), full.names=TRUE, pattern = paste0(basename(path), ".*.gz"))
datalist <- lapply(files, function(x){read.csv(file=x, sep = ",", quote = '"', header=TRUE)})
print("Merging csv dumps in a single dataframe")
dataset <- do.call(rbind, datalist)
unlink(files)
return(dataset)
}
|
## Matrix inversion is usually a costly computation and there
## may be some benefit to caching the inverse of a matrix rather
## than compute it repeatedly.
## This function creates a special "matrix" object that can
## cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
m<-NULL
set<-function(y){
x<<-y
m<<-NULL
}
get<-function() x
setinverse<-function(inverse) m<<-inverse
getinverse<-function() m
list(set=set,get=get,setinverse=setinverse,getinverse=getinverse)
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above.
cacheSolve <- function(x, ...) {
m<-x$getinverse()
if(!is.null(m)){
message("getting cached data")
return(m)
}
data<-x$get()
m<-solve(data,...)
x$setinverse(m)
m
}
|
/cachematrix.R
|
no_license
|
irismeng/ProgrammingAssignment2
|
R
| false | false | 833 |
r
|
## Matrix inversion is usually a costly computation and there
## may be some benefit to caching the inverse of a matrix rather
## than compute it repeatedly.
## This function creates a special "matrix" object that can
## cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
m<-NULL
set<-function(y){
x<<-y
m<<-NULL
}
get<-function() x
setinverse<-function(inverse) m<<-inverse
getinverse<-function() m
list(set=set,get=get,setinverse=setinverse,getinverse=getinverse)
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above.
cacheSolve <- function(x, ...) {
m<-x$getinverse()
if(!is.null(m)){
message("getting cached data")
return(m)
}
data<-x$get()
m<-solve(data,...)
x$setinverse(m)
m
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ineqQuantile.R
\name{run_optim_LC}
\alias{run_optim_LC}
\title{Optimisation of a parametric Lorenz curve for several functional forms and several areas}
\usage{
run_optim_LC(ID, income_cum, population_cum, function_form)
}
\arguments{
\item{ID}{vector of ID area}
\item{income_cum}{numeric, vector of cumulaive income shares}
\item{population_cum}{numeric, vector of cumulative population shares}
\item{function_form}{string, functional form in "KP", "RGKO", "ARNOLD", "CHOTIKAPANICH", "SARABIA", "ORTEGA" and "ROHDE"}
}
\value{
A dataframe with the functional forms, the parameters, the value of the NLS and the Chi-squared statistic.
}
\description{
Optimisation of a parametric Lorenz curve for several functional forms and several areas
}
\examples{
data("tabulated_income")
LC_tabulated_income = run_compute_LC(ID=tabulated_income$ID, p=tabulated_income$prop_cum_population, bound_min = tabulated_income$bound_min, bound_max = tabulated_income$bound_max, nb = tabulated_income$prop_population, method = "CondExp")
run_optim_LC(ID = unique(LC_tabulated_income$ID),income_cum = LC_tabulated_income$income_cum, population_cum=LC_tabulated_income$population_cum, function_form = c("KP", "RGKO", "ARNOLD", "CHOTIKAPANICH", "SARABIA", "ORTEGA", "ROHDE"))
}
\references{
Belz (2019), \emph{Estimating Inequality Measures from Quantile Data} \url{https://halshs.archives-ouvertes.fr/halshs-02320110}
}
|
/man/run_optim_LC.Rd
|
no_license
|
EnoraBelz/Inequality
|
R
| false | true | 1,480 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ineqQuantile.R
\name{run_optim_LC}
\alias{run_optim_LC}
\title{Optimisation of a parametric Lorenz curve for several functional forms and several areas}
\usage{
run_optim_LC(ID, income_cum, population_cum, function_form)
}
\arguments{
\item{ID}{vector of ID area}
\item{income_cum}{numeric, vector of cumulaive income shares}
\item{population_cum}{numeric, vector of cumulative population shares}
\item{function_form}{string, functional form in "KP", "RGKO", "ARNOLD", "CHOTIKAPANICH", "SARABIA", "ORTEGA" and "ROHDE"}
}
\value{
A dataframe with the functional forms, the parameters, the value of the NLS and the Chi-squared statistic.
}
\description{
Optimisation of a parametric Lorenz curve for several functional forms and several areas
}
\examples{
data("tabulated_income")
LC_tabulated_income = run_compute_LC(ID=tabulated_income$ID, p=tabulated_income$prop_cum_population, bound_min = tabulated_income$bound_min, bound_max = tabulated_income$bound_max, nb = tabulated_income$prop_population, method = "CondExp")
run_optim_LC(ID = unique(LC_tabulated_income$ID),income_cum = LC_tabulated_income$income_cum, population_cum=LC_tabulated_income$population_cum, function_form = c("KP", "RGKO", "ARNOLD", "CHOTIKAPANICH", "SARABIA", "ORTEGA", "ROHDE"))
}
\references{
Belz (2019), \emph{Estimating Inequality Measures from Quantile Data} \url{https://halshs.archives-ouvertes.fr/halshs-02320110}
}
|
install.packages("dplyr",dependencies = TRUE)
install.packages("xlsx",dependencies = TRUE)
install.packages("ggplot2",dependencies = TRUE)
install.packages("RODBC",dependencies = TRUE)
install.packages("ztable",dependencies = TRUE)
|
/AOI_INSTALL_REQUIRED_PACKAGES.R
|
no_license
|
agiannikos/Hevel
|
R
| false | false | 232 |
r
|
install.packages("dplyr",dependencies = TRUE)
install.packages("xlsx",dependencies = TRUE)
install.packages("ggplot2",dependencies = TRUE)
install.packages("RODBC",dependencies = TRUE)
install.packages("ztable",dependencies = TRUE)
|
#!/usr/bin/Rscript
## And now continue as before.
get_sepsis_score = function(CINCdata, myModel){
myModel <- load_sepsis_model()
## Add the column names back
colnames(CINCdata) <- c("HR", "O2Sat", "Temp", "SBP", "MAP", "DBP", "Resp", "EtCO2",
"BaseExcess", "HCO3", "FiO2", "pH", "PaCO2", "SaO2", "AST", "BUN",
"Alkalinephos", "Calcium", "Chloride", "Creatinine", "Bilirubin_direct",
"Glucose", "Lactate", "Magnesium", "Phosphate", "Potassium",
"Bilirubin_total", "TroponinI", "Hct", "Hgb", "PTT", "WBC", "Fibrinogen",
"Platelets", "Age", "Gender", "Unit1", "Unit2", "HospAdmTime",
"ICULOS")
## Forward fill missing values
cols <- colnames(CINCdata)[1:35]
for (col in cols){
# With the line feeding of data we can now have only a single row...
if (nrow(CINCdata)>=2){
for (n in seq(2,nrow(CINCdata))){
if (is.na(CINCdata[[n,col]])){
CINCdata[[n,col]]<- CINCdata[[n-1,col]]
}
}
}
}
# Calculate a few extra indices
CINCdata <- cbind(CINCdata, "PulsePressure"=(CINCdata[,"SBP"]-CINCdata[,"DBP"]))
CINCdata[CINCdata[,"PulsePressure"]<0,"PulsePressure"] <- NA
CINCdata <- cbind (CINCdata,"CO"=CINCdata[,"PulsePressure"]*CINCdata[,"HR"]/1000)
CINCdata <- cbind (CINCdata,"ShockIndex"= CINCdata[,"HR"]/CINCdata[,"SBP"])
CINCdata <- cbind (CINCdata,"ModifiedShockIndex"=CINCdata[,"HR"]/CINCdata[,"MAP"])
CINCdata <- cbind (CINCdata,"COvariation"=CINCdata[,"PulsePressure"]/CINCdata[,"MAP"])
#CombinationVitals
CINCdata <- cbind(CINCdata, "HRTemp"=(CINCdata[,"HR"]/CINCdata[,"Temp"]))
CINCdata <- cbind(CINCdata, "RRTemp"=(CINCdata[,"Resp"]/CINCdata[,"Temp"]))
CINCdata <- cbind(CINCdata, "comb_RR"=(6.21 + 0.06*CINCdata[,"HR"]+0.20*CINCdata[,"Temp"]))
CINCdata <- cbind(CINCdata, "RRdiff"=(CINCdata[,"Resp"]+0.20*CINCdata[,"comb_RR"]))
# Make Combinations [Electrolytes]
CINCdata <- cbind(CINCdata, "comb_PotassiumMagnesium"=(CINCdata[,"Potassium"]/CINCdata[,"Magnesium"]))
CINCdata <- cbind(CINCdata, "comb_MagnesiumCalcium"=(CINCdata[,"Magnesium"]/CINCdata[,"Calcium"]))
# SpO2 with virtual shunt
CINCdata <- cbind(CINCdata, "VS"=(68.864 * log10(103.711 -CINCdata[,"SaO2"])- 52.109))
# P/F ratios
CINCdata[CINCdata[,"FiO2"]<0.20,"FiO2"] <- 0.20
CINCdata <- cbind(CINCdata, "PF"=(CINCdata[,"SaO2"]/CINCdata[,"FiO2"]))
CINCdata <- cbind(CINCdata, "SF"=(CINCdata[,"O2Sat"]/CINCdata[,"FiO2"]))
# Oxygen delivery
CINCdata <- cbind(CINCdata, "DO2"=(((1.39*CINCdata[,"Hgb"]* CINCdata[,"O2Sat"]/100) + (0.003* CINCdata[,"SaO2"]))*CINCdata[,"CO"]))
CINCdata <- cbind(CINCdata, "HbBO2MAP"=(CINCdata[,"Hgb"]+CINCdata[,"O2Sat"]+CINCdata[,"MAP"]))
CINCdata[is.na(CINCdata[,"HbBO2MAP"]),"HbBO2MAP"] <- 10.4+CINCdata[is.na(CINCdata[,"HbBO2MAP"]),"O2Sat"]+CINCdata[is.na(CINCdata[,"HbBO2MAP"]),"MAP"]
# Lab combinations
CINCdata <- cbind(CINCdata, "UreaCreat"=(CINCdata[,"BUN"]/CINCdata[,"Creatinine"]))
CINCdata <- cbind(CINCdata, "UreaCreatsum"=(CINCdata[,"BUN"]+CINCdata[,"Creatinine"]))
CINCdata <- cbind(CINCdata, "comb_Hgb"=(15.50 - 0.063*CINCdata[,"O2Sat"]+ 0.015*CINCdata[,"MAP"]))
CINCdata <- cbind(CINCdata, "comb_HCO3Lac"=(CINCdata[,"HCO3"]/CINCdata[,"Lactate"]))
CINCdata <- cbind(CINCdata, "comb_HCO3Lacdiff"=(CINCdata[,"HCO3"]+CINCdata[,"Lactate"]))
CINCdata <- cbind(CINCdata, "AnionGap"=(140+CINCdata[,"Potassium"]-CINCdata[,"Chloride"]-CINCdata[,"HCO3"]))
CINCdata <- cbind(CINCdata, "comb_ClpH"=(CINCdata[,"Chloride"]/CINCdata[,"pH"]))
# Add number of lab values
if (nrow(CINCdata)>=2){
CINCdata <- cbind (CINCdata,"numLabs"=rowSums(!is.na(CINCdata[,11:34])))
}else{
CINCdata <- cbind (CINCdata,"numLabs"=sum(!is.na(CINCdata[,11:34])))
}
# Identify abnormal values
cols <- colnames(CINCdata)[c(1:34,41:63)]
for (col in cols){
colAbnormal<-paste0('abn_',col)
CN <- colnames(CINCdata)
CINCdata <- cbind (CINCdata,colAbnormal=!(CINCdata[,col] > myModel$normdf[,col][1] & CINCdata[,col] < myModel$normdf[,col][2]))
# Naming is complicated
colnames(CINCdata)<-c(CN,colAbnormal)
# Missing is normal
CINCdata[is.na(CINCdata[,colAbnormal]),colAbnormal] <- 0
}
# Make absolute z-scores [Also do age!]
for (col in c("Age",cols)){
colaZscore<-paste0('az_',col)
CN <- colnames(CINCdata)
CINCdata <- cbind (CINCdata,colaZscore=abs((CINCdata[,col] - myModel$meanCINCdata[col])/ myModel$sdCINCdata[col]))
# Naming is complicated
colnames(CINCdata)<-c(CN,colaZscore)
# Missing is normal
CINCdata[is.na(CINCdata[,colaZscore]),colaZscore] <- 0
}
# Add 1st derivative
n <- nrow(CINCdata)
for (col in cols){
colDelta<-paste0('Delta_',col)
CN <- colnames(CINCdata)
if (n>=2){
CINCdata <- cbind (CINCdata,colDelta=c(NA,CINCdata[2:n,col]-CINCdata[1:n-1,col]))
}else{
CINCdata <- cbind (CINCdata,colDelta=0)
}
# Naming is complicated
colnames(CINCdata)<-c(CN,colDelta)
# Missing is normal
CINCdata[is.na(CINCdata[,colDelta]),colDelta] <- 0
}
# Replace a few missing values with normal values
for (c in cols){
CINCdata[is.na(CINCdata[,c]),c] <- mean(myModel$normdf[,c])
}
# Make the myModel manually
## which(colnames(CINCdata) %in% names(myModel$coeffs))
cn_CINCdata <- colnames(CINCdata)[c(35:36,64:86,88:93,95:100,102:106,108:109,111:125,128:139,142:143,145:156,159:162,163:165,167:171,
173:179,181:182,188:189,195:197,201,203,205:206,209:211,213,223:224,229:230,232,235:236,
40,2:4,7,42,43,47,52,16,20,22,26,29,30,32,34,39,57)];
cn_Model <- names(myModel$coeffs[2:length(myModel$coeffs)])
cbind(cn_CINCdata,cn_Model,cn_CINCdata==cn_Model)
#
scores=myModel$coeffs[1]+CINCdata[,c(35:36,64:86,88:93,95:100,102:106,108:109,111:125,128:139,142:143,145:156,159:162,163:165,167:171,
173:179,181:182,188:189,195:197,201,203,205:206,209:211,213,223:224,229:230,232,235:236,
40,2:4,7,42,43,47,52,16,20,22,26,29,30,32,34,39,57)] %*% myModel$coeffs[2:146]
#Debug: #return(CINCdata)
scores <- plogis(scores)
# Round the score
scores <- round(scores,digits=3)
# Zero the scores we don't know
scores[is.na(scores)] <- 0
labels <- scores > myModel$cutoff
# Make the score
results <- cbind(scores=scores, labels=labels)
# The new format wants only a single value pair returned...
results <- tail(results,1)
return(results)
}
load_sepsis_model = function(){
myModel<-NULL
myModel$cutoff <- 0.024
myModel$meanCINCdata <-
structure(c(NA, 23.3029598706288, 83.9013786848645, 97.132856887831,
36.8524520620334, 123.628926432469, 82.6852667657605, 64.2408385399842,
18.5685149897509, 33.6377679385079, -0.269208651065925, 24.3155597166449,
0.490816311247089, 7.38834399377586, 40.6489923625897, 93.3661199916155,
138.777731125784, 22.4903402064341, 96.3410038538089, 8.04424197898551,
105.645915505339, 1.44279104523792, 1.13643110250603, 131.704734712249,
1.99958431567192, 2.03496467994132, 3.53991040560614, 4.09907387918333,
1.48745323062004, 5.74663085084951, 31.816220947285, 10.6502898235763,
37.058585286139, 11.1524420714392, 295.863585505111, 206.006290729936,
61.9907195016482, 0.555748401370719, 0.492498745699756, 0.507501254300244,
-53.0579266936389, 0, 59.8285537107514, 4.97801532537259, 0.703748669639609,
1.0511124416104, 0.72818279549807, 2.27594941992041, 0.50399374513594,
18.6155703408557, -0.041763563166269, 2.06544047949225, 0.295388967101313,
1.88379257877611, 2.4267954864037, 213.206527747929, 222.024205089219,
72.5665989299371, 190.415292598047, 18.821152941968, 23.9714702360733,
10.6208244863089, 16.5562713747581, 25.5489749093331, 14.1959984160646,
14.3820218357295), .Names = c("patient", "ICULOS", "HR", "O2Sat",
"Temp", "SBP", "MAP", "DBP", "Resp", "EtCO2", "BaseExcess", "HCO3",
"FiO2", "pH", "PaCO2", "SaO2", "AST", "BUN", "Alkalinephos",
"Calcium", "Chloride", "Creatinine", "Bilirubin_direct", "Glucose",
"Lactate", "Magnesium", "Phosphate", "Potassium", "Bilirubin_total",
"TroponinI", "Hct", "Hgb", "PTT", "WBC", "Fibrinogen", "Platelets",
"Age", "Gender", "Unit1", "Unit2", "HospAdmTime", "SepsisLabel",
"PulsePressure", "CO", "ShockIndex", "ModifiedShockIndex", "COvariation",
"HRTemp", "RRTemp", "comb_RR", "RRdiff", "comb_PotassiumMagnesium",
"comb_MagnesiumCalcium", "VS", "VSgap", "PF", "SF", "DO2", "HbBO2MAP",
"UreaCreat", "UreaCreatsum", "comb_Hgb", "comb_HCO3Lac", "comb_HCO3Lacdiff",
"AnionGap", "comb_ClpH"))
myModel$sdCINCdata <- structure(c(NA, 19.7113781193856, 17.1412404132297, 3.08681722635008,
0.699052520108699, 23.0558486242605, 16.379442150324, 14.1497363788998,
4.99091472002804, 10.7611228130795, 3.98593913652066, 4.13573627070768,
0.182056970588601, 0.0649654528661226, 8.79724434667236, 10.0908941776371,
558.566340470898, 18.801076996622, 106.644309410576, 1.83135988647285,
5.51181244468682, 1.78666188910249, 2.77722929034579, 47.1640853311575,
1.62373454319323, 0.372872951629435, 1.30436563951624, 0.579326822102917,
3.12588569957376, 20.8736961497066, 5.65813896653889, 1.98656965776803,
21.5523796708516, 7.09366708527148, 149.276733043801, 101.471298684207,
16.4068825690898, 0.496882580249856, 0.49994402733963, 0.49994402733963,
152.932974303114, 0, 19.8195249050568, 1.84284267619292, 0.204160289169411,
0.2905788654929, 0.240111790237215, 0.456408566534132, 0.134931241195279,
1.06705150861474, 4.87437520674109, 0.436275380005987, 0.2886774288338,
11.4276609129278, 8.18220521365634, 72.3380641696101, 74.216192115408,
27.3534126098377, 17.0156753755032, 10.0624961772818, 20.0107428126304,
0.308323005954232, 9.39686790945583, 4.12463633093258, 5.37980726148614,
0.761162759324289), .Names = c("patient", "ICULOS", "HR", "O2Sat",
"Temp", "SBP", "MAP", "DBP", "Resp", "EtCO2", "BaseExcess", "HCO3",
"FiO2", "pH", "PaCO2", "SaO2", "AST", "BUN", "Alkalinephos",
"Calcium", "Chloride", "Creatinine", "Bilirubin_direct", "Glucose",
"Lactate", "Magnesium", "Phosphate", "Potassium", "Bilirubin_total",
"TroponinI", "Hct", "Hgb", "PTT", "WBC", "Fibrinogen", "Platelets",
"Age", "Gender", "Unit1", "Unit2", "HospAdmTime", "SepsisLabel",
"PulsePressure", "CO", "ShockIndex", "ModifiedShockIndex", "COvariation",
"HRTemp", "RRTemp", "comb_RR", "RRdiff", "comb_PotassiumMagnesium",
"comb_MagnesiumCalcium", "VS", "VSgap", "PF", "SF", "DO2", "HbBO2MAP",
"UreaCreat", "UreaCreatsum", "comb_Hgb", "comb_HCO3Lac", "comb_HCO3Lacdiff",
"AnionGap", "comb_ClpH"))
# myModel coefficients
myModel$coeffs<- structure(c(-9.45898663673839, 0.000936600472173807, 0.0636915201884217,
-0.0108944702804866, 0.0720082724308512, -0.0012831280123557,
-0.211947386990562, 0.0610542285658345, 0.0535819588754014, 0.0306275629948522,
0.0418757072099501, 0.36425246255102, 0.0700336712508687, 0.0770263283117363,
0.11303962784189, 0.0538419379276473, 0.146275363991929, 0.0590293473202436,
0.440309854615722, 0.196026963779952, 0.00417167102669667, 0.112099549298492,
-0.0309333134524298, 0.0913343934576508, 0.0488488421490149,
-0.0150912104995382, -0.0834783414970238, 0.0876206035028467,
0.0947337639877968, 0.04042035415292, -0.0221807218010848, -0.528278439316815,
0.0634477242520585, 0.182895031624972, -0.0520942079600786, -0.106815996673077,
-0.164045992583646, -0.0742081620371027, 0.03497759605518, -0.0261985412294366,
0.0295757224485244, 0.0480674509867079, 0.0863674080972734, -0.423711532792242,
0.522831184919593, -0.230800340526507, 0.474998449854071, 0.0672940526211311,
0.10820080819108, -0.18598603983762, -0.0697523694040128, -0.318528651194152,
-0.10522560214083, 0.219437216140557, 0.0382817652447438, -0.0480783532640684,
0.0212867314175437, 0.397222177614644, -0.280953061867819, 0.385990842058202,
-0.0636786620268184, -0.00594409020033552, -0.19222226117483,
-0.0540434788514775, -0.0977394259414036, 0.139682431844422,
0.0787633928456701, -0.00197630190403897, -0.0496012727713387,
-0.0635445679398898, 0.381407470050751, -0.0178185044279085,
-0.212952098806186, 0.0235001255088798, 0.00691807733005713,
0.0329793498878727, -0.0142624084498745, 0.00104029991093197,
-0.0112795087463029, 0.0484060405553696, 0.0370149544051675,
-0.085980179872051, 0.0311068587869179, 0.0274808039399266, 0.0574472115480914,
0.080763285625516, -0.187590859875682, 0.0539295768112296, 0.0626027472540064,
-0.0167452565141239, 0.0795644825006939, -0.356986808110634,
0.0133361634330504, 0.0944482893499473, 0.301435404151453, 0.0434902077026457,
-0.0465621974493991, 0.0557373008001718, -0.0309205442399979,
-0.482393501086643, 0.0553123391635784, 0.0653958446663858, 0.145519426870609,
0.0611097861113366, -0.0193224372662968, 0.0618493753490291,
0.00907213609572258, -0.0142532922059549, -0.0334650760599731,
0.0152287384209438, -8.9757012289199e-05, -0.0245981912496177,
-0.000193238956408052, -0.0650376705374133, 0.0787376450833685,
0.0928950120875324, 0.00394804132873114, -2.34889685886428e-06,
0.0413786070111466, 0.000102761869151875, 0.0136101403505218,
0.0780536129108435, -0.00980630000545708, -0.010893936767629,
0.738281367379502, -0.015071699285119, -0.0577558275182888, 0.0100477908487329,
-0.0444629081518658, 0.236078662430384, -0.00579312730774447,
0.0373961830108559, 0.0695751981814091, 0.525479846106556, -1.11659902519107,
-0.00886511658529477, -0.00146997050553036, 0.222118804630279,
0.000330934428399878, -0.099722841498513, -0.00504746286595061,
0.0183156608773489, 0.00138575102999937, -0.000319481983357545,
-4.27594845468878e-05, 0.0130623339399634), .Names = c("(Intercept)",
"Age", "Gender", "numLabs", "abn_HR", "abn_O2Sat", "abn_Temp",
"abn_SBP", "abn_MAP", "abn_DBP", "abn_Resp", "abn_EtCO2", "abn_BaseExcess",
"abn_HCO3", "abn_FiO2", "abn_pH", "abn_PaCO2", "abn_SaO2", "abn_AST",
"abn_BUN", "abn_Alkalinephos", "abn_Calcium", "abn_Chloride",
"abn_Creatinine", "abn_Bilirubin_direct", "abn_Glucose", "abn_Magnesium",
"abn_Phosphate", "abn_Potassium", "abn_Bilirubin_total", "abn_TroponinI",
"abn_Hct", "abn_PTT", "abn_WBC", "abn_Fibrinogen", "abn_Platelets",
"abn_PulsePressure", "abn_CO", "abn_ModifiedShockIndex", "abn_COvariation",
"abn_HRTemp", "abn_RRTemp", "abn_comb_RR", "abn_comb_PotassiumMagnesium",
"abn_comb_MagnesiumCalcium", "abn_PF", "abn_SF", "abn_DO2", "abn_HbBO2MAP",
"abn_UreaCreat", "abn_UreaCreatsum", "abn_comb_Hgb", "abn_comb_HCO3Lac",
"abn_comb_HCO3Lacdiff", "abn_AnionGap", "abn_comb_ClpH", "az_Age",
"az_HR", "az_O2Sat", "az_Temp", "az_DBP", "az_Resp", "az_EtCO2",
"az_BaseExcess", "az_HCO3", "az_FiO2", "az_pH", "az_PaCO2", "az_SaO2",
"az_AST", "az_BUN", "az_Alkalinephos", "az_Creatinine", "az_Bilirubin_direct",
"az_Lactate", "az_Magnesium", "az_Phosphate", "az_Potassium",
"az_Bilirubin_total", "az_TroponinI", "az_Hct", "az_Hgb", "az_PTT",
"az_WBC", "az_Fibrinogen", "az_Platelets", "az_ShockIndex", "az_ModifiedShockIndex",
"az_COvariation", "az_HRTemp", "az_RRTemp", "az_comb_RR", "az_RRdiff",
"az_comb_MagnesiumCalcium", "az_VS", "az_PF", "az_SF", "az_DO2",
"az_UreaCreat", "az_UreaCreatsum", "az_comb_Hgb", "az_comb_HCO3Lac",
"az_comb_HCO3Lacdiff", "az_AnionGap", "az_comb_ClpH", "Delta_O2Sat",
"Delta_Temp", "Delta_BaseExcess", "Delta_HCO3", "Delta_BUN",
"Delta_Alkalinephos", "Delta_Calcium", "Delta_Glucose", "Delta_Magnesium",
"Delta_Potassium", "Delta_Bilirubin_total", "Delta_Hgb", "Delta_PTT",
"Delta_WBC", "Delta_Platelets", "Delta_comb_PotassiumMagnesium",
"Delta_comb_MagnesiumCalcium", "Delta_HbBO2MAP", "Delta_UreaCreat",
"Delta_comb_Hgb", "Delta_AnionGap", "Delta_comb_ClpH", "ICULOS",
"O2Sat", "Temp", "SBP", "Resp", "CO", "ShockIndex", "RRTemp",
"VS", "BUN", "Creatinine", "Glucose", "Potassium", "Hct", "Hgb",
"WBC", "Platelets", "HospAdmTime", "UreaCreat"))
myModel$f <- " ~ Age + Gender + numLabs + abn_HR + abn_O2Sat + abn_Temp +
abn_SBP + abn_MAP + abn_DBP + abn_Resp + abn_EtCO2 + abn_BaseExcess +
abn_HCO3 + abn_FiO2 + abn_pH + abn_PaCO2 + abn_SaO2 + abn_AST +
abn_BUN + abn_Alkalinephos + abn_Calcium + abn_Chloride +
abn_Creatinine + abn_Bilirubin_direct + abn_Glucose + abn_Magnesium +
abn_Phosphate + abn_Potassium + abn_Bilirubin_total + abn_TroponinI +
abn_Hct + abn_PTT + abn_WBC + abn_Fibrinogen + abn_Platelets +
abn_PulsePressure + abn_CO + abn_ModifiedShockIndex + abn_COvariation +
abn_HRTemp + abn_RRTemp + abn_comb_RR + abn_comb_PotassiumMagnesium +
abn_comb_MagnesiumCalcium + abn_PF + abn_SF + abn_DO2 + abn_HbBO2MAP +
abn_UreaCreat + abn_UreaCreatsum + abn_comb_Hgb + abn_comb_HCO3Lac +
abn_comb_HCO3Lacdiff + abn_AnionGap + abn_comb_ClpH + az_Age +
Gender + numLabs + az_HR + az_O2Sat + az_Temp + az_DBP +
az_Resp + az_EtCO2 + az_BaseExcess + az_HCO3 + az_FiO2 +
az_pH + az_PaCO2 + az_SaO2 + az_AST + az_BUN + az_Alkalinephos +
az_Creatinine + az_Bilirubin_direct + az_Lactate + az_Magnesium +
az_Phosphate + az_Potassium + az_Bilirubin_total + az_TroponinI +
az_Hct + az_Hgb + az_PTT + az_WBC + az_Fibrinogen + az_Platelets +
az_ShockIndex + az_ModifiedShockIndex + az_COvariation +
az_HRTemp + az_RRTemp + az_comb_RR + az_RRdiff + az_comb_MagnesiumCalcium +
az_VS + az_PF + az_SF + az_DO2 + az_UreaCreat + az_UreaCreatsum +
az_comb_Hgb + az_comb_HCO3Lac + az_comb_HCO3Lacdiff + az_AnionGap +
az_comb_ClpH + Gender + numLabs + +Delta_O2Sat + Delta_Temp +
Delta_BaseExcess + Delta_HCO3 + Delta_BUN + Delta_Alkalinephos +
Delta_Calcium + Delta_Glucose + Delta_Magnesium + Delta_Potassium +
Delta_Bilirubin_total + Delta_Hgb + Delta_PTT + Delta_WBC +
Delta_Platelets + Delta_comb_PotassiumMagnesium + Delta_comb_MagnesiumCalcium +
Delta_HbBO2MAP + Delta_UreaCreat + Delta_comb_Hgb + Delta_AnionGap +
Delta_comb_ClpH + Age + Gender + numLabs + ICULOS + O2Sat +
Temp + SBP + Resp + CO + ShockIndex + RRTemp + VS + BUN +
Creatinine + Glucose + Potassium + Hct + Hgb + WBC + Platelets +
HospAdmTime + UreaCreat"
myModel$f <- as.formula(myModel$f)
myModel$normdf <- structure(c(60, 100, 90, 100, 36.6, 37, 95, 140, 70, 100, 60,
90, 12, 20, 30, 43, -2, 2, 22, 28, 0.21, 0.35, 7.35, 7.45, 38,
42, 94, 100, 100, 200, 7, 21, 44, 147, 8.5, 10.2, 96, 106, 0.5,
1.2, 0, 0.3, 70, 130, 0.5, 2, 1.7, 2.2, 2.5, 4.5, 3.6, 5.2, 0.1,
1.2, 0, 0.045, 45, 52, 13.8, 17.2, 25, 35, 4.5, 11, 149, 353,
150, 450, 30, 50, 3.5, 8, 0.5, 1, 0.85, 1.3, 0.45, 0.7, 1.6,
2.75, 0.27, 0.54, 12, 20, -2, 2, 4.8, 8.5, 1.6, 3, -5, 10, -3,
10, 300, 500, 300, 500, 100, 200, 174, 217, 6, 42, 7.5, 22.2,
13.8, 17.2, 11, 56, 20, 27.5, 8, 16, 13.15, 14.42), .Dim = c(2L, 58L),
.Dimnames = list(NULL, c("HR", "O2Sat", "Temp", "SBP",
"MAP", "DBP", "Resp", "EtCO2", "BaseExcess", "HCO3", "FiO2",
"pH", "PaCO2", "SaO2", "AST", "BUN", "Alkalinephos", "Calcium",
"Chloride", "Creatinine", "Bilirubin_direct", "Glucose", "Lactate",
"Magnesium", "Phosphate", "Potassium", "Bilirubin_total", "TroponinI",
"Hct", "Hgb", "PTT", "WBC", "Fibrinogen", "Platelets", "PulsePressure",
"CO", "ShockIndex", "ModifiedShockIndex", "COvariation", "HRTemp",
"RRTemp", "comb_RR", "RRdiff", "comb_PotassiumMagnesium", "comb_MagnesiumCalcium",
"VS", "VSgap", "PF", "SF", "DO2", "HbBO2MAP", "UreaCreat", "UreaCreatsum",
"comb_Hgb", "comb_HCO3Lac", "comb_HCO3Lacdiff", "AnionGap", "comb_ClpH")))
return(myModel)
}
|
/UBCDHIL/pnc2019_22Aug/get_sepsis_score.R
|
permissive
|
Eric-Hsieh97/2019ChallengeEntries
|
R
| false | false | 24,197 |
r
|
#!/usr/bin/Rscript
## And now continue as before.
get_sepsis_score = function(CINCdata, myModel){
myModel <- load_sepsis_model()
## Add the column names back
colnames(CINCdata) <- c("HR", "O2Sat", "Temp", "SBP", "MAP", "DBP", "Resp", "EtCO2",
"BaseExcess", "HCO3", "FiO2", "pH", "PaCO2", "SaO2", "AST", "BUN",
"Alkalinephos", "Calcium", "Chloride", "Creatinine", "Bilirubin_direct",
"Glucose", "Lactate", "Magnesium", "Phosphate", "Potassium",
"Bilirubin_total", "TroponinI", "Hct", "Hgb", "PTT", "WBC", "Fibrinogen",
"Platelets", "Age", "Gender", "Unit1", "Unit2", "HospAdmTime",
"ICULOS")
## Forward fill missing values
cols <- colnames(CINCdata)[1:35]
for (col in cols){
# With the line feeding of data we can now have only a single row...
if (nrow(CINCdata)>=2){
for (n in seq(2,nrow(CINCdata))){
if (is.na(CINCdata[[n,col]])){
CINCdata[[n,col]]<- CINCdata[[n-1,col]]
}
}
}
}
# Calculate a few extra indices
CINCdata <- cbind(CINCdata, "PulsePressure"=(CINCdata[,"SBP"]-CINCdata[,"DBP"]))
CINCdata[CINCdata[,"PulsePressure"]<0,"PulsePressure"] <- NA
CINCdata <- cbind (CINCdata,"CO"=CINCdata[,"PulsePressure"]*CINCdata[,"HR"]/1000)
CINCdata <- cbind (CINCdata,"ShockIndex"= CINCdata[,"HR"]/CINCdata[,"SBP"])
CINCdata <- cbind (CINCdata,"ModifiedShockIndex"=CINCdata[,"HR"]/CINCdata[,"MAP"])
CINCdata <- cbind (CINCdata,"COvariation"=CINCdata[,"PulsePressure"]/CINCdata[,"MAP"])
#CombinationVitals
CINCdata <- cbind(CINCdata, "HRTemp"=(CINCdata[,"HR"]/CINCdata[,"Temp"]))
CINCdata <- cbind(CINCdata, "RRTemp"=(CINCdata[,"Resp"]/CINCdata[,"Temp"]))
CINCdata <- cbind(CINCdata, "comb_RR"=(6.21 + 0.06*CINCdata[,"HR"]+0.20*CINCdata[,"Temp"]))
CINCdata <- cbind(CINCdata, "RRdiff"=(CINCdata[,"Resp"]+0.20*CINCdata[,"comb_RR"]))
# Make Combinations [Electrolytes]
CINCdata <- cbind(CINCdata, "comb_PotassiumMagnesium"=(CINCdata[,"Potassium"]/CINCdata[,"Magnesium"]))
CINCdata <- cbind(CINCdata, "comb_MagnesiumCalcium"=(CINCdata[,"Magnesium"]/CINCdata[,"Calcium"]))
# SpO2 with virtual shunt
CINCdata <- cbind(CINCdata, "VS"=(68.864 * log10(103.711 -CINCdata[,"SaO2"])- 52.109))
# P/F ratios
CINCdata[CINCdata[,"FiO2"]<0.20,"FiO2"] <- 0.20
CINCdata <- cbind(CINCdata, "PF"=(CINCdata[,"SaO2"]/CINCdata[,"FiO2"]))
CINCdata <- cbind(CINCdata, "SF"=(CINCdata[,"O2Sat"]/CINCdata[,"FiO2"]))
# Oxygen delivery
CINCdata <- cbind(CINCdata, "DO2"=(((1.39*CINCdata[,"Hgb"]* CINCdata[,"O2Sat"]/100) + (0.003* CINCdata[,"SaO2"]))*CINCdata[,"CO"]))
CINCdata <- cbind(CINCdata, "HbBO2MAP"=(CINCdata[,"Hgb"]+CINCdata[,"O2Sat"]+CINCdata[,"MAP"]))
CINCdata[is.na(CINCdata[,"HbBO2MAP"]),"HbBO2MAP"] <- 10.4+CINCdata[is.na(CINCdata[,"HbBO2MAP"]),"O2Sat"]+CINCdata[is.na(CINCdata[,"HbBO2MAP"]),"MAP"]
# Lab combinations
CINCdata <- cbind(CINCdata, "UreaCreat"=(CINCdata[,"BUN"]/CINCdata[,"Creatinine"]))
CINCdata <- cbind(CINCdata, "UreaCreatsum"=(CINCdata[,"BUN"]+CINCdata[,"Creatinine"]))
CINCdata <- cbind(CINCdata, "comb_Hgb"=(15.50 - 0.063*CINCdata[,"O2Sat"]+ 0.015*CINCdata[,"MAP"]))
CINCdata <- cbind(CINCdata, "comb_HCO3Lac"=(CINCdata[,"HCO3"]/CINCdata[,"Lactate"]))
CINCdata <- cbind(CINCdata, "comb_HCO3Lacdiff"=(CINCdata[,"HCO3"]+CINCdata[,"Lactate"]))
CINCdata <- cbind(CINCdata, "AnionGap"=(140+CINCdata[,"Potassium"]-CINCdata[,"Chloride"]-CINCdata[,"HCO3"]))
CINCdata <- cbind(CINCdata, "comb_ClpH"=(CINCdata[,"Chloride"]/CINCdata[,"pH"]))
# Add number of lab values
if (nrow(CINCdata)>=2){
CINCdata <- cbind (CINCdata,"numLabs"=rowSums(!is.na(CINCdata[,11:34])))
}else{
CINCdata <- cbind (CINCdata,"numLabs"=sum(!is.na(CINCdata[,11:34])))
}
# Identify abnormal values
cols <- colnames(CINCdata)[c(1:34,41:63)]
for (col in cols){
colAbnormal<-paste0('abn_',col)
CN <- colnames(CINCdata)
CINCdata <- cbind (CINCdata,colAbnormal=!(CINCdata[,col] > myModel$normdf[,col][1] & CINCdata[,col] < myModel$normdf[,col][2]))
# Naming is complicated
colnames(CINCdata)<-c(CN,colAbnormal)
# Missing is normal
CINCdata[is.na(CINCdata[,colAbnormal]),colAbnormal] <- 0
}
# Make absolute z-scores [Also do age!]
for (col in c("Age",cols)){
colaZscore<-paste0('az_',col)
CN <- colnames(CINCdata)
CINCdata <- cbind (CINCdata,colaZscore=abs((CINCdata[,col] - myModel$meanCINCdata[col])/ myModel$sdCINCdata[col]))
# Naming is complicated
colnames(CINCdata)<-c(CN,colaZscore)
# Missing is normal
CINCdata[is.na(CINCdata[,colaZscore]),colaZscore] <- 0
}
# Add 1st derivative
n <- nrow(CINCdata)
for (col in cols){
colDelta<-paste0('Delta_',col)
CN <- colnames(CINCdata)
if (n>=2){
CINCdata <- cbind (CINCdata,colDelta=c(NA,CINCdata[2:n,col]-CINCdata[1:n-1,col]))
}else{
CINCdata <- cbind (CINCdata,colDelta=0)
}
# Naming is complicated
colnames(CINCdata)<-c(CN,colDelta)
# Missing is normal
CINCdata[is.na(CINCdata[,colDelta]),colDelta] <- 0
}
# Replace a few missing values with normal values
for (c in cols){
CINCdata[is.na(CINCdata[,c]),c] <- mean(myModel$normdf[,c])
}
# Make the myModel manually
## which(colnames(CINCdata) %in% names(myModel$coeffs))
cn_CINCdata <- colnames(CINCdata)[c(35:36,64:86,88:93,95:100,102:106,108:109,111:125,128:139,142:143,145:156,159:162,163:165,167:171,
173:179,181:182,188:189,195:197,201,203,205:206,209:211,213,223:224,229:230,232,235:236,
40,2:4,7,42,43,47,52,16,20,22,26,29,30,32,34,39,57)];
cn_Model <- names(myModel$coeffs[2:length(myModel$coeffs)])
cbind(cn_CINCdata,cn_Model,cn_CINCdata==cn_Model)
#
scores=myModel$coeffs[1]+CINCdata[,c(35:36,64:86,88:93,95:100,102:106,108:109,111:125,128:139,142:143,145:156,159:162,163:165,167:171,
173:179,181:182,188:189,195:197,201,203,205:206,209:211,213,223:224,229:230,232,235:236,
40,2:4,7,42,43,47,52,16,20,22,26,29,30,32,34,39,57)] %*% myModel$coeffs[2:146]
#Debug: #return(CINCdata)
scores <- plogis(scores)
# Round the score
scores <- round(scores,digits=3)
# Zero the scores we don't know
scores[is.na(scores)] <- 0
labels <- scores > myModel$cutoff
# Make the score
results <- cbind(scores=scores, labels=labels)
# The new format wants only a single value pair returned...
results <- tail(results,1)
return(results)
}
load_sepsis_model = function(){
myModel<-NULL
myModel$cutoff <- 0.024
myModel$meanCINCdata <-
structure(c(NA, 23.3029598706288, 83.9013786848645, 97.132856887831,
36.8524520620334, 123.628926432469, 82.6852667657605, 64.2408385399842,
18.5685149897509, 33.6377679385079, -0.269208651065925, 24.3155597166449,
0.490816311247089, 7.38834399377586, 40.6489923625897, 93.3661199916155,
138.777731125784, 22.4903402064341, 96.3410038538089, 8.04424197898551,
105.645915505339, 1.44279104523792, 1.13643110250603, 131.704734712249,
1.99958431567192, 2.03496467994132, 3.53991040560614, 4.09907387918333,
1.48745323062004, 5.74663085084951, 31.816220947285, 10.6502898235763,
37.058585286139, 11.1524420714392, 295.863585505111, 206.006290729936,
61.9907195016482, 0.555748401370719, 0.492498745699756, 0.507501254300244,
-53.0579266936389, 0, 59.8285537107514, 4.97801532537259, 0.703748669639609,
1.0511124416104, 0.72818279549807, 2.27594941992041, 0.50399374513594,
18.6155703408557, -0.041763563166269, 2.06544047949225, 0.295388967101313,
1.88379257877611, 2.4267954864037, 213.206527747929, 222.024205089219,
72.5665989299371, 190.415292598047, 18.821152941968, 23.9714702360733,
10.6208244863089, 16.5562713747581, 25.5489749093331, 14.1959984160646,
14.3820218357295), .Names = c("patient", "ICULOS", "HR", "O2Sat",
"Temp", "SBP", "MAP", "DBP", "Resp", "EtCO2", "BaseExcess", "HCO3",
"FiO2", "pH", "PaCO2", "SaO2", "AST", "BUN", "Alkalinephos",
"Calcium", "Chloride", "Creatinine", "Bilirubin_direct", "Glucose",
"Lactate", "Magnesium", "Phosphate", "Potassium", "Bilirubin_total",
"TroponinI", "Hct", "Hgb", "PTT", "WBC", "Fibrinogen", "Platelets",
"Age", "Gender", "Unit1", "Unit2", "HospAdmTime", "SepsisLabel",
"PulsePressure", "CO", "ShockIndex", "ModifiedShockIndex", "COvariation",
"HRTemp", "RRTemp", "comb_RR", "RRdiff", "comb_PotassiumMagnesium",
"comb_MagnesiumCalcium", "VS", "VSgap", "PF", "SF", "DO2", "HbBO2MAP",
"UreaCreat", "UreaCreatsum", "comb_Hgb", "comb_HCO3Lac", "comb_HCO3Lacdiff",
"AnionGap", "comb_ClpH"))
myModel$sdCINCdata <- structure(c(NA, 19.7113781193856, 17.1412404132297, 3.08681722635008,
0.699052520108699, 23.0558486242605, 16.379442150324, 14.1497363788998,
4.99091472002804, 10.7611228130795, 3.98593913652066, 4.13573627070768,
0.182056970588601, 0.0649654528661226, 8.79724434667236, 10.0908941776371,
558.566340470898, 18.801076996622, 106.644309410576, 1.83135988647285,
5.51181244468682, 1.78666188910249, 2.77722929034579, 47.1640853311575,
1.62373454319323, 0.372872951629435, 1.30436563951624, 0.579326822102917,
3.12588569957376, 20.8736961497066, 5.65813896653889, 1.98656965776803,
21.5523796708516, 7.09366708527148, 149.276733043801, 101.471298684207,
16.4068825690898, 0.496882580249856, 0.49994402733963, 0.49994402733963,
152.932974303114, 0, 19.8195249050568, 1.84284267619292, 0.204160289169411,
0.2905788654929, 0.240111790237215, 0.456408566534132, 0.134931241195279,
1.06705150861474, 4.87437520674109, 0.436275380005987, 0.2886774288338,
11.4276609129278, 8.18220521365634, 72.3380641696101, 74.216192115408,
27.3534126098377, 17.0156753755032, 10.0624961772818, 20.0107428126304,
0.308323005954232, 9.39686790945583, 4.12463633093258, 5.37980726148614,
0.761162759324289), .Names = c("patient", "ICULOS", "HR", "O2Sat",
"Temp", "SBP", "MAP", "DBP", "Resp", "EtCO2", "BaseExcess", "HCO3",
"FiO2", "pH", "PaCO2", "SaO2", "AST", "BUN", "Alkalinephos",
"Calcium", "Chloride", "Creatinine", "Bilirubin_direct", "Glucose",
"Lactate", "Magnesium", "Phosphate", "Potassium", "Bilirubin_total",
"TroponinI", "Hct", "Hgb", "PTT", "WBC", "Fibrinogen", "Platelets",
"Age", "Gender", "Unit1", "Unit2", "HospAdmTime", "SepsisLabel",
"PulsePressure", "CO", "ShockIndex", "ModifiedShockIndex", "COvariation",
"HRTemp", "RRTemp", "comb_RR", "RRdiff", "comb_PotassiumMagnesium",
"comb_MagnesiumCalcium", "VS", "VSgap", "PF", "SF", "DO2", "HbBO2MAP",
"UreaCreat", "UreaCreatsum", "comb_Hgb", "comb_HCO3Lac", "comb_HCO3Lacdiff",
"AnionGap", "comb_ClpH"))
# myModel coefficients
myModel$coeffs<- structure(c(-9.45898663673839, 0.000936600472173807, 0.0636915201884217,
-0.0108944702804866, 0.0720082724308512, -0.0012831280123557,
-0.211947386990562, 0.0610542285658345, 0.0535819588754014, 0.0306275629948522,
0.0418757072099501, 0.36425246255102, 0.0700336712508687, 0.0770263283117363,
0.11303962784189, 0.0538419379276473, 0.146275363991929, 0.0590293473202436,
0.440309854615722, 0.196026963779952, 0.00417167102669667, 0.112099549298492,
-0.0309333134524298, 0.0913343934576508, 0.0488488421490149,
-0.0150912104995382, -0.0834783414970238, 0.0876206035028467,
0.0947337639877968, 0.04042035415292, -0.0221807218010848, -0.528278439316815,
0.0634477242520585, 0.182895031624972, -0.0520942079600786, -0.106815996673077,
-0.164045992583646, -0.0742081620371027, 0.03497759605518, -0.0261985412294366,
0.0295757224485244, 0.0480674509867079, 0.0863674080972734, -0.423711532792242,
0.522831184919593, -0.230800340526507, 0.474998449854071, 0.0672940526211311,
0.10820080819108, -0.18598603983762, -0.0697523694040128, -0.318528651194152,
-0.10522560214083, 0.219437216140557, 0.0382817652447438, -0.0480783532640684,
0.0212867314175437, 0.397222177614644, -0.280953061867819, 0.385990842058202,
-0.0636786620268184, -0.00594409020033552, -0.19222226117483,
-0.0540434788514775, -0.0977394259414036, 0.139682431844422,
0.0787633928456701, -0.00197630190403897, -0.0496012727713387,
-0.0635445679398898, 0.381407470050751, -0.0178185044279085,
-0.212952098806186, 0.0235001255088798, 0.00691807733005713,
0.0329793498878727, -0.0142624084498745, 0.00104029991093197,
-0.0112795087463029, 0.0484060405553696, 0.0370149544051675,
-0.085980179872051, 0.0311068587869179, 0.0274808039399266, 0.0574472115480914,
0.080763285625516, -0.187590859875682, 0.0539295768112296, 0.0626027472540064,
-0.0167452565141239, 0.0795644825006939, -0.356986808110634,
0.0133361634330504, 0.0944482893499473, 0.301435404151453, 0.0434902077026457,
-0.0465621974493991, 0.0557373008001718, -0.0309205442399979,
-0.482393501086643, 0.0553123391635784, 0.0653958446663858, 0.145519426870609,
0.0611097861113366, -0.0193224372662968, 0.0618493753490291,
0.00907213609572258, -0.0142532922059549, -0.0334650760599731,
0.0152287384209438, -8.9757012289199e-05, -0.0245981912496177,
-0.000193238956408052, -0.0650376705374133, 0.0787376450833685,
0.0928950120875324, 0.00394804132873114, -2.34889685886428e-06,
0.0413786070111466, 0.000102761869151875, 0.0136101403505218,
0.0780536129108435, -0.00980630000545708, -0.010893936767629,
0.738281367379502, -0.015071699285119, -0.0577558275182888, 0.0100477908487329,
-0.0444629081518658, 0.236078662430384, -0.00579312730774447,
0.0373961830108559, 0.0695751981814091, 0.525479846106556, -1.11659902519107,
-0.00886511658529477, -0.00146997050553036, 0.222118804630279,
0.000330934428399878, -0.099722841498513, -0.00504746286595061,
0.0183156608773489, 0.00138575102999937, -0.000319481983357545,
-4.27594845468878e-05, 0.0130623339399634), .Names = c("(Intercept)",
"Age", "Gender", "numLabs", "abn_HR", "abn_O2Sat", "abn_Temp",
"abn_SBP", "abn_MAP", "abn_DBP", "abn_Resp", "abn_EtCO2", "abn_BaseExcess",
"abn_HCO3", "abn_FiO2", "abn_pH", "abn_PaCO2", "abn_SaO2", "abn_AST",
"abn_BUN", "abn_Alkalinephos", "abn_Calcium", "abn_Chloride",
"abn_Creatinine", "abn_Bilirubin_direct", "abn_Glucose", "abn_Magnesium",
"abn_Phosphate", "abn_Potassium", "abn_Bilirubin_total", "abn_TroponinI",
"abn_Hct", "abn_PTT", "abn_WBC", "abn_Fibrinogen", "abn_Platelets",
"abn_PulsePressure", "abn_CO", "abn_ModifiedShockIndex", "abn_COvariation",
"abn_HRTemp", "abn_RRTemp", "abn_comb_RR", "abn_comb_PotassiumMagnesium",
"abn_comb_MagnesiumCalcium", "abn_PF", "abn_SF", "abn_DO2", "abn_HbBO2MAP",
"abn_UreaCreat", "abn_UreaCreatsum", "abn_comb_Hgb", "abn_comb_HCO3Lac",
"abn_comb_HCO3Lacdiff", "abn_AnionGap", "abn_comb_ClpH", "az_Age",
"az_HR", "az_O2Sat", "az_Temp", "az_DBP", "az_Resp", "az_EtCO2",
"az_BaseExcess", "az_HCO3", "az_FiO2", "az_pH", "az_PaCO2", "az_SaO2",
"az_AST", "az_BUN", "az_Alkalinephos", "az_Creatinine", "az_Bilirubin_direct",
"az_Lactate", "az_Magnesium", "az_Phosphate", "az_Potassium",
"az_Bilirubin_total", "az_TroponinI", "az_Hct", "az_Hgb", "az_PTT",
"az_WBC", "az_Fibrinogen", "az_Platelets", "az_ShockIndex", "az_ModifiedShockIndex",
"az_COvariation", "az_HRTemp", "az_RRTemp", "az_comb_RR", "az_RRdiff",
"az_comb_MagnesiumCalcium", "az_VS", "az_PF", "az_SF", "az_DO2",
"az_UreaCreat", "az_UreaCreatsum", "az_comb_Hgb", "az_comb_HCO3Lac",
"az_comb_HCO3Lacdiff", "az_AnionGap", "az_comb_ClpH", "Delta_O2Sat",
"Delta_Temp", "Delta_BaseExcess", "Delta_HCO3", "Delta_BUN",
"Delta_Alkalinephos", "Delta_Calcium", "Delta_Glucose", "Delta_Magnesium",
"Delta_Potassium", "Delta_Bilirubin_total", "Delta_Hgb", "Delta_PTT",
"Delta_WBC", "Delta_Platelets", "Delta_comb_PotassiumMagnesium",
"Delta_comb_MagnesiumCalcium", "Delta_HbBO2MAP", "Delta_UreaCreat",
"Delta_comb_Hgb", "Delta_AnionGap", "Delta_comb_ClpH", "ICULOS",
"O2Sat", "Temp", "SBP", "Resp", "CO", "ShockIndex", "RRTemp",
"VS", "BUN", "Creatinine", "Glucose", "Potassium", "Hct", "Hgb",
"WBC", "Platelets", "HospAdmTime", "UreaCreat"))
myModel$f <- " ~ Age + Gender + numLabs + abn_HR + abn_O2Sat + abn_Temp +
abn_SBP + abn_MAP + abn_DBP + abn_Resp + abn_EtCO2 + abn_BaseExcess +
abn_HCO3 + abn_FiO2 + abn_pH + abn_PaCO2 + abn_SaO2 + abn_AST +
abn_BUN + abn_Alkalinephos + abn_Calcium + abn_Chloride +
abn_Creatinine + abn_Bilirubin_direct + abn_Glucose + abn_Magnesium +
abn_Phosphate + abn_Potassium + abn_Bilirubin_total + abn_TroponinI +
abn_Hct + abn_PTT + abn_WBC + abn_Fibrinogen + abn_Platelets +
abn_PulsePressure + abn_CO + abn_ModifiedShockIndex + abn_COvariation +
abn_HRTemp + abn_RRTemp + abn_comb_RR + abn_comb_PotassiumMagnesium +
abn_comb_MagnesiumCalcium + abn_PF + abn_SF + abn_DO2 + abn_HbBO2MAP +
abn_UreaCreat + abn_UreaCreatsum + abn_comb_Hgb + abn_comb_HCO3Lac +
abn_comb_HCO3Lacdiff + abn_AnionGap + abn_comb_ClpH + az_Age +
Gender + numLabs + az_HR + az_O2Sat + az_Temp + az_DBP +
az_Resp + az_EtCO2 + az_BaseExcess + az_HCO3 + az_FiO2 +
az_pH + az_PaCO2 + az_SaO2 + az_AST + az_BUN + az_Alkalinephos +
az_Creatinine + az_Bilirubin_direct + az_Lactate + az_Magnesium +
az_Phosphate + az_Potassium + az_Bilirubin_total + az_TroponinI +
az_Hct + az_Hgb + az_PTT + az_WBC + az_Fibrinogen + az_Platelets +
az_ShockIndex + az_ModifiedShockIndex + az_COvariation +
az_HRTemp + az_RRTemp + az_comb_RR + az_RRdiff + az_comb_MagnesiumCalcium +
az_VS + az_PF + az_SF + az_DO2 + az_UreaCreat + az_UreaCreatsum +
az_comb_Hgb + az_comb_HCO3Lac + az_comb_HCO3Lacdiff + az_AnionGap +
az_comb_ClpH + Gender + numLabs + +Delta_O2Sat + Delta_Temp +
Delta_BaseExcess + Delta_HCO3 + Delta_BUN + Delta_Alkalinephos +
Delta_Calcium + Delta_Glucose + Delta_Magnesium + Delta_Potassium +
Delta_Bilirubin_total + Delta_Hgb + Delta_PTT + Delta_WBC +
Delta_Platelets + Delta_comb_PotassiumMagnesium + Delta_comb_MagnesiumCalcium +
Delta_HbBO2MAP + Delta_UreaCreat + Delta_comb_Hgb + Delta_AnionGap +
Delta_comb_ClpH + Age + Gender + numLabs + ICULOS + O2Sat +
Temp + SBP + Resp + CO + ShockIndex + RRTemp + VS + BUN +
Creatinine + Glucose + Potassium + Hct + Hgb + WBC + Platelets +
HospAdmTime + UreaCreat"
myModel$f <- as.formula(myModel$f)
myModel$normdf <- structure(c(60, 100, 90, 100, 36.6, 37, 95, 140, 70, 100, 60,
90, 12, 20, 30, 43, -2, 2, 22, 28, 0.21, 0.35, 7.35, 7.45, 38,
42, 94, 100, 100, 200, 7, 21, 44, 147, 8.5, 10.2, 96, 106, 0.5,
1.2, 0, 0.3, 70, 130, 0.5, 2, 1.7, 2.2, 2.5, 4.5, 3.6, 5.2, 0.1,
1.2, 0, 0.045, 45, 52, 13.8, 17.2, 25, 35, 4.5, 11, 149, 353,
150, 450, 30, 50, 3.5, 8, 0.5, 1, 0.85, 1.3, 0.45, 0.7, 1.6,
2.75, 0.27, 0.54, 12, 20, -2, 2, 4.8, 8.5, 1.6, 3, -5, 10, -3,
10, 300, 500, 300, 500, 100, 200, 174, 217, 6, 42, 7.5, 22.2,
13.8, 17.2, 11, 56, 20, 27.5, 8, 16, 13.15, 14.42), .Dim = c(2L, 58L),
.Dimnames = list(NULL, c("HR", "O2Sat", "Temp", "SBP",
"MAP", "DBP", "Resp", "EtCO2", "BaseExcess", "HCO3", "FiO2",
"pH", "PaCO2", "SaO2", "AST", "BUN", "Alkalinephos", "Calcium",
"Chloride", "Creatinine", "Bilirubin_direct", "Glucose", "Lactate",
"Magnesium", "Phosphate", "Potassium", "Bilirubin_total", "TroponinI",
"Hct", "Hgb", "PTT", "WBC", "Fibrinogen", "Platelets", "PulsePressure",
"CO", "ShockIndex", "ModifiedShockIndex", "COvariation", "HRTemp",
"RRTemp", "comb_RR", "RRdiff", "comb_PotassiumMagnesium", "comb_MagnesiumCalcium",
"VS", "VSgap", "PF", "SF", "DO2", "HbBO2MAP", "UreaCreat", "UreaCreatsum",
"comb_Hgb", "comb_HCO3Lac", "comb_HCO3Lacdiff", "AnionGap", "comb_ClpH")))
return(myModel)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classic.wordscores.R
\name{classic.wordscores}
\alias{classic.wordscores}
\title{Old-Style Wordscores}
\usage{
classic.wordscores(wfm, scores)
}
\arguments{
\item{wfm}{object of class wfm}
\item{scores}{reference document positions/scores}
}
\value{
An old-style Wordscores analysis.
}
\description{
Construct a Wordscores model from reference document scores
}
\details{
This version of Wordscores is exactly as described in Laver et al. 2003 and
is provided for historical interest and continued replicability of older
analyses.
\code{scores} is a vector of document scores corresponding to the documents
in the word frequency matrix \code{wfm}. The function computes wordscores
and returns a model from which virgin text scores can be predicted.
}
\examples{
data(lbg)
ref <- getdocs(lbg, 1:5)
ws <- classic.wordscores(ref, scores=seq(-1.5,1.5,by=0.75))
summary(ws)
vir <- getdocs(lbg, 'V1')
predict(ws, newdata=vir)
}
\author{
Will Lowe
}
\references{
Laver, M. and Benoit, K. and Garry, J. (2003) 'Extracting policy
positions from political texts using words as data' American Political
Science Review. 97. pp.311-333
}
\seealso{
\code{\link{summary.classic.wordscores}}
}
|
/man/classic.wordscores.Rd
|
no_license
|
markwestcott34/austin
|
R
| false | true | 1,264 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classic.wordscores.R
\name{classic.wordscores}
\alias{classic.wordscores}
\title{Old-Style Wordscores}
\usage{
classic.wordscores(wfm, scores)
}
\arguments{
\item{wfm}{object of class wfm}
\item{scores}{reference document positions/scores}
}
\value{
An old-style Wordscores analysis.
}
\description{
Construct a Wordscores model from reference document scores
}
\details{
This version of Wordscores is exactly as described in Laver et al. 2003 and
is provided for historical interest and continued replicability of older
analyses.
\code{scores} is a vector of document scores corresponding to the documents
in the word frequency matrix \code{wfm}. The function computes wordscores
and returns a model from which virgin text scores can be predicted.
}
\examples{
data(lbg)
ref <- getdocs(lbg, 1:5)
ws <- classic.wordscores(ref, scores=seq(-1.5,1.5,by=0.75))
summary(ws)
vir <- getdocs(lbg, 'V1')
predict(ws, newdata=vir)
}
\author{
Will Lowe
}
\references{
Laver, M. and Benoit, K. and Garry, J. (2003) 'Extracting policy
positions from political texts using words as data' American Political
Science Review. 97. pp.311-333
}
\seealso{
\code{\link{summary.classic.wordscores}}
}
|
d <- read.csv2("data/rhc_devoir_epidemio.csv")
head(d)
str(d)
dim(d)
summary(d)
d$SADMDTE <- as.Date(d$SADMDTE,"%d/%m/%Y")
d$DSCHDTE <- as.Date(d$DSCHDTE,"%d/%m/%Y")
d$DTHDTE <- as.Date(d$DTHDTE,"%d/%m/%Y")
d$LSTCTDTE <- as.Date(d$LSTCTDTE,"%d/%m/%Y")
for (i in c("DEATH", "DTH30", "DNR1", "RESP", "CARD", "NEURO", "GASTR", "RENAL", "META", "HEMA", "SEPS", "TRAUMA", "ORTHO")) {
d[ ,i] <- as.character(d[,i])
d[,i] <- ifelse (d[ ,i]=="No", 0, d[ ,i])
d[,i] <- ifelse (d[ ,i]=="Yes", 1, d[ ,i])
d[,i] <- as.numeric(d[,i])
}
#Pour transformer les "" en NA
#plus long que apply mais garde la bonne structure
for (x in colnames(d)){
if(is.factor(d[ ,x])) {
if(length(d[d[ ,x]=="", x])!=0) {
d[d[ ,x]=="", x] <- NA
d[ ,x] <- factor(d[,x])
}
}
}
#marche pas
# d2 <- lapply(colnames(d), function(x) {
# vx <- d[ ,x]
# if(is.factor(vx)) {
# if(length(d[d[ ,x]=="", x])!=0) d[d[ ,x]=="", x] <- NA
# }
# return(d[ ,x])
# })
# d2 <- do.call(cbind,d2)
#
# #transforme tout en facteur:
# d2 <- apply(d, 2, function(x) {
# if(is.factor(x)) {
# if(length(x[x==""])!=0) x[x==""] <- NA
# else x <-x
# }
# else x<-x
# return(x)
# })
# d2 <- data.frame(d2)
#Je préfère que PTID soit un caractère pour éviter un gag si le numéro de ligne n'est pas le même que PTID
d$PTID <- paste0("A", d$PTID)
#variables descriptives
vardes <- c("AGE", "SEX", "RACE", "EDU", "INCOME", "NINSCLAS", "CAT1", "CAT2")
var_quali_des <- c("SEX", "RACE", "INCOME", "NINSCLAS", "CAT1", "CAT2")
var_quanti_des <- c("AGE","EDU")
var_outcome <- c("DEATH","SWANG1")
var_date <- c("SADMDTE", "DSCHDTE", "DTHDTE", "LSTCTDTE")
# variables de comorbidités
var_com <- c("CARDIOHX", "CHFHX", "DEMENTHX", "PSYCHHX", "CHRPULHX", "RENALHX",
"LIVERHX", "GIBLEDHX", "MALIGHX", "IMMUNHX", "TRANSHX", "AMIHX")
#variables de l'examen clinique et paraclinique
var_exam <- c("ADLD3P", "DAS2D3PC", "DNR1", "CA", "SURV2MD1", "APS1", "SCOMA1", "WTKILO1", "TEMP1",
"MEANBP1", "RESP1", "HRT1", "PAFI1", "PACO21", "PH1", "WBLC1", "HEMA1", "SOD1", "POT1",
"CREA1", "BILI1", "ALB1", "URIN1")
#var admission diagnosis
var_ad <- c("RESP", "CARD", "NEURO", "GASTR", "RENAL", "META", "HEMA", "SEPS", "TRAUMA", "ORTHO")
# #CA va poser problème pour le modèle linéaire car cancer oui sera comparé à métastase, alors que ce sont des cancers oui également
# #je veux aussi comparer cancer à pas de cancer
# d$CANCER <- NA
# d$CANCER <- ifelse (!is.na(d$CA) & (d$CA=="Yes" | d$CA=="Metastatic"), 1, d$CANCER)
# d$CANCER <- ifelse (!is.na(d$CA) & d$CA=="No" , 0, d$CANCER)
# d$METASTASE <- ifelse (!is.na(d$CA) & d$CA=="Metastatic", 1, d$CA)
# d$METASTASE <- ifelse (!is.na(d$CA) & (d$CA=="Yes" | d$CA=="No"), 0, d$CA)
# var_exam <- c(var_exam, "CANCER", "METASTASE")
colnames(d)[!colnames(d) %in% c(var_ad, var_exam, var_com, vardes, var_outcome)]
#%2 variables ne sont pas dans le cahier des variables : T3D30(? de 2 à 30) et DTH30(0ou1, probablement décès à 30j)
#Outcomes
d$SWAN <- as.character(d$SWANG1)
d[d$SWAN=="No RHC","SWAN"] <- 0
d[d$SWAN=="RHC","SWAN"] <- 1
d$SWAN <- as.factor(d$SWAN)
d$DEATH <- as.factor(d$DEATH)
var_outcome <- c(var_outcome, "SWAN")
#var pour analyse de survie
table(is.na(d$LSTCTDTE)) #pas de perdu de vu pour la date de dernier contact
d$ddn <- as_date(ifelse(is.na(d$DTHDTE), d$LSTCTDTE, d$DTHDTE))
d$time <- as.numeric(d$ddn - d$SADMDTE) #temps de suivi en j entre la date des dernières nouvelles (décès ou date de dernier contact)
d$censor <- ifelse (!is.na(d$DTHDTE), 1, 0)
#avant de recréer la variable DTH30, je vérifie que tous les sujets sont bien suivis au moins 30j
table(d$time<30, d$censor) #j'ai 8 sujets qui sont perdus de vu avant 30 jours
namesDTH30_NA <- d[d$time<30 & d$censor==0, "PTID"]
d$DTH30b <- NA
d$DTH30b <- ifelse (d$time<=30 & d$censor==1, 1, d$DTH30b)
d$DTH30b <- ifelse (d$time>30, 0, d$DTH30b)
table(d$DTH30b)
table(d$DTH30)
#Je ne suis pas d'accord avec la variable DTH30 qui note 0 les sujets perdus de vu avant 30 j. Ce serait ok pour un modele de survie prenant en compte de le temps de suivi mais
#pas pour une régression logistique.
#pour faire analyse de survie avec DTH30:
d$timeb <- ifelse(d$time>30, 30, d$time)
# d$DEATH2 <- ifelse(!is.na(d$DTHDTE), 1, 0)
# table(d$DEATH==d$DEATH2) #all true => Je peux utiliser l'une ou l'autre des variables, ont probablement été créées de la meme marnière
#----------
#Description
t1 <- table_var_quali_des <- describe_qualitative(vec_var = var_quali_des_des, .data=d)
t2 <- table_var_quanti_des <- describe_quantitative(vec_var = var_quanti_des, .data=d)
t3 <- describe_qualitative(var_com, d)#idem # summary(apply(d[ ,var_com],2,as.factor))
t4 <- describe_quantitative(var_exam, d)#summary(d[ ,var_exam])
t5 <- describe_qualitative(c("CA","CANCER","DNR1"), d)
t6 <- describe_qualitative(var_ad, d)
t7 <- describe_qualitative(c("DEATH","SWAN"), d)
obj <- rbind(t1,t2,t3,t4,t5,t6,t7)
write.table(print(obj), file="clipboard", sep="\t")
range(d$LSTCTDTE)
range(d$SADMDTE)
range(d$ddn)
#Nb de NA
#Nb de sujets avec 0, 1, 2, 3, 4 NA
table(apply(apply(d,2,is.na),1,sum))
#cb de NA pour chaque colonne
apply(apply(d,2,is.na),2,sum)
table(apply(apply(d,2,is.na),2,sum))
#distribution des items
.l <- lapply(colnames(d), function(x){
if (class(d[,x])=="Date") {
#browser()
qplot(d[ ,x], main=x, xlab=NULL, fill=I("navajowhite3"), col=I("pink4"))
}
else{
if (length(names(table(d[,x])))>15) qplot(as.numeric(as.character(d[ ,x])), main=x, xlab=NULL, fill=I("navajowhite3"), col=I("pink4"))
else qplot(as.factor(d[ ,x]), main=x, xlab=NULL, fill=I("navajowhite3"), col=I("pink4"))
}
})
ml <- marrangeGrob(.l,ncol=3,nrow=3,top = NULL)
ggsave(file="distrib bef dm.pdf", ml)
print(ml)
#-------------
#verif
HR0 <- ifelse(d$HRT1==0, "FC0", ifelse(!is.na(d$HRT1),0,NA))
FC0 <- ifelse(d$RESP1==0, "FR0", ifelse(!is.na(d$RESP1),0,NA))
TA0 <- ifelse(d$MEANBP1==0, "TA0", ifelse(!is.na(d$MEANBP1),0,NA))
#pas de NA de toutes façons pour ces valeurs
d$cleCR <- paste(HR0, FC0, TA0, sep="|")
table(d$cleCR)
d[d$cleCR=="FC0|FR0|0", c("HRT1", "RESP1", "MEANBP1", "DTH30", "SCOMA1", "TEMP1")]
#"0|0|0" :aucune valeure ne vaut 0, ok
#"0|0|TA0" : LA tension artérielle vaut 0 mais pas le reste : probable erreur de mesure
d$MEANBP1 <- ifelse(d$cleCR=="0|0|TA0", NA, d$MEANBP1)
#"0|FR0|0" : réa probablement pas d'accord sur quoi mesurer faire en cas de respirateur : chercher variable respirateur, sinon garder telle quelle?
#"0|FR0|TA0" : TA 0 et respi 0, je mets la FC à 0 aussi
d$HRT1 <- ifelse(d$cleCR=="0|FR0|TA0", 0, d$HRT1)
#"FC0|0|0" : seul la FC vaut0 : erreur, FC =NA
d$HRT1 <- ifelse(d$cleCR=="FC0|0|0", NA, d$HRT1)
#"FC0|0|TA0" : probablement encore une histoire de respirateur : si TA et FC vaut 0, alors FR aussi
d$RESP1 <- ifelse(d$cleCR=="FC0|0|TA0", 0, d$RESP1)
#"FC0|FR0|0" : quelq'un qui ne respire pas et n'a pas de pouls a une tenion nulle également
d$MEANBP1 <- ifelse(d$cleCR=="FC0|FR0|0", 0, d$MEANBP1)
#"FC0|FR0|TA0" : toutes les valeurs sont à 0 ok
#j'ai vérifié que d$DEATH idem que d$DEATH2 <- ifelse (!is.na(d$DTHDTE),1,0)
#Attention! les schéma ne donnent pas les NA pour les var quantitatives!
#Na : cat2, ADLD3P
#var bizarres : EDU : 30ans?(p-e...) ; URIN1: 9L? oui possible
#HRT1 : 0, >20 et plus de 200 ; MEANBP1: pic à 80 et pic à 140, pression >200? pression=0;
#RESP1 >50 oui c'est possible; temp1<30 oui possible; weight=0 le min et 244 le max (ok pour 244 mais pas pour 0)
#temp mini à 27° dans le fichier : c'est possible en cas d'hypothermie sévère (et hypothermie induite 30-32° parfois en réa dans cas particuliers)
#http://www.srlf.org/wp-content/uploads/2015/11/0505-Reanimation-Vol14-N3-p177_185.pdf
#var inconnues : surv2MD1, das2d3pc, t3d30, aps1, wblc1, pafi1
d[d$URIN1==5000 & !is.na(d$URIN1), var_exam]
#---------------
#refaire en coupant SCOMA1 en classe et recuperer la courbe de survie pour var qualitative
d$SCOMAsup90 <- ifelse(d$SCOMA1>90,1,0)
d$SCOMAcut <- cut(d$SCOMA1, breaks=5)
draw_surv_bin(var="SCOMAsup90", data=d, .time="time", .censor="censor", vec_time_IC= c(1, 3), type = "quali", surv_only=FALSE, pvalue = TRUE, dep_temps=FALSE, .transf=NULL)
draw_surv_bin(var="SCOMAsup90", data=d, .time="timeb", .censor="DTH30", vec_time_IC= c(7, 14), type = "quali", surv_only=FALSE, pvalue = TRUE, dep_temps=FALSE, .transf=NULL, .scale="day")
plot(d$SCOMA1, d$APS1) #le score APS ne nous aide pas
#plus score est élevé, plus le risque de deces est eleve.
ggsurv(survfit(Surv(time, censor)~SCOMAcut, data=d), order.legend =FALSE)
#Pas de doublons
d[duplicated(d$PTID),]
#recodage du poids:
d$WTKILO1 <- ifelse(d$WTKILO1<25, NA, d$WTKILO1) #kilo max 244 ok (j'en ai vu)
#HRT1
# oui impossible d'avoir une FR à 0 avec FC à 52 et vice versa. Si arrêt Cardio respi les 2 sont à 0.
# Natrémie en dessous de 110 possible et au dessus de 170 exceptionnel (ok si 1 seul).
# Pas possible pour hématocrite à 2 pour cent.
# Créât et bili 10 fois la normale : c'est possible.
# Albu 110 et 290 non possible...
#----------------
#refaire description
#Description
t1 <- table_var_quali_des <- describe_qualitative(vec_var = var_quali_des, .data=d)
t2 <- table_var_quanti_des <- describe_quantitative(vec_var = var_quanti_des, .data=d)
t3 <- describe_qualitative(var_com, d)#idem # summary(apply(d[ ,var_com],2,as.factor))
t4 <- describe_quantitative(var_exam, d)#summary(d[ ,var_exam])
t5 <- describe_qualitative(c("CA","CANCER","DNR1"), d)
t6 <- describe_qualitative(var_ad, d)
t7 <- describe_qualitative(c("DEATH","SWAN"), d)
obj <- rbind(t1,t2,t3,t4,t5,t6,t7)
write.table(print(obj), file="clipboard", sep="\t")
#
# table_var_quali_des <- describe_qualitative(vec_var = var_quali_des, .data=d)
# table_var_quanti_des <- describe_quantitative(vec_var = var_quanti_des, .data=d)
# table_var <- rbind(table_var_quali_des,table_var_quanti_des)
# kable(table_var)
# describe_qualitative(var_com, d)#idem # summary(apply(d[ ,var_com],2,as.factor))
# describe_quantitative(var_exam, d)#summary(d[ ,var_exam])
# describe_qualitative("CA", d)
# describe_qualitative(var_ad, d)
# describe_qualitative(c("DEATH","SWAN"), d)
# range(d$LSTCTDTE)
# range(d$SADMDTE)
# range(d$ddn)
#Nb de NA
#Nb de sujets avec 0, 1, 2, 3, 4 NA
table(apply(apply(d,2,is.na),1,sum))
#cb de NA pour chaque colonne
apply(apply(d,2,is.na),2,sum)
table(apply(apply(d,2,is.na),2,sum))
#distribution des items
.l <- lapply(colnames(d), function(x){
if (class(d[,x])=="Date") qplot(d[ ,x], main=x, xlab=NULL, fill=I("navajowhite3"), col=I("pink4"))
else{
if (length(names(table(d[,x])))>15) qplot(as.numeric(as.character(d[ ,x])), main=x, xlab=NULL, fill=I("navajowhite3"), col=I("pink4"))
else qplot(as.factor(d[ ,x]), main=x, xlab=NULL, fill=I("navajowhite3"), col=I("pink4"))
}
})
ml <- marrangeGrob(.l,ncol=3,nrow=3,top = NULL)
ggsave(file="distrib aft dm.pdf", ml)
summary(d)
#------------
d$DAS2D3PC
#TESTS BIVARIES :
#avec swanganz
list_swan <- lapply(c(var_ad, var_exam, var_com, vardes), function(x){
#x <- "CA"
# for (x in c(var_ad, var_exam, var_com, vardes)){
print(x)
d$var <- d[,x]
if (all(levels(as.factor(d$var)) %in% c(0,1))) d$var <- as.factor(d$var)
mod <- glm(SWAN~var,d, family="binomial")
test <- summary(mod)
#browser()
if (nrow(coef(test))>2){ #cas variable explicative qualitative
test <- drop1(mod, .~., test="Chisq")
ab <- test$`Pr(>Chi)`[2]
#SI on veut tester chaque classe, mais finalement je fais un drop 1
# for (i in (2:nrow(coef(test)))){
# ab1 <- coef(test)[i, "Pr(>|z|)"]
# ab <- if(i==2) ab1 else rbind(ab, ab1)
# }
ab <- round(ab, 3)
ab <- data.frame(ab)
ab$signif <- ifelse(ab$ab<0.05,"*","")
#ab$ab <- ifelse(ab$ab<0.001, "<0.001", ab$ab) #empeche de selectionner les bonnes variables après
#rownames(ab) <- paste(x, levels(d$var)[-1], "ref", levels(d$var)[1])
rownames(ab) <- x
} else {
#browser()
ab <- coef(test)[2, "Pr(>|z|)"]
ab <- round(ab, 3)
ab <- data.frame(ab)
ab$signif <- ifelse(ab$ab<0.05,"*","")
#ab$ab <- ifelse(ab$ab<0.001, "<0.001", ab$ab)
rownames(ab) <- x
}
colnames(ab) <- c("coef pvalue SWAN","significatif SWAN")
# }
return(ab)
})
list_swan <- do.call(rbind, list_swan)
list_death <- lapply(c(var_ad, var_exam, var_com, vardes), function(x){
#x <- "CA"
# for (x in c(var_ad, var_exam, var_com, vardes)){
print(x)
d$var <- d[,x]
if (all(levels(as.factor(d$var)) %in% c(0,1))) d$var <- as.factor(d$var)
#mod <- glm(DEATH~var,d, family="binomial")
mod <- glm(DTH30b~var,d, family="binomial")
test <- summary(mod)
#browser()
if (nrow(coef(test))>2){ #cas variable explicative qualitative
test <- drop1(mod, .~., test="Chisq")
ab <- test$`Pr(>Chi)`[2]
# for (i in (2:nrow(coef(test)))){
# ab1 <- coef(test)[i, "Pr(>|z|)"]
# ab <- if(i==2) ab1 else rbind(ab, ab1)
# }
ab <- round(ab, 3)
ab <- data.frame(ab)
ab$signif <- ifelse(ab$ab<0.05,"*","")
#ab$ab <- ifelse(ab$ab<0.001, "<0.001", ab$ab)
#rownames(ab) <- paste(x, levels(d$var)[-1], "ref", levels(d$var)[1])
rownames(ab) <- x
} else {
#browser()
ab <- coef(test)[2, "Pr(>|z|)"]
ab <- round(ab, 3)
ab <- data.frame(ab)
ab$signif <- ifelse(ab$ab<0.05,"*","")
#ab$ab <- ifelse(ab$ab<0.001, "<0.001", ab$ab)
rownames(ab) <- x
}
colnames(ab) <- c("coef pvalue DEATH","significatif DEATH")
# }
return(ab)
})
list_death <- do.call(rbind, list_death)
list_pval <- cbind(list_swan, list_death)
#prendre les valeurs : liées soit uniquement au décès, soit liées à la sonde et au décès (ce qui revient à prendre variable significative pour le décès ici)
list_pval$select <- ifelse (list_pval$`coef pvalue DEATH`<0.05, 1, 0)
#list_pval$select <- ifelse (list_pval$`coef pvalue DEATH`<0.05 & list_pval$`coef pvalue SWAN`<0.05, 1, 0) #ces variables sont déjà sélectionné par la ligne du dessus
list_pval[list_pval$select==1, ]
nrow(list_pval[list_pval$select==1, ])
dput(rownames(list_pval[list_pval$select==1,]))
#c(rownames(list_pval[list_pval$select==1,])[c(1:8,12:36)],"CA","INCOME","NINSCLAS","CAT1")
#dput(rownames(list_pval[list_pval$select==1, ])) #pour éviter de tout taper à la main!ya plus qu'à copier coller
#var avec DEATH
# varps <- c("RESP", "GASTR", "RENAL", "HEMA", "SEPS", "TRAUMA", "ADLD3P",
# "DAS2D3PC", "DNR1", "CA", "SURV2MD1", "APS1", "SCOMA1", "WTKILO1",
# "TEMP1", "MEANBP1", "PACO21", "PH1", "HEMA1", "POT1", "CREA1",
# "BILI1", "ALB1", "URIN1", "CARDIOHX", "CHFHX", "DEMENTHX", "PSYCHHX",
# "CHRPULHX", "LIVERHX", "MALIGHX", "IMMUNHX", "TRANSHX", "AGE",
# "INCOME", "NINSCLAS", "CAT1", "CAT2")
#var avec DTH30b
varps <- c("RESP", "CARD", "NEURO", "GASTR", "HEMA", "SEPS", "ADLD3P",
"DAS2D3PC", "DNR1", "CA", "SURV2MD1", "APS1", "SCOMA1", "WTKILO1",
"TEMP1", "MEANBP1", "PAFI1", "PACO21", "PH1", "WBLC1", "HEMA1",
"CREA1", "BILI1", "ALB1", "URIN1", "CARDIOHX", "CHFHX", "DEMENTHX",
"PSYCHHX", "CHRPULHX", "LIVERHX", "GIBLEDHX", "MALIGHX", "AGE",
"INCOME", "NINSCLAS", "CAT1", "CAT2")
#cb de NA pour chaque colonne
percNA <- round(apply(apply(d[,varps],2,is.na),2,sum)/nrow(d)*100,0)
namesNA <- names(percNA[percNA>50])
#je retire du score les variables avec plus de 50% de NA :ADLD3P, URIN1 et CAT2
varps <- varps[!varps%in% namesNA]
#----------------
#score de propension
ps <- glm(formula(paste0("SWAN ~ ",paste(varps,collapse="+"))), data = d, family="binomial")
d2 <- d[apply(apply(d[ ,varps], 2, is.na),1,sum)==0, ] #J'elimine les lignes avec au moins 1 NA dans les variables slectionnes varps
d2$logitps <- as.vector(predict(ps, type = "response")) #response is the default for binomial model
#Histogramme du score de propension en fonction du groupe de traitement
prs_df <- data.frame(pr_score = predict(ps, type = "response"),
SWAN = ps$model$SWAN)
labs <- paste("actual intervention:", c("no SWAN-GANZ", "SWAN-GANZ"))
prs_df %>%
mutate(SWAN = ifelse(SWAN == 1, labs[2], labs[1])) %>%
ggplot(aes(x = pr_score)) +
geom_histogram(color = "white") +
facet_wrap(~SWAN) +
xlab("Probability of having a SWAN GANZ") +
theme_bw()
#---------------------
#Analyse en utilisant la méthode des quantiles (non complet)
# QT <- quantile(prs_df$pr_score)
# prs_df$quantile <- ifelse (prs_df$pr_score<QT[2], 1, NA)
# prs_df$quantile <- ifelse (prs_df$pr_score>=QT[2] & prs_df$pr_score<QT[3], 2, prs_df$quantile)
# prs_df$quantile <- ifelse (prs_df$pr_score>=QT[3] & prs_df$pr_score<QT[4], 3, prs_df$quantile)
# prs_df$quantile <- ifelse (prs_df$pr_score>=QT[4] & prs_df$pr_score<QT[5], 4, prs_df$quantile)
# prs_df$quantile <- ifelse (prs_df$pr_score>QT[5], 5, prs_df$quantile)
d2$psgp <- cut(d2$logitps, breaks=quantile(d2$logitps, prob=0:4*0.25),
labels=c("Q1","Q2","Q3","Q4"), right=FALSE, include.lowest=TRUE)
#----------------
#Appariemment sur le score de propension:
#Package Matching : Le traitement (ici SWan Ganz) doit être en true false
#cours de David Hajage, MD PhD, département de biostatistiques de la Pitié Salpétriêre
d2$SWANT <- ifelse(d2$SWAN==1, T, F)
tmp <- Match(Tr = d2$SWANT, X = d2$logitps, M = 1, replace = FALSE, caliper = 0.2, ties = FALSE)
d2.app <- d2[c(tmp$index.treated, tmp$index.control),]#index.treated et index.control donne le numero des lignes selectionnees par le matching. Le premier individu de index.treated est matchée avec le 1er de index.ctrl.
d2.app$paire <- rep(1:length(tmp$index.treated), 2) #lignes de d2.app : d'abord les traités de chaque paire puis les control de chaque paire, donc on répète le numéro de paire
d2.app <- d2.app[order(d2.app$paire, d2.app$SWAN==1),] #on réordonne selon la paire SG(1)NSG(1) SG(2)NSG(2) etc
#=> donc ce tableau prend toutes les variables, uniquement les lignes correspondant aux individus matché et pour chaque individu on connait son numéro de paire
#package MatchIt
#https://stanford.edu/~ejdemyr/r-tutorials-archive/tutorial8.html#exercise
#https://stanford.edu/~ejdemyr/r-tutorials-archive/matching.R
mod_match <- matchit(formula(paste0("SWAN ~ ",paste(varps,collapse="+"))),
method = "nearest", replace = FALSE, ratio = 1, m.order = "smallest", caliper=0.2, data = d2[,c("SWAN",varps,"PTID")])
#MatchIt ne sait pas gérer les NA (même si les colonnes de la formule n'ont pas de NA) => JE dois préciser les colonnes qui m'interess dans data
#Pour retrouver les paires
matches<-data.frame(mod_match$match.matrix)
# > dim(matches)
# [1] 2025 1
#2025 lignes, qui correspondent aux 2025 individus traités (avant matching)
# > head(matches)
# X1
# 2 <NA>
# 5 927
# 10 2383
#le patient SWAN de la ligne nommée 2 du tableau d2 n'est matché avec aucun patient non SWAN : il faut eliminer la ligne
#le patient SWAN de la ligne nommée 5 du tableau d2 est matché avec le patient non SWAN de la ligne nommée 927
#J'élimine les les lignes avec NA (correspond aux traités qui n'ont pas été appariée)
matches <- na.omit(matches)
groupSG1<-match(row.names(matches), row.names(d2)) #donne la position de chaque patient traité dans le tableau d2
groupSG0<-match(matches$X1, row.names(d2)) #donne la position de chaque patient non traité dans d2
d2.appbis <- d2[c(groupSG1, groupSG0),]
d2.appbis$paire <- rep(1:length(groupSG1), 2)
d2.appbis <- d2.appbis[order(d2.appbis$paire, d2.appbis$SWAN==1), ]
#si jamais j'ai finalement besoin des distances :
dta_m <- match.data(mod_match)
dtm <- merge(d2.appbis, dta_m[,c("PTID","distance","weights")], by="PTID", all=T)
#-----------------
# Checking balance :
#a faire uniquement sur les variables servant à construire le score de propension, car permet de voir
#si le score et le matching ont bien marché
MatchBalance(formula(paste0("SWANT ~ ",paste(varps,collapse="+"))), data=d2) #package Matching
#METHODE 1 : QQPLOT
plot(mod_match)
#METHODE 2 :moyenne pour chaque variable à chaque point de score de popension
#Attention : variables quali : il faut les transformer en binaire
varps_quanti <- varps[sapply(varps, function(variable) length(levels(dtm[,variable]))<=2)]
varps_quali <- varps[!varps%in% varps_quanti]
#je transforme les variables qualitatives en binaire
dbis <- dtm
for (j in varps_quali){
num <- which(varps_quali==j)
a <- model.matrix( ~ dbis[ ,j])
#pour avoir un nom de variable reconnaissable dans les schéma (si on laisse tel quel ça donne dbis[ ,j] CHF par exemple)
colnames(a) <- gsub("dbis",j, colnames(a))
colnames(a) <- gsub("\\[", "", colnames(a))
colnames(a) <- gsub("\\]", "", colnames(a))
colnames(a) <- gsub("\\,", "", colnames(a))
colnames(a) <- gsub("j", "", colnames(a))
colnames(a) <- gsub(" ", "_", colnames(a))
#créer les variables binaires
for (i in 1:(length(colnames(a))-1)){
dbis[ ,colnames(a)[i+1]] <- a[ ,i+1]
}
#créer un vecteur avec les noms de variables bianires crées
vec_tmp <- colnames(a)[-1]
vec_var <- if(num==1) vec_tmp else c(vec_tmp, vec_var)
}
#retirer les variables qualitatives non binarisees
dbis[ ,varps_quali] <- NULL
#fonction qui plot la distribution des variables en fonciton du score de propension avant et après matching
fn_bal <- function(dta, variable) {
#browser()
dta$variable <- dta[, variable]
dta$SWAN <- as.factor(dta$SWAN)
ggplot(dta, aes(x = distance, y = variable, color = SWAN)) +
geom_point(alpha = 0.2, size = 1.5) +
geom_smooth(method = "loess", se = F) +
xlab("Propensity score") +
ylab(variable) +
theme_bw()
}
.l <- lapply(c(varps_quanti, vec_var), function(variable){
print(variable)
num <- which(c(varps_quanti, vec_var)==variable)
if (num %% 2 != 0) fn_bal(dbis, variable)
else fn_bal(dbis, variable) + theme(legend.position = "none")
})
ml <- marrangeGrob(.l, nrow=2, ncol=2, top = NULL)
ggsave(file="distrib mean variables after matching 20170222.pdf", ml)
#METHODE 3 : Print mean difference
varps_noql <- varps[!varps %in% c("INCOME", "NINSCLAS", "CAT1")]
dta_mQ <- dtm[ ,c(varps_noql,"SWAN")]
diff <- dta_mQ%>%
group_by(SWAN) %>%
summarise_all(funs(mean))
#https://cran.r-project.org/web/packages/tableone/vignettes/smd.html
#https://github.com/kaz-yos/tableone/blob/1d47ec186b2e351937e5f9712dad3881380ab12e/vignettes/smd.Rmd
tabUnmatched <- CreateTableOne(vars = c(varps_noql,"SWAN"), strata = "SWAN", data = d2[ ,c(varps_noql,"SWAN")], test = FALSE)
tabMatched <- CreateTableOne(vars = c(varps_noql,"SWAN"), strata = "SWAN", data = dta_mQ, test = FALSE)
## Construct a data frame containing variable name and SMD from all methods
dataPlot <- data.frame(variable = names(ExtractSmd(tabUnmatched)),
Unmatched = ExtractSmd(tabUnmatched),
Matched = ExtractSmd(tabMatched))
## Create long-format data for ggplot2
dataPlotMelt <- melt(data = dataPlot,
id.vars = c("variable"),
variable.name = "Method",
value.name = "SMD")
colnames(dataPlotMelt) <- c("variable","Method","SMD")
varNames <- as.character(dataPlot$variable)[order(dataPlot$Unmatched)]
## Order factor levels in the same order
dataPlotMelt$variable <- factor(dataPlotMelt$variable,
levels = varNames)
## Plot using ggplot2
#voir quoi faire avec var quali
ggplot(data = dataPlotMelt[dataPlotMelt$variable!= "SWAN", ], mapping = aes(x = variable, y = SMD,
group = Method, color = Method)) +
#geom_line() +
geom_point() +
geom_hline(yintercept = 0.1, color = "black", size = 0.1) +
coord_flip() +
theme_bw() + theme(legend.key = element_blank())
#METHODE 3 BIS standardized mean difference sans tableone
#attention la transformation des var quali a l'air fausse, retirée pour l'instant mais à vérifier
#rajouter unmatched
smd <- summary(mod_match, standardize = TRUE) #fait la balance pour chaque binaire tirée de la variable quali
smd <- smd$sum.matched
smd$var <- rownames(smd)
smd$group <- "matched"
g <- ggplot(data = smd[smd$var%in%varps,], aes(x=var, y= `Std. Mean Diff.`, group = group, color= group))
g <- g + geom_point() + geom_hline(yintercept = 0.1, color="red", size=0.1) +
coord_flip() + theme_bw() + theme(legend.key = element_blank())
g
#METHODE 4 IC à la main
#voir comment calculer IC pou var quali binarisés
getMIC <- function(.data){
dt <- lapply(names(.data), function(var){
num <- which(var==names(.data))
.data[,var] <- (.data[,var] - mean(.data[,var]))/sd(.data[,var]) #standardisation
x <- .data[.data$SWAN == 1, var]
df1 <- data.frame(mymean = mean(x),
ICminSG = mean(x) - 1.96*sqrt(var(x)/nrow(.data)),
ICmaxSG = mean(x) + 1.96*sqrt(var(x)/nrow(.data)),
myy = num*2-0.5,
groupe = "SWAN",
colour = 1)
x <- .data[.data$SWAN == 0, var]
df0 <- data.frame(mymean = mean(x),
ICminSG = mean(x) - 1.96*sqrt(var(x)/nrow(.data)),
ICmaxSG = mean(x) + 1.96*sqrt(var(x)/nrow(.data)),
myy = num*2+0.5,
groupe = "NO SWAN",
colour = 2)
df <- rbind(df1,df0)
})
dt <- do.call(rbind, dt)
dt$var <- rep(names(.data),each=2)
return(dt)
}
df <- getMIC(dta_m)
df <- na.omit(df) #retire les var quali dont les IC n'ont pas été calculés
head(df)
m<-tapply(df$myy, df$var, mean)
m<-data.frame(var=names(m), y=m)
head(m)
df2<-merge(df, m, by="var", all=T)
df2<-df2[order(df2$var, df2$group),]
df2$y<-ifelse(df2$groupe=="SWAN", 0.25, -0.25)+df2$y
head(df2)
laby<-unique(df2$var)
col <- hue_pal()(length(1:2))
g <- ggplot(data=df2, aes(x=mymean, xmin = ICminSG, y = y, xend = ICmaxSG, colour=groupe)) + geom_point()
g <- g + labs(y = "variable")
g<-g+scale_y_continuous(name="Variables", breaks=m$y, labels=m$var)
g
for (i in 1:nrow(df)){
g <- g + geom_segment(x = df2$ICminSG[i], y = df2$y[i], xend = df2$ICmaxSG[i], yend = df2$y[i], colour = col[df2$colour[i]])
g <- g + geom_segment(x = df2$ICminSG[i], y = df2$y[i]-0.5, xend = df2$ICminSG[i], yend = df2$y[i]+0.5, colour = col[df2$colour[i]])
g <- g + geom_segment(x = df2$ICmaxSG[i], y = df2$y[i]-0.5, xend = df2$ICmaxSG[i], yend = df2$y[i]+0.5, colour = col[df2$colour[i]])
}
g
#reste à faire pour le schéma :
#- vérifier que j'ai bien standardisé
#- enlever les trous et séparer les variables (retirer les NA puis numéroter les y en séparant les groupes de variables) : (1:20)[1:20 %% 3 !=0]
#---------
#analysis
#http://imai.princeton.edu/research/files/matchit.pdf
#journals.sfu.ca/jmde/index.php/jmde_1/article/download/431/414
#http://r.iq.harvard.edu/docs/matchit/2.4-20/matchit.pdf
#Analyse avec d2.app (methode David Hajage)
#d2.app$DEATH <- as.numeric(as.character(d2.app$DEATH))
d2.app$DTH30b <- as.numeric(as.character(d2.app$DTH30b))
d2.app$SWAN <- as.numeric(as.character(d2.app$SWAN))
#clogit(DEATH~SWAN + cluster(paire), method="efron", data=d2.app) #marche avec efron
clogit(DTH30b~SWAN + cluster(paire), method="efron", data=d2.app) #marche avec efron
#coxph(Surv(rep(1, nrow(d2.app)),DEATH)~SWAN + cluster(paire), data=d2.app) #marche mais est-ce vraiment la même chose?
coxph(Surv(rep(1, nrow(d2.app)),DTH30b)~SWAN + cluster(paire), data=d2.app) #marche mais est-ce vraiment la même chose?
#glm(DEATH~SWAN, data=d2.app, family="binomial")
glm(DTH30b~SWAN, data=d2.app, family="binomial")
#glmer(DEATH~SWAN + (1|paire), data=d2.app, family = "binomial")
glmer(DTH30b~SWAN + (1|paire), data=d2.app, family = "binomial")
#analyse avec mod_matchet dtm
#Paired t-test
t.test(as.numeric(matched.cases$yT), as.numeric(matched.cases$yC), paired = TRUE)
#-------------------
#QUESTIONS
#J'ai trop de variables, je ne sais pas quoi faire
# var <- c("AGE", "SCOMA1", "WTKILO1", "TEMP1", "MEANBP1", "PACO21", "PH1",
# "HEMA1", "POT1", "CREA1", "BILI1", "ALB1" ,"URIN1" )
#"SURV2MD1" je ne comprends pas ce que c'est
#"CA" JE ne sais pas s'il faut la mettre quand on a CA aussi, s'il faut garder CA s'il faut la transformer cancer 0/1 et meta 0/1
#Pour les variables qualitatives, je fais drop ou je sélectionne par paramètre et je ne prends que les classes significatives? Est-ce que je les garde dans le SP en les transformant en var binaire?
#Comment faire pour les variables avec beaucoup de données manquantes?
#quelle formule utiliser pour différence standardisée des moyennes : moyenne ctrl - moyenne
#faut-il faire la balance pour toutes les variables ou uniquement variables du score de ps
#comment checker la balance des variables qualitatives (tableone calcule une moyenne standardisée pour ces variables je ne vois pas comment?)
# que faire des var quali : quand on selectionne var pour score de propension, quand on regarde la balance: transformer en bianaire? faire drop pour la selection? que plotter pour vérifier la balance? tableone plot qqch mais je ene sais pas quoi...
#comment on met les paires dans le modèle?
#lien avec la mort : regression logistique ou cox??
#modele final : ne vaut-il pas mieux faire une analyse de survie??
table(table(unique(d$ROWNAMES)))
levels(d$CAT1)
levels(d$CAT2)
levels (d$CA)
l <- lapply(colnames(d), function(x) {
vx <- d[ ,x]
if(is.factor(vx)) {
res <- levels(vx)
return(c(x, res))
} else NULL
})
do.call(rbind,l)
unlist(l)
range(d$SADMDTE)
range(d$DSCHDTE, na.rm=T)
range(d$DTHDTE, na.rm=T)
range(d$LSTCTDTE, na.rm=T)
d[1:5, c("SADMDTE", "DSCHDTE", "DTHDTE", "LSTCTDTE")]
#0.2695
summary(d$RESP)
|
/src/epid_data_management.R
|
no_license
|
feldmans/M2MSR-epid
|
R
| false | false | 29,811 |
r
|
d <- read.csv2("data/rhc_devoir_epidemio.csv")
head(d)
str(d)
dim(d)
summary(d)
d$SADMDTE <- as.Date(d$SADMDTE,"%d/%m/%Y")
d$DSCHDTE <- as.Date(d$DSCHDTE,"%d/%m/%Y")
d$DTHDTE <- as.Date(d$DTHDTE,"%d/%m/%Y")
d$LSTCTDTE <- as.Date(d$LSTCTDTE,"%d/%m/%Y")
for (i in c("DEATH", "DTH30", "DNR1", "RESP", "CARD", "NEURO", "GASTR", "RENAL", "META", "HEMA", "SEPS", "TRAUMA", "ORTHO")) {
d[ ,i] <- as.character(d[,i])
d[,i] <- ifelse (d[ ,i]=="No", 0, d[ ,i])
d[,i] <- ifelse (d[ ,i]=="Yes", 1, d[ ,i])
d[,i] <- as.numeric(d[,i])
}
#Pour transformer les "" en NA
#plus long que apply mais garde la bonne structure
for (x in colnames(d)){
if(is.factor(d[ ,x])) {
if(length(d[d[ ,x]=="", x])!=0) {
d[d[ ,x]=="", x] <- NA
d[ ,x] <- factor(d[,x])
}
}
}
#marche pas
# d2 <- lapply(colnames(d), function(x) {
# vx <- d[ ,x]
# if(is.factor(vx)) {
# if(length(d[d[ ,x]=="", x])!=0) d[d[ ,x]=="", x] <- NA
# }
# return(d[ ,x])
# })
# d2 <- do.call(cbind,d2)
#
# #transforme tout en facteur:
# d2 <- apply(d, 2, function(x) {
# if(is.factor(x)) {
# if(length(x[x==""])!=0) x[x==""] <- NA
# else x <-x
# }
# else x<-x
# return(x)
# })
# d2 <- data.frame(d2)
#Je préfère que PTID soit un caractère pour éviter un gag si le numéro de ligne n'est pas le même que PTID
d$PTID <- paste0("A", d$PTID)
#variables descriptives
vardes <- c("AGE", "SEX", "RACE", "EDU", "INCOME", "NINSCLAS", "CAT1", "CAT2")
var_quali_des <- c("SEX", "RACE", "INCOME", "NINSCLAS", "CAT1", "CAT2")
var_quanti_des <- c("AGE","EDU")
var_outcome <- c("DEATH","SWANG1")
var_date <- c("SADMDTE", "DSCHDTE", "DTHDTE", "LSTCTDTE")
# variables de comorbidités
var_com <- c("CARDIOHX", "CHFHX", "DEMENTHX", "PSYCHHX", "CHRPULHX", "RENALHX",
"LIVERHX", "GIBLEDHX", "MALIGHX", "IMMUNHX", "TRANSHX", "AMIHX")
#variables de l'examen clinique et paraclinique
var_exam <- c("ADLD3P", "DAS2D3PC", "DNR1", "CA", "SURV2MD1", "APS1", "SCOMA1", "WTKILO1", "TEMP1",
"MEANBP1", "RESP1", "HRT1", "PAFI1", "PACO21", "PH1", "WBLC1", "HEMA1", "SOD1", "POT1",
"CREA1", "BILI1", "ALB1", "URIN1")
#var admission diagnosis
var_ad <- c("RESP", "CARD", "NEURO", "GASTR", "RENAL", "META", "HEMA", "SEPS", "TRAUMA", "ORTHO")
# #CA va poser problème pour le modèle linéaire car cancer oui sera comparé à métastase, alors que ce sont des cancers oui également
# #je veux aussi comparer cancer à pas de cancer
# d$CANCER <- NA
# d$CANCER <- ifelse (!is.na(d$CA) & (d$CA=="Yes" | d$CA=="Metastatic"), 1, d$CANCER)
# d$CANCER <- ifelse (!is.na(d$CA) & d$CA=="No" , 0, d$CANCER)
# d$METASTASE <- ifelse (!is.na(d$CA) & d$CA=="Metastatic", 1, d$CA)
# d$METASTASE <- ifelse (!is.na(d$CA) & (d$CA=="Yes" | d$CA=="No"), 0, d$CA)
# var_exam <- c(var_exam, "CANCER", "METASTASE")
colnames(d)[!colnames(d) %in% c(var_ad, var_exam, var_com, vardes, var_outcome)]
#%2 variables ne sont pas dans le cahier des variables : T3D30(? de 2 à 30) et DTH30(0ou1, probablement décès à 30j)
#Outcomes
d$SWAN <- as.character(d$SWANG1)
d[d$SWAN=="No RHC","SWAN"] <- 0
d[d$SWAN=="RHC","SWAN"] <- 1
d$SWAN <- as.factor(d$SWAN)
d$DEATH <- as.factor(d$DEATH)
var_outcome <- c(var_outcome, "SWAN")
#var pour analyse de survie
table(is.na(d$LSTCTDTE)) #pas de perdu de vu pour la date de dernier contact
d$ddn <- as_date(ifelse(is.na(d$DTHDTE), d$LSTCTDTE, d$DTHDTE))
d$time <- as.numeric(d$ddn - d$SADMDTE) #temps de suivi en j entre la date des dernières nouvelles (décès ou date de dernier contact)
d$censor <- ifelse (!is.na(d$DTHDTE), 1, 0)
#avant de recréer la variable DTH30, je vérifie que tous les sujets sont bien suivis au moins 30j
table(d$time<30, d$censor) #j'ai 8 sujets qui sont perdus de vu avant 30 jours
namesDTH30_NA <- d[d$time<30 & d$censor==0, "PTID"]
d$DTH30b <- NA
d$DTH30b <- ifelse (d$time<=30 & d$censor==1, 1, d$DTH30b)
d$DTH30b <- ifelse (d$time>30, 0, d$DTH30b)
table(d$DTH30b)
table(d$DTH30)
#Je ne suis pas d'accord avec la variable DTH30 qui note 0 les sujets perdus de vu avant 30 j. Ce serait ok pour un modele de survie prenant en compte de le temps de suivi mais
#pas pour une régression logistique.
#pour faire analyse de survie avec DTH30:
d$timeb <- ifelse(d$time>30, 30, d$time)
# d$DEATH2 <- ifelse(!is.na(d$DTHDTE), 1, 0)
# table(d$DEATH==d$DEATH2) #all true => Je peux utiliser l'une ou l'autre des variables, ont probablement été créées de la meme marnière
#----------
#Description
t1 <- table_var_quali_des <- describe_qualitative(vec_var = var_quali_des_des, .data=d)
t2 <- table_var_quanti_des <- describe_quantitative(vec_var = var_quanti_des, .data=d)
t3 <- describe_qualitative(var_com, d)#idem # summary(apply(d[ ,var_com],2,as.factor))
t4 <- describe_quantitative(var_exam, d)#summary(d[ ,var_exam])
t5 <- describe_qualitative(c("CA","CANCER","DNR1"), d)
t6 <- describe_qualitative(var_ad, d)
t7 <- describe_qualitative(c("DEATH","SWAN"), d)
obj <- rbind(t1,t2,t3,t4,t5,t6,t7)
write.table(print(obj), file="clipboard", sep="\t")
range(d$LSTCTDTE)
range(d$SADMDTE)
range(d$ddn)
#Nb de NA
#Nb de sujets avec 0, 1, 2, 3, 4 NA
table(apply(apply(d,2,is.na),1,sum))
#cb de NA pour chaque colonne
apply(apply(d,2,is.na),2,sum)
table(apply(apply(d,2,is.na),2,sum))
#distribution des items
.l <- lapply(colnames(d), function(x){
if (class(d[,x])=="Date") {
#browser()
qplot(d[ ,x], main=x, xlab=NULL, fill=I("navajowhite3"), col=I("pink4"))
}
else{
if (length(names(table(d[,x])))>15) qplot(as.numeric(as.character(d[ ,x])), main=x, xlab=NULL, fill=I("navajowhite3"), col=I("pink4"))
else qplot(as.factor(d[ ,x]), main=x, xlab=NULL, fill=I("navajowhite3"), col=I("pink4"))
}
})
ml <- marrangeGrob(.l,ncol=3,nrow=3,top = NULL)
ggsave(file="distrib bef dm.pdf", ml)
print(ml)
#-------------
#verif
HR0 <- ifelse(d$HRT1==0, "FC0", ifelse(!is.na(d$HRT1),0,NA))
FC0 <- ifelse(d$RESP1==0, "FR0", ifelse(!is.na(d$RESP1),0,NA))
TA0 <- ifelse(d$MEANBP1==0, "TA0", ifelse(!is.na(d$MEANBP1),0,NA))
#pas de NA de toutes façons pour ces valeurs
d$cleCR <- paste(HR0, FC0, TA0, sep="|")
table(d$cleCR)
d[d$cleCR=="FC0|FR0|0", c("HRT1", "RESP1", "MEANBP1", "DTH30", "SCOMA1", "TEMP1")]
#"0|0|0" :aucune valeure ne vaut 0, ok
#"0|0|TA0" : LA tension artérielle vaut 0 mais pas le reste : probable erreur de mesure
d$MEANBP1 <- ifelse(d$cleCR=="0|0|TA0", NA, d$MEANBP1)
#"0|FR0|0" : réa probablement pas d'accord sur quoi mesurer faire en cas de respirateur : chercher variable respirateur, sinon garder telle quelle?
#"0|FR0|TA0" : TA 0 et respi 0, je mets la FC à 0 aussi
d$HRT1 <- ifelse(d$cleCR=="0|FR0|TA0", 0, d$HRT1)
#"FC0|0|0" : seul la FC vaut0 : erreur, FC =NA
d$HRT1 <- ifelse(d$cleCR=="FC0|0|0", NA, d$HRT1)
#"FC0|0|TA0" : probablement encore une histoire de respirateur : si TA et FC vaut 0, alors FR aussi
d$RESP1 <- ifelse(d$cleCR=="FC0|0|TA0", 0, d$RESP1)
#"FC0|FR0|0" : quelq'un qui ne respire pas et n'a pas de pouls a une tenion nulle également
d$MEANBP1 <- ifelse(d$cleCR=="FC0|FR0|0", 0, d$MEANBP1)
#"FC0|FR0|TA0" : toutes les valeurs sont à 0 ok
#j'ai vérifié que d$DEATH idem que d$DEATH2 <- ifelse (!is.na(d$DTHDTE),1,0)
#Attention! les schéma ne donnent pas les NA pour les var quantitatives!
#Na : cat2, ADLD3P
#var bizarres : EDU : 30ans?(p-e...) ; URIN1: 9L? oui possible
#HRT1 : 0, >20 et plus de 200 ; MEANBP1: pic à 80 et pic à 140, pression >200? pression=0;
#RESP1 >50 oui c'est possible; temp1<30 oui possible; weight=0 le min et 244 le max (ok pour 244 mais pas pour 0)
#temp mini à 27° dans le fichier : c'est possible en cas d'hypothermie sévère (et hypothermie induite 30-32° parfois en réa dans cas particuliers)
#http://www.srlf.org/wp-content/uploads/2015/11/0505-Reanimation-Vol14-N3-p177_185.pdf
#var inconnues : surv2MD1, das2d3pc, t3d30, aps1, wblc1, pafi1
d[d$URIN1==5000 & !is.na(d$URIN1), var_exam]
#---------------
#refaire en coupant SCOMA1 en classe et recuperer la courbe de survie pour var qualitative
d$SCOMAsup90 <- ifelse(d$SCOMA1>90,1,0)
d$SCOMAcut <- cut(d$SCOMA1, breaks=5)
draw_surv_bin(var="SCOMAsup90", data=d, .time="time", .censor="censor", vec_time_IC= c(1, 3), type = "quali", surv_only=FALSE, pvalue = TRUE, dep_temps=FALSE, .transf=NULL)
draw_surv_bin(var="SCOMAsup90", data=d, .time="timeb", .censor="DTH30", vec_time_IC= c(7, 14), type = "quali", surv_only=FALSE, pvalue = TRUE, dep_temps=FALSE, .transf=NULL, .scale="day")
plot(d$SCOMA1, d$APS1) #le score APS ne nous aide pas
#plus score est élevé, plus le risque de deces est eleve.
ggsurv(survfit(Surv(time, censor)~SCOMAcut, data=d), order.legend =FALSE)
#Pas de doublons
d[duplicated(d$PTID),]
#recodage du poids:
d$WTKILO1 <- ifelse(d$WTKILO1<25, NA, d$WTKILO1) #kilo max 244 ok (j'en ai vu)
#HRT1
# oui impossible d'avoir une FR à 0 avec FC à 52 et vice versa. Si arrêt Cardio respi les 2 sont à 0.
# Natrémie en dessous de 110 possible et au dessus de 170 exceptionnel (ok si 1 seul).
# Pas possible pour hématocrite à 2 pour cent.
# Créât et bili 10 fois la normale : c'est possible.
# Albu 110 et 290 non possible...
#----------------
#refaire description
#Description
t1 <- table_var_quali_des <- describe_qualitative(vec_var = var_quali_des, .data=d)
t2 <- table_var_quanti_des <- describe_quantitative(vec_var = var_quanti_des, .data=d)
t3 <- describe_qualitative(var_com, d)#idem # summary(apply(d[ ,var_com],2,as.factor))
t4 <- describe_quantitative(var_exam, d)#summary(d[ ,var_exam])
t5 <- describe_qualitative(c("CA","CANCER","DNR1"), d)
t6 <- describe_qualitative(var_ad, d)
t7 <- describe_qualitative(c("DEATH","SWAN"), d)
obj <- rbind(t1,t2,t3,t4,t5,t6,t7)
write.table(print(obj), file="clipboard", sep="\t")
#
# table_var_quali_des <- describe_qualitative(vec_var = var_quali_des, .data=d)
# table_var_quanti_des <- describe_quantitative(vec_var = var_quanti_des, .data=d)
# table_var <- rbind(table_var_quali_des,table_var_quanti_des)
# kable(table_var)
# describe_qualitative(var_com, d)#idem # summary(apply(d[ ,var_com],2,as.factor))
# describe_quantitative(var_exam, d)#summary(d[ ,var_exam])
# describe_qualitative("CA", d)
# describe_qualitative(var_ad, d)
# describe_qualitative(c("DEATH","SWAN"), d)
# range(d$LSTCTDTE)
# range(d$SADMDTE)
# range(d$ddn)
#Nb de NA
#Nb de sujets avec 0, 1, 2, 3, 4 NA
table(apply(apply(d,2,is.na),1,sum))
#cb de NA pour chaque colonne
apply(apply(d,2,is.na),2,sum)
table(apply(apply(d,2,is.na),2,sum))
#distribution des items
.l <- lapply(colnames(d), function(x){
if (class(d[,x])=="Date") qplot(d[ ,x], main=x, xlab=NULL, fill=I("navajowhite3"), col=I("pink4"))
else{
if (length(names(table(d[,x])))>15) qplot(as.numeric(as.character(d[ ,x])), main=x, xlab=NULL, fill=I("navajowhite3"), col=I("pink4"))
else qplot(as.factor(d[ ,x]), main=x, xlab=NULL, fill=I("navajowhite3"), col=I("pink4"))
}
})
ml <- marrangeGrob(.l,ncol=3,nrow=3,top = NULL)
ggsave(file="distrib aft dm.pdf", ml)
summary(d)
#------------
d$DAS2D3PC
#TESTS BIVARIES :
#avec swanganz
list_swan <- lapply(c(var_ad, var_exam, var_com, vardes), function(x){
#x <- "CA"
# for (x in c(var_ad, var_exam, var_com, vardes)){
print(x)
d$var <- d[,x]
if (all(levels(as.factor(d$var)) %in% c(0,1))) d$var <- as.factor(d$var)
mod <- glm(SWAN~var,d, family="binomial")
test <- summary(mod)
#browser()
if (nrow(coef(test))>2){ #cas variable explicative qualitative
test <- drop1(mod, .~., test="Chisq")
ab <- test$`Pr(>Chi)`[2]
#SI on veut tester chaque classe, mais finalement je fais un drop 1
# for (i in (2:nrow(coef(test)))){
# ab1 <- coef(test)[i, "Pr(>|z|)"]
# ab <- if(i==2) ab1 else rbind(ab, ab1)
# }
ab <- round(ab, 3)
ab <- data.frame(ab)
ab$signif <- ifelse(ab$ab<0.05,"*","")
#ab$ab <- ifelse(ab$ab<0.001, "<0.001", ab$ab) #empeche de selectionner les bonnes variables après
#rownames(ab) <- paste(x, levels(d$var)[-1], "ref", levels(d$var)[1])
rownames(ab) <- x
} else {
#browser()
ab <- coef(test)[2, "Pr(>|z|)"]
ab <- round(ab, 3)
ab <- data.frame(ab)
ab$signif <- ifelse(ab$ab<0.05,"*","")
#ab$ab <- ifelse(ab$ab<0.001, "<0.001", ab$ab)
rownames(ab) <- x
}
colnames(ab) <- c("coef pvalue SWAN","significatif SWAN")
# }
return(ab)
})
list_swan <- do.call(rbind, list_swan)
list_death <- lapply(c(var_ad, var_exam, var_com, vardes), function(x){
#x <- "CA"
# for (x in c(var_ad, var_exam, var_com, vardes)){
print(x)
d$var <- d[,x]
if (all(levels(as.factor(d$var)) %in% c(0,1))) d$var <- as.factor(d$var)
#mod <- glm(DEATH~var,d, family="binomial")
mod <- glm(DTH30b~var,d, family="binomial")
test <- summary(mod)
#browser()
if (nrow(coef(test))>2){ #cas variable explicative qualitative
test <- drop1(mod, .~., test="Chisq")
ab <- test$`Pr(>Chi)`[2]
# for (i in (2:nrow(coef(test)))){
# ab1 <- coef(test)[i, "Pr(>|z|)"]
# ab <- if(i==2) ab1 else rbind(ab, ab1)
# }
ab <- round(ab, 3)
ab <- data.frame(ab)
ab$signif <- ifelse(ab$ab<0.05,"*","")
#ab$ab <- ifelse(ab$ab<0.001, "<0.001", ab$ab)
#rownames(ab) <- paste(x, levels(d$var)[-1], "ref", levels(d$var)[1])
rownames(ab) <- x
} else {
#browser()
ab <- coef(test)[2, "Pr(>|z|)"]
ab <- round(ab, 3)
ab <- data.frame(ab)
ab$signif <- ifelse(ab$ab<0.05,"*","")
#ab$ab <- ifelse(ab$ab<0.001, "<0.001", ab$ab)
rownames(ab) <- x
}
colnames(ab) <- c("coef pvalue DEATH","significatif DEATH")
# }
return(ab)
})
list_death <- do.call(rbind, list_death)
list_pval <- cbind(list_swan, list_death)
#prendre les valeurs : liées soit uniquement au décès, soit liées à la sonde et au décès (ce qui revient à prendre variable significative pour le décès ici)
list_pval$select <- ifelse (list_pval$`coef pvalue DEATH`<0.05, 1, 0)
#list_pval$select <- ifelse (list_pval$`coef pvalue DEATH`<0.05 & list_pval$`coef pvalue SWAN`<0.05, 1, 0) #ces variables sont déjà sélectionné par la ligne du dessus
list_pval[list_pval$select==1, ]
nrow(list_pval[list_pval$select==1, ])
dput(rownames(list_pval[list_pval$select==1,]))
#c(rownames(list_pval[list_pval$select==1,])[c(1:8,12:36)],"CA","INCOME","NINSCLAS","CAT1")
#dput(rownames(list_pval[list_pval$select==1, ])) #pour éviter de tout taper à la main!ya plus qu'à copier coller
#var avec DEATH
# varps <- c("RESP", "GASTR", "RENAL", "HEMA", "SEPS", "TRAUMA", "ADLD3P",
# "DAS2D3PC", "DNR1", "CA", "SURV2MD1", "APS1", "SCOMA1", "WTKILO1",
# "TEMP1", "MEANBP1", "PACO21", "PH1", "HEMA1", "POT1", "CREA1",
# "BILI1", "ALB1", "URIN1", "CARDIOHX", "CHFHX", "DEMENTHX", "PSYCHHX",
# "CHRPULHX", "LIVERHX", "MALIGHX", "IMMUNHX", "TRANSHX", "AGE",
# "INCOME", "NINSCLAS", "CAT1", "CAT2")
#var avec DTH30b
varps <- c("RESP", "CARD", "NEURO", "GASTR", "HEMA", "SEPS", "ADLD3P",
"DAS2D3PC", "DNR1", "CA", "SURV2MD1", "APS1", "SCOMA1", "WTKILO1",
"TEMP1", "MEANBP1", "PAFI1", "PACO21", "PH1", "WBLC1", "HEMA1",
"CREA1", "BILI1", "ALB1", "URIN1", "CARDIOHX", "CHFHX", "DEMENTHX",
"PSYCHHX", "CHRPULHX", "LIVERHX", "GIBLEDHX", "MALIGHX", "AGE",
"INCOME", "NINSCLAS", "CAT1", "CAT2")
#cb de NA pour chaque colonne
percNA <- round(apply(apply(d[,varps],2,is.na),2,sum)/nrow(d)*100,0)
namesNA <- names(percNA[percNA>50])
#je retire du score les variables avec plus de 50% de NA :ADLD3P, URIN1 et CAT2
varps <- varps[!varps%in% namesNA]
#----------------
#score de propension
ps <- glm(formula(paste0("SWAN ~ ",paste(varps,collapse="+"))), data = d, family="binomial")
d2 <- d[apply(apply(d[ ,varps], 2, is.na),1,sum)==0, ] #J'elimine les lignes avec au moins 1 NA dans les variables slectionnes varps
d2$logitps <- as.vector(predict(ps, type = "response")) #response is the default for binomial model
#Histogramme du score de propension en fonction du groupe de traitement
prs_df <- data.frame(pr_score = predict(ps, type = "response"),
SWAN = ps$model$SWAN)
labs <- paste("actual intervention:", c("no SWAN-GANZ", "SWAN-GANZ"))
prs_df %>%
mutate(SWAN = ifelse(SWAN == 1, labs[2], labs[1])) %>%
ggplot(aes(x = pr_score)) +
geom_histogram(color = "white") +
facet_wrap(~SWAN) +
xlab("Probability of having a SWAN GANZ") +
theme_bw()
#---------------------
#Analyse en utilisant la méthode des quantiles (non complet)
# QT <- quantile(prs_df$pr_score)
# prs_df$quantile <- ifelse (prs_df$pr_score<QT[2], 1, NA)
# prs_df$quantile <- ifelse (prs_df$pr_score>=QT[2] & prs_df$pr_score<QT[3], 2, prs_df$quantile)
# prs_df$quantile <- ifelse (prs_df$pr_score>=QT[3] & prs_df$pr_score<QT[4], 3, prs_df$quantile)
# prs_df$quantile <- ifelse (prs_df$pr_score>=QT[4] & prs_df$pr_score<QT[5], 4, prs_df$quantile)
# prs_df$quantile <- ifelse (prs_df$pr_score>QT[5], 5, prs_df$quantile)
d2$psgp <- cut(d2$logitps, breaks=quantile(d2$logitps, prob=0:4*0.25),
labels=c("Q1","Q2","Q3","Q4"), right=FALSE, include.lowest=TRUE)
#----------------
#Appariemment sur le score de propension:
#Package Matching : Le traitement (ici SWan Ganz) doit être en true false
#cours de David Hajage, MD PhD, département de biostatistiques de la Pitié Salpétriêre
d2$SWANT <- ifelse(d2$SWAN==1, T, F)
tmp <- Match(Tr = d2$SWANT, X = d2$logitps, M = 1, replace = FALSE, caliper = 0.2, ties = FALSE)
d2.app <- d2[c(tmp$index.treated, tmp$index.control),]#index.treated et index.control donne le numero des lignes selectionnees par le matching. Le premier individu de index.treated est matchée avec le 1er de index.ctrl.
d2.app$paire <- rep(1:length(tmp$index.treated), 2) #lignes de d2.app : d'abord les traités de chaque paire puis les control de chaque paire, donc on répète le numéro de paire
d2.app <- d2.app[order(d2.app$paire, d2.app$SWAN==1),] #on réordonne selon la paire SG(1)NSG(1) SG(2)NSG(2) etc
#=> donc ce tableau prend toutes les variables, uniquement les lignes correspondant aux individus matché et pour chaque individu on connait son numéro de paire
#package MatchIt
#https://stanford.edu/~ejdemyr/r-tutorials-archive/tutorial8.html#exercise
#https://stanford.edu/~ejdemyr/r-tutorials-archive/matching.R
mod_match <- matchit(formula(paste0("SWAN ~ ",paste(varps,collapse="+"))),
method = "nearest", replace = FALSE, ratio = 1, m.order = "smallest", caliper=0.2, data = d2[,c("SWAN",varps,"PTID")])
#MatchIt ne sait pas gérer les NA (même si les colonnes de la formule n'ont pas de NA) => JE dois préciser les colonnes qui m'interess dans data
#Pour retrouver les paires
matches<-data.frame(mod_match$match.matrix)
# > dim(matches)
# [1] 2025 1
#2025 lignes, qui correspondent aux 2025 individus traités (avant matching)
# > head(matches)
# X1
# 2 <NA>
# 5 927
# 10 2383
#le patient SWAN de la ligne nommée 2 du tableau d2 n'est matché avec aucun patient non SWAN : il faut eliminer la ligne
#le patient SWAN de la ligne nommée 5 du tableau d2 est matché avec le patient non SWAN de la ligne nommée 927
#J'élimine les les lignes avec NA (correspond aux traités qui n'ont pas été appariée)
matches <- na.omit(matches)
groupSG1<-match(row.names(matches), row.names(d2)) #donne la position de chaque patient traité dans le tableau d2
groupSG0<-match(matches$X1, row.names(d2)) #donne la position de chaque patient non traité dans d2
d2.appbis <- d2[c(groupSG1, groupSG0),]
d2.appbis$paire <- rep(1:length(groupSG1), 2)
d2.appbis <- d2.appbis[order(d2.appbis$paire, d2.appbis$SWAN==1), ]
#si jamais j'ai finalement besoin des distances :
dta_m <- match.data(mod_match)
dtm <- merge(d2.appbis, dta_m[,c("PTID","distance","weights")], by="PTID", all=T)
#-----------------
# Checking balance :
#a faire uniquement sur les variables servant à construire le score de propension, car permet de voir
#si le score et le matching ont bien marché
MatchBalance(formula(paste0("SWANT ~ ",paste(varps,collapse="+"))), data=d2) #package Matching
#METHODE 1 : QQPLOT
plot(mod_match)
#METHODE 2 :moyenne pour chaque variable à chaque point de score de popension
#Attention : variables quali : il faut les transformer en binaire
varps_quanti <- varps[sapply(varps, function(variable) length(levels(dtm[,variable]))<=2)]
varps_quali <- varps[!varps%in% varps_quanti]
#je transforme les variables qualitatives en binaire
dbis <- dtm
for (j in varps_quali){
num <- which(varps_quali==j)
a <- model.matrix( ~ dbis[ ,j])
#pour avoir un nom de variable reconnaissable dans les schéma (si on laisse tel quel ça donne dbis[ ,j] CHF par exemple)
colnames(a) <- gsub("dbis",j, colnames(a))
colnames(a) <- gsub("\\[", "", colnames(a))
colnames(a) <- gsub("\\]", "", colnames(a))
colnames(a) <- gsub("\\,", "", colnames(a))
colnames(a) <- gsub("j", "", colnames(a))
colnames(a) <- gsub(" ", "_", colnames(a))
#créer les variables binaires
for (i in 1:(length(colnames(a))-1)){
dbis[ ,colnames(a)[i+1]] <- a[ ,i+1]
}
#créer un vecteur avec les noms de variables bianires crées
vec_tmp <- colnames(a)[-1]
vec_var <- if(num==1) vec_tmp else c(vec_tmp, vec_var)
}
#retirer les variables qualitatives non binarisees
dbis[ ,varps_quali] <- NULL
#fonction qui plot la distribution des variables en fonciton du score de propension avant et après matching
fn_bal <- function(dta, variable) {
#browser()
dta$variable <- dta[, variable]
dta$SWAN <- as.factor(dta$SWAN)
ggplot(dta, aes(x = distance, y = variable, color = SWAN)) +
geom_point(alpha = 0.2, size = 1.5) +
geom_smooth(method = "loess", se = F) +
xlab("Propensity score") +
ylab(variable) +
theme_bw()
}
.l <- lapply(c(varps_quanti, vec_var), function(variable){
print(variable)
num <- which(c(varps_quanti, vec_var)==variable)
if (num %% 2 != 0) fn_bal(dbis, variable)
else fn_bal(dbis, variable) + theme(legend.position = "none")
})
ml <- marrangeGrob(.l, nrow=2, ncol=2, top = NULL)
ggsave(file="distrib mean variables after matching 20170222.pdf", ml)
#METHODE 3 : Print mean difference
varps_noql <- varps[!varps %in% c("INCOME", "NINSCLAS", "CAT1")]
dta_mQ <- dtm[ ,c(varps_noql,"SWAN")]
diff <- dta_mQ%>%
group_by(SWAN) %>%
summarise_all(funs(mean))
#https://cran.r-project.org/web/packages/tableone/vignettes/smd.html
#https://github.com/kaz-yos/tableone/blob/1d47ec186b2e351937e5f9712dad3881380ab12e/vignettes/smd.Rmd
tabUnmatched <- CreateTableOne(vars = c(varps_noql,"SWAN"), strata = "SWAN", data = d2[ ,c(varps_noql,"SWAN")], test = FALSE)
tabMatched <- CreateTableOne(vars = c(varps_noql,"SWAN"), strata = "SWAN", data = dta_mQ, test = FALSE)
## Construct a data frame containing variable name and SMD from all methods
dataPlot <- data.frame(variable = names(ExtractSmd(tabUnmatched)),
Unmatched = ExtractSmd(tabUnmatched),
Matched = ExtractSmd(tabMatched))
## Create long-format data for ggplot2
dataPlotMelt <- melt(data = dataPlot,
id.vars = c("variable"),
variable.name = "Method",
value.name = "SMD")
colnames(dataPlotMelt) <- c("variable","Method","SMD")
varNames <- as.character(dataPlot$variable)[order(dataPlot$Unmatched)]
## Order factor levels in the same order
dataPlotMelt$variable <- factor(dataPlotMelt$variable,
levels = varNames)
## Plot using ggplot2
#voir quoi faire avec var quali
ggplot(data = dataPlotMelt[dataPlotMelt$variable!= "SWAN", ], mapping = aes(x = variable, y = SMD,
group = Method, color = Method)) +
#geom_line() +
geom_point() +
geom_hline(yintercept = 0.1, color = "black", size = 0.1) +
coord_flip() +
theme_bw() + theme(legend.key = element_blank())
#METHODE 3 BIS standardized mean difference sans tableone
#attention la transformation des var quali a l'air fausse, retirée pour l'instant mais à vérifier
#rajouter unmatched
smd <- summary(mod_match, standardize = TRUE) #fait la balance pour chaque binaire tirée de la variable quali
smd <- smd$sum.matched
smd$var <- rownames(smd)
smd$group <- "matched"
g <- ggplot(data = smd[smd$var%in%varps,], aes(x=var, y= `Std. Mean Diff.`, group = group, color= group))
g <- g + geom_point() + geom_hline(yintercept = 0.1, color="red", size=0.1) +
coord_flip() + theme_bw() + theme(legend.key = element_blank())
g
#METHODE 4 IC à la main
#voir comment calculer IC pou var quali binarisés
getMIC <- function(.data){
dt <- lapply(names(.data), function(var){
num <- which(var==names(.data))
.data[,var] <- (.data[,var] - mean(.data[,var]))/sd(.data[,var]) #standardisation
x <- .data[.data$SWAN == 1, var]
df1 <- data.frame(mymean = mean(x),
ICminSG = mean(x) - 1.96*sqrt(var(x)/nrow(.data)),
ICmaxSG = mean(x) + 1.96*sqrt(var(x)/nrow(.data)),
myy = num*2-0.5,
groupe = "SWAN",
colour = 1)
x <- .data[.data$SWAN == 0, var]
df0 <- data.frame(mymean = mean(x),
ICminSG = mean(x) - 1.96*sqrt(var(x)/nrow(.data)),
ICmaxSG = mean(x) + 1.96*sqrt(var(x)/nrow(.data)),
myy = num*2+0.5,
groupe = "NO SWAN",
colour = 2)
df <- rbind(df1,df0)
})
dt <- do.call(rbind, dt)
dt$var <- rep(names(.data),each=2)
return(dt)
}
df <- getMIC(dta_m)
df <- na.omit(df) #retire les var quali dont les IC n'ont pas été calculés
head(df)
m<-tapply(df$myy, df$var, mean)
m<-data.frame(var=names(m), y=m)
head(m)
df2<-merge(df, m, by="var", all=T)
df2<-df2[order(df2$var, df2$group),]
df2$y<-ifelse(df2$groupe=="SWAN", 0.25, -0.25)+df2$y
head(df2)
laby<-unique(df2$var)
col <- hue_pal()(length(1:2))
g <- ggplot(data=df2, aes(x=mymean, xmin = ICminSG, y = y, xend = ICmaxSG, colour=groupe)) + geom_point()
g <- g + labs(y = "variable")
g<-g+scale_y_continuous(name="Variables", breaks=m$y, labels=m$var)
g
for (i in 1:nrow(df)){
g <- g + geom_segment(x = df2$ICminSG[i], y = df2$y[i], xend = df2$ICmaxSG[i], yend = df2$y[i], colour = col[df2$colour[i]])
g <- g + geom_segment(x = df2$ICminSG[i], y = df2$y[i]-0.5, xend = df2$ICminSG[i], yend = df2$y[i]+0.5, colour = col[df2$colour[i]])
g <- g + geom_segment(x = df2$ICmaxSG[i], y = df2$y[i]-0.5, xend = df2$ICmaxSG[i], yend = df2$y[i]+0.5, colour = col[df2$colour[i]])
}
g
#reste à faire pour le schéma :
#- vérifier que j'ai bien standardisé
#- enlever les trous et séparer les variables (retirer les NA puis numéroter les y en séparant les groupes de variables) : (1:20)[1:20 %% 3 !=0]
#---------
#analysis
#http://imai.princeton.edu/research/files/matchit.pdf
#journals.sfu.ca/jmde/index.php/jmde_1/article/download/431/414
#http://r.iq.harvard.edu/docs/matchit/2.4-20/matchit.pdf
#Analyse avec d2.app (methode David Hajage)
#d2.app$DEATH <- as.numeric(as.character(d2.app$DEATH))
d2.app$DTH30b <- as.numeric(as.character(d2.app$DTH30b))
d2.app$SWAN <- as.numeric(as.character(d2.app$SWAN))
#clogit(DEATH~SWAN + cluster(paire), method="efron", data=d2.app) #marche avec efron
clogit(DTH30b~SWAN + cluster(paire), method="efron", data=d2.app) #marche avec efron
#coxph(Surv(rep(1, nrow(d2.app)),DEATH)~SWAN + cluster(paire), data=d2.app) #marche mais est-ce vraiment la même chose?
coxph(Surv(rep(1, nrow(d2.app)),DTH30b)~SWAN + cluster(paire), data=d2.app) #marche mais est-ce vraiment la même chose?
#glm(DEATH~SWAN, data=d2.app, family="binomial")
glm(DTH30b~SWAN, data=d2.app, family="binomial")
#glmer(DEATH~SWAN + (1|paire), data=d2.app, family = "binomial")
glmer(DTH30b~SWAN + (1|paire), data=d2.app, family = "binomial")
#analyse avec mod_matchet dtm
#Paired t-test
t.test(as.numeric(matched.cases$yT), as.numeric(matched.cases$yC), paired = TRUE)
#-------------------
#QUESTIONS
#J'ai trop de variables, je ne sais pas quoi faire
# var <- c("AGE", "SCOMA1", "WTKILO1", "TEMP1", "MEANBP1", "PACO21", "PH1",
# "HEMA1", "POT1", "CREA1", "BILI1", "ALB1" ,"URIN1" )
#"SURV2MD1" je ne comprends pas ce que c'est
#"CA" JE ne sais pas s'il faut la mettre quand on a CA aussi, s'il faut garder CA s'il faut la transformer cancer 0/1 et meta 0/1
#Pour les variables qualitatives, je fais drop ou je sélectionne par paramètre et je ne prends que les classes significatives? Est-ce que je les garde dans le SP en les transformant en var binaire?
#Comment faire pour les variables avec beaucoup de données manquantes?
#quelle formule utiliser pour différence standardisée des moyennes : moyenne ctrl - moyenne
#faut-il faire la balance pour toutes les variables ou uniquement variables du score de ps
#comment checker la balance des variables qualitatives (tableone calcule une moyenne standardisée pour ces variables je ne vois pas comment?)
# que faire des var quali : quand on selectionne var pour score de propension, quand on regarde la balance: transformer en bianaire? faire drop pour la selection? que plotter pour vérifier la balance? tableone plot qqch mais je ene sais pas quoi...
#comment on met les paires dans le modèle?
#lien avec la mort : regression logistique ou cox??
#modele final : ne vaut-il pas mieux faire une analyse de survie??
table(table(unique(d$ROWNAMES)))
levels(d$CAT1)
levels(d$CAT2)
levels (d$CA)
l <- lapply(colnames(d), function(x) {
vx <- d[ ,x]
if(is.factor(vx)) {
res <- levels(vx)
return(c(x, res))
} else NULL
})
do.call(rbind,l)
unlist(l)
range(d$SADMDTE)
range(d$DSCHDTE, na.rm=T)
range(d$DTHDTE, na.rm=T)
range(d$LSTCTDTE, na.rm=T)
d[1:5, c("SADMDTE", "DSCHDTE", "DTHDTE", "LSTCTDTE")]
#0.2695
summary(d$RESP)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.