| content (large_string, lengths 0–6.46M) | path (large_string, lengths 3–331) | license_type (large_string, 2 classes) | repo_name (large_string, lengths 5–125) | language (large_string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 4–6.46M) | extension (large_string, 75 classes) | text (string, lengths 0–6.46M) |
---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/check_data.R
\name{fix_dates}
\alias{fix_dates}
\title{Converts columns with dates into an R date class}
\usage{
fix_dates(df, date = NULL, format = NULL)
}
\arguments{
\item{df}{The input data.frame}
\item{date}{The column name containing the dates}
\item{format}{The format of the date.}
}
\description{
Using lubridate, this function will transform dates from mdy, mdy_h, mdy_hm, mdy_hms (or starting with day instead of month) into valid date classes.
It will return an error if it cannot coerce the date itself.
}
\examples{
\dontrun{
new <- fix_dates(goat_data, 'start.date', 'dmy')
}
}
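The Rd above documents only the interface; as a hedged illustration (not the package's actual R/check_data.R source; fix_dates_sketch and its messages are hypothetical), such a function could be built around lubridate::parse_date_time():
fix_dates_sketch <- function(df, date = NULL, format = NULL) {
  # try the plain order plus hour/minute/second variants, as the description states
  orders <- switch(format,
                   mdy = c("mdy", "mdy H", "mdy HM", "mdy HMS"),
                   dmy = c("dmy", "dmy H", "dmy HM", "dmy HMS"),
                   stop("format must be 'mdy' or 'dmy'"))
  parsed <- lubridate::parse_date_time(df[[date]], orders = orders)
  # fail loudly when a non-missing value cannot be coerced, mirroring the documented behaviour
  if (any(is.na(parsed) & !is.na(df[[date]]))) {
    stop("Could not coerce column '", date, "' to a date class.")
  }
  df[[date]] <- parsed
  df
}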
|
/man/fix_dates.Rd
|
no_license
|
alfcrisci/mapData
|
R
| false | false | 681 |
rd
|
|
gate<- 'welcome'
gate
senior<- 35
senior
class(senior)
#working with variables
#we can use variables together and work with them, for example
deposit<- 5000
account<- 150000
new_balance<- deposit + account
new_balance
nvec<- c(1,2,3,4,5)
class(nvec)
cvec <- c('u','s','a')
class(cvec)
Ivec <- c(TRUE,FALSE)
Ivec
class(Ivec)
v <- c(FALSE,2)
class(v)
#class is an in-built function used to detect the datatype of an object
v <- c('A',1)
class(v)
temps <- c(72,71,68,73,69,75,71)
temps
names(temps) <- c('Mon','Tue','Wed','Thur','Fri','Sat','Sun')
temps
days <- c('Mon','Tue','Wed','Thur','Fri','Sat','Sun')
temp2 <- c(1,2,3,4,5,6,7)
names(temp2) <- days
|
/day1 on R.R
|
no_license
|
Dynamicideas/newme
|
R
| false | false | 709 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotDScoreHeatmap.R
\name{plotDScoreHeatmap}
\alias{plotDScoreHeatmap}
\title{Plot a heat map of the normalized drug-disease reverse association scores for cancer samples}
\usage{
plotDScoreHeatmap(
data,
subtype.label = "all",
SDS = "all",
E_Pvalue.th = 1,
E_FDR.th = 0.05,
S_Pvalue.th = 1,
S_FDR.th = 0.001,
show.rownames = TRUE,
show.colnames = FALSE,
color = colorRampPalette(c("#0A8D0A", "#F8F0EB", "red"))(190),
subtype_colors = NA,
drug_colors = NA,
border_color = "grey60",
cellwidth = NA,
cellheight = NA,
fontsize = 10,
fontsize.row = 10,
fontsize.col = 10,
scale = "row"
)
}
\arguments{
\item{data}{A list of result data generated by function `PrioSubtypeDrug()`.}
\item{subtype.label}{Character string indicating which cancer subtype's samples are used to plot the heat map.
If subtype.label = "all" (default), all cancer samples will be shown in the heat map.}
\item{SDS}{A string indicating which range of SDS is used for the heat map. If SDS="all" (default), the SDS will not be filtered.
If SDS="negative", only drugs with SDS<0 are used; if SDS="positive", only drugs with SDS>0 are used.}
\item{E_Pvalue.th}{A numeric. A threshold used to filter the drug effect P value (default: 1).}
\item{E_FDR.th}{A numeric. A threshold used to filter the drug effect FDR (default: 0.05).}
\item{S_Pvalue.th}{A numeric. A threshold used to filter the subtype-specific P value (default: 1).}
\item{S_FDR.th}{A numeric. A threshold used to filter the subtype-specific FDR (default: 0.001).}
\item{show.rownames}{Boolean specifying whether row names should be shown (default: TRUE).}
\item{show.colnames}{Boolean specifying whether column names should be shown (default: FALSE).}
\item{color}{Vector of colors used in the heatmap.}
\item{subtype_colors}{Vector of colors used to annotate the sample subtypes. Its length should correspond to the number of sample subtypes.}
\item{drug_colors}{Vector of colors used to label subtype-specific drugs.}
\item{border_color}{Color of cell borders on heatmap, use NA if no border should be drawn.}
\item{cellwidth}{Individual cell width in points. If left as NA, then the values depend on the size of plotting window.}
\item{cellheight}{Individual cell height in points. If left as NA, then the values depend on the size of plotting window.}
\item{fontsize}{Base fontsize for the plot (default: 10).}
\item{fontsize.row}{Fontsize for rownames (default: 10).}
\item{fontsize.col}{Fontsize for colnames (default: 10).}
\item{scale}{Character indicating if the values should be centered and scaled in either the row direction or the column direction, or none. Corresponding values are "row" (default), "column" and "none".}
}
\value{
A heat map.
}
\description{
According to the parameter setting, the function `plotDScoreHeatmap()` displays the heat map of the normalized
drug-disease reverse association score for the significant drugs.
}
\details{
plotDScoreHeatmap
}
\examples{
require(pheatmap)
## Get the result data of PrioSubtypeDrug().
## The data is based on the simulated breast cancer subtype data.
Subtype_drugs<-get("Subtype_drugs")
## Heat map of all subtype-specific drugs.
\donttest{plotDScoreHeatmap(data=Subtype_drugs,E_Pvalue.th=0.05,
S_Pvalue.th=0.05)}
## Plot only Basal subtype-specific drugs.
plotDScoreHeatmap(Subtype_drugs,subtype.label="Basal",SDS="all",E_Pvalue.th=0.05,
E_FDR.th=1,S_Pvalue.th=0.05,S_FDR.th=1)
}
\author{
Xudong Han,
Junwei Han,
Chonghui Liu
}
|
/man/plotDScoreHeatmap.Rd
|
no_license
|
hanjunwei-lab/SubtypeDrug
|
R
| false | true | 3,609 |
rd
|
|
### TERN LANDSCAPES
# Soil pH model fitting
# Author: Brendan Malone
# Email: brendan.malone@csiro.au
# created: 18.5.21
# modified: 18.5.21
# CODE PURPOSE
# # Residual kriging. Load in variogram model
### variables
vart<- "pH_4a1"
depth<- "d3"
batch<- 5
srt<- 1001
fin<- 1250
cpus<- 12
# root directory
data.root<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soil_pH/data/curated_all/variogram_dat/4a1/"
model.out<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soil_pH/models/variogram_models/4a1/"
root.tiles<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soil_pH/predictions/tiles/"
cov.tiles<- "/datasets/work/af-tern-mal-deb/work/datasets/national/covariates/tiles/90m/"
slurm.root<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soil_pH/rcode/digital_soil_mapping/spatialprediction/slurm/pH_4a1_residuals/d3/"
r.code<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soil_pH/rcode/digital_soil_mapping/spatialprediction/d3/residuals/residual_modelling_rangermodels_pearcey_spatialise_d3_1.R"
# libraries
library(raster);library(rgdal);library(sp);library(gstat);library(automap);library(rgeos);library(parallel);library(doParallel)
# residual data
# site data
site.dat<- readRDS(paste0(data.root,"tern_soilpH4a1_siteDat_covariates_CALVALDAT_SimulationResiduals_d3_ARD.rds"))
crs(site.dat)<- "+proj=lcc +lat_1=-18 +lat_2=-36 +lat_0=0 +lon_0=134 +x_0=0 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
# variogram
afit<- readRDS(paste0(model.out,"residuals_variogram_4a1_d3.rds"))
plot(afit)
### Folders where the covariates are
fols<- as.numeric(list.files(cov.tiles, full.names = FALSE))
length(fols)
###
# begin parallel cluster and register it with foreach
cl<- makeCluster(spec=cpus)
# register with foreach
registerDoParallel(cl)
#kriging to raster data
# get raster (this will change from tile to tile)
oper1<- foreach(i=srt:fin, .packages = c("raster", "sp", "rgdal", "gstat", "automap", "rgeos")) %dopar% {
# select folder
sfol<- fols[i]
# path to base raster
fpath<- paste0(cov.tiles, sfol,"/PCS/Climate/")
#raster
r1<- raster(paste(fpath,"climate_PCA_1.tif", sep=""))
# project raster to the same crs as data points (metre scale)
r2<- projectRaster(r1, res=90, crs="+proj=lcc +lat_1=-18 +lat_2=-36 +lat_0=0 +lon_0=134 +x_0=0 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs")
plot(r2)
#refine grid to remove large areas of NAs
tempD <- data.frame(cellNos = seq(1:ncell(r2)))
vals <- as.data.frame(getValues(r2))
tempD<- cbind(tempD, vals)
tempD <- tempD[complete.cases(tempD), ]
cellNos <- c(tempD$cellNos)
gXY <- data.frame(xyFromCell(r2, cellNos, spatial = FALSE))
tempD<- cbind(gXY, tempD)
#str(tempD)
r7<- rasterFromXYZ(tempD[,c(1:3)]) # the raster to interpolate onto
crs(r7)<- crs(r2)
# get centroid point of the raster and outside midpoints
e<- extent(r7)
#e
p<- as(e, "SpatialPolygons")
#centroid
c1<- gCentroid(p)
plot(r7)
plot(c1,add=T)
##Distances of data points to centroid point
#str(dat2)
mat1<- as.data.frame(site.dat)
names(mat1)
#str(mat1)
mat2<- as.matrix(mat1[,51:52]) # matrix of data coordinates
as.matrix(as.data.frame(c1)) # matrix of centroid coordinate
dist1<- spDistsN1(mat2, as.matrix(as.data.frame(c1)),longlat = FALSE) #distance
#summary(dist1)
mat1<- cbind(mat1,dist1)
mat3<- mat1[order(mat1$dist1),]
mat3<- mat3[1:2000,] # get the nearest 2000 points
#kriging data frame
coordinates(mat3)<- ~ x + y
crs(mat3)<- crs(r7)
#mat3
plot(mat3)
plot(r7,add=T)
# kriging model
gRK <- gstat(NULL, "prediction", predResidual~1, mat3, model=afit$var_model)
gRK
## kriging
outPath<- paste0(root.tiles, sfol, "/", depth, "/", vart, "/pred_residual_",depth,".tif")
outPath
map.RK2<- interpolate(r7, gRK, xyOnly=TRUE, index = 1, na.rm=TRUE)
# re-project to original resolution
r8<- projectRaster(map.RK2, r1,filename = outPath,
format="GTiff",
overwrite=T,
datatype="FLT4S")
#r8
#plot(r8)
## SLURM OUTPUT CHECKS
itOuts<- c(i,as.character(Sys.time()))
nmz<- paste0(slurm.root ,batch, "/slurmckeck_", i, "_",sfol, ".txt")
write.table(itOuts,
file = nmz,
row.names = F, col.names = F, sep=",")
}
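## Addition (not in the original script): release the parallel workers once the
## foreach loop has finished
parallel::stopCluster(cl)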
#END
|
/Production/DSM/pH/digital_soil_mapping/spatialprediction/d3/residuals/residual_modelling_rangermodels_pearcey_spatialise_d3_5.R
|
permissive
|
AusSoilsDSM/SLGA
|
R
| false | false | 4,470 |
r
|
|
#this script demonstrates how to run PGLS to scan for coevolution between TFBS score and a phenotype
#across all ~350k conserved noncoding regions in the UCSC 100way alignment mammals
#and a chosen TFBS motif
#what you need:
#trees from RERconverge readTrees (for 350k regions, they are split into several batches)
#phenotype
#output from TFcalls calculated over regions
#what you need to change in this script:
#filenames
#alter loop (if desired) to perform calculation over all TFs
#function - get the set of trees that contains a given CNE
###############################
getTree=function(regionname){
if(regionname %in% names(trees1$trees)){
ind=which(names(trees1$trees)==regionname)
return(trees1$trees[[ind]])
}
if(regionname %in% names(trees2$trees)){
ind=which(names(trees2$trees)==regionname)
return(trees2$trees[[ind]])
}
if(regionname %in% names(trees3$trees)){
ind=which(names(trees3$trees)==regionname)
return(trees3$trees[[ind]])
}
if(regionname %in% names(trees4$trees)){
ind=which(names(trees4$trees)==regionname)
return(trees4$trees[[ind]])
}
if(regionname %in% names(trees5$trees)){
ind=which(names(trees5$trees)==regionname)
return(trees5$trees[[ind]])
}
if(regionname %in% names(trees6$trees)){
ind=which(names(trees6$trees)==regionname)
return(trees6$trees[[ind]])
}
if(regionname %in% names(trees7$trees)){
ind=which(names(trees7$trees)==regionname)
return(trees7$trees[[ind]])
}
}
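# Illustrative alternative (not part of the original script): the same lookup written
# once over a list of the tree batches; 'getTreeCompact' is a hypothetical name and
# assumes trees1..trees7 have been loaded as below.
getTreeCompact=function(regionname, batches=list(trees1,trees2,trees3,trees4,trees5,trees6,trees7)){
  for(tb in batches){
    if(regionname %in% names(tb$trees)){
      return(tb$trees[[regionname]])
    }
  }
  return(NULL)
}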
###############################
#load packages
library(nlme)
library(phytools)
library(RERconverge)
#get species to use based on those with TF calls
specs=list.dirs("/home/Genomes/HOCOMOCO/Organism/", full.names = F)
specs=specs[-1]
#read in phenotype
PC1=readRDS("/home/kowaae22/AnalysisWithThreeTrees/PC1.rds")
phenvec=PC1
#name of noncoding region to investigate
# statfns=c("STAT1_HUMAN.H11MO.0.A.bed",
# "STAT1_HUMAN.H11MO.1.A.bed",
# "STAT2_HUMAN.H11MO.0.A.bed",
# "STAT3_HUMAN.H11MO.0.A.bed",
# "STAT4_HUMAN.H11MO.0.A.bed",
# "STAT6_HUMAN.H11MO.0.B.bed")
statfns=c("STAT2_HUMAN.H11MO.0.A.bed")
#folder identity that contains TFBS scores + which statistic to use
folder="groupspecscustommergedups" #custom merge
# stat="median"
# stat="mean"
# stat="min"
# stat="max"
stat="count"
# stat="sum"
#read in tree batches for noncoding regions
trees1=readRDS("/home/kowaae22/AllEnhancers/trees/phastcons46way.final.wSpalax.part1.trees.rds")
trees2=readRDS("/home/kowaae22/AllEnhancers/trees/phastcons46way.final.wSpalax.part2.trees.rds")
trees3=readRDS("/home/kowaae22/AllEnhancers/trees/phastcons46way.final.wSpalax.part3.trees.rds")
trees4=readRDS("/home/kowaae22/AllEnhancers/trees/phastcons46way.final.wSpalax.part4.trees.rds")
trees5=readRDS("/home/kowaae22/AllEnhancers/trees/phastcons46way.final.wSpalax.part5.trees.rds")
trees6=readRDS("/home/kowaae22/AllEnhancers/trees/phastcons46way.final.wSpalax.part6.trees.rds")
trees7=readRDS("/home/kowaae22/AllEnhancers/trees/phastcons46way.final.wSpalax.part7.trees.rds")
#run PGLS
########################################
#create a dataframe of the correct size to contain results
full=read.table("/home/kowaae22/TFcalls/filteredcustommergedcoords/hg19coords", stringsAsFactors=F)
full=full[full$V1=="chr1",]
resultsdf=data.frame(matrix(nrow=nrow(full), ncol=length(statfns)))
colnames(resultsdf)=statfns
rownames(resultsdf)=full$V4
#create list to store results
allresults=list(resultsdf,
resultsdf)
names(allresults)=c("PGLSp", "PGLSstat")
start=Sys.time()
hcount=1
for(h in statfns){
#read in TF call information
fn=paste0("/home/kowaae22/TFcalls/", folder, "/", h, stat)
data=read.table(fn, stringsAsFactors =F)
colnames(data)=data[1,]
data=data[-1,]
colnames(data)[colnames(data)=="odoRosDiv1"]="odoRosDi"
data=data[data$chr=="chr1",]
rownames(allresults$PGLSp)=data$name
rownames(allresults$PGLSstat)=data$name
#loop over all CNEs:
count=1
while(count<=nrow(data)){
#match species content in CNE species tree and phenvec
curcne=data$name[count]
TF=setNames(data[count,], colnames(data))
TF=TF[-c(1:4)]
TF=TF[match(names(phenvec), names(TF))]
TF=as.numeric(TF)
df=data.frame(TF, phenvec)
df2=na.omit(df)
mt2=getTree(curcne)
keep=intersect(rownames(df2), mt2$tip.label)
if(!is.null(mt2)){
df2=df2[rownames(df2) %in% keep,]
mt2=keep.tip(mt2, keep)
}
#run PGLS if there are enough species + enough diversity in phenotype and TF across those species
if(length(unique(df2$TF))!=1 & length(unique(df2$phenvec))!=1 & nrow(unique(df2))>2 & !is.null(mt2)){
pgls=gls(TF~phenvec, correlation = corBrownian(phy=mt2), data=df2)
pvalPGLS=summary(pgls)$tTable[2,4]
statPGLS=summary(pgls)$tTable[2,3]
allresults$PGLSp[count, hcount]=pvalPGLS
allresults$PGLSstat[count, hcount]=statPGLS
}else{
allresults$PGLSp[count, hcount]=NA
allresults$PGLSstat[count, hcount]=NA
}
if(count %% 10000==0){
print(paste0("CNE count: ", count)) #345786
}
count=count+1
}
print(paste0("TF count: ", hcount)) #771
# saveRDS(allresults, paste0("/home/kowaae22/TFcalls/allresultsmergedcustomtreesLongevityPC1/allresultsmergedCTfirst",hcount,".rds"))
hcount=hcount+1
}
end=Sys.time()
end-start
saveRDS(allresults, "/home/kowaae22/permPGLSrealSTAT2PC1count.rds")
########################################
|
/RunPGLS.R
|
no_license
|
kowaae22/ClarkLabDocumentation
|
R
| false | false | 5,504 |
r
|
|
#' optimal transport; returns p-Wasserstein distance
#'
#' c(x,y) = dxy^p; ground cost/metric
#'
#' @examples
#' ## create two small datasets from bivariate normal
#' X = matrix(rnorm(5*2),ncol=2) # 5 obs. for X
#' Y = matrix(rnorm(5*2),ncol=2) # 5 obs. for Y
#'
#' ## compute cross-distance between X and Y
#' dXY = array(0,c(5,5))
#' for (i in 1:5){
#' vx = as.vector(X[i,])
#' for (j in 1:5){
#' vy = as.vector(Y[j,])
#' dXY[i,j] = sqrt(sum((vx-vy)^2))
#' }
#' }
#'
#' ## compute the distance and report
#' output = coupling(dXY, p=2) # 2-Wasserstein distance
#' image(output$coupling, main=paste("distance=",round(output$distance,4),sep=""))
#'
#' \dontrun{
#' ## create two datasets from bivariate normal
#' ## let's try to see the evolution of 2-Wasserstein distance
#' nmax = 1000
#' X = matrix(rnorm(nmax*2),ncol=2) # obs. for X
#' Y = matrix(rnorm(nmax*2),ncol=2) # obs. for Y
#'
#' ## compute cross-distance between X and Y
#' dXY = array(0,c(nmax,nmax))
#' for (i in 1:nmax){
#' vx = as.vector(X[i,])
#' for (j in 1:nmax){
#' vy = as.vector(Y[j,])
#' dXY[i,j] = sqrt(sum((vx-vy)^2))
#' }
#' }
#'
#' ## compute
#' xgrid = 2:nmax
#' ygrid = rep(0,nmax-1)
#' for (i in 1:(nmax-1)){
#' pXY = dXY[1:(i+1),1:(i+1)]
#' ygrid[i] = coupling(pXY, p=2)$distance
#' print(paste("Iteration ",i+1,"/",nmax," Complete..",sep=""))
#' }
#'
#' ## visualize
#' plot(xgrid, ygrid, "b", lwd=1, main="Evolution of 2-Wasserstein Distances",
#' xlab="number of samples", ylab="distance", pch=18)
#' }
#'
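#' @param dxy a nonnegative, finite cross-distance matrix between the observations of two samples (rows vs. columns).
#' @param p exponent applied to the ground distance, i.e. \eqn{c(x,y)=d(x,y)^p}; a positive real number or Inf.
#' @param wx nonnegative weights for the rows of \code{dxy}, summing to 1; uniform weights if missing.
#' @param wy nonnegative weights for the columns of \code{dxy}, summing to 1; uniform weights if missing.
#' @param method name of the transport algorithm; currently matched but not forwarded to \code{transport::transport}.
#'
#' @return a named list with the \code{distance} and, for finite \code{p}, the optimal \code{coupling} matrix.
#'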
#' @export
coupling <- function(dxy, p=1, wx, wy,
method = c("networkflow", "shortsimplex", "revsimplex", "primaldual")){
##################################################
# Preprocessing
# 1. dxy
if ((!is.matrix(dxy))||(any(dxy<0))||(any(is.na(dxy)))||(any(is.infinite(dxy)))){
stop("* coupling : input 'dxy' should be a matrix of nonnegative real numbers.")
}
n = nrow(dxy)
m = ncol(dxy)
# 2. p; pass
if ((length(p)>1)||(p<=0)){
stop("* coupling : 'p' should be a nonnegative real number of Inf.")
}
# 3. wx and wy
if (missing(wx)){wx = rep(1/n,n)}; wx=wx/sum(wx)
if (missing(wy)){wy = rep(1/m,m)}; wy=wy/sum(wy)
if ((!is.vector(wx))||(!check_Sto1(wx))||(length(wx)!=n)||(any(wx<0))){
stop("* coupling : 'wx' should be a vector of nonnegative numbers summing to 1.")
}
if ((!is.vector(wy))||(!check_Sto1(wy))||(length(wy)!=m)||(any(wy<0))){
stop("* coupling : 'wy' should be a vector of nonnegative numbers summing to 1.")
}
# 4. method
mymethod = match.arg(method)
##################################################
# Main Computation
if (is.infinite(p)){ # p=Inf; this part is incorrect
output = list()
output$distance = max(dxy)
} else {
cxy = dxy^p
output = compute.coupling(cxy, transport::transport(a=wx, b=wy, costm = cxy))
output$distance = (output$distance^(1/p))
return(output)
}
}
# auxiliary functions -----------------------------------------------------
#' @keywords internal
#' @noRd
compute.coupling <- function(costm, plan){
nx = nrow(costm)
ny = ncol(costm)
output = array(0,c(nx,ny))
nplans = nrow(plan)
for (i in 1:nplans){
id.from = plan[i,1]
id.to = plan[i,2]
vals = plan[i,3]
output[id.from,id.to] = vals
}
result = list()
result$distance = sum(output*costm)
result$coupling = output
return(result)
}
#' @keywords internal
#' @noRd
check_Sto1 <- function(w){
return((abs(sum(w)-1) < 100*.Machine$double.eps))
}
|
/R/geo_coupling.R
|
no_license
|
kisungyou/DAS
|
R
| false | false | 3,551 |
r
|
|
/Distancias Distribuciones IPC/Code/Tablas/Tabla1.R
|
no_license
|
InvestigacionesEconomicasPuce/PreciosConsumidor
|
R
| false | false | 1,017 |
r
| ||
#' Tibble with o3_max_mm8h_d data for 7 stations in the UMBRIA region
#'
#' @format A tibble with 8 columns and 5117 observations
#'
#' @usage
#' o3_max_mm8h_d
"o3_max_mm8h_d"
|
/R/o3_max_mm8h_d.R
|
permissive
|
progettopulvirus/umbria
|
R
| false | false | 178 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compare.R
\name{get_local_behind_lockfile}
\alias{get_local_behind_lockfile}
\alias{get_capsule_behind_lockfile}
\title{get packages behind lockfile}
\usage{
get_local_behind_lockfile(
lockfile_path = "./renv.lock",
dep_source_paths = NULL
)
get_capsule_behind_lockfile(
lockfile_path = "./renv.lock",
dep_source_paths = NULL
)
}
\arguments{
\item{lockfile_path}{a length one character vector path of the lockfile for}
\item{dep_source_paths}{a character vector of file paths to extract
package dependencies from. If NULL (default) the whole local library is compared.}
}
\value{
a summary dataframe of package version differences.
}
\description{
return information on packages in your main R library (\code{.libPaths()}) or capsule library (\code{./renv}) that are behind the
lockfile versions (at \code{lockfile_path}).
}
\details{
If \code{dep_source_paths} is supplied, only dependencies declared in these files are returned.
Information is returned about packages that are behind in your development
environment, so you can update them to the capsule versions if you wish.
A warning is thrown in the case that packages have the same version but
different remote SHA. E.g. a package in one library is from GitHub and in
the other library is from CRAN. Or both packages are from GitHub, have the
same version but different SHAs.
}
\section{Functions}{
\itemize{
\item \code{get_capsule_behind_lockfile}: get packages in the renv library that are behind the lockfile
}}
\examples{
\dontrun{
get_local_behind_lockfile(
dep_source_paths = "./packages.R",
lockfile_path = "./renv.lock"
)
}
}
\seealso{
Other comparisons:
\code{\link{any_local_behind_lockfile}()},
\code{\link{compare_local_to_lockfile}()}
}
\concept{comparisons}
|
/man/get_behind.Rd
|
permissive
|
MilesMcBain/capsule
|
R
| false | true | 1,930 |
rd
|
|
### Packages to be added to DESCRIPTION of brapigen
### usethis::use_package(package = "yaml")
### usethis::use_package(package = "magrittr")
### usethis::use_package(package = "whisker")
### usethis::use_package(package = "stringr")
### load required packages
library(magrittr)
library(whisker)
### Functions
### ----
### Function to retrieve all call names
fetchCallNames <- function(brapiSpecs, verb = c("", "DELETE", "GET", "PATCH", "POST", "PUT")) {
verb <- tolower(match.arg(verb))
if (verb == "") {
callNames <- brapiSpecs[["paths"]] %>% names
callNames <- sub(pattern = "_",
replacement = "",
x = stringr::str_replace_all(
string = stringr::str_replace_all(string = callNames,
pattern = "/",
replacement = "_"),
pattern = stringr::regex("\\{|\\}"),
replacement = ""))
return(callNames)
} else {
callNames <- as.character()
for (i in names(brapiSpecs[["paths"]])) {
if (verb %in% names(brapiSpecs[["paths"]][[i]])
&&
!("deprecated" %in% names(brapiSpecs[["paths"]][[i]][[verb]]))) {
callNames <- c(callNames, i)
}
}
callNames <- sub(pattern = "_",
replacement = "",
x = stringr::str_replace_all(
string = stringr::str_replace_all(string = callNames,
pattern = "/",
replacement = "_"),
pattern = stringr::regex("\\{|\\}"),
replacement = ""))
return(callNames)
}
}
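# Illustrative usage (hypothetical example path; assumes 'brapiSpecs' has been read with
# yaml::read_yaml() as done further below): a path such as "/germplasm/{germplasmDbId}"
# comes back as "germplasm_germplasmDbId".
# fetchCallNames(brapiSpecs)          # all call names
# fetchCallNames(brapiSpecs, "GET")   # only non-deprecated GET calls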
### Function to retrieve call specifications for a call, with either "DELETE",
### "GET", "PATCH", "POST", or "PUT" as verb,
### intended to loop over allCallNames, where each element of the vector will be
### used as idName.
getCall <- function(brapiSpecs, idName, verb) {
verb <- tolower(verb)
allCallNames <- fetchCallNames(brapiSpecs = brapiSpecs)
idNumber <- which(allCallNames == idName)
callName <- names(brapiSpecs[["paths"]])[idNumber]
## Check for type of call and deprecation
if (verb %in% names(brapiSpecs[["paths"]][[callName]])) {
if (!("Deprecated" %in% brapiSpecs[["paths"]][[callName]][[verb]][["tags"]])) {
aCall <- brapiSpecs[["paths"]][[callName]][[verb]]
aCall[["name"]] <- allCallNames[idNumber]
aCall[["call"]] <- callName
aCall[["verb"]] <- verb
return(aCall)
}
} else {
return()
}
}
### Function to generate a call path for the @title section in the documentation
aCallTitle <- function(aCall) {
titleCall <- gsub(pattern = "\\{", replacement = "\\\\{", x = aCall[["call"]])
titleCall <- gsub(pattern = "\\}", replacement = "\\\\}", x = titleCall)
return(titleCall)
}
### Function to generate a string ("callRefURL") to be used in @references to
### construct the URL
aCallRefURL <- function(aCall) {
callRefURL <- gsub(pattern = "\\/\\{", replacement = "__", x = aCall[["call"]])
callRefURL <- gsub(pattern = "\\}\\/", replacement = "__", x = callRefURL)
callRefURL <- sub(pattern = "^\\/", replacement = "", x = callRefURL)
callRefURL <- sub(pattern = "\\}$", replacement = "_", x = callRefURL)
callRefURL <- gsub(pattern = "\\/", replacement = "_", x = callRefURL)
callRefURL <- gsub(pattern = "-", replacement = "_", x = callRefURL)
return(callRefURL)
}
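# Worked example (hypothetical path): "/studies/{studyDbId}" is turned into
# "studies__studyDbId_", the string later used to build the @references URL.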
### Function to generate @param section in the documentation
aCallParamVector <- function(aCall) {
n <- length(aCall[["parameters"]])
res <- character(0)
for (i in 1:n) {
p <- aCall[["parameters"]][[i]]
if (p[["name"]] == "Authorization" | "deprecated" %in% names(p)) {
next()
} else {
if (p[["name"]] %in% c("Accept",
"active",
"dataType",
"expandHomozygotes",
"format",
"includeSiblings",
"includeSynonyms",
"listType",
"sortOrder")) {
switch(p[["name"]],
"Accept" = {res <- c(res, paste0(p[["name"]], " ",
"character",
"; required: ",
p[["required"]], "; ",
p[["description"]], "; ",
'default: "application/json",',
' other possible values: "text/csv"|"text/tsv"|"application/flapjack"'))},
"active" = {res <- c(res, paste0(p[["name"]], " ",
"logical",
"; required: ",
p[["required"]], "; ",
p[["description"]], "; ",
'default: NA,',
' other possible values: TRUE | FALSE'))},
"dataType" = {res <- c(res, paste0(p[["name"]], " ",
"character",
"; required: ",
p[["required"]], "; ",
p[["description"]], "; ",
'default: "",',
' other possible values: "application/json"|"text/csv"|"text/tsv"|"application/flapjack"'))},
"expandHomozygotes" = {res <- c(res, paste0(p[["name"]], " ",
"logical",
"; required: ",
p[["required"]], "; ",
p[["description"]], "; ",
'default: NA,',
' other possible values: TRUE | FALSE'))},
"format" = {res <- c(res, paste0(p[["name"]], " ",
"character",
"; required: ",
p[["required"]], "; ",
p[["description"]], "; ",
'default: as.character(NA),',
' other possible values: "csv", tsv" and depending on the call "flapjack" may be supported.'))},
"includeSiblings" = {res <- c(res, paste0(p[["name"]], " ",
"logical",
"; required: ",
p[["required"]], "; ",
p[["description"]], "; ",
'default: NA,',
' other possible values: TRUE | FALSE'))},
"includeSynonyms" = {res <- c(res, paste0(p[["name"]], " ",
"logical",
"; required: ",
p[["required"]], "; ",
p[["description"]], "; ",
'default: NA,',
' other possible values: TRUE | FALSE'))},
"listType" = {res <- c(res, paste0(p[["name"]], " ",
"character",
"; required: ",
p[["required"]], "; ",
p[["description"]], "; ",
'default: "",',
' other possible values: "germplasm"|"markers"|"observations"|"observationUnits"|"observationVariables"|"programs"|"samples"|"studies"|"trials"'))},
"sortOrder" = {res <- c(res, paste0(p[["name"]], " ",
"character",
"; required: ",
p[["required"]], "; ",
p[["description"]], "; ",
'default: "",',
' other possible values: "asc"|"ASC"|"desc"|"DESC"'))})
} else {
res <- c(res, paste0(p[["name"]], " ",
ifelse(("items" %in% names(p[["schema"]])),
"vector of type character",
ifelse(p[["schema"]][["type"]] == "integer",
"integer",
"character")),
"; required: ",
p[["required"]], "; ",
p[["description"]]))
}
}
}
return(res)
}
### Function to generate @param section in the documentation for POST/PUT calls
aCallBodyVector <- function(aCall) {
tempList <- aCall[["requestBody"]][["content"]][["application/json"]][["schema"]][["properties"]]
res <- character(0)
for (name in names(tempList)) {
tempItem <- tempList[[name]]
tempItem[["name"]] <- name
if (tempItem[["name"]] == "Authorization" | "deprecated" %in% names(tempItem)) {
next()
} else {
res <- c(res, paste0(name, " ",
switch(tempItem[["type"]],
"array" = {
if (tempItem[["items"]][["type"]] == "string") {
"vector of type character"
}
},
"boolean" = "logical",
"integer" = "integer",
"object" = "list",
"string" = "character"),
"; required: FALSE", "; ",
tempItem[["description"]],
if (tempItem[["type"]] %in% c("array", "boolean", "integer", "string")) {
switch(tempItem[["type"]],
"array" = {'; default: "", when using multiple values supply as c("value1", "value2").'},
"boolean" = {"; default: NA, other possible values: TRUE | FALSE."},
"integer" = {ifelse(name %in% c("decimalPlaces", "listSize", "numberOfSamples"),
"; default: 0.",
"")},
"string" = {
if (name %in% c("format",
"listType")) {
switch(name,
"format" = '; default: as.character(NA), other possible values: "csv", "tsv", and depending on the call "flapjack" may be supported.',
"listType" = '; default: "", other possible values: "germplasm"|"markers"|"observations"|"observationUnits"|"observationVariables"|"programs"|"samples"|"studies"|"trials"')
} else {
'; default: "".'
}})
} else {
""
})
)
}
}
return(res)
}
### Function to generate function call arguments
aCallParamString <- function(aCall) {
n <- length(aCall[["parameters"]])
res <- character(0)
for (i in 1:n) {
p <- aCall[["parameters"]][[i]]
if (p[["name"]] == "Authorization" | "deprecated" %in% names(p)) {
next()
} else {
if (p[["name"]] == "page" | p[["name"]] == "pageSize") {
res <- paste(res,
paste(p[["name"]],
"=",
as.integer(p[["example"]])),
sep = ", ")
next()
}
if (p[["name"]] == "format") {
res <- paste(res,
paste(p[["name"]],
"=",
"as.character(NA)"),
sep = ", ")
next()
}
if (p[["name"]] == "min") {
res <- paste(res,
paste(p[["name"]],
"=",
"as.integer(NA)"),
sep = ", ")
next()
}
if (p[["name"]] == "max") {
res <- paste(res,
paste(p[["name"]],
"=",
"as.integer(NA)"),
sep = ", ")
next()
}
if (p[["name"]] %in% c("Accept",
"active",
"expandHomozygotes",
"includeSiblings",
"includeSynonyms")) {
switch(p[["name"]],
"Accept" = {res <- paste(res,
paste(p[["name"]],
"=",
"'application/json'"),
sep = ", ")
next()},
"active" = {res <- paste(res,
paste(p[["name"]], "=", "NA"),
sep = ", ")
next()},
"expandHomozygotes" = {res <- paste(res,
paste(p[["name"]], "=", "NA"),
sep = ", ")
next()},
"includeSiblings" = {res <- paste(res,
paste(p[["name"]], "=", "NA"),
sep = ", ")
next()},
"includeSynonyms" = {res <- paste(res,
paste(p[["name"]], "=", "NA"),
sep = ", ")
next()})
} else {
res <- paste(res,
paste(p[["name"]], "=", "''"),
sep = ", ")
}
}
}
if ("requestBody" %in% names(aCall)) {
tempList <- aCall[["requestBody"]][["content"]][["application/json"]][["schema"]][["properties"]]
for (name in names(tempList)) {
p <- tempList[[name]]
p[["name"]] <- name
if (p[["name"]] == "Authorization" | "deprecated" %in% names(p)) {
next()
} else {
res <- paste(res,
paste(name, "=",
switch(p[["type"]],
"array" = {
if (p[["items"]][["type"]] == "string") "''"
},
"boolean" = "NA",
"integer" = {
switch(name,
"decimalPlaces" = "0",
"imageFileSize" = "as.integer(NA)",
"imageFileSizeMax" = "as.integer(NA)",
"imageFileSizeMin" = "as.integer(NA)",
"imageHeight" = "as.integer(NA)",
"imageHeightMax" = "as.integer(NA)",
"imageHeightMin" = "as.integer(NA)",
"imageWidth" = "as.integer(NA)",
"imageWidthMax" = "as.integer(NA)",
"imageWidthMin" = "as.integer(NA)",
"listSize" = "0",
"numberOfSamples" = "0",
"page" = "0",
"pageSize" = "1000"
)
},
"object" = "list()",
"string" = ifelse(p[["name"]] == "format", "as.character(NA)" , "''")
)),
sep = ", ")
}
}
}
res <- sub(pattern = "^, ",
replacement = "",
x = res)
return(res)
}
### Function to identify required arguments in a function call
aCallReqArgs <- function(aCall) {
n <- length(aCall[["parameters"]])
required <- character(0)
for (i in 1:n) {
p <- aCall$parameters[[i]]
if (p[["required"]] == TRUE && !is.null(p[["required"]])) {
required <- paste(required,
aCall[["parameters"]][[i]][["name"]],
sep = ", ")
} else {
next()
}
}
required <- sub(pattern = "^, ",
replacement = "",
x = required)
if (length(required) == 0) {
aCall[["required"]] <- ""
return(aCall)
} else {
aCall[["required"]] <- required
return(aCall)
}
}
### ----
### Create a GET function
### brapigen package needs to be built!
### Changed to version 2.0
### brapi_2.0.yaml # Full Specs
### brapi-core_2.0.yaml # Core Specs
### brapi-genotyping_2.0.yaml # Genotyping Specs
### brapi-germplasm_2.0.yaml # Germplasm Specs
### brapi-phenotyping_2.0.yaml # Phenotyping Specs
brapiSpecs <- yaml::read_yaml(system.file("openapi/brapi-phenotyping_2.0.yaml",
package = "brapigen"))
### Packages to be added to DESCRIPTION of Brapir
### usethis::use_package(package = "curl")
### Create directory infrastructure
dir_b <- "../brapir-v2"
base::unlink(x = dir_b, recursive = TRUE, force = TRUE)
base::list.files(path = dir_b, recursive = TRUE)
base::dir.create(path = dir_b)
dir_r <- base::file.path(dir_b, "R/")
base::dir.create(path = dir_r)
### copy files
fileNames <- base::setdiff(x = base::list.files("inst/templates/"),
y = base::list.files("inst/templates/")[grepl(pattern = "*.mst",
x = base::list.files("inst/templates/"))])
invisible(base::file.copy(from = paste0("inst/templates/", fileNames),
to = paste0(dir_r, fileNames),
overwrite = TRUE))
### Retrieve call names in a readable format
###
### brapi-core brapi-genotyping
### 44 Total calls
### --- + --- +
### 0 DELETE calls
### 23 GET calls
### 0 PATCH calls
### 14 POST calls
### 7 PUT calls
allCallNames <- fetchCallNames(brapiSpecs = brapiSpecs)
for (verb in c("DELETE", "GET", "PATCH", "POST", "PUT")) {
assign(paste(verb, "calls", sep = ""), fetchCallNames(brapiSpecs, verb))
}
# for (verb in c("GET", "POST", "PUT")) {
# print(verb)
# print(get(paste(verb, "calls", sep = "")))
# }
# get(paste(verb, "calls", sep = ""))
### Create aCall object containing call elements
### tested on: see openapi/examples_brapigen-brapir_test-server_brapi_org.R
for (callName in GETcalls) {
# start with callName <- GETcalls[] for an individual call
## retrieve call setting
aCall <- getCall(brapiSpecs = brapiSpecs, idName = callName, verb = "GET")
## Create element to substitute call address in @title
aCall[["titleCall"]] <- aCallTitle(aCall = aCall)
## Create call description as a character vector substituted LINE FEED \
## Carriage Return for @detail section.
aCallDesc <- gsub(pattern = "\\n(?!(#' ))",
replacement = "\n#' ",
x = aCall[["description"]], perl = TRUE)
## Create @param descriptions for documentation
aCallParam <- aCallParamVector(aCall = aCall)
aCallParam <- whisker::iteratelist(x = aCallParam, value = "pname")
aCallParam <- lapply(X = aCallParam,
FUN = function(el) {
lapply(X = el, FUN = function(elel) {
stringr::str_replace_all(string = elel,
pattern = "\\n\\n",
replacement = "\n#' ")})})
## Create call reference url part for the @references section
aCall[["callRefURL"]] <- aCallRefURL(aCall = aCall)
## Creation function arguments for selected call
aCallArgs <- aCallParamString(aCall = aCall)
## Identify and store required arguments
aCall <- aCallReqArgs(aCall = aCall)
## Store call family information for documentation in @family
aCallFamily <- c(
## brapi_2.0
paste0(tolower(strsplit(x = brapiSpecs[["info"]][["title"]], split = "-")[[1]][1]),
"_",
brapiSpecs[["info"]][["version"]]),
tolower(brapiSpecs[["info"]][["title"]]),
aCall[["tags"]]
)
aCallFamily <- whisker::iteratelist(aCallFamily, value = "fname")
## Create call data list object for the selected call to be used by the
## whisker package.
aCallData <- list(verb = aCall[["verb"]],
titleCall = aCall[["titleCall"]],
summary = aCall[["summary"]],
parameters = aCallParam,
description = aCallDesc,
version = brapiSpecs[["info"]][["version"]],
tag = gsub(pattern = " ",
replacement = "\\%20",
x = aCall[["tags"]][1]),
callRefURL = aCall[["callRefURL"]],
family = aCallFamily,
name = gsub(pattern = "-",
replacement = "_",
x = aCall[["name"]]),
arguments = aCallArgs,
required = aCall[["required"]],
call = aCall[["call"]],
package = brapiSpecs[["info"]][["title"]])
## Load template for function name
template <- readLines(con = "inst/templates/function_name.mst")
## Create function name
functionName <- whisker::whisker.render(template = template,
data = aCallData)
## Load template to create the GET function
if (!grepl(pattern = "(search)", callName)) {
template <- readLines(con = "inst/templates/function_GET.mst")
} else {
template <- readLines(con = "inst/templates/function_GET_search.mst")
}
## Write the created GET function
writeLines(text = whisker::whisker.render(template = template,
data = aCallData),
con = paste0(dir_r, functionName, ".R"))
}
for (callName in POSTcalls) {
# start with callName <- POSTcalls[] for an individual call
## Retrieve call setting
aCall <- getCall(brapiSpecs = brapiSpecs, idName = callName, verb = "POST")
## Create element to substitute call address in @title
aCall[["titleCall"]] <- aCallTitle(aCall = aCall)
## Create call description as a character vector substituted LINE FEED \
## Carriage Return for @detail section.
aCallDesc <- gsub(pattern = "\\n(?!(#' ))",
replacement = "\n#' ",
x = aCall[["description"]], perl = TRUE)
## Create @param descriptions for documentation
aCallParam <- aCallParamVector(aCall = aCall)
aCallParam <- whisker::iteratelist(x = aCallParam, value = "pname")
aCallParam <- lapply(X = aCallParam,
FUN = function(el) {
lapply(X = el, FUN = function(elel) {
stringr::str_replace_all(string = elel,
pattern = "\\n\\n",
replacement = "\n#' ")})})
if ("requestBody" %in% names(aCall)) {
callBodyVector <- aCallBodyVector(aCall = aCall)
callBodyVector <- whisker::iteratelist(x = callBodyVector, value = "pname")
aCallParam <- c(aCallParam, callBodyVector)
}
## Create call reference url part for the @references section
aCall[["callRefURL"]] <- aCallRefURL(aCall = aCall)
## Creation function arguments for selected call
aCallArgs <- aCallParamString(aCall = aCall)
## Identify and store required arguments
aCall <- aCallReqArgs(aCall = aCall)
## Store call family information for documentation in @family
aCallFamily <- c(
## brapi_2.0
paste0(tolower(strsplit(x = brapiSpecs[["info"]][["title"]], split = "-")[[1]][1]),
"_",
brapiSpecs[["info"]][["version"]]),
tolower(brapiSpecs[["info"]][["title"]]),
aCall[["tags"]]
)
aCallFamily <- whisker::iteratelist(aCallFamily, value = "fname")
## Create call data list object for the selected call to be used by the
## whisker package.
aCallData <- list(verb = aCall[["verb"]],
titleCall = aCall[["titleCall"]],
summary = aCall[["summary"]],
parameters = aCallParam,
description = aCallDesc,
version = brapiSpecs[["info"]][["version"]],
tag = gsub(pattern = " ",
replacement = "\\%20",
x = aCall[["tags"]][1]),
callRefURL = aCall[["callRefURL"]],
family = aCallFamily,
name = gsub(pattern = "-",
replacement = "_",
x = aCall[["name"]]),
arguments = aCallArgs,
required = aCall[["required"]],
call = aCall[["call"]],
package = brapiSpecs[["info"]][["title"]])
## Load template for function name
template <- readLines(con = "inst/templates/function_name.mst")
## Create function name
functionName <- whisker::whisker.render(template = template,
data = aCallData)
## Load template to create the POST function
if (!grepl(pattern = "(search)", callName)) {
template <- readLines(con = "inst/templates/function_POST.mst")
} else {
template <- readLines(con = "inst/templates/function_POST_search.mst")
}
## Write the created POST function
writeLines(text = whisker::whisker.render(template = template,
data = aCallData),
con = paste0(dir_r, functionName, ".R"))
}
for (callName in PUTcalls) {
# start with callName <- PUTcalls[] for an individual call
## Retrieve call setting
aCall <- getCall(brapiSpecs = brapiSpecs, idName = callName, verb = "PUT")
## Create element to substitute call address in @title
aCall[["titleCall"]] <- aCallTitle(aCall = aCall)
## Create call description as a character vector substituted LINE FEED \
## Carriage Return for @detail section.
aCallDesc <- gsub(pattern = "\\n(?!(#' ))",
replacement = "\n#' ",
x = aCall[["description"]], perl = TRUE)
## Create @param descriptions for documentation
aCallParam <- aCallParamVector(aCall = aCall)
aCallParam <- whisker::iteratelist(x = aCallParam, value = "pname")
aCallParam <- lapply(X = aCallParam,
FUN = function(el) {
lapply(X = el, FUN = function(elel) {
stringr::str_replace_all(string = elel,
pattern = "\\n\\n",
replacement = "\n#' ")})})
if ("requestBody" %in% names(aCall)) {
callBodyVector <- aCallBodyVector(aCall = aCall)
callBodyVector <- whisker::iteratelist(x = callBodyVector, value = "pname")
aCallParam <- c(aCallParam, callBodyVector)
}
## Create call reference url part for the @references section
aCall[["callRefURL"]] <- aCallRefURL(aCall = aCall)
## Create the function arguments for the selected call
aCallArgs <- aCallParamString(aCall = aCall)
## Identify and store required arguments
aCall <- aCallReqArgs(aCall = aCall)
## Store call family information for documentation in @family
aCallFamily <- c(
## brapi_2.0
paste0(tolower(strsplit(x = brapiSpecs[["info"]][["title"]], split = "-")[[1]][1]),
"_",
brapiSpecs[["info"]][["version"]]),
tolower(brapiSpecs[["info"]][["title"]]),
aCall[["tags"]]
)
aCallFamily <- whisker::iteratelist(aCallFamily, value = "fname")
## Create call data list object for the selected call to be used by the
## whisker package.
aCallData <- list(verb = aCall[["verb"]],
titleCall = aCall[["titleCall"]],
summary = aCall[["summary"]],
parameters = aCallParam,
description = aCallDesc,
version = brapiSpecs[["info"]][["version"]],
tag = gsub(pattern = " ",
replacement = "\\%20",
x = aCall[["tags"]][1]),
callRefURL = aCall[["callRefURL"]],
family = aCallFamily,
name = gsub(pattern = "-",
replacement = "_",
x = aCall[["name"]]),
arguments = aCallArgs,
required = aCall[["required"]],
call = aCall[["call"]],
package = brapiSpecs[["info"]][["title"]])
## Load template for function name
template <- readLines(con = "inst/templates/function_name.mst")
## Create function name
functionName <- whisker::whisker.render(template = template,
data = aCallData)
## Load template to create the PUT function
template <- readLines(con = "inst/templates/function_PUT.mst")
## Write the created PUT function
writeLines(text = whisker::whisker.render(template = template,
data = aCallData),
con = paste0(dir_r, functionName, ".R"))
}
|
/R/generate.R
|
no_license
|
mverouden/brapir-v2-gen
|
R
| false | false | 31,149 |
r
|
### Packages to be added to DESCRIPTION of brapigen
### usethis::use_package(package = "yaml")
### usethis::use_package(package = "magrittr")
### usethis::use_package(package = "whisker")
### usethis::use_package(package = "stringr")
### load required packages
library(magrittr)
library(whisker)
### Functions
### ----
### Function to retrieve all call names
fetchCallNames <- function(brapiSpecs, verb = c("", "DELETE", "GET", "PATCH", "POST", "PUT")) {
verb <- tolower(match.arg(verb))
if (verb == "") {
callNames <- brapiSpecs[["paths"]] %>% names
callNames <- sub(pattern = "_",
replacement = "",
x = stringr::str_replace_all(
string = stringr::str_replace_all(string = callNames,
pattern = "/",
replacement = "_"),
pattern = stringr::regex("\\{|\\}"),
replacement = ""))
return(callNames)
} else {
callNames <- as.character()
for (i in names(brapiSpecs[["paths"]])) {
if (verb %in% names(brapiSpecs[["paths"]][[i]])
&&
!("deprecated" %in% names(brapiSpecs[["paths"]][[i]][[verb]]))) {
callNames <- c(callNames, i)
}
}
callNames <- sub(pattern = "_",
replacement = "",
x = stringr::str_replace_all(
string = stringr::str_replace_all(string = callNames,
pattern = "/",
replacement = "_"),
pattern = stringr::regex("\\{|\\}"),
replacement = ""))
return(callNames)
}
}
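## Example of the conversion performed by fetchCallNames() (hypothetical BrAPI paths):
## "/studies/{studyDbId}/observations" is returned as "studies_studyDbId_observations"
## "/lists/{listDbId}" is returned as "lists_listDbId"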
### Function to retrieve call specifications for a call, with either "DELETE",
### "GET", "PATCH", "POST", or "PUT" as verb,
### intended to loop over allCallNames, where each element of the vector will be
### used as idName.
getCall <- function(brapiSpecs, idName, verb) {
verb <- tolower(verb)
allCallNames <- fetchCallNames(brapiSpecs = brapiSpecs)
idNumber <- which(allCallNames == idName)
callName <- names(brapiSpecs[["paths"]])[idNumber]
## Check for type of call and deprecation
if (verb %in% names(brapiSpecs[["paths"]][[callName]])) {
if (!("Deprecated" %in% brapiSpecs[["paths"]][[callName]][[verb]][["tags"]])) {
aCall <- brapiSpecs[["paths"]][[callName]][[verb]]
aCall[["name"]] <- allCallNames[idNumber]
aCall[["call"]] <- callName
aCall[["verb"]] <- verb
return(aCall)
}
} else {
return()
}
}
### Function to generate a call path for the @title section in the documentation
aCallTitle <- function(aCall) {
titleCall <- gsub(pattern = "\\{", replacement = "\\\\{", x = aCall[["call"]])
titleCall <- gsub(pattern = "\\}", replacement = "\\\\}", x = titleCall)
return(titleCall)
}
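## Example (hypothetical call path): aCallTitle() escapes the curly braces so roxygen2
## renders them literally, e.g. "/studies/{studyDbId}" becomes "/studies/\{studyDbId\}".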
### Function to generate a string ("callRefURL") to be used in @references to
### construct the URL
aCallRefURL <- function(aCall) {
callRefURL <- gsub(pattern = "\\/\\{", replacement = "__", x = aCall[["call"]])
callRefURL <- gsub(pattern = "\\}\\/", replacement = "__", x = callRefURL)
callRefURL <- sub(pattern = "^\\/", replacement = "", x = callRefURL)
callRefURL <- sub(pattern = "\\}$", replacement = "_", x = callRefURL)
callRefURL <- gsub(pattern = "\\/", replacement = "_", x = callRefURL)
callRefURL <- gsub(pattern = "-", replacement = "_", x = callRefURL)
return(callRefURL)
}
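## Example (hypothetical call path): the reference-URL fragment produced above turns
## "/studies/{studyDbId}/observations" into "studies__studyDbId__observations".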
### Function to generate @param section in the documentation
aCallParamVector <- function(aCall) {
n <- length(aCall[["parameters"]])
res <- character(0)
for (i in 1:n) {
p <- aCall[["parameters"]][[i]]
if (p[["name"]] == "Authorization" | "deprecated" %in% names(p)) {
next()
} else {
if (p[["name"]] %in% c("Accept",
"active",
"dataType",
"expandHomozygotes",
"format",
"includeSiblings",
"includeSynonyms",
"listType",
"sortOrder")) {
switch(p[["name"]],
"Accept" = {res <- c(res, paste0(p[["name"]], " ",
"character",
"; required: ",
p[["required"]], "; ",
p[["description"]], "; ",
'default: "application/json",',
' other possible values: "text/csv"|"text/tsv"|"application/flapjack"'))},
"active" = {res <- c(res, paste0(p[["name"]], " ",
"logical",
"; required: ",
p[["required"]], "; ",
p[["description"]], "; ",
'default: NA,',
' other possible values: TRUE | FALSE'))},
"dataType" = {res <- c(res, paste0(p[["name"]], " ",
"character",
"; required: ",
p[["required"]], "; ",
p[["description"]], "; ",
'default: "",',
' other possible values: "application/json"|"text/csv"|"text/tsv"|"application/flapjack"'))},
"expandHomozygotes" = {res <- c(res, paste0(p[["name"]], " ",
"logical",
"; required: ",
p[["required"]], "; ",
p[["description"]], "; ",
'default: NA,',
' other possible values: TRUE | FALSE'))},
"format" = {res <- c(res, paste0(p[["name"]], " ",
"character",
"; required: ",
p[["required"]], "; ",
p[["description"]], "; ",
'default: as.character(NA),',
' other possible values: "csv", tsv" and depending on the call "flapjack" may be supported.'))},
"includeSiblings" = {res <- c(res, paste0(p[["name"]], " ",
"logical",
"; required: ",
p[["required"]], "; ",
p[["description"]], "; ",
'default: NA,',
' other possible values: TRUE | FALSE'))},
"includeSynonyms" = {res <- c(res, paste0(p[["name"]], " ",
"logical",
"; required: ",
p[["required"]], "; ",
p[["description"]], "; ",
'default: NA,',
' other possible values: TRUE | FALSE'))},
"listType" = {res <- c(res, paste0(p[["name"]], " ",
"character",
"; required: ",
p[["required"]], "; ",
p[["description"]], "; ",
'default: "",',
' other possible values: "germplasm"|"markers"|"observations"|"observationUnits"|"observationVariables"|"programs"|"samples"|"studies"|"trials"'))},
"sortOrder" = {res <- c(res, paste0(p[["name"]], " ",
"character",
"; required: ",
p[["required"]], "; ",
p[["description"]], "; ",
'default: "",',
' other possible values: "asc"|"ASC"|"desc"|"DESC"'))})
} else {
res <- c(res, paste0(p[["name"]], " ",
ifelse(("items" %in% names(p[["schema"]])),
"vector of type character",
ifelse(p[["schema"]][["type"]] == "integer",
"integer",
"character")),
"; required: ",
p[["required"]], "; ",
p[["description"]]))
}
}
}
return(res)
}
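## Example of a single @param element produced above (hypothetical parameter):
## "studyDbId character; required: TRUE; the internal database identifier of a study"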
### Function to generate @param section in the documentation for POST/PUT calls
aCallBodyVector <- function(aCall) {
tempList <- aCall[["requestBody"]][["content"]][["application/json"]][["schema"]][["properties"]]
res <- character(0)
for (name in names(tempList)) {
tempItem <- tempList[[name]]
tempItem[["name"]] <- name
if (tempItem[["name"]] == "Authorization" | "deprecated" %in% names(tempItem)) {
next()
} else {
res <- c(res, paste0(name, " ",
switch(tempItem[["type"]],
"array" = {
if (tempItem[["items"]][["type"]] == "string") {
"vector of type character"
}
},
"boolean" = "logical",
"integer" = "integer",
"object" = "list",
"string" = "character"),
"; required: FALSE", "; ",
tempItem[["description"]],
if (tempItem[["type"]] %in% c("array", "boolean", "integer", "string")) {
switch(tempItem[["type"]],
"array" = {'; default: "", when using multiple values supply as c("value1", "value2").'},
"boolean" = {"; default: NA, other possible values: TRUE | FALSE."},
"integer" = {ifelse(name %in% c("decimalPlaces", "listSize", "numberOfSamples"),
"; default: 0.",
"")},
"string" = {
if (name %in% c("format",
"listType")) {
switch(name,
"format" = '; default: as.character(NA), other possible values: "csv", "tsv", and depending on the call "flapjack" may be supported.',
"listType" = '; default: "", other possible values: "germplasm"|"markers"|"observations"|"observationUnits"|"observationVariables"|"programs"|"samples"|"studies"|"trials"')
} else {
'; default: "".'
}})
} else {
""
})
)
}
}
return(res)
}
### Function to generate function call arguments
aCallParamString <- function(aCall) {
n <- length(aCall[["parameters"]])
res <- character(0)
for (i in 1:n) {
p <- aCall[["parameters"]][[i]]
if (p[["name"]] == "Authorization" | "deprecated" %in% names(p)) {
next()
} else {
if (p[["name"]] == "page" | p[["name"]] == "pageSize") {
res <- paste(res,
paste(p[["name"]],
"=",
as.integer(p[["example"]])),
sep = ", ")
next()
}
if (p[["name"]] == "format") {
res <- paste(res,
paste(p[["name"]],
"=",
"as.character(NA)"),
sep = ", ")
next()
}
if (p[["name"]] == "min") {
res <- paste(res,
paste(p[["name"]],
"=",
"as.integer(NA)"),
sep = ", ")
next()
}
if (p[["name"]] == "max") {
res <- paste(res,
paste(p[["name"]],
"=",
"as.integer(NA)"),
sep = ", ")
next()
}
if (p[["name"]] %in% c("Accept",
"active",
"expandHomozygotes",
"includeSiblings",
"includeSynonyms")) {
switch(p[["name"]],
"Accept" = {res <- paste(res,
paste(p[["name"]],
"=",
"'application/json'"),
sep = ", ")
next()},
"active" = {res <- paste(res,
paste(p[["name"]], "=", "NA"),
sep = ", ")
next()},
"expandHomozygotes" = {res <- paste(res,
paste(p[["name"]], "=", "NA"),
sep = ", ")
next()},
"includeSiblings" = {res <- paste(res,
paste(p[["name"]], "=", "NA"),
sep = ", ")
next()},
"includeSynonyms" = {res <- paste(res,
paste(p[["name"]], "=", "NA"),
sep = ", ")
next()})
} else {
res <- paste(res,
paste(p[["name"]], "=", "''"),
sep = ", ")
}
}
}
if ("requestBody" %in% names(aCall)) {
tempList <- aCall[["requestBody"]][["content"]][["application/json"]][["schema"]][["properties"]]
for (name in names(tempList)) {
p <- tempList[[name]]
p[["name"]] <- name
if (p[["name"]] == "Authorization" | "deprecated" %in% names(p)) {
next()
} else {
res <- paste(res,
paste(name, "=",
switch(p[["type"]],
"array" = {
if (p[["items"]][["type"]] == "string") "''"
},
"boolean" = "NA",
"integer" = {
switch(name,
"decimalPlaces" = "0",
"imageFileSize" = "as.integer(NA)",
"imageFileSizeMax" = "as.integer(NA)",
"imageFileSizeMin" = "as.integer(NA)",
"imageHeight" = "as.integer(NA)",
"imageHeightMax" = "as.integer(NA)",
"imageHeightMin" = "as.integer(NA)",
"imageWidth" = "as.integer(NA)",
"imageWidthMax" = "as.integer(NA)",
"imageWidthMin" = "as.integer(NA)",
"listSize" = "0",
"numberOfSamples" = "0",
"page" = "0",
"pageSize" = "1000"
)
},
"object" = "list()",
"string" = ifelse(p[["name"]] == "format", "as.character(NA)" , "''")
)),
sep = ", ")
}
}
}
res <- sub(pattern = "^, ",
replacement = "",
x = res)
return(res)
}
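## Example of the argument string produced above (hypothetical parameters):
## "studyDbId = '', page = 0, pageSize = 1000"
## The string is dropped verbatim into the generated function signature by whisker.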
### Function to identify required arguments in a function call
aCallReqArgs <- function(aCall) {
n <- length(aCall[["parameters"]])
required <- character(0)
for (i in 1:n) {
p <- aCall$parameters[[i]]
if (p[["required"]] == TRUE && !is.null(p[["required"]])) {
required <- paste(required,
aCall[["parameters"]][[i]][["name"]],
sep = ", ")
} else {
next()
}
}
required <- sub(pattern = "^, ",
replacement = "",
x = required)
if (length(required) == 0) {
aCall[["required"]] <- ""
return(aCall)
} else {
aCall[["required"]] <- required
return(aCall)
}
}
### ----
### Create a GET function
### brapigen package needs to be build!
### Changed to version 2.0
### brapi_2.0.yaml # Full Specs
### brapi-core_2.0.yaml # Core Specs
### brapi-genotyping_2.0.yaml # Genotyping Specs
### brapi-germplasm_2.0.yaml # Germplasm Specs
### brapi-phenotyping_2.0.yaml # Phenotyping Specs
brapiSpecs <- yaml::read_yaml(system.file("openapi/brapi-phenotyping_2.0.yaml",
package = "brapigen"))
### Packages to be added to DESCRIPTION of Brapir
### usethis::use_package(package = "curl")
### Create directory infrastructure
dir_b <- "../brapir-v2"
base::unlink(x = dir_b, recursive = TRUE, force = TRUE)
base::list.files(path = dir_b, recursive = TRUE)
base::dir.create(path = dir_b)
dir_r <- base::file.path(dir_b, "R/")
base::dir.create(path = dir_r)
### copy files
fileNames <- base::setdiff(x = base::list.files("inst/templates/"),
y = base::list.files("inst/templates/")[grepl(pattern = "*.mst",
x = base::list.files("inst/templates/"))])
invisible(base::file.copy(from = paste0("inst/templates/", fileNames),
to = paste0(dir_r, fileNames),
overwrite = TRUE))
### Retrieve call names in a readable format
###
### brapi-core brapi-genotyping
### 44 Total calls
### --- + --- +
### 0 DELETE calls
### 23 GET calls
### 0 PATCH calls
### 14 POST calls
### 7 PUT calls
allCallNames <- fetchCallNames(brapiSpecs = brapiSpecs)
for (verb in c("DELETE", "GET", "PATCH", "POST", "PUT")) {
assign(paste(verb, "calls", sep = ""), fetchCallNames(brapiSpecs, verb))
}
# for (verb in c("GET", "POST", "PUT")) {
# print(verb)
# print(get(paste(verb, "calls", sep = "")))
# }
# get(paste(verb, "calls", sep = ""))
### Create aCall object containing call elements
### tested on: see openapi/examples_brapigen-brapir_test-server_brapi_org.R
for (callName in GETcalls) {
# start with callName <- GETcalls[] for an individual call
## retrieve call setting
aCall <- getCall(brapiSpecs = brapiSpecs, idName = callName, verb = "GET")
## Create element to substitute call address in @title
aCall[["titleCall"]] <- aCallTitle(aCall = aCall)
## Create call description as a character vector substituted LINE FEED \
## Carriage Return for @detail section.
aCallDesc <- gsub(pattern = "\\n(?!(#' ))",
replacement = "\n#' ",
x = aCall[["description"]], perl = TRUE)
## Create @param descriptions for documentation
aCallParam <- aCallParamVector(aCall = aCall)
aCallParam <- whisker::iteratelist(x = aCallParam, value = "pname")
aCallParam <- lapply(X = aCallParam,
FUN = function(el) {
lapply(X = el, FUN = function(elel) {
stringr::str_replace_all(string = elel,
pattern = "\\n\\n",
replacement = "\n#' ")})})
## Create call reference url part for the @references section
aCall[["callRefURL"]] <- aCallRefURL(aCall = aCall)
## Create the function arguments for the selected call
aCallArgs <- aCallParamString(aCall = aCall)
## Identify and store required arguments
aCall <- aCallReqArgs(aCall = aCall)
## Store call family information for documentation in @family
aCallFamily <- c(
## brapi_2.0
paste0(tolower(strsplit(x = brapiSpecs[["info"]][["title"]], split = "-")[[1]][1]),
"_",
brapiSpecs[["info"]][["version"]]),
tolower(brapiSpecs[["info"]][["title"]]),
aCall[["tags"]]
)
aCallFamily <- whisker::iteratelist(aCallFamily, value = "fname")
## Create call data list object for the selected call to be used by the
## whisker package.
aCallData <- list(verb = aCall[["verb"]],
titleCall = aCall[["titleCall"]],
summary = aCall[["summary"]],
parameters = aCallParam,
description = aCallDesc,
version = brapiSpecs[["info"]][["version"]],
tag = gsub(pattern = " ",
replacement = "\\%20",
x = aCall[["tags"]][1]),
callRefURL = aCall[["callRefURL"]],
family = aCallFamily,
name = gsub(pattern = "-",
replacement = "_",
x = aCall[["name"]]),
arguments = aCallArgs,
required = aCall[["required"]],
call = aCall[["call"]],
package = brapiSpecs[["info"]][["title"]])
## Load template for function name
template <- readLines(con = "inst/templates/function_name.mst")
## Create function name
functionName <- whisker::whisker.render(template = template,
data = aCallData)
## Load template to create the GET function
if (!grepl(pattern = "(search)", callName)) {
template <- readLines(con = "inst/templates/function_GET.mst")
} else {
template <- readLines(con = "inst/templates/function_GET_search.mst")
}
## Write the created GET function
writeLines(text = whisker::whisker.render(template = template,
data = aCallData),
con = paste0(dir_r, functionName, ".R"))
}
for (callName in POSTcalls) {
# start with callName <- POSTcalls[] for an individual call
## Retrieve call setting
aCall <- getCall(brapiSpecs = brapiSpecs, idName = callName, verb = "POST")
## Create element to substitute call address in @title
aCall[["titleCall"]] <- aCallTitle(aCall = aCall)
## Create call description as a character vector substituted LINE FEED \
## Carriage Return for @detail section.
aCallDesc <- gsub(pattern = "\\n(?!(#' ))",
replacement = "\n#' ",
x = aCall[["description"]], perl = TRUE)
## Create @param descriptions for documentation
aCallParam <- aCallParamVector(aCall = aCall)
aCallParam <- whisker::iteratelist(x = aCallParam, value = "pname")
aCallParam <- lapply(X = aCallParam,
FUN = function(el) {
lapply(X = el, FUN = function(elel) {
stringr::str_replace_all(string = elel,
pattern = "\\n\\n",
replacement = "\n#' ")})})
if ("requestBody" %in% names(aCall)) {
callBodyVector <- aCallBodyVector(aCall = aCall)
callBodyVector <- whisker::iteratelist(x = callBodyVector, value = "pname")
aCallParam <- c(aCallParam, callBodyVector)
}
## Create call reference url part for the @references section
aCall[["callRefURL"]] <- aCallRefURL(aCall = aCall)
## Create the function arguments for the selected call
aCallArgs <- aCallParamString(aCall = aCall)
## Identify and store required arguments
aCall <- aCallReqArgs(aCall = aCall)
## Store call family information for documentation in @family
aCallFamily <- c(
## brapi_2.0
paste0(tolower(strsplit(x = brapiSpecs[["info"]][["title"]], split = "-")[[1]][1]),
"_",
brapiSpecs[["info"]][["version"]]),
tolower(brapiSpecs[["info"]][["title"]]),
aCall[["tags"]]
)
aCallFamily <- whisker::iteratelist(aCallFamily, value = "fname")
## Create call data list object for the selected call to be used by the
## whisker package.
aCallData <- list(verb = aCall[["verb"]],
titleCall = aCall[["titleCall"]],
summary = aCall[["summary"]],
parameters = aCallParam,
description = aCallDesc,
version = brapiSpecs[["info"]][["version"]],
tag = gsub(pattern = " ",
replacement = "\\%20",
x = aCall[["tags"]][1]),
callRefURL = aCall[["callRefURL"]],
family = aCallFamily,
name = gsub(pattern = "-",
replacement = "_",
x = aCall[["name"]]),
arguments = aCallArgs,
required = aCall[["required"]],
call = aCall[["call"]],
package = brapiSpecs[["info"]][["title"]])
## Load template for function name
template <- readLines(con = "inst/templates/function_name.mst")
## Create function name
functionName <- whisker::whisker.render(template = template,
data = aCallData)
## Load template to create the POST function
if (!grepl(pattern = "(search)", callName)) {
template <- readLines(con = "inst/templates/function_POST.mst")
} else {
template <- readLines(con = "inst/templates/function_POST_search.mst")
}
## Write the created POST function
writeLines(text = whisker::whisker.render(template = template,
data = aCallData),
con = paste0(dir_r, functionName, ".R"))
}
for (callName in PUTcalls) {
# start with callName <- PUTcalls[] for an individual call
## Retrieve call setting
aCall <- getCall(brapiSpecs = brapiSpecs, idName = callName, verb = "PUT")
## Create element to substitute call address in @title
aCall[["titleCall"]] <- aCallTitle(aCall = aCall)
## Create call description as a character vector substituted LINE FEED \
## Carriage Return for @detail section.
aCallDesc <- gsub(pattern = "\\n(?!(#' ))",
replacement = "\n#' ",
x = aCall[["description"]], perl = TRUE)
## Create @param descriptions for documentation
aCallParam <- aCallParamVector(aCall = aCall)
aCallParam <- whisker::iteratelist(x = aCallParam, value = "pname")
aCallParam <- lapply(X = aCallParam,
FUN = function(el) {
lapply(X = el, FUN = function(elel) {
stringr::str_replace_all(string = elel,
pattern = "\\n\\n",
replacement = "\n#' ")})})
if ("requestBody" %in% names(aCall)) {
callBodyVector <- aCallBodyVector(aCall = aCall)
callBodyVector <- whisker::iteratelist(x = callBodyVector, value = "pname")
aCallParam <- c(aCallParam, callBodyVector)
}
## Create call reference url part for the @references section
aCall[["callRefURL"]] <- aCallRefURL(aCall = aCall)
## Create the function arguments for the selected call
aCallArgs <- aCallParamString(aCall = aCall)
## Identify and store required arguments
aCall <- aCallReqArgs(aCall = aCall)
## Store call family information for documentation in @family
aCallFamily <- c(
## brapi_2.0
paste0(tolower(strsplit(x = brapiSpecs[["info"]][["title"]], split = "-")[[1]][1]),
"_",
brapiSpecs[["info"]][["version"]]),
tolower(brapiSpecs[["info"]][["title"]]),
aCall[["tags"]]
)
aCallFamily <- whisker::iteratelist(aCallFamily, value = "fname")
## Create call data list object for the selected call to be used by the
## whisker package.
aCallData <- list(verb = aCall[["verb"]],
titleCall = aCall[["titleCall"]],
summary = aCall[["summary"]],
parameters = aCallParam,
description = aCallDesc,
version = brapiSpecs[["info"]][["version"]],
tag = gsub(pattern = " ",
replacement = "\\%20",
x = aCall[["tags"]][1]),
callRefURL = aCall[["callRefURL"]],
family = aCallFamily,
name = gsub(pattern = "-",
replacement = "_",
x = aCall[["name"]]),
arguments = aCallArgs,
required = aCall[["required"]],
call = aCall[["call"]],
package = brapiSpecs[["info"]][["title"]])
## Load template for function name
template <- readLines(con = "inst/templates/function_name.mst")
## Create function name
functionName <- whisker::whisker.render(template = template,
data = aCallData)
## Load template to create the PUT function
template <- readLines(con = "inst/templates/function_PUT.mst")
## Write the created PUT function
writeLines(text = whisker::whisker.render(template = template,
data = aCallData),
con = paste0(dir_r, functionName, ".R"))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generate_examples.R
\name{generate_random_spd_matrix}
\alias{generate_random_spd_matrix}
\title{Generate a random symmetric positive definite matrix}
\usage{
generate_random_spd_matrix(d, bMin = -10, bMax = 10, ...)
}
\arguments{
\item{d}{Number of rows/columns}
\item{bMin}{Minimum value of entries in \code{B}}
\item{bMax}{Maximum value of entries in \code{B}}
\item{...}{Ignored, only allowed for compatibility}
}
\description{
Generates a random \dxd symmetric positive definite matrix.
This is done by generating a random \dxd matrix \code{B},
then computing \code{B \%*\% t(B)},
and then normalizing the matrix to approximately single digit entries.
}
\seealso{
Other Example generations:
\code{\link{generate_random_Gamma}()},
\code{\link{generate_random_cactus}()},
\code{\link{generate_random_chordal_graph}()},
\code{\link{generate_random_connected_graph}()},
\code{\link{generate_random_graphical_Gamma}()},
\code{\link{generate_random_integer_Gamma}()},
\code{\link{generate_random_model}()},
\code{\link{generate_random_tree}()}
}
\concept{Example generations}
|
/man/generate_random_spd_matrix.Rd
|
no_license
|
sebastian-engelke/graphicalExtremes
|
R
| false | true | 1,156 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generate_examples.R
\name{generate_random_spd_matrix}
\alias{generate_random_spd_matrix}
\title{Generate a random symmetric positive definite matrix}
\usage{
generate_random_spd_matrix(d, bMin = -10, bMax = 10, ...)
}
\arguments{
\item{d}{Number of rows/columns}
\item{bMin}{Minimum value of entries in \code{B}}
\item{bMax}{Maximum value of entries in \code{B}}
\item{...}{Ignored, only allowed for compatibility}
}
\description{
Generates a random \dxd symmetric positive definite matrix.
This is done by generating a random \dxd matrix \code{B},
then computing \code{B \%*\% t(B)},
and then normalizing the matrix to approximately single digit entries.
}
\seealso{
Other Example generations:
\code{\link{generate_random_Gamma}()},
\code{\link{generate_random_cactus}()},
\code{\link{generate_random_chordal_graph}()},
\code{\link{generate_random_connected_graph}()},
\code{\link{generate_random_graphical_Gamma}()},
\code{\link{generate_random_integer_Gamma}()},
\code{\link{generate_random_model}()},
\code{\link{generate_random_tree}()}
}
\concept{Example generations}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/taxonomy.R
\name{n_leaves}
\alias{n_leaves}
\title{Number of leaves per taxon}
\usage{
n_leaves(x)
}
\arguments{
\item{x}{The object to get leaves for, such as a \link{taxonomy} object}
}
\description{
Get the number of leaves per taxon. A leaf is a taxon with no subtaxa.
}
\examples{
x <- taxonomy(c('Carnivora', 'Felidae', 'Panthera', 'Panthera leo',
'Panthera tigris', 'Ursidae', 'Ursus', 'Ursus arctos'),
supertaxa = c(NA, 1, 2, 3, 3, 1, 6, 7))
n_leaves(x)
}
\seealso{
Other leaf functions:
\code{\link{is_leaf}()},
\code{\link{leaves}()}
}
\concept{leaf functions}
|
/man/n_leaves.Rd
|
permissive
|
ropensci/taxa
|
R
| false | true | 681 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/taxonomy.R
\name{n_leaves}
\alias{n_leaves}
\title{Number of leaves per taxon}
\usage{
n_leaves(x)
}
\arguments{
\item{x}{The object to get leaves for, such as a \link{taxonomy} object}
}
\description{
Get the number of leaves per taxon. A leaf is a taxon with no subtaxa.
}
\examples{
x <- taxonomy(c('Carnivora', 'Felidae', 'Panthera', 'Panthera leo',
'Panthera tigris', 'Ursidae', 'Ursus', 'Ursus arctos'),
supertaxa = c(NA, 1, 2, 3, 3, 1, 6, 7))
n_leaves(x)
}
\seealso{
Other leaf functions:
\code{\link{is_leaf}()},
\code{\link{leaves}()}
}
\concept{leaf functions}
|
clusterMix=function(zdraw,cutoff=.9,SILENT=FALSE,nprint=BayesmConstant.nprint){
#
#
# revision history:
# written by p. rossi 9/05
#
# purpose: cluster observations based on draws of indicators of
# normal mixture components
#
# arguments:
# zdraw is a R x nobs matrix of draws of indicators (typically output from rnmixGibbs)
#     the rth row of zdraw contains the rth draw of indicators for each observation
# each element of zdraw takes on up to p values for up to p groups. The maximum
# number of groups is nobs. Typically, however, the number of groups will be small
# and equal to the number of components used in the normal mixture fit.
#
#     cutoff is a cutoff used in determining one clustering scheme; it must be
# a number between .5 and 1.
#
# nprint - print every nprint'th draw
#
# output:
# two clustering schemes each with a vector of length nobs which gives the assignment
# of each observation to a cluster
#
# clustera (finds zdraw with similarity matrix closest to posterior mean of similarity)
# clusterb (finds clustering scheme by assigning ones if posterior mean of similarity matrix
# > cutoff and computing associated z )
#
# define needed functions
#
# ------------------------------------------------------------------------------------------
#
# check arguments
#
if(missing(zdraw)) {pandterm("Requires zdraw argument -- R x n matrix of indicator draws")}
if(nprint<0) {pandterm('nprint must be an integer greater than or equal to 0')}
#
# check validity of zdraw rows -- must be integers in the range 1:nobs
#
nobs=ncol(zdraw)
R=nrow(zdraw)
if(sum(zdraw %in% (1:nobs)) < ncol(zdraw)*nrow(zdraw))
{pandterm("Bad zdraw argument -- all elements must be integers in 1:nobs")}
cat("Table of zdraw values pooled over all rows",fill=TRUE)
print(table(zdraw))
#
# check validity of cuttoff
if(cutoff > 1 || cutoff < .5) {pandterm(paste("cutoff invalid, = ",cutoff))}
###################################################################
# Keunwoo Kim
# 10/06/2014
###################################################################
out=clusterMix_rcpp_loop(zdraw, cutoff, SILENT, nprint)
###################################################################
return(list(clustera=as.vector(out$clustera),clusterb=as.vector(out$clusterb)))
}
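## A minimal usage sketch (hypothetical indicator draws; in practice zdraw would be
## taken from rnmixGibbs output, as described in the comments above):
## set.seed(1)
## zdraw <- matrix(sample(1:3, 50 * 10, replace = TRUE), nrow = 50, ncol = 10)
## out <- clusterMix(zdraw, cutoff = 0.9, nprint = 10)
## table(out$clustera)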
|
/pkgs/bayesm/R/clusterMix_rcpp.R
|
no_license
|
vaguiar/EDAV_Project_2017
|
R
| false | false | 2,302 |
r
|
clusterMix=function(zdraw,cutoff=.9,SILENT=FALSE,nprint=BayesmConstant.nprint){
#
#
# revision history:
# written by p. rossi 9/05
#
# purpose: cluster observations based on draws of indicators of
# normal mixture components
#
# arguments:
# zdraw is a R x nobs matrix of draws of indicators (typically output from rnmixGibbs)
#     the rth row of zdraw contains the rth draw of indicators for each observation
# each element of zdraw takes on up to p values for up to p groups. The maximum
# number of groups is nobs. Typically, however, the number of groups will be small
# and equal to the number of components used in the normal mixture fit.
#
#     cutoff is a cutoff used in determining one clustering scheme; it must be
# a number between .5 and 1.
#
# nprint - print every nprint'th draw
#
# output:
# two clustering schemes each with a vector of length nobs which gives the assignment
# of each observation to a cluster
#
# clustera (finds zdraw with similarity matrix closest to posterior mean of similarity)
# clusterb (finds clustering scheme by assigning ones if posterior mean of similarity matrix
# > cutoff and computing associated z )
#
# define needed functions
#
# ------------------------------------------------------------------------------------------
#
# check arguments
#
if(missing(zdraw)) {pandterm("Requires zdraw argument -- R x n matrix of indicator draws")}
if(nprint<0) {pandterm('nprint must be an integer greater than or equal to 0')}
#
# check validity of zdraw rows -- must be integers in the range 1:nobs
#
nobs=ncol(zdraw)
R=nrow(zdraw)
if(sum(zdraw %in% (1:nobs)) < ncol(zdraw)*nrow(zdraw))
{pandterm("Bad zdraw argument -- all elements must be integers in 1:nobs")}
cat("Table of zdraw values pooled over all rows",fill=TRUE)
print(table(zdraw))
#
# check validity of cuttoff
if(cutoff > 1 || cutoff < .5) {pandterm(paste("cutoff invalid, = ",cutoff))}
###################################################################
# Keunwoo Kim
# 10/06/2014
###################################################################
out=clusterMix_rcpp_loop(zdraw, cutoff, SILENT, nprint)
###################################################################
return(list(clustera=as.vector(out$clustera),clusterb=as.vector(out$clusterb)))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitR-package.r
\name{SIR}
\alias{SIR}
\title{A simple deterministic SIR model with constant population size}
\format{A \code{\link{fitmodel}} object, that is a list with the following elements:}
\description{
A simple deterministic SIR model with constant population size, uniform prior and Poisson observation.
}
\details{
\itemize{
\item \code{name} character.
\item \code{state.names} character vector.
\item \code{theta.names} character vector.
\item \code{simulate} \R-function.
\item \code{rPointObs} \R-function.
\item \code{dprior} \R-function.
\item \code{dPointObs} \R-function.
}
Look at the documentation of \code{\link{fitmodel}} for more details about each of these elements.
You can look at the code of the \R-functions by typing \code{SIR$simulate} for instance. There are some comments included.
}
|
/man/SIR.Rd
|
no_license
|
WWH98932/fitR
|
R
| false | true | 908 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitR-package.r
\name{SIR}
\alias{SIR}
\title{A simple deterministic SIR model with constant population size}
\format{A \code{\link{fitmodel}} object, that is a list with the following elements:}
\description{
A simple deterministic SIR model with constant population size, uniform prior and Poisson observation.
}
\details{
\itemize{
\item \code{name} character.
\item \code{state.names} character vector.
\item \code{theta.names} character vector.
\item \code{simulate} \R-function.
\item \code{rPointObs} \R-function.
\item \code{dprior} \R-function.
\item \code{dPointObs} \R-function.
}
Look at the documentation of \code{\link{fitmodel}} for more details about each of these elements.
You can look at the code of the \R-functions by typing \code{SIR$simulate} for instance. There are some comments included.
}
|
# Comparing means of more than two groups: R code for Chapter 15 examples
# Download the R code on this page as a single file <a href="../wp-content/rcode/chap15.r">here.
# ------------------------------------------------------------
# Example 15.1. <a href="../wp-content/data/chapter15/chap15e1KneesWhoSayNight.csv">Knees who say night
# Analysis of variance, comparing phase shift in the circadian rhythm of melatonin production in participants given alternative light treatments. Also, the nonparametric Kruskal-Wallis test. Finally, we use the same data to demonstrate planned comparisons and unplanned comparisons (Tukey-Kramer tests).
# Read and inspect the data.
circadian <- read.csv(url("http://www.zoology.ubc.ca/~schluter/WhitlockSchluter/wp-content/data/chapter15/chap15e1KneesWhoSayNight.csv"))
# Set the preferred ordering of groups in tables and graphs.
circadian$treatment <- factor(circadian$treatment,
levels = c("control", "knee", "eyes"))
#
# Table of descriptive statistics by treatment group (Table 15.1-1).
meanShift <- tapply(circadian$shift, circadian$treatment, mean)
sdevShift <- tapply(circadian$shift, circadian$treatment, sd)
n <- tapply(circadian$shift, circadian$treatment, length)
data.frame(mean = meanShift, std.dev = sdevShift, n = n)
# Strip chart of circadian rhythm shift by treatment.
stripchart(shift ~ treatment, data = circadian, method = "jitter", vertical = TRUE)
# Add standard error bars to previous strip chart. This makes use of the descriptive statistics calculated in an earlier step. The error bars are added as line segments, offset along the x-axis by the amount adjustAmount. The means are added using points.
seShift <- sdevShift / sqrt(n)
adjustAmount <- 0.15
segments( c(1,2,3) + adjustAmount, meanShift - seShift,
c(1,2,3) + adjustAmount, meanShift + seShift )
points(meanShift ~ c( c(1,2,3) + adjustAmount ))
# Commands for a stripchart with more options are shown here.
par(bty="l")
adjustAmount <- 0.15
stripchart(shift ~ treatment, data = circadian, method = "jitter",
vertical = TRUE, las = 1, pch = 1, xlab = "Light treatment",
ylab = "Shift in circadian rhythm (h)", col = "firebrick",
cex = 1.2, ylim = c(-3, 1))
segments( c(1,2,3) + adjustAmount, meanShift - seShift,
c(1,2,3) + adjustAmount, meanShift + seShift )
points(meanShift ~ c( c(1,2,3) + adjustAmount ), pch = 16, col = "firebrick")
# Fixed effects ANOVA table (Table 15.1-2). This is done in two steps. The first step involves fitting the ANOVA model to the data using lm ("lm" stands for "linear model", of which ANOVA is one type). Then we use the command anova to assemble the ANOVA table.
circadianAnova <- lm(shift ~ treatment, data = circadian)
anova(circadianAnova)
# R2 indicating the fraction of variation in the response variable "explained" by treatment. This is again done in two steps. The first step calculates a bunch of useful quantities from the ANOVA model object previously created with a lm command. The second step shows the R2 value.
circadianAnovaSummary <- summary(circadianAnova)
circadianAnovaSummary$r.squared
# Kruskal-Wallis test, a nonparametric method to compare more than two groups. The method is not needed for the circadian rhythm data, because assumptions of ANOVA are met, but we include it here to demonstrate the method. The formula is the same as that used with lm.
kruskal.test(shift ~ treatment, data = circadian)
# <hr class = "short">
# Planned and unplanned comparisons between means. A planned comparison is one planned during the design of the study, before the data were collected. To use the method you need a good justification to focus on a specific comparison of two treatments. Only a small number of planned comparisons are allowed. If you don't have a good prior justification for a specific comparison, use unplanned comparison instead. Unplanned comparisons typically involve testing differences between all pairs of means, and methods provide needed protection against rising Type I errors that would otherwise result from multiple testing.
# If you haven't already done so, you'll need to install the multicomp package (this needs to be done just once per computer).
# install.packages("multcomp", dependencies = TRUE)
# Planned comparison between "control" and "knee" treatments. Begin by loading the multicomp package. The commands shown will give the 95% confidence interval and the planned t-test of a difference between the treatment means.
library(multcomp)
circadianPlanned <- glht(circadianAnova, linfct =
mcp(treatment = c("knee - control = 0")))
confint(circadianPlanned)
summary(circadianPlanned)
# Unplanned comparisons (Tukey-Kramer tests) between all pairs of means. The raw data for the "Wood-wide web" example (Example 15.4) are not available, so we have used the circadian rhythm data to demonstrate the R method here instead. The output table will say "t" but it is actually "q" as we describe in the book.
library(multcomp)
circadianTukey <- glht(circadianAnova, linfct = mcp(treatment = "Tukey"))
summary(circadianTukey)
# ------------------------------------------------------------
# Example 15.6. <a href="../wp-content/data/chapter15/chap15e6WalkingStickFemurs.csv">Walking stick limbs
# Random effects ANOVA to estimate variance components and calculate repeatability of measurements of femur length in walking stick insects.
# Read and inspect data. Data are in "long" format. Femur length is one column, with another variable indicating specimen identity. Each specimen was measured twice and therefore takes up two rows.
walkingstick <- read.csv(url("http://www.zoology.ubc.ca/~schluter/WhitlockSchluter/wp-content/data/chapter15/chap15e6WalkingStickFemurs.csv"))
head(walkingstick)
# Descriptive statistics for each specimen, which we need to produce the strip chart. tapply is used to get the mean, smallest measurement, largest measurement, and specimen id of each specimen.
meanFemur <- tapply(walkingstick$femurLength, walkingstick$specimen, mean)
minFemur <- tapply(walkingstick$femurLength, walkingstick$specimen, min)
maxFemur <- tapply(walkingstick$femurLength, walkingstick$specimen, max)
specimen <- tapply(walkingstick$specimen, walkingstick$specimen, unique)
# Stripchart, with line segments connecting the two measurements of each specimen.
stripchart(femurLength ~ specimen, data = walkingstick,
vertical = TRUE, xlab = "Specimen")
segments(specimen, minFemur, specimen, maxFemur)
# Commands for a prettier figure in which specimens are ordered along the x-axis by mean femur length (Figure 15.6-1) are here.
par(bty="l")
stripchart(minFemur[order(meanFemur, specimen)] ~ c(1:length(specimen)),
vertical = TRUE, xaxt = "n", pch = 16, col = "firebrick", las = 1,
cex = 1.2, ylim = range(c(minFemur, maxFemur)), ylab = "Femur length (mm)",
xlab = "Individual walking sticks")
stripchart(maxFemur[order(meanFemur, specimen)] ~ c(1:length(specimen)),
vertical = TRUE, add = TRUE, pch = 16, col = "firebrick", las = 1, cex = 1.2)
segments(c(1:length(specimen)), minFemur[order(meanFemur, specimen)],
c(1:length(specimen)), maxFemur[order(meanFemur, specimen)])
# Fit the random effects ANOVA using lme. The random effects ANOVA function requires two formulas, rather than just one. The first formula (beginning with "fixed =") is for the fixed effect. The walking stick insect example doesn't include a fixed-effect variable, so we just provide a symbol for a constant in the formula ("~ 1"), representing the grand mean. The second formula (beginning with "random =") is for the random effect. In this example, the individual specimens are the random groups, and the second formula indicates this (the "~ 1" in the formula below indicates that each specimen has its own mean). You will need to load the nlme library to begin.
library(nlme)
walkingstickAnova <- lme(fixed = femurLength ~ 1,
random = ~ 1|specimen, data = walkingstick)
# Obtain the variance components for the random effects using VarCorr. The output includes the standard deviation and variance for both components of random variation in the random effects model for this example. The first is the variance among the specimen means. This is the variance among groups, and is confusingly labeled "Intercept" in the output. The second component is the variance among measurements made on the same individuals. This is the within group variance, also known as the error mean square, and is labeled "Residual" in the output.
walkingstickVarcomp <- VarCorr(walkingstickAnova)
walkingstickVarcomp
# Calculate the repeatability of the walking stick femur measurements using the estimates of the variance components.
varAmong <- as.numeric( walkingstickVarcomp[1,1] )
varWithin <- as.numeric( walkingstickVarcomp[2,1] )
repeatability <- varAmong / (varAmong + varWithin)
repeatability
# Note that lme doesn't test the significance of the random effects (whether the specimen means are significantly different from one another), since the method basically assumes the presence of variance among random means. As a result, there is no ANOVA table for random effects.
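# One approximate way to test the among-specimen variance (not part of the book's example; shown
# here only as an optional sketch) is a likelihood-ratio comparison against a model without the
# specimen effect, fitted with gls from the same nlme package.
walkingstickNull <- gls(femurLength ~ 1, data = walkingstick)
anova(walkingstickNull, walkingstickAnova)
# The resulting P-value is conservative, because the variance being tested sits on the boundary
# of its parameter space (a variance cannot be negative).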
|
/ABD_R_scripts/ABD_chap15.r
|
no_license
|
mbh038/mbhData
|
R
| false | false | 9,127 |
r
|
# Comparing means of more than two groups: R code for Chapter 15 examples
# Download the R code on this page as a single file <a href="../wp-content/rcode/chap15.r">here.
# ------------------------------------------------------------
# Example 15.1. <a href="../wp-content/data/chapter15/chap15e1KneesWhoSayNight.csv">Knees who say night
# Analysis of variance, comparing phase shift in the circadian rhythm of melatonin production in participants given alternative light treatments. Also, the nonparametric Kruskal-Wallis test. Finally, we use the same data to demonstrate planned comparisons and unplanned comparisons (Tukey-Kramer tests).
# Read and inspect the data.
circadian <- read.csv(url("http://www.zoology.ubc.ca/~schluter/WhitlockSchluter/wp-content/data/chapter15/chap15e1KneesWhoSayNight.csv"))
# Set the preferred ordering of groups in tables and graphs.
circadian$treatment <- factor(circadian$treatment,
levels = c("control", "knee", "eyes"))
#
# Table of descriptive statistics by treatment group (Table 15.1-1).
meanShift <- tapply(circadian$shift, circadian$treatment, mean)
sdevShift <- tapply(circadian$shift, circadian$treatment, sd)
n <- tapply(circadian$shift, circadian$treatment, length)
data.frame(mean = meanShift, std.dev = sdevShift, n = n)
# Strip chart of circadian rhythm shift by treatment.
stripchart(shift ~ treatment, data = circadian, method = "jitter", vertical = TRUE)
# Add standard error bars to previous strip chart. This makes use of the descriptive statistics calculated in an earlier step. The error bars are added as line segments, offset along the x-axis by the amount adjustAmount. The means are added using points.
seShift <- sdevShift / sqrt(n)
adjustAmount <- 0.15
segments( c(1,2,3) + adjustAmount, meanShift - seShift,
c(1,2,3) + adjustAmount, meanShift + seShift )
points(meanShift ~ c( c(1,2,3) + adjustAmount ))
# Commands for a stripchart with more options are shown here.
par(bty="l")
adjustAmount <- 0.15
stripchart(shift ~ treatment, data = circadian, method = "jitter",
vertical = TRUE, las = 1, pch = 1, xlab = "Light treatment",
ylab = "Shift in circadian rhythm (h)", col = "firebrick",
cex = 1.2, ylim = c(-3, 1))
segments( c(1,2,3) + adjustAmount, meanShift - seShift,
c(1,2,3) + adjustAmount, meanShift + seShift )
points(meanShift ~ c( c(1,2,3) + adjustAmount ), pch = 16, col = "firebrick")
# Fixed effects ANOVA table (Table 15.1-2). This is done in two steps. The first step involves fitting the ANOVA model to the data using lm ("lm" stands for "linear model", of which ANOVA is one type). Then we use the command anova to assemble the ANOVA table.
circadianAnova <- lm(shift ~ treatment, data = circadian)
anova(circadianAnova)
# R2 indicating the fraction of variation in the response variable "explained" by treatment. This is again done in two steps. The first step calculates a bunch of useful quantities from the ANOVA model object previously created with a lm command. The second step shows the R2 value.
circadianAnovaSummary <- summary(circadianAnova)
circadianAnovaSummary$r.squared
# Kruskal-Wallis test, a nonparametric method to compare more than two groups. The method is not needed for the circadian rhythm data, because assumptions of ANOVA are met, but we include it here to demonstrate the method. The formula is the same as that used with lm.
kruskal.test(shift ~ treatment, data = circadian)
# <hr class = "short">
# Planned and unplanned comparisons between means. A planned comparison is one planned during the design of the study, before the data were collected. To use the method you need a good justification to focus on a specific comparison of two treatments. Only a small number of planned comparisons are allowed. If you don't have a good prior justification for a specific comparison, use unplanned comparison instead. Unplanned comparisons typically involve testing differences between all pairs of means, and methods provide needed protection against rising Type I errors that would otherwise result from multiple testing.
# If you haven't already done so, you'll need to install the multicomp package (this needs to be done just once per computer).
# install.packages("multcomp", dependencies = TRUE)
# Planned comparison between "control" and "knee" treatments. Begin by loading the multicomp package. The commands shown will give the 95% confidence interval and the planned t-test of a difference between the treatment means.
library(multcomp)
circadianPlanned <- glht(circadianAnova, linfct =
mcp(treatment = c("knee - control = 0")))
confint(circadianPlanned)
summary(circadianPlanned)
# Unplanned comparisons (Tukey-Kramer tests) between all pairs of means. The raw data for the "Wood-wide web" example (Example 15.4) are not available, so we have used the circadian rhythm data to demonstrate the R method here instead. The output table will say "t" but it is actually "q" as we describe in the book.
library(multcomp)
circadianTukey <- glht(circadianAnova, linfct = mcp(treatment = "Tukey"))
summary(circadianTukey)
# ------------------------------------------------------------
# Example 15.6. <a href="../wp-content/data/chapter15/chap15e6WalkingStickFemurs.csv">Walking stick limbs
# Random effects ANOVA to estimate variance components and calculate repeatability of measurements of femur length in walking stick insects.
# Read and inspect data. Data are in "long" format. Femur length is one column, with another variable indicating specimen identity. Each specimen was measured twice and therefore takes up two rows.
walkingstick <- read.csv(url("http://www.zoology.ubc.ca/~schluter/WhitlockSchluter/wp-content/data/chapter15/chap15e6WalkingStickFemurs.csv"))
head(walkingstick)
# Descriptive statistics for each specimen, which we need to produce the strip chart. tapply is used to get the mean, smallest measurement, largest measurement, and specimen id of each specimen.
meanFemur <- tapply(walkingstick$femurLength, walkingstick$specimen, mean)
minFemur <- tapply(walkingstick$femurLength, walkingstick$specimen, min)
maxFemur <- tapply(walkingstick$femurLength, walkingstick$specimen, max)
specimen <- tapply(walkingstick$specimen, walkingstick$specimen, unique)
# Stripchart, with line segments connecting the two measurements of each specimen.
stripchart(femurLength ~ specimen, data = walkingstick,
vertical = TRUE, xlab = "Specimen")
segments(specimen, minFemur, specimen, maxFemur)
# Commands for a prettier figure in which specimens are ordered along the x-axis by mean femur length (Figure 15.6-1) are here.
par(bty="l")
stripchart(minFemur[order(meanFemur, specimen)] ~ c(1:length(specimen)),
vertical = TRUE, xaxt = "n", pch = 16, col = "firebrick", las = 1,
cex = 1.2, ylim = range(c(minFemur, maxFemur)), ylab = "Femur length (mm)",
xlab = "Individual walking sticks")
stripchart(maxFemur[order(meanFemur, specimen)] ~ c(1:length(specimen)),
vertical = TRUE, add = TRUE, pch = 16, col = "firebrick", las = 1, cex = 1.2)
segments(c(1:length(specimen)), minFemur[order(meanFemur, specimen)],
c(1:length(specimen)), maxFemur[order(meanFemur, specimen)])
# Fit the random effects ANOVA using lme. The random effects ANOVA function requires two formulas, rather than just one. The first formula (beginning with "fixed =") is for the fixed effect. The walking stick insect example doesn't include a fixed-effect variable, so we just provide a symbol for a constant in the formula ("~ 1"), representing the grand mean. The second formula (beginning with "random =") is for the random effect. In this example, the individual specimens are the random groups, and the second formula indicates this (the "~ 1" in the formula below indicates that each specimen has its own mean). You will need to load the nlme library to begin.
library(nlme)
walkingstickAnova <- lme(fixed = femurLength ~ 1,
random = ~ 1|specimen, data = walkingstick)
# Obtain the variance components for the random effects using VarCorr. The output includes the standard deviation and variance for both components of random variation in the random effects model for this example. The first is the variance among the specimen means. This is the variance among groups, and is confusingly labeled "Intercept" in the output. The second component is the variance among measurements made on the same individuals. This is the within group variance, also known as the error mean square, and is labeled "Residual" in the output.
walkingstickVarcomp <- VarCorr(walkingstickAnova)
walkingstickVarcomp
# Calculate the repeatability of the walking stick femur measurements using the estimates of the variance components.
varAmong <- as.numeric( walkingstickVarcomp[1,1] )
varWithin <- as.numeric( walkingstickVarcomp[2,1] )
repeatability <- varAmong / (varAmong + varWithin)
repeatability
# Note that lme doesn't test the significance of the random effects (whether the specimen means are significantly different from one another), since the method basically assumes the presence of variance among random means. As a result, there is no ANOVA table for random effects.
|
# server.R
library(shiny)
library(ggplot2)
library(Rmisc)
testWords <- read.csv("data/testWords.csv")
test2Words <- read.csv("data/test2Words.csv")
shinyServer(function(input, output) {
plot1 <- reactive({
ggplot(subset(testWords, freq >= input$freq1), aes(word, freq)) +
geom_bar(stat="identity", alpha=0.8, aes(fill=colour)) +
ggtitle("Snap Circuit Reviews Keywords") + ylab("Frequency") +
theme(plot.title = element_text(lineheight=.8, face="bold",size=16)) +
theme(axis.text.x=element_text(angle=45, hjust=1, size=16), axis.title.x = element_blank()) +
theme(legend.position="none")
})
plot2 <- reactive({
ggplot(subset(test2Words, freq >= input$freq2), aes(word, freq)) +
geom_bar(stat="identity", alpha=0.8, aes(fill=colour)) +
ggtitle("Science Kit Reviews Keywords") + ylab("Frequency") +
theme(plot.title = element_text(lineheight=.8, face="bold",size=16)) +
theme(axis.text.x=element_text(angle=45, hjust=1, size=16), axis.title.x = element_blank()) +
theme(legend.position="none")
})
output$wordPlot <- renderPlot({
multiplot(plot1(),plot2(),cols=2)
})
})
|
/keywords/server.R
|
no_license
|
janie128/Project_Gift_Picker
|
R
| false | false | 1,160 |
r
|
# server.R
library(shiny)
library(ggplot2)
library(Rmisc)
testWords <- read.csv("data/testWords.csv")
test2Words <- read.csv("data/test2Words.csv")
shinyServer(function(input, output) {
plot1 <- reactive({
ggplot(subset(testWords, freq >= input$freq1), aes(word, freq)) +
geom_bar(stat="identity", alpha=0.8, aes(fill=colour)) +
ggtitle("Snap Circuit Reviews Keywords") + ylab("Frequency") +
theme(plot.title = element_text(lineheight=.8, face="bold",size=16)) +
theme(axis.text.x=element_text(angle=45, hjust=1, size=16), axis.title.x = element_blank()) +
theme(legend.position="none")
})
plot2 <- reactive({
ggplot(subset(test2Words, freq >= input$freq2), aes(word, freq)) +
geom_bar(stat="identity", alpha=0.8, aes(fill=colour)) +
ggtitle("Science Kit Reviews Keywords") + ylab("Frequency") +
theme(plot.title = element_text(lineheight=.8, face="bold",size=16)) +
theme(axis.text.x=element_text(angle=45, hjust=1, size=16), axis.title.x = element_blank()) +
theme(legend.position="none")
})
output$wordPlot <- renderPlot({
multiplot(plot1(),plot2(),cols=2)
})
})
|
# create local experiment environment
#' @export
attach_virtualenv = function(subject_id, electrodes, module_path = NULL, packages = '', is_univariate = TRUE, ...){
tryCatch({
detach(.runtime_env)
}, error = function(e){})
# init subject data
.subject = prophet$get_subject(subject_id)
.subject$data_environment$load(electrodes = electrodes)
.subject$data_environment$bind_electrodes(electrodes = electrodes, debug = TRUE)
data_repository$set_data(.subject$data_environment)
# init module data
if(!is.null(module_path)){
.module = load_module(module_id = '.test_module', source_path = module_path, category = 'Test', label = 'Test Module',
is_univariate = is_univariate, suma_enabled = FALSE, packages = packages)
data_repository$add_module(module = .module)
.runtime_env = .module$runtime_env
.runtime_env$get_local_var <- .module$get_local_var
.runtime_env$get_SUMA <- .module$get_SUMA
.runtime_env$set_cache <- .module$set_cache
.runtime_env$get_cache <- .module$get_cache
}else{
.runtime_env = new.env()
}
data_env <- data_repository$get_data()
.runtime_env$data_env <- new.env()
.runtime_env$data_env$cumsum <- data_env$cumsum
.runtime_env$data_env$data <- data_env$data
.runtime_env$data_env$electrodes <- data_env$electrodes
.runtime_env$data_env$subject <- new.env()
.runtime_env$data_env$subject$id <- .subject$id
.runtime_env$data_env$subject$electrodes <- .subject$electrodes
.runtime_env$data_env$subject$frequencies <- .subject$frequencies
.runtime_env$data_env$subject$trials <- .subject$trials
.runtime_env$data_env$subject$time_points <- .subject$time_points
.runtime_env$get_global_var <- data_repository$get_global_var
attach(.runtime_env)
message('Virtual environment created:\nHere are variables you might want to use for development\n')
print(ls(.runtime_env))
message("Here's what you can find in data_env: \n")
print(ls(.runtime_env$data_env))
message('For example, data_env$data contains ecog tensor data\n')
message(' while data_env$subject contains subject info\n')
message('--- Type detach_virtualenv() to quit this environment. Enjoy :)')
}
#' @export
detach_virtualenv = function(){
detach(.runtime_env)
}
|
/R/dev_virtualenv.R
|
no_license
|
xylimeng/rave
|
R
| false | false | 2,254 |
r
|
# create local experiment environment
#' @export
attach_virtualenv = function(subject_id, electrodes, module_path = NULL, packages = '', is_univariate = TRUE, ...){
tryCatch({
detach(.runtime_env)
}, error = function(e){})
# init subject data
.subject = prophet$get_subject(subject_id)
.subject$data_environment$load(electrodes = electrodes)
.subject$data_environment$bind_electrodes(electrodes = electrodes, debug = TRUE)
data_repository$set_data(.subject$data_environment)
# init module data
if(!is.null(module_path)){
.module = load_module(module_id = '.test_module', source_path = module_path, category = 'Test', label = 'Test Module',
is_univariate = is_univariate, suma_enabled = FALSE, packages = packages)
data_repository$add_module(module = .module)
.runtime_env = .module$runtime_env
.runtime_env$get_local_var <- .module$get_local_var
.runtime_env$get_SUMA <- .module$get_SUMA
.runtime_env$set_cache <- .module$set_cache
.runtime_env$get_cache <- .module$get_cache
}else{
.runtime_env = new.env()
}
data_env <- data_repository$get_data()
.runtime_env$data_env <- new.env()
.runtime_env$data_env$cumsum <- data_env$cumsum
.runtime_env$data_env$data <- data_env$data
.runtime_env$data_env$electrodes <- data_env$electrodes
.runtime_env$data_env$subject <- new.env()
.runtime_env$data_env$subject$id <- .subject$id
.runtime_env$data_env$subject$electrodes <- .subject$electrodes
.runtime_env$data_env$subject$frequencies <- .subject$frequencies
.runtime_env$data_env$subject$trials <- .subject$trials
.runtime_env$data_env$subject$time_points <- .subject$time_points
.runtime_env$get_global_var <- data_repository$get_global_var
attach(.runtime_env)
message('Virtual environment created:\nHere are variables you might want to use for development\n')
print(ls(.runtime_env))
message("Here's what you can find in data_env: \n")
print(ls(.runtime_env$data_env))
message('For example, data_env$data contains ecog tensor data\n')
message(' while data_env$subject contains subject info\n')
message('--- Type detach_virtualenv() to quit this environment. Enjoy :)')
}
#' @export
detach_virtualenv = function(){
detach(.runtime_env)
}
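# A hedged usage sketch for the two helpers above. The subject id, electrode numbers and
# module path are placeholders for illustration only; the function names, arguments and
# the bound objects come from the code in this file.
attach_virtualenv(subject_id = 'demo_subject', electrodes = 1:4,
                  module_path = './modules/my_module', is_univariate = TRUE)
ls(data_env)            # bound objects: data, electrodes, subject, cumsum, ...
dim(data_env$data)      # the ecog tensor data mentioned in the startup message
detach_virtualenv()     # leave the virtual environment when finished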
|
library("igraph")
library(lubridate)   # wday() used below comes from lubridate; it was not loaded in the original script
correlation_cal = function(stock_name1, stock_name2, index, data1)
{
path2 = paste("/Users/kay/Documents/course/ee232/hw_4/finance_data/data/", stock_name2, ".csv", sep = "")
data2 = read.csv(path2)
if(nrow(data2)<765)
return()
price1 = data1$Close
price2 = data2$Close
date1 = data1$Date
log1 = numeric()
log2 = numeric()
for(i in index)
{
log1 = c(log1, price1[i])
log2 = c(log2, price2[i])
}
log1 = diff(log(log1))
log2 = diff(log(log2))
result = cor(log1, log2)
mydata[stock_name1, stock_name2] <<- result
mydata[stock_name2, stock_name1] <<- result
gc()
}
data = read.csv("/Users/kay/Documents/course/ee232/hw_4/finance_data/Name_sector.csv")
stock_name = data$Symbol
valid_stock_name = c()
stock_num = nrow(data)
mydata = matrix(0, stock_num, stock_num)
index = numeric()
for(m in 1:stock_num)
{
stock_name1 = stock_name[m]
path1 = paste("/Users/kay/Documents/course/ee232/hw_4/finance_data/data/", stock_name1, ".csv", sep = "")
data1 = read.csv(path1)
if(m==1)
{
for(l in 1:nrow(data1))
{
if(wday(as.Date(toString(data1$Date[l])))==2)
{
index = c(index, l)
}
}
}
if(nrow(data1)<765)
next
valid_stock_name = c(valid_stock_name, toString(stock_name1))
  for(n in seq_len(stock_num - m) + m)   # same as (m+1):stock_num, but empty when m == stock_num, avoiding an out-of-bounds index
{
stock_name2 = stock_name[n]
correlation_cal(stock_name1, stock_name2, index, data1)
}
gc()
}
for(m in 1:nrow(mydata))
{
if(is.na(mydata[m, 1]))
break
if(mydata[m, 1]==0 && mydata[m, 2]==0)
{
mydata = mydata[-m, ]
mydata = mydata[, -m]
}
}
rownames(mydata) = valid_stock_name
colnames(mydata) = valid_stock_name
adj_data = sqrt(2*(1-mydata))
diag(adj_data) = 0
my_hist = numeric()
for(m in 1:(nrow(mydata)-1))
{
for(n in m:nrow(mydata))
{
if(m!=n)
{
my_hist = c(my_hist, adj_data[m, n])
}
}
}
write.csv(mydata, "/Users/kay/Documents/course/ee232/hw_4/finance_data/p6_correlations.csv")
write.csv(adj_data, "/Users/kay/Documents/course/ee232/hw_4/finance_data/p6_adj_matrix.csv")
hist(my_hist, xlab = "the length of the link Dij", ylab = "frequency", col = "SkyBlue2")
g = graph_from_adjacency_matrix(adj_data, mode = "upper", weighted = TRUE, add.colnames = NULL, add.rownames = NA)
mst_g = mst(g)
data = read.csv("/Users/kay/Documents/course/ee232/hw_4/finance_data/valid_sector.csv")
sector=list("Consumer Discretionary",
"Consumer Staples",
"Energy",
"Financials",
"Health Care",
"Industrials",
"Information Technology",
"Materials",
"Real Estate",
"Telecommunication Services",
"Utilities")
colors = rainbow(11, s=0.8, v=1)
names(colors)=sector
data$col=colors[data$x]
plot(mst_g, main = "MST graph", vertex.label = NA, vertex.size = 3,
vertex.color = data$col)
legend('topleft', cex = 0.75, y.intersp = 0.5,
text.width = 0.6, legend = sector, pch = 21, pt.bg = colors)
|
/hw4/problem_6.R
|
no_license
|
Kaywky/UCLA-EE-232E
|
R
| false | false | 2,983 |
r
|
library("igraph")
library(lubridate)   # wday() used below comes from lubridate; it was not loaded in the original script
correlation_cal = function(stock_name1, stock_name2, index, data1)
{
path2 = paste("/Users/kay/Documents/course/ee232/hw_4/finance_data/data/", stock_name2, ".csv", sep = "")
data2 = read.csv(path2)
if(nrow(data2)<765)
return()
price1 = data1$Close
price2 = data2$Close
date1 = data1$Date
log1 = numeric()
log2 = numeric()
for(i in index)
{
log1 = c(log1, price1[i])
log2 = c(log2, price2[i])
}
log1 = diff(log(log1))
log2 = diff(log(log2))
result = cor(log1, log2)
mydata[stock_name1, stock_name2] <<- result
mydata[stock_name2, stock_name1] <<- result
gc()
}
data = read.csv("/Users/kay/Documents/course/ee232/hw_4/finance_data/Name_sector.csv")
stock_name = data$Symbol
valid_stock_name = c()
stock_num = nrow(data)
mydata = matrix(0, stock_num, stock_num)
index = numeric()
for(m in 1:stock_num)
{
stock_name1 = stock_name[m]
path1 = paste("/Users/kay/Documents/course/ee232/hw_4/finance_data/data/", stock_name1, ".csv", sep = "")
data1 = read.csv(path1)
if(m==1)
{
for(l in 1:nrow(data1))
{
if(wday(as.Date(toString(data1$Date[l])))==2)
{
index = c(index, l)
}
}
}
if(nrow(data1)<765)
next
valid_stock_name = c(valid_stock_name, toString(stock_name1))
  for(n in seq_len(stock_num - m) + m)   # same as (m+1):stock_num, but empty when m == stock_num, avoiding an out-of-bounds index
{
stock_name2 = stock_name[n]
correlation_cal(stock_name1, stock_name2, index, data1)
}
gc()
}
for(m in 1:nrow(mydata))
{
if(is.na(mydata[m, 1]))
break
if(mydata[m, 1]==0 && mydata[m, 2]==0)
{
mydata = mydata[-m, ]
mydata = mydata[, -m]
}
}
rownames(mydata) = valid_stock_name
colnames(mydata) = valid_stock_name
adj_data = sqrt(2*(1-mydata))
diag(adj_data) = 0
my_hist = numeric()
for(m in 1:(nrow(mydata)-1))
{
for(n in m:nrow(mydata))
{
if(m!=n)
{
my_hist = c(my_hist, adj_data[m, n])
}
}
}
write.csv(mydata, "/Users/kay/Documents/course/ee232/hw_4/finance_data/p6_correlations.csv")
write.csv(adj_data, "/Users/kay/Documents/course/ee232/hw_4/finance_data/p6_adj_matrix.csv")
hist(my_hist, xlab = "the length of the link Dij", ylab = "frequency", col = "SkyBlue2")
g = graph_from_adjacency_matrix(adj_data, mode = "upper", weighted = TRUE, add.colnames = NULL, add.rownames = NA)
mst_g = mst(g)
data = read.csv("/Users/kay/Documents/course/ee232/hw_4/finance_data/valid_sector.csv")
sector=list("Consumer Discretionary",
"Consumer Staples",
"Energy",
"Financials",
"Health Care",
"Industrials",
"Information Technology",
"Materials",
"Real Estate",
"Telecommunication Services",
"Utilities")
colors = rainbow(11, s=0.8, v=1)
names(colors)=sector
data$col=colors[data$x]
plot(mst_g, main = "MST graph", vertex.label = NA, vertex.size = 3,
vertex.color = data$col)
legend('topleft', cex = 0.75, y.intersp = 0.5,
text.width = 0.6, legend = sector, pch = 21, pt.bg = colors)
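# Quick sanity check of the distance used above, d_ij = sqrt(2 * (1 - rho_ij)):
# perfectly correlated stocks sit at distance 0, uncorrelated ones at sqrt(2) ~ 1.41,
# and perfectly anti-correlated ones at 2.
rho <- c(1, 0.5, 0, -1)
sqrt(2 * (1 - rho))   # 0.000 1.000 1.414 2.000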
|
require(raster)
# install.packages("arulesViz")   # one-off install kept for reference; run manually if needed
# library(arulesViz)              # arulesViz is not used anywhere in this script
#Real dataset
xyz1 <- read.csv("F:/CLIM_DATA/tranzoia_county/other Results/INLA/realData/WideObsMeans10KM.csv")
rstack.real.obs <- rasterFromXYZ(xyz1)
writeRaster(rstack.real.obs, 'F:/CLIM_DATA/tranzoia_county/other Results/INLA/realData/RealDataObs/RealObsEviTranz.tif', bylayer=TRUE)
xyz2 <- read.csv("F:/CLIM_DATA/tranzoia_county/other Results/INLA/realData/WideObsFittedMeans10KM.csv")
names(xyz2)
rstack.real.Fitted <- rasterFromXYZ(xyz2)
writeRaster(rstack.real.Fitted, 'F:/CLIM_DATA/tranzoia_county/other Results/INLA/realData/RealDataFitted/FittedObsEviTranz.tif', bylayer=TRUE)
plot(rstack.real.Fitted[[1]])
# Simulated data
xyz3 <- read.csv("F:/CLIM_DATA/tranzoia_county/other Results/INLA/SimData/WideSimData10KM.csv")
names(xyz3)
rstack.real.obs3 <- rasterFromXYZ(xyz3)
writeRaster(rstack.real.obs3, 'F:/CLIM_DATA/tranzoia_county/other Results/INLA/SimData/SimDataObs/SimEviTranz.tif', bylayer=TRUE)
plot(rstack.real.obs3[[1]])
xyz4 <- read.csv("F:/CLIM_DATA/tranzoia_county/other Results/INLA/SimData/WideSimFittedmeans10KM.csv")
names(xyz4)
rstack.real.Fitted4 <- rasterFromXYZ(xyz4)
writeRaster(rstack.real.Fitted4, 'F:/CLIM_DATA/tranzoia_county/other Results/INLA/SimData/SimDataFitted/FittedSimTranz.tif', bylayer=TRUE)
# spDTyn
xyz11 <- read.csv("F:/CLIM_DATA/tranzoia_county/other Results/INLA/realData/WideObsMeans10KM.csv")
names(xyz11)
rstack.real.obs11 <- rasterFromXYZ(xyz11)
writeRaster(rstack.real.obs11, 'F:/CLIM_DATA/tranzoia_county/other Results/spDTyn/RealData/RealDataObs/RealSimEviTranz.tif', bylayer=TRUE)
xyz22 <- read.csv("F:/CLIM_DATA/tranzoia_county/other Results/spDTyn/RealData/WideObsFittedMeans10KM.csv")
names(xyz22)
rstack.real.Fitted22 <- rasterFromXYZ(xyz22)
writeRaster(rstack.real.Fitted22, 'F:/CLIM_DATA/tranzoia_county/other Results/spDTyn/RealData/RealDataFitted/FittedSimEviTranz.tif', bylayer=TRUE)
# Simulated data
xyz33 <- read.csv("F:/CLIM_DATA/tranzoia_county/other Results/spDTyn/SimData/WideSimData10KM.csv")
names(xyz33)
rstack.real.obs33 <- rasterFromXYZ(xyz33)
writeRaster(rstack.real.obs33, 'F:/CLIM_DATA/tranzoia_county/other Results/spDTyn/SimData/SimDataObs/SimEviTranz.tif', bylayer=TRUE)
plot(rstack.real.obs33[[3]])
xyz44 <- read.csv("F:/CLIM_DATA/tranzoia_county/other Results/spDTyn/SimData/WideSimFittedmeans10KM.csv")
names(xyz44)
rstack.real.Fitted44 <- rasterFromXYZ(xyz44)
writeRaster(rstack.real.Fitted44, 'F:/CLIM_DATA/tranzoia_county/other Results/spDTyn/SimData/SimDataFitted/FittedSimTranz.tif', bylayer=TRUE)
|
/convertingXYZ_to_Raster_Image.R
|
no_license
|
erickokuto/r_scripts
|
R
| false | false | 2,569 |
r
|
require(raster)
# install.packages("arulesViz")   # one-off install kept for reference; run manually if needed
# library(arulesViz)              # arulesViz is not used anywhere in this script
#Real dataset
xyz1 <- read.csv("F:/CLIM_DATA/tranzoia_county/other Results/INLA/realData/WideObsMeans10KM.csv")
rstack.real.obs <- rasterFromXYZ(xyz1)
writeRaster(rstack.real.obs, 'F:/CLIM_DATA/tranzoia_county/other Results/INLA/realData/RealDataObs/RealObsEviTranz.tif', bylayer=TRUE)
xyz2 <- read.csv("F:/CLIM_DATA/tranzoia_county/other Results/INLA/realData/WideObsFittedMeans10KM.csv")
names(xyz2)
rstack.real.Fitted <- rasterFromXYZ(xyz2)
writeRaster(rstack.real.Fitted, 'F:/CLIM_DATA/tranzoia_county/other Results/INLA/realData/RealDataFitted/FittedObsEviTranz.tif', bylayer=TRUE)
plot(rstack.real.Fitted[[1]])
# Simulated data
xyz3 <- read.csv("F:/CLIM_DATA/tranzoia_county/other Results/INLA/SimData/WideSimData10KM.csv")
names(xyz3)
rstack.real.obs3 <- rasterFromXYZ(xyz3)
writeRaster(rstack.real.obs3, 'F:/CLIM_DATA/tranzoia_county/other Results/INLA/SimData/SimDataObs/SimEviTranz.tif', bylayer=TRUE)
plot(rstack.real.obs3[[1]])
xyz4 <- read.csv("F:/CLIM_DATA/tranzoia_county/other Results/INLA/SimData/WideSimFittedmeans10KM.csv")
names(xyz4)
rstack.real.Fitted4 <- rasterFromXYZ(xyz4)
writeRaster(rstack.real.Fitted4, 'F:/CLIM_DATA/tranzoia_county/other Results/INLA/SimData/SimDataFitted/FittedSimTranz.tif', bylayer=TRUE)
# spDTyn
xyz11 <- read.csv("F:/CLIM_DATA/tranzoia_county/other Results/INLA/realData/WideObsMeans10KM.csv")
names(xyz11)
rstack.real.obs11 <- rasterFromXYZ(xyz11)
writeRaster(rstack.real.obs11, 'F:/CLIM_DATA/tranzoia_county/other Results/spDTyn/RealData/RealDataObs/RealSimEviTranz.tif', bylayer=TRUE)
xyz22 <- read.csv("F:/CLIM_DATA/tranzoia_county/other Results/spDTyn/RealData/WideObsFittedMeans10KM.csv")
names(xyz22)
rstack.real.Fitted22 <- rasterFromXYZ(xyz22)
writeRaster(rstack.real.Fitted22, 'F:/CLIM_DATA/tranzoia_county/other Results/spDTyn/RealData/RealDataFitted/FittedSimEviTranz.tif', bylayer=TRUE)
# Simulated data
xyz33 <- read.csv("F:/CLIM_DATA/tranzoia_county/other Results/spDTyn/SimData/WideSimData10KM.csv")
names(xyz33)
rstack.real.obs33 <- rasterFromXYZ(xyz33)
writeRaster(rstack.real.obs33, 'F:/CLIM_DATA/tranzoia_county/other Results/spDTyn/SimData/SimDataObs/SimEviTranz.tif', bylayer=TRUE)
plot(rstack.real.obs33[[3]])
xyz44 <- read.csv("F:/CLIM_DATA/tranzoia_county/other Results/spDTyn/SimData/WideSimFittedmeans10KM.csv")
names(xyz44)
rstack.real.Fitted44 <- rasterFromXYZ(xyz44)
writeRaster(rstack.real.Fitted44, 'F:/CLIM_DATA/tranzoia_county/other Results/spDTyn/SimData/SimDataFitted/FittedSimTranz.tif', bylayer=TRUE)
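# The eight blocks above repeat the same read.csv -> rasterFromXYZ -> writeRaster pattern.
# A small helper like the sketch below would remove the duplication; the function name and
# arguments are illustrative, not part of the original script.
xyz_csv_to_tifs <- function(csv_path, tif_path) {
  xyz <- read.csv(csv_path)                  # columns: x, y, then one column per layer
  r <- rasterFromXYZ(xyz)                    # build a raster stack from the xyz table
  writeRaster(r, tif_path, bylayer = TRUE)   # write one GeoTIFF per layer
  invisible(r)
}
# e.g. xyz_csv_to_tifs("<...>/WideObsMeans10KM.csv", "<...>/RealObsEviTranz.tif")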
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.frame.R
\name{as.data.frame}
\alias{as.data.frame}
\alias{as.data.frame.regts}
\title{Convert a \code{\link{regts}} to a \code{\link[base]{data.frame}}}
\usage{
\method{as.data.frame}{regts}(x, ..., rowwise = FALSE, row_names = TRUE,
period_as_date = FALSE)
}
\arguments{
\item{x}{a \code{\link{regts}}}
\item{...}{additional arguments to be passed to methods.}
\item{rowwise}{a logical value: should the timeseries be stored rowwise
or columnwise in the data frame? Defaults to \code{FALSE}}
\item{row_names}{Whether to create row names. If \code{FALSE},
then an additional column with name \code{"period"} or \code{"name"} is
created for columnwise or rowwise timeseries, respectively.}
\item{period_as_date}{A logical (default \code{FALSE}).
If \code{TRUE} the periods are stored as \code{\link[base]{Date}} objects.
Depending on arguments \code{rowwise} and \code{row_names}
the periods may appear in the row or column names of the result data frame.
In that case the dates are coerced to character vectors,
using the standard date format \code{"\%Y-\%m-\%d"}
(see the documentation of function \code{\link[base]{strptime}}
for more information about date formats).}
}
\value{
A \code{\link[base]{data.frame}}
}
\description{
Convert a \code{\link{regts}} to a \code{\link[base]{data.frame}}
}
\details{
If the \code{regts} has labels and argument \code{rowwise} is \code{FALSE},
then the labels are added to the columns of the data frame.
These labels are visible in the data viewer.
}
\examples{
ts <- regts(matrix(1:4, ncol = 2) , start = "2015Q3", names = c("a", "b"),
labels = c("Timeseries a", "Timeseries b"))
print(as.data.frame(ts))
}
|
/pkg/man/as.data.frame.Rd
|
no_license
|
timemod/regts
|
R
| false | true | 1,743 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.frame.R
\name{as.data.frame}
\alias{as.data.frame}
\alias{as.data.frame.regts}
\title{Convert a \code{\link{regts}} to a \code{\link[base]{data.frame}}}
\usage{
\method{as.data.frame}{regts}(x, ..., rowwise = FALSE, row_names = TRUE,
period_as_date = FALSE)
}
\arguments{
\item{x}{a \code{\link{regts}}}
\item{...}{additional arguments to be passed to methods.}
\item{rowwise}{a logical value: should the timeseries be stored rowwise
or columnwise in the data frame? Defaults to \code{FALSE}}
\item{row_names}{Whether to create row names. If \code{FALSE},
then an additional column with name \code{"period"} or \code{"name"} is
created for columnwise or rowwise timeseries, respectively.}
\item{period_as_date}{A logical (default \code{FALSE}).
If \code{TRUE} the periods are stored as \code{\link[base]{Date}} objects.
Depending on arguments \code{rowwise} and \code{row_names}
the periods may appear in the row or column names of the result data frame.
In that case the dates are coerced to character vectors,
using the standard date format \code{"\%Y-\%m-\%d"}
(see the documentation of function \code{\link[base]{strptime}}
for more information about date formats).}
}
\value{
A \code{\link[base]{data.frame}}
}
\description{
Convert a \code{\link{regts}} to a \code{\link[base]{data.frame}}
}
\details{
If the \code{regts} has labels and argument \code{rowwise} is \code{FALSE},
then the labels are added to the columns of the data frame.
These labels are visible in the data viewer.
}
\examples{
ts <- regts(matrix(1:4, ncol = 2) , start = "2015Q3", names = c("a", "b"),
labels = c("Timeseries a", "Timeseries b"))
print(as.data.frame(ts))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s.RF.R
\name{s.RF}
\alias{s.RF}
\title{Random Forest Classification and Regression [C, R]}
\usage{
s.RF(
x,
y = NULL,
x.test = NULL,
y.test = NULL,
x.name = NULL,
y.name = NULL,
n.trees = 1000,
autotune = FALSE,
n.trees.try = 1000,
stepFactor = 1.5,
mtry = NULL,
mtryStart = mtry,
grid.resample.rtset = rtset.resample("kfold", 5),
metric = NULL,
maximize = NULL,
classwt = NULL,
ipw = TRUE,
ipw.type = 2,
upsample = FALSE,
downsample = FALSE,
resample.seed = NULL,
importance = TRUE,
proximity = FALSE,
replace = TRUE,
nodesize = NULL,
maxnodes = NULL,
strata = NULL,
sampsize = if (replace) nrow(x) else ceiling(0.632 * nrow(x)),
sampsize.ratio = NULL,
do.trace = NULL,
tune.do.trace = FALSE,
imetrics = FALSE,
n.cores = rtCores,
print.tune.plot = FALSE,
print.plot = TRUE,
plot.fitted = NULL,
plot.predicted = NULL,
plot.theme = getOption("rt.fit.theme", "lightgrid"),
proximity.tsne = FALSE,
discard.forest = FALSE,
tsne.perplexity = 5,
plot.tsne.train = FALSE,
plot.tsne.test = FALSE,
question = NULL,
rtclass = NULL,
verbose = TRUE,
grid.verbose = TRUE,
outdir = NULL,
save.mod = ifelse(!is.null(outdir), TRUE, FALSE),
...
)
}
\arguments{
\item{x}{Numeric vector or matrix / data frame of features i.e. independent variables}
\item{y}{Numeric vector of outcome, i.e. dependent variable}
\item{x.test}{Numeric vector or matrix / data frame of testing set features
Columns must correspond to columns in \code{x}}
\item{y.test}{Numeric vector of testing set outcome}
\item{x.name}{Character: Name for feature set}
\item{y.name}{Character: Name for outcome}
\item{n.trees}{Integer: Number of trees to grow. Default = 1000}
\item{autotune}{Logical: If TRUE, use \code{randomForest::tuneRF} to determine \code{mtry}}
\item{n.trees.try}{Integer: Number of trees to train for tuning, if \code{autotune = TRUE}}
\item{stepFactor}{Float: If \code{autotune = TRUE}, at each tuning iteration, \code{mtry} is multiplied or
divided by this value. Default = 1.5}
\item{mtry}{[gS] Integer: Number of features sampled randomly at each split}
\item{mtryStart}{Integer: If \code{autotune = TRUE}, start at this value for \code{mtry}}
\item{grid.resample.rtset}{List: Output of \link{rtset.resample} defining \link{gridSearchLearn} parameters.
Default = \code{rtset.resample("kfold", 5)}}
\item{metric}{Character: Metric to minimize, or maximize if \code{maximize = TRUE} during grid search.
Default = NULL, which results in "Balanced Accuracy" for Classification,
"MSE" for Regression, and "Coherence" for Survival Analysis.}
\item{maximize}{Logical: If TRUE, \code{metric} will be maximized if grid search is run. Default = FALSE}
\item{classwt}{Vector, Float: Priors of the classes for classification only. Need not add up to 1}
\item{ipw}{Logical: If TRUE, apply inverse probability weighting (for Classification only).
Note: If \code{weights} are provided, \code{ipw} is not used. Default = TRUE}
\item{ipw.type}{Integer {0, 1, 2}
1: class.weights as in 0, divided by max(class.weights)
2: class.weights as in 0, divided by min(class.weights)
Default = 2}
\item{upsample}{Logical: If TRUE, upsample training set cases not belonging in majority outcome
group}
\item{resample.seed}{Integer: If provided, will be used to set the seed during upsampling.
Default = NULL (random seed)}
\item{importance}{Logical: If TRUE, estimate variable relative importance. Default = TRUE}
\item{proximity}{Logical: If TRUE, calculate proximity measure among cases. Default = FALSE}
\item{replace}{Logical: If TRUE, sample cases with replacement during training. Default = TRUE}
\item{nodesize}{[gS]: Integer: Minimum size of terminal nodes. Default = 5 (Regression);
1 (Classification)}
\item{maxnodes}{[gS]: Integer: Maximum number of terminal nodes in a tree. Default = NULL; trees
grown to maximum possible}
\item{strata}{Vector, Factor: Will be used for stratified sampling}
\item{sampsize}{Integer: Size of sample to draw. In Classification, if \code{strata} is defined, this
can be a vector of the same length, in which case, corresponding values determine how many cases are drawn from
the strata.}
\item{sampsize.ratio}{Float (0, 1): Heuristic of sorts to increase sensitivity in unbalanced
cases. Sample with replacement from minority case to create bootstraps of length N cases.
Select \code{(sampsize.ratio * N minority cases)} cases from majority class.}
\item{do.trace}{Logical or integer: If TRUE, \code{randomForest} will output information while it is running.
If an integer, \code{randomForest} will report progress every this many trees. Default = \code{n.trees/10} if
\code{verbose = TRUE}}
\item{tune.do.trace}{Same as \code{do.trace} but for tuning, if \code{autotune = TRUE}}
\item{imetrics}{Logical: If TRUE, calculate interpretability metrics (N of trees and N of nodes) and save under
the 'extra' field of \link{rtMod}}
\item{n.cores}{Integer: Number of cores to use. Defaults to available cores reported by
\code{future::availableCores()}, unless option \code{rt.cores} is set at the time the library is loaded}
\item{print.tune.plot}{Logical: passed to \code{randomForest::tuneRF}. Default = FALSE}
\item{print.plot}{Logical: if TRUE, produce plot using \code{mplot3}
Takes precedence over \code{plot.fitted} and \code{plot.predicted}. Default = TRUE}
\item{plot.fitted}{Logical: if TRUE, plot True (y) vs Fitted}
\item{plot.predicted}{Logical: if TRUE, plot True (y.test) vs Predicted.
Requires \code{x.test} and \code{y.test}}
\item{plot.theme}{Character: "zero", "dark", "box", "darkbox"}
\item{proximity.tsne}{Logical: If TRUE, perform t-SNE on proximity matrix. Will be saved under 'extra' field of
\link{rtMod}. Default = FALSE}
\item{discard.forest}{Logical: If TRUE, remove forest from \link{rtMod} object to save space.
Default = FALSE}
\item{tsne.perplexity}{Numeric: Perplexity parameter for \code{Rtsne::Rtsne}}
\item{plot.tsne.train}{Logical: If TRUE, plot training set tSNE projections}
\item{plot.tsne.test}{Logical: If TRUE, plot testing set tSNE projections}
\item{question}{Character: the question you are attempting to answer with this model, in plain language.}
\item{rtclass}{Character: Class type to use. "S3", "S4", "RC", "R6"}
\item{verbose}{Logical: If TRUE, print summary to screen.}
\item{grid.verbose}{Logical: Passed to \link{gridSearchLearn}}
\item{outdir}{String, Optional: Path to directory to save output}
\item{save.mod}{Logical. If TRUE, save all output as RDS file in \code{outdir}
\code{save.mod} is TRUE by default if an \code{outdir} is defined. If set to TRUE, and no \code{outdir}
is defined, outdir defaults to \code{paste0("./s.", mod.name)}}
\item{...}{Additional arguments to be passed to \code{randomForest::randomForest}}
}
\value{
\link{rtMod} object
}
\description{
Train a Random Forest for regression or classification using \code{randomForest}
}
\details{
If \code{autotune = TRUE}, \code{randomForest::tuneRF} will be run to determine the best \code{mtry}
value.
}
\seealso{
\link{elevate} for external cross-validation
Other Supervised Learning:
\code{\link{s.ADABOOST}()},
\code{\link{s.ADDTREE}()},
\code{\link{s.BART}()},
\code{\link{s.BAYESGLM}()},
\code{\link{s.BRUTO}()},
\code{\link{s.C50}()},
\code{\link{s.CART}()},
\code{\link{s.CTREE}()},
\code{\link{s.DA}()},
\code{\link{s.ET}()},
\code{\link{s.EVTREE}()},
\code{\link{s.GAM.default}()},
\code{\link{s.GAM.formula}()},
\code{\link{s.GAMSEL}()},
\code{\link{s.GAM}()},
\code{\link{s.GBM3}()},
\code{\link{s.GBM}()},
\code{\link{s.GLMNET}()},
\code{\link{s.GLM}()},
\code{\link{s.GLS}()},
\code{\link{s.H2ODL}()},
\code{\link{s.H2OGBM}()},
\code{\link{s.H2ORF}()},
\code{\link{s.IRF}()},
\code{\link{s.KNN}()},
\code{\link{s.LDA}()},
\code{\link{s.LM}()},
\code{\link{s.MARS}()},
\code{\link{s.MLRF}()},
\code{\link{s.MXN}()},
\code{\link{s.NBAYES}()},
\code{\link{s.NLA}()},
\code{\link{s.NLS}()},
\code{\link{s.NW}()},
\code{\link{s.POLYMARS}()},
\code{\link{s.PPR}()},
\code{\link{s.PPTREE}()},
\code{\link{s.QDA}()},
\code{\link{s.QRNN}()},
\code{\link{s.RANGER}()},
\code{\link{s.RFSRC}()},
\code{\link{s.SGD}()},
\code{\link{s.SPLS}()},
\code{\link{s.SVM}()},
\code{\link{s.TFN}()},
\code{\link{s.XGBLIN}()},
\code{\link{s.XGB}()}
Other Tree-based methods:
\code{\link{s.ADABOOST}()},
\code{\link{s.ADDTREE}()},
\code{\link{s.BART}()},
\code{\link{s.C50}()},
\code{\link{s.CART}()},
\code{\link{s.CTREE}()},
\code{\link{s.ET}()},
\code{\link{s.EVTREE}()},
\code{\link{s.GBM3}()},
\code{\link{s.GBM}()},
\code{\link{s.H2OGBM}()},
\code{\link{s.H2ORF}()},
\code{\link{s.IRF}()},
\code{\link{s.MLRF}()},
\code{\link{s.PPTREE}()},
\code{\link{s.RANGER}()},
\code{\link{s.RFSRC}()},
\code{\link{s.XGB}()}
Other Ensembles:
\code{\link{s.ADABOOST}()},
\code{\link{s.GBM3}()},
\code{\link{s.GBM}()},
\code{\link{s.RANGER}()}
}
\author{
Efstathios D. Gennatas
}
\concept{Ensembles}
\concept{Supervised Learning}
\concept{Tree-based methods}
|
/man/s.RF.Rd
|
no_license
|
NourOmran393/rtemis
|
R
| false | true | 9,082 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s.RF.R
\name{s.RF}
\alias{s.RF}
\title{Random Forest Classification and Regression [C, R]}
\usage{
s.RF(
x,
y = NULL,
x.test = NULL,
y.test = NULL,
x.name = NULL,
y.name = NULL,
n.trees = 1000,
autotune = FALSE,
n.trees.try = 1000,
stepFactor = 1.5,
mtry = NULL,
mtryStart = mtry,
grid.resample.rtset = rtset.resample("kfold", 5),
metric = NULL,
maximize = NULL,
classwt = NULL,
ipw = TRUE,
ipw.type = 2,
upsample = FALSE,
downsample = FALSE,
resample.seed = NULL,
importance = TRUE,
proximity = FALSE,
replace = TRUE,
nodesize = NULL,
maxnodes = NULL,
strata = NULL,
sampsize = if (replace) nrow(x) else ceiling(0.632 * nrow(x)),
sampsize.ratio = NULL,
do.trace = NULL,
tune.do.trace = FALSE,
imetrics = FALSE,
n.cores = rtCores,
print.tune.plot = FALSE,
print.plot = TRUE,
plot.fitted = NULL,
plot.predicted = NULL,
plot.theme = getOption("rt.fit.theme", "lightgrid"),
proximity.tsne = FALSE,
discard.forest = FALSE,
tsne.perplexity = 5,
plot.tsne.train = FALSE,
plot.tsne.test = FALSE,
question = NULL,
rtclass = NULL,
verbose = TRUE,
grid.verbose = TRUE,
outdir = NULL,
save.mod = ifelse(!is.null(outdir), TRUE, FALSE),
...
)
}
\arguments{
\item{x}{Numeric vector or matrix / data frame of features i.e. independent variables}
\item{y}{Numeric vector of outcome, i.e. dependent variable}
\item{x.test}{Numeric vector or matrix / data frame of testing set features
Columns must correspond to columns in \code{x}}
\item{y.test}{Numeric vector of testing set outcome}
\item{x.name}{Character: Name for feature set}
\item{y.name}{Character: Name for outcome}
\item{n.trees}{Integer: Number of trees to grow. Default = 1000}
\item{autotune}{Logical: If TRUE, use \code{randomForest::tuneRF} to determine \code{mtry}}
\item{n.trees.try}{Integer: Number of trees to train for tuning, if \code{autotune = TRUE}}
\item{stepFactor}{Float: If \code{autotune = TRUE}, at each tuning iteration, \code{mtry} is multiplied or
divided by this value. Default = 1.5}
\item{mtry}{[gS] Integer: Number of features sampled randomly at each split}
\item{mtryStart}{Integer: If \code{autotune = TRUE}, start at this value for \code{mtry}}
\item{grid.resample.rtset}{List: Output of \link{rtset.resample} defining \link{gridSearchLearn} parameters.
Default = \code{rtset.resample("kfold", 5)}}
\item{metric}{Character: Metric to minimize, or maximize if \code{maximize = TRUE} during grid search.
Default = NULL, which results in "Balanced Accuracy" for Classification,
"MSE" for Regression, and "Coherence" for Survival Analysis.}
\item{maximize}{Logical: If TRUE, \code{metric} will be maximized if grid search is run. Default = FALSE}
\item{classwt}{Vector, Float: Priors of the classes for classification only. Need not add up to 1}
\item{ipw}{Logical: If TRUE, apply inverse probability weighting (for Classification only).
Note: If \code{weights} are provided, \code{ipw} is not used. Default = TRUE}
\item{ipw.type}{Integer {0, 1, 2}
1: class.weights as in 0, divided by max(class.weights)
2: class.weights as in 0, divided by min(class.weights)
Default = 2}
\item{upsample}{Logical: If TRUE, upsample training set cases not belonging in majority outcome
group}
\item{resample.seed}{Integer: If provided, will be used to set the seed during upsampling.
Default = NULL (random seed)}
\item{importance}{Logical: If TRUE, estimate variable relative importance. Default = TRUE}
\item{proximity}{Logical: If TRUE, calculate proximity measure among cases. Default = FALSE}
\item{replace}{Logical: If TRUE, sample cases with replacement during training. Default = TRUE}
\item{nodesize}{[gS]: Integer: Minimum size of terminal nodes. Default = 5 (Regression);
1 (Classification)}
\item{maxnodes}{[gS]: Integer: Maximum number of terminal nodes in a tree. Default = NULL; trees
grown to maximum possible}
\item{strata}{Vector, Factor: Will be used for stratified sampling}
\item{sampsize}{Integer: Size of sample to draw. In Classification, if \code{strata} is defined, this
can be a vector of the same length, in which case, corresponding values determine how many cases are drawn from
the strata.}
\item{sampsize.ratio}{Float (0, 1): Heuristic of sorts to increase sensitivity in unbalanced
cases. Sample with replacement from minority case to create bootstraps of length N cases.
Select \code{(sampsize.ratio * N minority cases)} cases from majority class.}
\item{do.trace}{Logical or integer: If TRUE, \code{randomForest} will output information while it is running.
If an integer, \code{randomForest} will report progress every this many trees. Default = \code{n.trees/10} if
\code{verbose = TRUE}}
\item{tune.do.trace}{Same as \code{do.trace} but for tuning, if \code{autotune = TRUE}}
\item{imetrics}{Logical: If TRUE, calculate interpretability metrics (N of trees and N of nodes) and save under
the 'extra' field of \link{rtMod}}
\item{n.cores}{Integer: Number of cores to use. Defaults to available cores reported by
\code{future::availableCores()}, unless option \code{rt.cores} is set at the time the library is loaded}
\item{print.tune.plot}{Logical: passed to \code{randomForest::tuneRF}. Default = FALSE}
\item{print.plot}{Logical: if TRUE, produce plot using \code{mplot3}
Takes precedence over \code{plot.fitted} and \code{plot.predicted}. Default = TRUE}
\item{plot.fitted}{Logical: if TRUE, plot True (y) vs Fitted}
\item{plot.predicted}{Logical: if TRUE, plot True (y.test) vs Predicted.
Requires \code{x.test} and \code{y.test}}
\item{plot.theme}{Character: "zero", "dark", "box", "darkbox"}
\item{proximity.tsne}{Logical: If TRUE, perform t-SNE on proximity matrix. Will be saved under 'extra' field of
\link{rtMod}. Default = FALSE}
\item{discard.forest}{Logical: If TRUE, remove forest from \link{rtMod} object to save space.
Default = FALSE}
\item{tsne.perplexity}{Numeric: Perplexity parameter for \code{Rtsne::Rtsne}}
\item{plot.tsne.train}{Logical: If TRUE, plot training set tSNE projections}
\item{plot.tsne.test}{Logical: If TRUE, plot testing set tSNE projections}
\item{question}{Character: the question you are attempting to answer with this model, in plain language.}
\item{rtclass}{Character: Class type to use. "S3", "S4", "RC", "R6"}
\item{verbose}{Logical: If TRUE, print summary to screen.}
\item{grid.verbose}{Logical: Passed to \link{gridSearchLearn}}
\item{outdir}{String, Optional: Path to directory to save output}
\item{save.mod}{Logical. If TRUE, save all output as RDS file in \code{outdir}
\code{save.mod} is TRUE by default if an \code{outdir} is defined. If set to TRUE, and no \code{outdir}
is defined, outdir defaults to \code{paste0("./s.", mod.name)}}
\item{...}{Additional arguments to be passed to \code{randomForest::randomForest}}
}
\value{
\link{rtMod} object
}
\description{
Train a Random Forest for regression or classification using \code{randomForest}
}
\details{
If \code{autotune = TRUE}, \code{randomForest::tuneRF} will be run to determine the best \code{mtry}
value.
}
\seealso{
\link{elevate} for external cross-validation
Other Supervised Learning:
\code{\link{s.ADABOOST}()},
\code{\link{s.ADDTREE}()},
\code{\link{s.BART}()},
\code{\link{s.BAYESGLM}()},
\code{\link{s.BRUTO}()},
\code{\link{s.C50}()},
\code{\link{s.CART}()},
\code{\link{s.CTREE}()},
\code{\link{s.DA}()},
\code{\link{s.ET}()},
\code{\link{s.EVTREE}()},
\code{\link{s.GAM.default}()},
\code{\link{s.GAM.formula}()},
\code{\link{s.GAMSEL}()},
\code{\link{s.GAM}()},
\code{\link{s.GBM3}()},
\code{\link{s.GBM}()},
\code{\link{s.GLMNET}()},
\code{\link{s.GLM}()},
\code{\link{s.GLS}()},
\code{\link{s.H2ODL}()},
\code{\link{s.H2OGBM}()},
\code{\link{s.H2ORF}()},
\code{\link{s.IRF}()},
\code{\link{s.KNN}()},
\code{\link{s.LDA}()},
\code{\link{s.LM}()},
\code{\link{s.MARS}()},
\code{\link{s.MLRF}()},
\code{\link{s.MXN}()},
\code{\link{s.NBAYES}()},
\code{\link{s.NLA}()},
\code{\link{s.NLS}()},
\code{\link{s.NW}()},
\code{\link{s.POLYMARS}()},
\code{\link{s.PPR}()},
\code{\link{s.PPTREE}()},
\code{\link{s.QDA}()},
\code{\link{s.QRNN}()},
\code{\link{s.RANGER}()},
\code{\link{s.RFSRC}()},
\code{\link{s.SGD}()},
\code{\link{s.SPLS}()},
\code{\link{s.SVM}()},
\code{\link{s.TFN}()},
\code{\link{s.XGBLIN}()},
\code{\link{s.XGB}()}
Other Tree-based methods:
\code{\link{s.ADABOOST}()},
\code{\link{s.ADDTREE}()},
\code{\link{s.BART}()},
\code{\link{s.C50}()},
\code{\link{s.CART}()},
\code{\link{s.CTREE}()},
\code{\link{s.ET}()},
\code{\link{s.EVTREE}()},
\code{\link{s.GBM3}()},
\code{\link{s.GBM}()},
\code{\link{s.H2OGBM}()},
\code{\link{s.H2ORF}()},
\code{\link{s.IRF}()},
\code{\link{s.MLRF}()},
\code{\link{s.PPTREE}()},
\code{\link{s.RANGER}()},
\code{\link{s.RFSRC}()},
\code{\link{s.XGB}()}
Other Ensembles:
\code{\link{s.ADABOOST}()},
\code{\link{s.GBM3}()},
\code{\link{s.GBM}()},
\code{\link{s.RANGER}()}
}
\author{
Efstathios D. Gennatas
}
\concept{Ensembles}
\concept{Supervised Learning}
\concept{Tree-based methods}
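% This page documents the s.RF() interface but carries no \examples section. The block
% below is a hedged usage sketch based only on the arguments listed above; the data
% objects (xtrain, ytrain, xtest, ytest) are hypothetical.
\examples{
\dontrun{
mod <- s.RF(xtrain, ytrain,
            x.test = xtest, y.test = ytest,
            n.trees = 1000, autotune = TRUE,
            importance = TRUE, print.plot = FALSE)
}
}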
|
library(dplyr)
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
Y_train <- read.table("./UCI HAR Dataset/train/Y_train.txt")
Sub_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
Y_test <- read.table("./UCI HAR Dataset/test/Y_test.txt")
Sub_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
variable_names <- read.table("./UCI HAR Dataset/features.txt")
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")
X_total <- rbind(X_train, X_test)
Y_total <- rbind(Y_train, Y_test)
Sub_total <- rbind(Sub_train, Sub_test)
selected_var <- variable_names[grep("mean\\(\\)|std\\(\\)",variable_names[,2]),]
X_total <- X_total[,selected_var[,1]]
colnames(Y_total) <- "activity"
Y_total$activitylabel <- factor(Y_total$activity, labels = as.character(activity_labels[,2]))
activitylabel <- Y_total[,-1]
colnames(X_total) <- variable_names[selected_var[,1],2]
colnames(Sub_total) <- "subject"
total <- cbind(X_total, activitylabel, Sub_total)
total_mean <- total %>% group_by(activitylabel, subject) %>% summarize_each(funs(mean))
write.table(total_mean, file = "./UCI HAR Dataset/tidydata.txt", row.names = FALSE, col.names = TRUE)
|
/run_analysis.R
|
no_license
|
Rpvrushank/Getting_data_and_Cleaning_data_week4
|
R
| false | false | 1,253 |
r
|
library(dplyr)
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
Y_train <- read.table("./UCI HAR Dataset/train/Y_train.txt")
Sub_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
Y_test <- read.table("./UCI HAR Dataset/test/Y_test.txt")
Sub_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
variable_names <- read.table("./UCI HAR Dataset/features.txt")
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")
X_total <- rbind(X_train, X_test)
Y_total <- rbind(Y_train, Y_test)
Sub_total <- rbind(Sub_train, Sub_test)
selected_var <- variable_names[grep("mean\\(\\)|std\\(\\)",variable_names[,2]),]
X_total <- X_total[,selected_var[,1]]
colnames(Y_total) <- "activity"
Y_total$activitylabel <- factor(Y_total$activity, labels = as.character(activity_labels[,2]))
activitylabel <- Y_total[,-1]
colnames(X_total) <- variable_names[selected_var[,1],2]
colnames(Sub_total) <- "subject"
total <- cbind(X_total, activitylabel, Sub_total)
total_mean <- total %>% group_by(activitylabel, subject) %>% summarize_each(funs(mean))
write.table(total_mean, file = "./UCI HAR Dataset/tidydata.txt", row.names = FALSE, col.names = TRUE)
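# A short read-back check of the file written above; the path mirrors the write.table()
# call, and the row count follows from grouping by activity and subject
# (6 activities x 30 subjects = 180 rows expected).
tidy <- read.table("./UCI HAR Dataset/tidydata.txt", header = TRUE)
str(tidy)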
|
library(httr)
source("R/verify.R")
source("R/text_mining.R")
# 4. Use API
# req <- GET("https://stream.twitter.com/1.1/statuses/sample.json",
# config(token = twitter_token))
# stop_for_status(req)
load("data/congress.RData")
tw_process_api <- function(req) {
# Processing
tweets <-as.data.frame(do.call(rbind,lapply(content(req), function(x) {
c(x[c("id","text","retweet_count")],
screen_name=x$user$screen_name,
friends_count=x$user$friends_count,
followers_count=x$user$followers_count,
favourites_count=x$user$favourites_count,
verified=x$user$verified,
location=x$user$location,
name=x$user$name,
created_at=x$user$created_at,
description=x$user$description,
#url=x$user$url,
protected=x$user$protected,
#utc_offset=x$user$utc_offset,
statuses_count=x$user$statuses_count,
id_str=x$user$id_str, recursive=TRUE
#entities=x$user$entities
)
})), stringsAsFactors=FALSE
)
# Fixing data types
tweets$id <- as.numeric(tweets$id)
tweets$friends_count <- as.numeric(tweets$friends_count)
tweets$followers_count<- as.numeric(tweets$followers_count)
tweets$favourites_count<- as.numeric(tweets$favourites_count)
tweets$statuses_count<- as.numeric(tweets$statuses_count)
tweets
}
# Getting the data and processing it
tweets_congress_raw <- lapply(congress$screen_name,tw_get_user)
status <- sapply(tweets_congress_raw,status_code)
tweets_congress <- tweets_congress_raw[which(status==200)]
tweets_congress <- lapply(tweets_congress,tw_api_get_timeline)
# Data frame
tweets_congress <- do.call(rbind,tweets_congress)
save(tweets_congress,tweets_congress_raw,file="data/tweets_congress.RData")
# content(req)
# > names(content(req)$statuses[[1]]$user)
# [1] "id" "id_str"
# [3] "name" "screen_name"
# [5] "location" "description"
# [7] "url" "entities"
# [9] "protected" "followers_count"
# [11] "friends_count" "listed_count"
# [13] "created_at" "favourites_count"
# [15] "utc_offset" "time_zone"
# [17] "geo_enabled" "verified"
# [19] "statuses_count" "lang"
# [21] "contributors_enabled" "is_translator"
# [23] "is_translation_enabled" "profile_background_color"
# [25] "profile_background_image_url" "profile_background_image_url_https"
# [27] "profile_background_tile" "profile_image_url"
# [29] "profile_image_url_https" "profile_link_color"
# [31] "profile_sidebar_border_color" "profile_sidebar_fill_color"
# [33] "profile_text_color" "profile_use_background_image"
# [35] "has_extended_profile" "default_profile"
# [37] "default_profile_image" "following"
# [39] "follow_request_sent" "notifications"
# tweets <-as.data.frame(do.call(rbind,lapply(content(req), function(x) {
# c(x[c("id","text","retweet_count")],
# screen_name=x$user$screen_name,
# friends_count=x$user$friends_count,
# followers_count=x$user$followers_count,
# favourites_count=x$user$favourites_count,
# verified=x$user$verified,
# location=x$user$location,
# name=x$user$name,
# created_at=x$user$created_at,
# description=x$user$description,
# #url=x$user$url,
# protected=x$user$protected,
# #utc_offset=x$user$utc_offset,
# statuses_count=x$user$statuses_count,
# id_str=x$user$id_str, recursive=TRUE
# #entities=x$user$entities
# )
# })), stringsAsFactors=FALSE
# )
#
# # Fixing data types
# tweets$id <- as.numeric(tweets$id)
# tweets$friends_count <- as.numeric(tweets$friends_count)
# tweets$followers_count<- as.numeric(tweets$followers_count)
# tweets$favourites_count<- as.numeric(tweets$favourites_count)
# tweets$statuses_count<- as.numeric(tweets$statuses_count)
# nrow(tweets)
# save(tweets,file = "data/lovewins.RData")
|
/playground/get_sample_data_httr.R
|
no_license
|
fentonmartin/twitterreport
|
R
| false | false | 4,528 |
r
|
library(httr)
source("R/verify.R")
source("R/text_mining.R")
# 4. Use API
# req <- GET("https://stream.twitter.com/1.1/statuses/sample.json",
# config(token = twitter_token))
# stop_for_status(req)
load("data/congress.RData")
tw_process_api <- function(req) {
# Processing
tweets <-as.data.frame(do.call(rbind,lapply(content(req), function(x) {
c(x[c("id","text","retweet_count")],
screen_name=x$user$screen_name,
friends_count=x$user$friends_count,
followers_count=x$user$followers_count,
favourites_count=x$user$favourites_count,
verified=x$user$verified,
location=x$user$location,
name=x$user$name,
created_at=x$user$created_at,
description=x$user$description,
#url=x$user$url,
protected=x$user$protected,
#utc_offset=x$user$utc_offset,
statuses_count=x$user$statuses_count,
id_str=x$user$id_str, recursive=TRUE
#entities=x$user$entities
)
})), stringsAsFactors=FALSE
)
# Fixing data types
tweets$id <- as.numeric(tweets$id)
tweets$friends_count <- as.numeric(tweets$friends_count)
tweets$followers_count<- as.numeric(tweets$followers_count)
tweets$favourites_count<- as.numeric(tweets$favourites_count)
tweets$statuses_count<- as.numeric(tweets$statuses_count)
tweets
}
# Getting the data and processing it
tweets_congress_raw <- lapply(congress$screen_name,tw_get_user)
status <- sapply(tweets_congress_raw,status_code)
tweets_congress <- tweets_congress_raw[which(status==200)]
tweets_congress <- lapply(tweets_congress,tw_api_get_timeline)
# Data frame
tweets_congress <- do.call(rbind,tweets_congress)
save(tweets_congress,tweets_congress_raw,file="data/tweets_congress.RData")
# content(req)
# > names(content(req)$statuses[[1]]$user)
# [1] "id" "id_str"
# [3] "name" "screen_name"
# [5] "location" "description"
# [7] "url" "entities"
# [9] "protected" "followers_count"
# [11] "friends_count" "listed_count"
# [13] "created_at" "favourites_count"
# [15] "utc_offset" "time_zone"
# [17] "geo_enabled" "verified"
# [19] "statuses_count" "lang"
# [21] "contributors_enabled" "is_translator"
# [23] "is_translation_enabled" "profile_background_color"
# [25] "profile_background_image_url" "profile_background_image_url_https"
# [27] "profile_background_tile" "profile_image_url"
# [29] "profile_image_url_https" "profile_link_color"
# [31] "profile_sidebar_border_color" "profile_sidebar_fill_color"
# [33] "profile_text_color" "profile_use_background_image"
# [35] "has_extended_profile" "default_profile"
# [37] "default_profile_image" "following"
# [39] "follow_request_sent" "notifications"
# tweets <-as.data.frame(do.call(rbind,lapply(content(req), function(x) {
# c(x[c("id","text","retweet_count")],
# screen_name=x$user$screen_name,
# friends_count=x$user$friends_count,
# followers_count=x$user$followers_count,
# favourites_count=x$user$favourites_count,
# verified=x$user$verified,
# location=x$user$location,
# name=x$user$name,
# created_at=x$user$created_at,
# description=x$user$description,
# #url=x$user$url,
# protected=x$user$protected,
# #utc_offset=x$user$utc_offset,
# statuses_count=x$user$statuses_count,
# id_str=x$user$id_str, recursive=TRUE
# #entities=x$user$entities
# )
# })), stringsAsFactors=FALSE
# )
#
# # Fixing data types
# tweets$id <- as.numeric(tweets$id)
# tweets$friends_count <- as.numeric(tweets$friends_count)
# tweets$followers_count<- as.numeric(tweets$followers_count)
# tweets$favourites_count<- as.numeric(tweets$favourites_count)
# tweets$statuses_count<- as.numeric(tweets$statuses_count)
# nrow(tweets)
# save(tweets,file = "data/lovewins.RData")
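# 'twitter_token' is created in R/verify.R, which is not part of this file. A typical
# httr OAuth 1.0 setup looks like the sketch below; the key/secret strings are
# placeholders, not credentials from the original project.
app <- oauth_app("twitter", key = "CONSUMER_KEY", secret = "CONSUMER_SECRET")
# twitter_token <- oauth1.0_token(oauth_endpoints("twitter"), app)  # starts the browser-based handshake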
|
#' @export
#' @name create_resultset
#' @rdname ResultSet-class
#' @aliases ResultSet-methods
#'
#' @param fOrigin Character with the function used to run the analysis.
#' @param lResults List with the results
#' @param fData List with the feature data.
#' @param lOptions List with additional options
#' @examples
#' create_resultset("hello", list(), list(), list())
create_resultset <- function(fOrigin, lResults, fData, lOptions = list()){
new("ResultSet",
fun_origin = fOrigin,
results = lResults,
fData = fData,
options = lOptions
)
}
|
/R/create_resultset.R
|
permissive
|
isglobal-brge/MultiDataSet
|
R
| false | false | 581 |
r
|
#' @export
#' @name create_resultset
#' @rdname ResultSet-class
#' @aliases ResultSet-methods
#'
#' @param fOrigin Character with the function used to run the analysis.
#' @param lResults List with the results
#' @param fData List with the feature data.
#' @param lOptions List with additional options
#' @examples
#' create_resultset("hello", list(), list(), list())
create_resultset <- function(fOrigin, lResults, fData, lOptions = list()){
new("ResultSet",
fun_origin = fOrigin,
results = lResults,
fData = fData,
options = lOptions
)
}
|
testlist <- list(a = 0L, b = 0L, x = c(1610612736L, 654311424L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610130000-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 414 |
r
|
testlist <- list(a = 0L, b = 0L, x = c(1610612736L, 654311424L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
#Neil Davies 24/04/19
#This selects SNPs to use as instruments for height using MR-Base
#Load MR base packages
library(devtools)
install_github("MRCIEU/TwoSampleMR")
library(foreign)
#Load TwoSampleMR package
library(TwoSampleMR)
#Install MR base instruments
devtools::install_github("MRCIEU/MRInstruments")
library(MRInstruments)
#Extract IDs, coefficients and effect alleles for the covariate allele scores
#Extract
phenotypes <-c(89)
instruments<-extract_instruments(outcomes=phenotypes, p1 = 5e-08,r2 = 0.001)
colnames(instruments)
#Note: 'path1' is used below but never defined in this script; point it at the output directory before running
write.table(instruments,paste(path1,"/snplists/wood_eur_effect.txt",sep=""),quote = FALSE)
write.table(instruments$SNP,paste(path1,"/snplists/wood_eur_snps.txt",sep=""),quote = FALSE, row.names = FALSE,col.names = FALSE)
|
/UKBB/cr_0_snp_list_wood_eur.R
|
no_license
|
nmdavies/within_family_mr
|
R
| false | false | 762 |
r
|
#Neil Davies 24/04/19
#This selects SNPs to use as instruments for height using MR-Base
#Load MR base packages
library(devtools)
install_github("MRCIEU/TwoSampleMR")
library(foreign)
#Load TwoSampleMR package
library(TwoSampleMR)
#Install MR base instruments
devtools::install_github("MRCIEU/MRInstruments")
library(MRInstruments)
#Extract IDs, coefficients and effect alleles for the covariate allele scores
#Extract
phenotypes <-c(89)
instruments<-extract_instruments(outcomes=phenotypes, p1 = 5e-08,r2 = 0.001)
colnames(instruments)
#Note: 'path1' is used below but never defined in this script; point it at the output directory before running
write.table(instruments,paste(path1,"/snplists/wood_eur_effect.txt",sep=""),quote = FALSE)
write.table(instruments$SNP,paste(path1,"/snplists/wood_eur_snps.txt",sep=""),quote = FALSE, row.names = FALSE,col.names = FALSE)
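#A hedged sketch of how instruments extracted above are usually taken forward in
#TwoSampleMR; the outcome GWAS id is a placeholder, not part of this script.
# outcome_dat <- extract_outcome_data(snps = instruments$SNP, outcomes = "<outcome-id>")
# dat <- harmonise_data(exposure_dat = instruments, outcome_dat = outcome_dat)
# res <- mr(dat)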
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper.R
\name{formatProj}
\alias{formatProj}
\title{Formatting projection matrix for screen printout.}
\usage{
formatProj(proj, params, idx)
}
\arguments{
\item{proj}{Projection matrix to be formatted}
\item{params}{Parameter names}
\item{idx}{Index value of this projection in the tour sequence}
}
\value{
Formatted text for screen printout.
}
\description{
Formatting projection matrix for screen printout.
}
|
/man/formatProj.Rd
|
no_license
|
uschiLaa/galahr
|
R
| false | true | 492 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper.R
\name{formatProj}
\alias{formatProj}
\title{Formatting projection matrix for screen printout.}
\usage{
formatProj(proj, params, idx)
}
\arguments{
\item{proj}{Projection matrix to be formatted}
\item{params}{Parameter names}
\item{idx}{Index value of this projection in the tour sequence}
}
\value{
Formatted text for screen printout.
}
\description{
Formatting projection matrix for screen printout.
}
|
##Data run for SFS conference on 5/19/2019
##Parameter List with Units
#DO - dissolved oxygen (mg/L)
#DO_Sat - dissolved oxygen saturation (%)
#Temp - water temperature (deg C)
#SpCond - specific conductivity (uS/cm)
#pH - pH (SU)
#Turb - turbidity (NTU)
####NOx, O_P, NH4 all filtered samples###
#NOx - Nitrate(NO3)-Nitrite(NO2)-N (mg/L)
#OP - orthophosphate(P) (mg/L)
#NH4 - ammonia(N) (mg/L)
####Sample Replications (col = Rep)
#a = 1; b = 2; c = 3; d = not a rep just a reading
########################Call in and format data########################################
#call in packages
library(tidyverse)
library(lubridate)
library(RColorBrewer)
#set working directory
setwd('C:/Users/etaylor21/Documents/gnv_streams/SFS Poster')
#call in data file
data_nut = read_csv('gnv_nutrientdata_20190502.csv', col_types = cols(
Site = col_character(),
REP = col_character(),
Date = col_date(format = "%m/%d/%Y"),
Analyte = col_character(),
Result = col_double()))#nutrient data; fixed date format for date column
data_fp = read_csv('C:/Users/Emily/Documents/gnv_streams/SFS Poster/gnv_fpdata_20190502.csv', col_types = cols(
Site = col_character(),
Date = col_date(format = "%m/%d/%Y"),
Time = col_time(format = ""),
Analyte = col_character(),
Result = col_double()))#field parameter data
#change date to show only as month; will need to change as POR spans > than 1 year
#lubridate::floor_date(data_nut$Date, unit = 'month')
#lubridate::floor_date(data_fp$Date, unit = 'month')
#####Nutrient Faceted Boxplot (nbp)####
nd2 = data_nut %>%
group_by(Date, Site, Analyte) %>%
summarise(mean = mean(Result))
#nd2
windows()
npb = ggplot(nd2, aes(x = Site, y = mean, fill = Analyte)) +
geom_boxplot() + ylab('Results (mg/L)')
npb2 = npb + scale_x_discrete(labels = c('Hatchet', 'N. Hogtown', 'S. Hogtown', 'Possum', 'Sweetwater', 'Tumblin'))
npb3 = npb2 + facet_wrap( . ~ Analyte , scales = 'free_y', nrow = 3,
labeller = as_labeller(c(NH4 = "Ammonium (N)",
NOx = "Nitrate-Nitrite (N)",
OP = 'Orthophosphate (P)'))) +
theme(strip.text = element_text(size = 18)) +
scale_fill_manual(values = c('#56B4E9','#D55E00', '#009E73' )) + guides(fill = FALSE) +
theme(axis.text = element_text(size = rel(1.2))) +
theme(axis.title = element_text(size = rel(1.3)))
npb3
######Nutrient Violins (nvp)#####
windows()
nvp = ggplot(nd2, aes(x = Site, y = mean, fill = Analyte)) +
geom_violin(scale = 'count', adjust = 0.5) + ylab('Results (mg/L)')
nvp2 = nvp + scale_x_discrete(labels = c('Hatchet', 'N. Hogtown', 'S. Hogtown', 'Possum', 'Sweetwater', 'Tumblin'))#change the names on the x axis, use discrete since non-numeric values
nvp3 = nvp2 + facet_wrap( . ~ Analyte , scales = 'free_y', nrow = 3,
labeller = as_labeller(c(NH4 = "Ammonium (N)",
NOx = "Nitrate-Nitrite (N)",
OP = 'Orthophosphate (P)'))) +
theme(strip.text = element_text(size = 18)) +
scale_fill_manual(values = c('#56B4E9','#D55E00', '#009E73' )) + guides(fill = FALSE) +
theme(axis.text = element_text(size = rel(1.2))) +
theme(axis.title = element_text(size = rel(1.3))) +
theme(axis.title.x = element_blank())
nvp3
#####Nutrient Time-Series (nts)#####
windows()
nts = ggplot(nd2, aes(x = Date, y = mean, fill = Site)) + #base ggplot()/geom layers rebuilt following the pchu_ts pattern below (missing in the original lines)
  geom_line(aes(group = Site, color = Site)) + ylab('Results (mg/L)') +
  geom_point(size = 4, aes(x = Date, color = Site, shape = Site)) +
  scale_x_date(date_breaks = 'month', date_labels = '%b') +
  facet_wrap( . ~ Analyte , scales = 'free_y', nrow = 3,
             labeller = as_labeller(c(NH4 = "Ammonium (N)",
                                      NOx = "Nitrate-Nitrite (N)",
                                      OP = 'Orthophosphate (P)'))) +
  theme(strip.text = element_text(size = 15))
nts2 = nts + scale_color_manual(values = c('#56B4E9', '#0072B2', '#009E73', '#669900', '#D55E00', '#E69F00' ), #assign specific colors
labels = c('Hatchet', 'N. Hogtown', 'S. Hogtown', 'Possum', 'Sweetwater', 'Tumblin')) + #rename sites
scale_shape_discrete(labels = c('Hatchet', 'N. Hogtown', 'S. Hogtown', 'Possum', 'Sweetwater', 'Tumblin')) + #since sorted by color and shape must rename in both
guides(fill = FALSE) +
theme(axis.title.x = element_blank()) #remove x-axis title of "date"...unecessary
nts2
#####Nutrient Data of just Possum Creek and Hogtown Upstream (pchu_ts)#####
windows()
pchu = data_nut %>%
filter(Date >= as.Date('2018-10-18') & Date <= as.Date('2019-01-25') ) %>%
filter(Site == 'HOGNW16' | Site == 'POSNW16') %>%
group_by(Date, Site, Analyte) %>%
summarise(mean = mean(Result))
#only using data from before january because HOGNW16 has been closed for construction and the gap in data looks wierd
#pchu
pchu_nut_ts = ggplot(pchu, aes(x = Date, y = mean, fill = Site))
pchu_ts = pchu_nut_ts + geom_line(aes(group = Site, color = Site)) + ylab('Results (mg/L)') +
geom_point(size = 4, aes(x=Date, color = Site, shape = Site)) +
scale_x_date(date_breaks = 'month', date_labels = '%b') +
facet_wrap( . ~ Analyte , scales = 'free_y', nrow = 3,
labeller = as_labeller(c(NH4 = "Ammonium (N)",
NOx = "Nitrate-Nitrite (N)",
OP = 'Orthophosphate (P)'))) +
theme(strip.text = element_text(size = 18)) +
scale_color_manual(values = c('#009E73', '#669900'), labels = c('N. Hogtown', 'Possum')) +
guides(fill = FALSE) +
scale_shape_discrete(labels = c('N. Hogtown', 'Possum')) +
theme(axis.text = element_text(size = rel(1.5))) +
theme(axis.title = element_blank()) +
theme(legend.position = c(0.8, 0.95)) +
theme(legend.background = element_blank()) +
theme(legend.key = element_blank(),
legend.text = element_text(size = rel(1.2))) +
theme(legend.title = element_blank())
pchu_ts
#####Field Parameter Time Series####
windows()
fp2 = data_fp %>%
filter(Analyte == 'DO_Sat' | Analyte == 'pH' | Analyte == 'SpCond' | Analyte == 'Temp' | Analyte == 'Turb') %>%
group_by(Date, Site, Analyte)
fp2
field_param_ts = ggplot(fp2, aes(x = Date, y = Result))
fpts = field_param_ts + geom_line(aes(group = Site, color = Site)) + ylab(NULL) +
geom_point(size = 4, aes(x=Date, color = Site, shape = Site)) +
scale_x_date(date_breaks = 'month', date_labels = '%b') +
facet_wrap( . ~ Analyte , scales = 'free_y', nrow = 5,
strip.position = 'left',
labeller = as_labeller(c(DO_Sat = "DO Saturation (%)",
pH = 'pH (SU)',
SpCond = 'SpCond (uS/cm)',
Temp = 'Temperature (degC)',
Turb = 'Turbidity (NTU)' ))) +
theme(strip.placement = 'outside', strip.text = element_text(size = 11),
axis.text = element_text(size = rel(1.1)),
axis.title.x = element_blank(),
legend.position = 'bottom')
fpts + scale_color_manual(values = c('#56B4E9', '#0072B2', '#009E73', '#669900', '#D55E00', '#E69F00' ),
labels = c('Hatchet', 'N. Hogtown', 'S. Hogtown', 'Possum', 'Sweetwater', 'Tumblin')) +
scale_shape_discrete(labels = c('Hatchet', 'N. Hogtown', 'S. Hogtown', 'Possum', 'Sweetwater', 'Tumblin')) +
guides(fill = FALSE, col = guide_legend(nrow = 1))
#####Field Parameter Violin (fpv)#####
windows()
fvp = ggplot(fp2, aes(x = Site, y = Result, fill = Site)) +
geom_violin(scale = 'count', adjust = 0.5) + ylab(NULL)
fpv2 = fvp + facet_wrap( . ~ Analyte , scales = 'free_y', nrow = 5,
strip.position = 'left',
labeller = as_labeller(c(DO_Sat = "DO Saturation (%)",
pH = 'pH (SU)',
SpCond = 'SpCond (uS/cm)',
Temp = 'Temperature (degC)',
Turb = 'Turbidity (NTU)' ))) +
theme(strip.placement = 'outside', strip.text = element_text(size = 11)) +
scale_fill_manual(values = c('#56B4E9', '#0072B2', '#009E73', '#669900', '#D55E00', '#E69F00'),
labels = c('Hatchet', 'N. Hogtown', 'S. Hogtown', 'Possum', 'Sweetwater', 'Tumblin')) +
scale_x_discrete(labels = c('Hatchet', 'N. Hogtown', 'S. Hogtown', 'Possum', 'Sweetwater', 'Tumblin')) +
guides(fill = FALSE) +
theme(axis.text = element_text(size = rel(1.1))) +
theme(axis.title.x = element_blank())+
theme(legend.position = 'bottom')
fpv2
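#####The plots above are drawn to on-screen windows() devices only; the ggsave() calls
#####below are an optional way to write them to disk (file names are illustrative, not
#####from the original analysis).
ggsave('sfs_nutrient_violins.png', plot = nvp3, width = 10, height = 8, dpi = 300)
ggsave('sfs_fieldparam_violins.png', plot = fpv2, width = 10, height = 12, dpi = 300)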
|
/SFS Poster/SFS_poster_gnvstrms.R
|
no_license
|
ETaylor21/gnv_streams
|
R
| false | false | 8,580 |
r
|
tmp <- league_stats_all %>%
mutate(matchday=rep(c(1:38), length(league_stats_all$year)/38)) %>%
select(matchday, everything()) %>%
group_by(matchday, year) %>%
summarise(matchdate=min(date), goals_for = mean(scored), xG = mean(xG)) %>%
ungroup() %>%
select(-matchday, -year) %>%
arrange(matchdate) %>%
mutate(goals_for_avg=roll_mean(goals_for, 6, align='right', fill=NA),
goals_for_min=roll_min(goals_for, 6, align='right', fill=NA),
goals_for_max=roll_max(goals_for, 6, align='right', fill=NA),
xG_mean=roll_mean(xG, 6, align='right', fill=NA),
xG_min=roll_min(xG, 6, align='right', fill=NA),
xG_max=roll_max(xG, 6, align='right', fill=NA))
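# roll_mean/roll_min/roll_max with a 6-match window leave NA in the first five rows,
# so those rows are backfilled below with expanding windows of length 1 to 5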
tmp$goals_for_avg[1] <- roll_mean(tmp$goals_for, 1, align='right', fill=NA)[1]
tmp$goals_for_avg[2] <- roll_mean(tmp$goals_for, 2, align='right', fill=NA)[2]
tmp$goals_for_avg[3] <- roll_mean(tmp$goals_for, 3, align='right', fill=NA)[3]
tmp$goals_for_avg[4] <- roll_mean(tmp$goals_for, 4, align='right', fill=NA)[4]
tmp$goals_for_avg[5] <- roll_mean(tmp$goals_for, 5, align='right', fill=NA)[5]
tmp$goals_for_min[1] <- roll_min(tmp$goals_for, 1, align='right', fill=NA)[1]
tmp$goals_for_min[2] <- roll_min(tmp$goals_for, 2, align='right', fill=NA)[2]
tmp$goals_for_min[3] <- roll_min(tmp$goals_for, 3, align='right', fill=NA)[3]
tmp$goals_for_min[4] <- roll_min(tmp$goals_for, 4, align='right', fill=NA)[4]
tmp$goals_for_min[5] <- roll_min(tmp$goals_for, 5, align='right', fill=NA)[5]
tmp$goals_for_max[1] <- roll_max(tmp$goals_for, 1, align='right', fill=NA)[1]
tmp$goals_for_max[2] <- roll_max(tmp$goals_for, 2, align='right', fill=NA)[2]
tmp$goals_for_max[3] <- roll_max(tmp$goals_for, 3, align='right', fill=NA)[3]
tmp$goals_for_max[4] <- roll_max(tmp$goals_for, 4, align='right', fill=NA)[4]
tmp$goals_for_max[5] <- roll_max(tmp$goals_for, 5, align='right', fill=NA)[5]
tmp$xG_mean[1] <- roll_mean(tmp$xG, 1, align='right', fill=NA)[1]
tmp$xG_mean[2] <- roll_mean(tmp$xG, 2, align='right', fill=NA)[2]
tmp$xG_mean[3] <- roll_mean(tmp$xG, 3, align='right', fill=NA)[3]
tmp$xG_mean[4] <- roll_mean(tmp$xG, 4, align='right', fill=NA)[4]
tmp$xG_mean[5] <- roll_mean(tmp$xG, 5, align='right', fill=NA)[5]
tmp$xG_min[1] <- roll_min(tmp$xG, 1, align='right', fill=NA)[1]
tmp$xG_min[2] <- roll_min(tmp$xG, 2, align='right', fill=NA)[2]
tmp$xG_min[3] <- roll_min(tmp$xG, 3, align='right', fill=NA)[3]
tmp$xG_min[4] <- roll_min(tmp$xG, 4, align='right', fill=NA)[4]
tmp$xG_min[5] <- roll_min(tmp$xG, 5, align='right', fill=NA)[5]
tmp$xG_max[1] <- roll_max(tmp$xG, 1, align='right', fill=NA)[1]
tmp$xG_max[2] <- roll_max(tmp$xG, 2, align='right', fill=NA)[2]
tmp$xG_max[3] <- roll_max(tmp$xG, 3, align='right', fill=NA)[3]
tmp$xG_max[4] <- roll_max(tmp$xG, 4, align='right', fill=NA)[4]
tmp$xG_max[5] <- roll_max(tmp$xG, 5, align='right', fill=NA)[5]
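# A more compact, hypothetical equivalent of the thirty assignments above, using expanding
# windows directly; it should reproduce the same values and is left commented out as a sketch:
# for (k in 1:5) {
#   tmp$goals_for_avg[k] <- mean(tmp$goals_for[1:k])
#   tmp$goals_for_min[k] <- min(tmp$goals_for[1:k])
#   tmp$goals_for_max[k] <- max(tmp$goals_for[1:k])
#   tmp$xG_mean[k] <- mean(tmp$xG[1:k])
#   tmp$xG_min[k]  <- min(tmp$xG[1:k])
#   tmp$xG_max[k]  <- max(tmp$xG[1:k])
# }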
df_a <- tmp %>% select(matchdate, starts_with('xG'), -xG) %>% mutate(measure='xG')
df_b <- tmp %>% select(matchdate, starts_with('goals'), -goals_for) %>% mutate(measure='goals_for')
colnames(df_a) <- c('matchdate', 'mean', 'min', 'max', 'measure')
colnames(df_b) <- c('matchdate', 'mean', 'min', 'max', 'measure')
tmp_2 <- rbind(df_a, df_b)
tmp_2 %>%
ggplot(aes(matchdate, mean)) +
geom_line(col='steelblue') +
geom_ribbon(aes(ymin = min, ymax = max), fill='steelblue', alpha=0.2, col='steelblue', size=0.2) +
geom_pointrange(data=filter(tmp_2, matchdate==as.Date(cut(matchdate, breaks=10))), aes(ymin=min, ymax=max), col='steelblue') +
facet_grid(vars(measure)) +
theme_minimal() +
theme(axis.title.y=element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank(),
axis.title.x=element_blank()) +
labs(title = 'Goal & xG averages with Min-Max Range')
|
/Premier League Analysis/R/goal_line.R
|
no_license
|
mfaisalshahid/Premier-League-Analysis
|
R
| false | false | 3,761 |
r
|
# Libraries
library(ggplot2)
library(dplyr)
library(hrbrthemes)
# data
data <- data.frame(
day = as.Date("2020-06-14") - 0:364,
value1 = runif(365) + seq(-140, 224)^2 / 10000,
value2 = runif(365) + seq(-140, 224)^2 / 10000
)
p <- ggplot(data) +
geom_line( aes(x=day, y=value1),color="#93d5dc") +
geom_line( aes(x=day, y=value2),color="steelblue") +
geom_point(aes(x=day, y=value1),size = 1) +
  geom_point(aes(x=day, y=value2),color="#ed4845",size = 1) +
xlab("") +
theme_ipsum() +
theme(axis.text.x=element_text(angle=60, hjust=1)) +
scale_x_date(limit=c(as.Date("2020-01-01"),as.Date("2020-02-11"))) +
ylim(0,1.5)
p
# MinYANG noted:
# add a legend explaining what each line colour represents
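# A possible way to get that legend (sketch only; the long-format reshape, the object name
# p_legend and the series labels below are illustrative assumptions, not from the original):
library(tidyr)
data_long <- pivot_longer(data, cols = c(value1, value2),
                          names_to = "series", values_to = "value")
p_legend <- ggplot(data_long, aes(x = day, y = value, color = series)) +
  geom_line() +
  geom_point(size = 1) +
  scale_color_manual(values = c(value1 = "#93d5dc", value2 = "#ed4845"),
                     labels = c("Series 1", "Series 2")) +
  xlab("") +
  theme_ipsum() +
  theme(axis.text.x = element_text(angle = 60, hjust = 1)) +
  scale_x_date(limits = c(as.Date("2020-01-01"), as.Date("2020-02-11"))) +
  ylim(0, 1.5)
# p_legend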
# the end
|
/PS4/PS4_1_2.R
|
no_license
|
12032345/ESE5023
|
R
| false | false | 760 |
r
|
##Time Series Analysis
##Unemployment Data in Yolo County California
UR = read.table('CAYOLO3URN.txt', header = TRUE)
#The data contain no zero or missing values.
####Initial data transformation####
#use the data from 1990 to 2013 to estimate
UR.data = UR[1:288,]
UR.predict = UR[289:300,]
date = UR.data[,1]
rate = UR.data[,2]
#Plot the data
plot(rate, type = 'l', xaxt = 'n', main = 'Time Series on "Unemployment rate"', xlab = 'Dates')
axis(1, at = seq(1, length(rate), by = 24), labels = date[seq(1, length(rate), by = 24)])
#data transformation#
#Use Boxcox
library(FitAR)
BoxCox.ts(rate)
#lambda = 0.076
#transform
lambda = .076
rate = lambda^(-1)*(rate^lambda - 1)
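#Box-Cox with lambda = 0.076: y_lambda = (y^lambda - 1)/lambda;
#the inverse, applied to the final forecasts at the end of the script, is y = (1 + lambda*y_lambda)^(1/lambda)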
#Compare by plots
par(mfrow = c(1,2))
plot(UR.data[,2], type = 'l', xaxt = 'n', main = 'Data before transformation', xlab = 'Dates')
axis(1, at = seq(1, length(rate), by = 24), labels = date[seq(1, length(rate), by = 24)])
plot(rate, type = 'l', xaxt = 'n', main = 'Data after transformation', xlab = 'Dates')
axis(1, at = seq(1, length(rate), by = 24), labels = date[seq(1, length(rate), by = 24)])
par(mfrow = c(1,1))
####Analyze the "smooth"components####
#First, deseasonalize the data
#Use Moving average estimation
#d = 12, so q = 6 and N = 24 full years of monthly data
#Step 1: Filtering
data.matrix = matrix(rate, ncol = 12, byrow = T)
two.sided.filter = filter(rate, sides = 2, c(0.5, rep(1, 11), 0.5)/12)
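#the weights c(0.5, 1, ..., 1, 0.5)/12 form the centered 2x12 moving average
#(0.5*x[t-6] + x[t-5] + ... + x[t+5] + 0.5*x[t+6])/12, the usual trend filter for an even period d = 12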
filter.matrix = matrix(two.sided.filter, ncol = 12, byrow = T)
mu.matrix = data.matrix - filter.matrix #This is the detrended matrix (data minus the filtered trend)
#Step 2: Seasonal estimation
mu.k = colMeans(mu.matrix, na.rm = T)
sk = mu.k - mean(mu.k)
sk.matrix2 = matrix(rep(sk, 24), ncol = 12, byrow = T) #seasonal components
deseasonalized2 = as.numeric(t(data.matrix - sk.matrix2))
par(mfrow = c(1,2))
plot(as.numeric(t(sk.matrix2)), type = 'l', main = 'Seasonal components', ylab = '',
xlab = 'Months')
plot(as.numeric(t(deseasonalized2)), type = 'l', main = 'Deseasonalized data', ylab = '',
xlab = 'Months')
par(mfrow = c(1,1))
#Then, detrend the deseasonalized data
#Method 1: polynomial
n = length(rate)
design.matrix2 = data.frame(y = deseasonalized2, Intercept = rep(1, n))
var.name2 = names(design.matrix2)
fit2 = vector('list', 20)
Rsquared2 = numeric(20)
Adj.Rsquared2 = numeric(20)
for(p in 1:20){
design.matrix2 = cbind(design.matrix2, (1:n)^p)
var.name2 = c(var.name2, paste('t', p, sep = ''))
colnames(design.matrix2) = var.name2
fit2[[p]] = lm(y ~ . - 1, data = design.matrix2)
Rsquared2[p] = summary(fit2[[p]])$r.squared
Adj.Rsquared2[p] = summary(fit2[[p]])$adj.r.squared
}
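#Note: raw powers (1:n)^p up to p = 20 are numerically ill-conditioned;
#an orthogonal-polynomial fit would be a safer (hypothetical) alternative, e.g.
#  lm(deseasonalized2 ~ poly(1:n, degree = p))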
print(Adj.Rsquared2)
plot(Adj.Rsquared2, main = 'Adj.Rsquared2 vs. model size', xlab = 'p')
#4, 6, 9, 11th
#Can also use AIC/BIC
model.selection2 = sapply(fit2, function(i){
return(c(AIC(i), BIC(i)))
})
rownames(model.selection2) = c('AIC', 'BIC')
print(model.selection2)
#plot out the AIC/BIC vs different model sizes and see how the AIC/BIC behaves.
par(mfrow = c(1,2))
plot(model.selection2[1,], main = 'AIC vs. model size', xlab = 'p')
plot(model.selection2[2,], main = 'BIC vs. model size', xlab = 'p')
par(mfrow = c(1,1))
#2, 4, 6, 9, 11th
#Look at the candidate models
summary(fit2[[2]])
summary(fit2[[4]])
summary(fit2[[6]])
summary(fit2[[9]])
summary(fit2[[11]])
#Compare by plots to see which one looks reasonable towards the end of the observation period
plot(rate, type = 'l', xaxt = 'n', main = 'Trend estimates for different orders', xlab = 'Dates')
axis(1, at = seq(1, length(rate), by = 24), labels = date[seq(1, length(rate), by = 24)])
lines(fit2[[4]]$fitted, col = 2)
lines(fit2[[6]]$fitted, col = 3)
lines(fit2[[9]]$fitted, col = 4)
lines(fit2[[11]]$fitted, col = 6)
legends = paste('p = ', c(4,6,9,11), sep = '')
legend('bottomright', legend = legends, col = c(2,3,4,6), lty = 1, cex = 0.9)
#4th best
plot(deseasonalized2 - fit2[[4]]$fitted, type = 'l',
main = 'Residuals after 4th polynomial detrending', ylab = '', xlab = 'Months')
#Seems like there is still some trend left
#need to detrend again
#Use moving average
detrended = deseasonalized2 - fit2[[4]]$fitted
one.sided.filters = vector('list', 9)
a = seq(0.1, 0.9, by = 0.1)
for(q in 1:9){
one.sided.filters[[q]][1] = detrended[1]
for(j in 2:length(rate)){
one.sided.filters[[q]][j] = one.sided.filters[[q]][j-1]*(1-a[q]) + detrended[j]*a[q]
}
}
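#this recursion, m[t] = (1-a)*m[t-1] + a*x[t], is simple exponential smoothing (EWMA) with
#smoothing parameter a; up to initialization it matches e.g. HoltWinters(x, alpha = a, beta = FALSE, gamma = FALSE)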
#Plot residuals
par(mfrow = c(3,3))
plot(detrended - one.sided.filters[[1]], main = 'One sided filter: a = 0.1', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(detrended - one.sided.filters[[2]], main = 'One sided filter: a = 0.2', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(detrended - one.sided.filters[[3]], main = 'One sided filter: a = 0.3', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(detrended - one.sided.filters[[4]], main = 'One sided filter: a = 0.4', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(detrended - one.sided.filters[[5]], main = 'one sided filter: a = 0.5', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(detrended - one.sided.filters[[6]], main = 'One sided filter: a = 0.6', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(detrended - one.sided.filters[[7]], main = 'One sided filter: a = 0.7', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(detrended - one.sided.filters[[8]], main = 'One sided filter: a = 0.8', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(detrended - one.sided.filters[[9]], main = 'One sided filter: a = 0.9', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
par(mfrow = c(1,1))
##Looks like the residual plot does not have an obvious trend from a = 0.5
#Plot the filters
par(mfrow = c(1,2))
plot(detrended, type = 'l', xaxt = 'n', main = 'One sided moving average filter', xlab = 'Dates')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
lines(one.sided.filters[[5]], col = 2)
plot(detrended - one.sided.filters[[5]], main = 'Residuals after redetrending', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
par(mfrow = c(1,1))
trend = one.sided.filters[[5]] + fit2[[4]]$fitted
res1 = detrended - one.sided.filters[[5]]
##method2: moving average
one.sided.filters2 = vector('list', 9)
a = seq(0.1, 0.9, by = 0.1)
for(q in 1:9){
one.sided.filters2[[q]][1] = deseasonalized2[1]
for(j in 2:length(rate)){
one.sided.filters2[[q]][j] = one.sided.filters2[[q]][j-1]*(1-a[q]) + deseasonalized2[j]*a[q]
}
}
#Plot residuals
par(mfrow = c(3,3))
plot(deseasonalized2 - one.sided.filters2[[1]], main = 'One sided filter: a = 0.1', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(deseasonalized2 - one.sided.filters2[[2]], main = 'One sided filter: a = 0.2', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(deseasonalized2 - one.sided.filters2[[3]], main = 'One sided filter: a = 0.3', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(deseasonalized2 - one.sided.filters2[[4]], main = 'One sided filter: a = 0.4', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(deseasonalized2 - one.sided.filters2[[5]], main = 'one sided filter: a = 0.5', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(deseasonalized2 - one.sided.filters2[[6]], main = 'One sided filter: a = 0.6', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(deseasonalized2 - one.sided.filters2[[7]], main = 'One sided filter: a = 0.7', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(deseasonalized2 - one.sided.filters2[[8]], main = 'One sided filter: a = 0.8', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(deseasonalized2 - one.sided.filters2[[9]], main = 'One sided filter: a = 0.9', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
par(mfrow = c(1,1))
par(mfrow = c(1,2))
plot(deseasonalized2, type = 'l', xaxt = 'n', main = 'One sided moving average filter', xlab = 'Dates')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
lines(one.sided.filters2[[5]], col = 2)
plot(deseasonalized2 - one.sided.filters2[[5]], main = 'Residuals after redetrending', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
par(mfrow = c(1,1))
res2 = deseasonalized2 - one.sided.filters2[[5]]
###Analyzing the "rough" component
#compare res1 vs. res2
par(mfrow = c(1,2))
plot(res1, main = 'Residuals for method 1', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(res2, main = 'Residuals for method 2', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
par(mfrow = c(1,1))
####analyze the residuals#####
###1. The sample ACF
acf(res1)
acf(res2)
res1.acf = acf(res1)$acf[2:20]
res2.acf = acf(res2)$acf[2:20]
bounds = c(-1.96/sqrt(n), 1.96/sqrt(n))
sum(res1.acf < bounds[2] & res1.acf > bounds[1]) #9/19 are within the bounds
sum(res2.acf < bounds[2] & res2.acf > bounds[1]) #10/19 are within the bounds
#The white noise assumption might be rejected
###2: The Portmanteau test
teststat.1 = numeric(20)
pvals.1 = numeric(20)
teststat.2 = numeric(20)
pvals.2 = numeric(20)
for(i in 1:20){
test1 = Box.test(res1, lag = i, type = 'Ljung')
teststat.1[i] = test1$statistic
pvals.1[i] = test1$p.value
test2 = Box.test(res2, lag = i, type = 'Ljung')
teststat.2[i] = test2$statistic
pvals.2[i] = test2$p.value
}
#Comparing p-values
pvals.1 < 0.05
pvals.2 < 0.05
#The hypothesis of i.i.d. residuals is rejected at level 0.05.
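#Note: when the same test is applied later to residuals of a fitted ARMA(p,q), Box.test has a
#'fitdf' argument to correct the degrees of freedom, e.g. (hypothetically)
#  Box.test(resid1, lag = 20, type = 'Ljung', fitdf = 5 + 4)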
###3: rank test
mu.pi = 1/4*n*(n-1)
sigma.pi = sqrt(1/72*n*(n-1)*(2*n+5))
alpha = 0.05
z = qnorm(1 - alpha/2)
#Find Pi for residual 1
Pi.1 = 0
for(j in 1:(n-1)){
for(i in (j+1):n){
if(res1[i] > res1[j]) Pi.1 = Pi.1 + 1
}
}
P1 = abs(Pi.1 - mu.pi)/sigma.pi
P1 > z #Comparing test statistics with critical value
#Find Pi for residual 2
Pi.2 = 0
for(j in 1:(n-1)){
for(i in (j+1):n){
if(res2[i] > res2[j]) Pi.2 = Pi.2 + 1
}
}
P2 = abs(Pi.2 - mu.pi)/sigma.pi
P2 > z
#Both result in FALSE at alpha = 0.05,
#thus the assumption that the residuals are i.i.d. cannot be rejected.
#This says there is no remaining trend in the data
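#A compact, hypothetical equivalent of the double loops above (same counts, no loop):
#  M = outer(res1, res1, ">")   # M[i, j] is TRUE when res1[i] > res1[j]
#  Pi.1 = sum(M[lower.tri(M)])  # pairs with i > j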
###### Analyze the "rough" component ######
###res1
lag.plot(res1, lags = 12, layout = c(3,4))
ar1 = vector('list', 20)
aic1 = numeric(20)
for (j in 1:20){
ar1[[j]] = arima(res1, order = c(j,0,0), method = 'ML')
aic1[j] = ar1[[j]]$aic
}
aic1
ma1 = vector('list', 20)
aic2 = numeric(20)
for (j in 1:20){
ma1[[j]] = arima(res1, order = c(0,0,j), method = 'ML')
aic2[j] = ma1[[j]]$aic
}
aic2
arma1 = matrix(NA, ncol = 10, nrow = 10)
for(i in 1:10){
for(j in 1:10){
arma1[i,j] = arima(res1, order = c(i,0,j), method = "ML")$aic
}
}
arma1
aic3 = numeric(100)
pq = numeric(100)
for(i in 1:10){
for (j in 1:10){
aic3[(i-1)*10 + j] = arma1[i,j]
pq[(i-1)*10 + j] = i + j
}
}
limit = c(min(aic1, aic2, aic3), max(aic1, aic2, aic3))
par(mfrow = c(1,3))
plot(aic1, ylim = limit, xlab = 'p', main = 'AIC of the fitted AR(p) model')
plot(aic2, ylim = limit, xlab = 'q', main = 'AIC of the fitted MA(q) model')
plot(aic3 ~ pq, xlab = 'p+q', ylim = limit, main = 'AIC of the fitted ARMA(p,q) model')
points(x = 9, y = -1238.515, col = 'red', pch = 19)
par(mfrow = c(1,1))
# the red point represents ARMA(5,4) <- best!
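#A possible shortcut for the AIC grid search above, assuming the 'forecast' package is available:
#  library(forecast)
#  auto.arima(res1, d = 0, max.p = 10, max.q = 10, ic = 'aic', stepwise = FALSE, seasonal = FALSE)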
###res2
lag.plot(res2, lags = 12, layout = c(3,4))
ar2 = vector('list', 20)
aic1.2 = numeric(20)
for (j in 1:20){
ar2[[j]] = arima(res2, order = c(j,0,0), method = 'ML')
aic1.2[j] = ar2[[j]]$aic
}
aic1.2
ma2 = vector('list', 20)
aic2.2 = numeric(20)
for (j in 1:20){
ma2[[j]] = arima(res2, order = c(0,0,j), method = 'ML')
aic2.2[j] = ma2[[j]]$aic
}
aic2.2
arma2 = matrix(NA, ncol = 10, nrow = 10)
for(i in 1:10){
for(j in 1:10){
arma2[i,j] = arima(res2, order = c(i,0,j), method = "ML")$aic
}
}
arma2
aic3.2 = numeric(100)
pq.2 = numeric(100)
for(i in 1:10){
for (j in 1:10){
aic3.2[(i-1)*10 + j] = arma2[i,j]
pq.2[(i-1)*10 + j] = i + j
}
}
limit = c(min(aic1.2, aic2.2, aic3.2), max(aic1.2, aic2.2, aic3.2))
par(mfrow = c(1,3))
plot(aic1.2, ylim = limit, xlab = 'p', main = 'AIC of the fitted AR(p) model')
plot(aic2.2, ylim = limit, xlab = 'q', main = 'AIC of the fitted MA(q) model')
plot(aic3.2 ~ pq.2, xlab = 'p+q', ylim = limit, main = 'AIC of the fitted ARMA(p,q) model')
points(x = 9, y = -1227.302, col = 'red', pch = 19)
par(mfrow = c(1,1))
# the red point represents ARMA(5,4) <- best!
#### Check if residuals conform to white noise
#Method 1: The sample ACF
resid1 = res1 - fitted(arima(res1, order = c(5, 0, 4), method = "ML"))
resid2 = res2 - fitted(arima(res2, order = c(5, 0, 4), method = 'ML'))
resid1.acf = acf(resid1)
resid2.acf = acf(resid2)
#Check how many within bound
resid1.acf = resid1.acf$acf[2:20] #The first acf value is of lag 0, which we ignore here
resid2.acf = resid2.acf$acf[2:20] #The first acf value is of lag 0, which we ignore here
bounds = c(-1.96/sqrt(n), 1.96/sqrt(n))
sum(resid1.acf < bounds[2] & resid1.acf > bounds[1]) #18/19 are within the bounds
sum(resid2.acf < bounds[2] & resid2.acf > bounds[1]) #18/19 are within the bounds
#Method 2: The Portmanteau test
Q.1 = cumsum(resid1.acf^2) * n
Q.2 = cumsum(resid2.acf^2) * n
Q.1 > qchisq(0.95, df = 1:19)
Q.2 > qchisq(0.95, df = 1:19)
#The hypothesis of i.i.d. residuals is not rejected at level 0.05.
#Method 3: Rank test
mu.pi = 1/4*n*(n-1)
sigma.pi = sqrt(1/72*n*(n-1)*(2*n+5))
alpha = 0.05
z = qnorm(1 - alpha/2)
#Find Pi for residual 1
Pi.1 = 0
for(j in 1:(n-1)){
for(i in (j+1):n){
if(resid1[i] > resid1[j]) Pi.1 = Pi.1 + 1
}
}
P1 = abs(Pi.1 - mu.pi)/sigma.pi
P1 > z #Comparing test statistics with critical value
#Find Pi for residual 2
Pi.2 = 0
for(j in 1:(n-1)){
for(i in (j+1):n){
if(resid2[i] > resid2[j]) Pi.2 = Pi.2 + 1
}
}
P2 = abs(Pi.2 - mu.pi)/sigma.pi
P2 > z #Comparing test statistics with critical value
#Both result in FALSE at alpha = 0.05,
#thus the assumption that the residuals are i.i.d. cannot be rejected.
#This says there is no remaining trend left in the residuals
par(mfrow = c(1,2))
plot(resid1, main = 'Residuals for method 1', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(resid2, main = 'Residuals for method 2', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
par(mfrow = c(1,1))
###### Predict future values #####
#deseasonalize: moving average
#detrend: one-sided moving average
#residuals: arma(5,4)
seasonality = sk.matrix2[1,]
plot(one.sided.filters2[[5]][265:288], xaxt = 'n',
main = 'One sided moving average filter (last 24 points)', xlab = 'Dates')
axis(1, at = seq(1, 24, by = 12), labels = date[seq(265, 288, by = 12)])
#We decide to use the last 12 points to predict m289
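#The twelve nearly identical blocks below (m13 ... m24) differ only in the input window and in the
#polynomial degree finally kept; a hypothetical helper mirroring each block could look like this:
#  step_ahead = function(Y, p) {
#    d = data.frame(y = Y, t = 1:12)
#    fit = lm(y ~ poly(t, degree = p, raw = TRUE), data = d)
#    predict(fit, newdata = data.frame(t = 13))
#  }
#  e.g. m13 = step_ahead(one.sided.filters2[[5]][277:288], p = 3)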
#m13
Y = one.sided.filters2[[5]][277:288]
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model13 = model[[3]]
m13 = predict(model13, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2, t3 = 13^3))
m13
#m14
Y = c(one.sided.filters2[[5]][278:288], m13)
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model14 = model[[3]]
m14 = predict(model14, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2, t3 = 13^3))
#m15
Y = c(one.sided.filters2[[5]][279:288], m13, m14)
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model15.2 = model[[2]]
model15.3 = model[[3]]
m15.2 = predict(model15.2, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2))
m15.2
m15.3 = predict(model15.3, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2, t3 = 13^3))
m15.3
m15 = m15.2
#m15.2
#m16
Y = c(one.sided.filters2[[5]][280:288], m13, m14, m15)
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model16 = model[[2]]
m16 = predict(model16, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2))
m16
##m17
Y = c(one.sided.filters2[[5]][281:288], m13, m14, m15, m16)
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model17 = model[[2]]
m17 = predict(model17, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2))
m17
#m18
Y = c(one.sided.filters2[[5]][282:288], m13, m14, m15, m16, m17)
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model18 = model[[2]]
m18 = predict(model18, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2, t3 = 13^3))
m18
#m19
Y = c(one.sided.filters2[[5]][283:288], m13, m14, m15, m16, m17, m18)
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model19 = model[[2]]
m19 = predict(model19, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2, t3 = 13^3))
m19
#m20
Y = c(one.sided.filters2[[5]][284:288], m13, m14, m15, m16, m17, m18, m19)
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model20 = model[[2]]
m20 = predict(model20, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2, t3 = 13^3))
m20
#m21
Y = c(one.sided.filters2[[5]][285:288], m13, m14, m15, m16, m17, m18, m19, m20)
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model21 = model[[2]]
m21 = predict(model21, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2, t3 = 13^3))
m21
#m22
Y = c(one.sided.filters2[[5]][286:288], m13, m14, m15, m16, m17, m18, m19, m20, m21)
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model22 = model[[2]]
m22 = predict(model22, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2, t3 = 13^3))
m22
#m23
Y = c(one.sided.filters2[[5]][287:288], m13, m14, m15, m16, m17, m18, m19, m20, m21, m22)
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model23 = model[[2]]
m23 = predict(model23, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2, t3 = 13^3))
m23
#m24
Y = c(one.sided.filters2[[5]][288], m13, m14, m15, m16, m17, m18, m19, m20, m21, m22, m23)
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model24 = model[[2]]
m24 = predict(model24, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2, t3 = 13^3))
m24
trend.predict = c(m13, m14, m15, m16, m17, m18, m19, m20, m21, m22, m23, m24)
plot(trend.predict)
fit.res = arima (res2, c(5, 0, 4))
predict.res = predict(fit.res, n.ahead = 12)
plot(res2, type = "l", main = 'Residual prediction', ylab = 'Residual')
lines(predict.res$pred, col ='red')
predict = trend.predict + seasonality + predict.res$pred
predict.t = (1 + lambda*predict)^(1 / lambda)
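#this undoes the Box-Cox step: y = (1 + lambda*y_lambda)^(1/lambda), returning the forecasts to the original scale
#note: the name 'predict' above shadows stats::predict; a different name (e.g. pred_combined) would be safer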
#True values vs. predicted values
plot(UR.predict$VALUE, xaxt = 'n', ylim = c(0,12))
axis(1, at = seq(1, 12, by = 1), labels = UR[,1][seq(289, 300, by = 1)])
points(as.numeric(predict.t), col = 2, ylim = c(0 ,12), pch=19)
|
/TimeSeriesAnalysis.R
|
no_license
|
rainbowfan/Unemployment-rate-prediction
|
R
| false | false | 25,295 |
r
|
##Time Series Analysis
##Unemployment Data in Yolo County California
UR = read.table('CAYOLO3URN.txt', header = TRUE)
#The data has no zero value or missing value.
####initial data transformaiton####
#use the data from 1990 to 2013 to estimate
UR.data = UR[1:288,]
UR.predict = UR[289:300,]
date = UR.data[,1]
rate = UR.data[,2]
#Plot the data
plot(rate, type = 'l', xaxt = 'n', main = 'Time Series on "Unemployment rate"', xlab = 'Dates')
axis(1, at = seq(1, length(rate), by = 24), labels = date[seq(1, length(rate), by = 24)])
#data transformation#
#Use Boxcox
library(FitAR)
BoxCox.ts(rate)
#lambda = 0.076
#transform
lambda = .076
rate = lambda^(-1)*(rate^lambda - 1)
#Compare by plots
par(mfrow = c(1,2))
plot(UR.data[,2], type = 'l', xaxt = 'n', main = 'Data before transformation', xlab = 'Dates')
axis(1, at = seq(1, length(rate), by = 24), labels = date[seq(1, length(rate), by = 24)])
plot(rate, type = 'l', xaxt = 'n', main = 'Data after transformation', xlab = 'Dates')
axis(1, at = seq(1, length(rate), by = 24), labels = date[seq(1, length(rate), by = 24)])
par(mfrow = c(1,1))
####Analyze the "smooth"components####
#First, deseasonlize the data
#Use Moving average estimation
#d = 12, we have q = 6, N = 25
#Step 1: Filtering
data.matrix = matrix(rate, ncol = 12, byrow = T)
two.sided.filter = filter(rate, sides = 2, c(0.5, rep(1, 11), 0.5)/12)
filter.matrix = matrix(two.sided.filter, ncol = 12, byrow = T)
mu.matrix = data.matrix - filter.matrix #This is the filterded matrix
#Step 2: Seasonal estimation
mu.k = colMeans(mu.matrix, na.rm = T)
sk = mu.k - mean(mu.k)
sk.matrix2 = matrix(rep(sk, 24), ncol = 12, byrow = T) #seasonal componenets
deseasonalized2 = as.numeric(t(data.matrix - sk.matrix2))
par(mfrow = c(1,2))
plot(as.numeric(t(sk.matrix2)), type = 'l', main = 'Seasonal components', ylab = '',
xlab = 'Months')
plot(as.numeric(t(deseasonalized2)), type = 'l', main = 'Deseasonalized data', ylab = '',
xlab = 'Months')
par(mfrow = c(1,1))
#Then, detrend the deseasonalized data
#Method 1: polynomial
n = length(rate)
design.matrix2 = data.frame(y = deseasonalized2, Intercept = rep(1, n))
var.name2 = names(design.matrix2)
fit2 = vector('list', 20)
Rsquared2 = numeric(20)
Adj.Rsquared2 = numeric(20)
for(p in 1:20){
design.matrix2 = cbind(design.matrix2, (1:n)^p)
var.name2 = c(var.name2, paste('t', p, sep = ''))
colnames(design.matrix2) = var.name2
fit2[[p]] = lm(y ~ . - 1, data = design.matrix2)
Rsquared2[p] = summary(fit2[[p]])$r.squared
Adj.Rsquared2[p] = summary(fit2[[p]])$adj.r.squared
}
print(Adj.Rsquared2)
plot(Adj.Rsquared2, main = 'Adj.Rsquared2 vs. model size', xlab = 'p')
#4, 6, 9, 11th
#Can also use AIC/BIC
model.selection2 = sapply(fit2, function(i){
return(c(AIC(i), BIC(i)))
})
rownames(model.selection2) = c('AIC', 'BIC')
print(model.selection2)
#plot out the AIC/BIC vs different model sizes and see how the AIC/BIC behaves.
par(mfrow = c(1,2))
plot(model.selection2[1,], main = 'AIC vs. model size', xlab = 'p')
plot(model.selection2[2,], main = 'BIC vs. model size', xlab = 'p')
par(mfrow = c(1,1))
#2, 4, 6, 9, 11th
#Look at the 11th model
summary(fit2[[2]])
summary(fit2[[4]])
summary(fit2[[6]])
summary(fit2[[9]])
summary(fit2[[11]])
#Compare by plots to see which one looks reasonable towards the end of the observation period
plot(rate, type = 'l', xaxt = 'n', main = 'Trend estimates for different orders', xlab = 'Dates')
axis(1, at = seq(1, length(rate), by = 24), labels = date[seq(1, length(rate), by = 24)])
lines(fit2[[4]]$fitted, col = 2)
lines(fit2[[6]]$fitted, col = 3)
lines(fit2[[9]]$fitted, col = 4)
lines(fit2[[11]]$fitted, col = 6)
legends = paste('p = ', c(4,6,9,11), sep = '')
legend('bottomright', legend = legends, col = c(2,3,4,6), lty = 1, cex = 0.9)
#4th best
plot(deseasonalized2 - fit2[[4]]$fitted, type = 'l',
main = 'Residuals after 4th polynomial detrending', ylab = '', xlab = 'Months')
#Seems like there is still some trend left
#need to detrend again
#Use moving average
detrended = deseasonalized2 - fit2[[4]]$fitted
one.sided.filters = vector('list', 9)
a = seq(0.1, 0.9, by = 0.1)
for(q in 1:9){
one.sided.filters[[q]][1] = detrended[1]
for(j in 2:length(rate)){
one.sided.filters[[q]][j] = one.sided.filters[[q]][j-1]*(1-a[q]) + detrended[j]*a[q]
}
}
#Plot residuals
par(mfrow = c(3,3))
plot(detrended - one.sided.filters[[1]], main = 'One sided filter: a = 0.1', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(detrended - one.sided.filters[[2]], main = 'One sided filter: a = 0.2', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(detrended - one.sided.filters[[3]], main = 'One sided filter: a = 0.3', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(detrended - one.sided.filters[[4]], main = 'One sided filter: a = 0.4', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(detrended - one.sided.filters[[5]], main = 'one sided filter: a = 0.5', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(detrended - one.sided.filters[[6]], main = 'One sided filter: a = 0.6', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(detrended - one.sided.filters[[7]], main = 'One sided filter: a = 0.7', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(detrended - one.sided.filters[[8]], main = 'one sided filter: q = 0.8', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(detrended - one.sided.filters[[9]], main = 'One sided filter: a = 0.9', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
par(mfrow = c(1,1))
##Looks like the residual plot does not have an obvious trend from a = 0.5
#Plot the filters
par(mfrow = c(1,2))
plot(detrended, type = 'l', xaxt = 'n', main = 'One sided moving average filter', xlab = 'Dates')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
lines(one.sided.filters[[5]], col = 2)
plot(detrended - one.sided.filters[[5]], main = 'Residuals after redetrending', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
par(mfrow = c(1,1))
trend = one.sided.filters[[5]] + fit2[[4]]$fitted
res1 = detrended - one.sided.filters[[5]]
##method2: moving average
one.sided.filters2 = vector('list', 9)
a = seq(0.1, 0.9, by = 0.1)
for(q in 1:9){
one.sided.filters2[[q]][1] = deseasonalized2[1]
for(j in 2:length(rate)){
one.sided.filters2[[q]][j] = one.sided.filters2[[q]][j-1]*(1-a[q]) + deseasonalized2[j]*a[q]
}
}
#plot residulas
par(mfrow = c(3,3))
plot(deseasonalized2 - one.sided.filters2[[1]], main = 'One sided filter: a = 0.1', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(deseasonalized2 - one.sided.filters2[[2]], main = 'One sided filter: a = 0.2', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(deseasonalized2 - one.sided.filters2[[3]], main = 'One sided filter: a = 0.3', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(deseasonalized2 - one.sided.filters2[[4]], main = 'One sided filter: a = 0.4', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(deseasonalized2 - one.sided.filters2[[5]], main = 'one sided filter: a = 0.5', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(deseasonalized2 - one.sided.filters2[[6]], main = 'One sided filter: a = 0.6', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(deseasonalized2 - one.sided.filters2[[7]], main = 'One sided filter: a = 0.7', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(deseasonalized2 - one.sided.filters2[[8]], main = 'one sided filter: q = 0.8', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(deseasonalized2 - one.sided.filters2[[9]], main = 'One sided filter: a = 0.9', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
par(mfrow = c(1,1))
par(mfrow = c(1,2))
plot(deseasonalized2, type = 'l', xaxt = 'n', main = 'One sided moving average filter', xlab = 'Dates')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
lines(one.sided.filters2[[5]], col = 2)
plot(deseasonalized2 - one.sided.filters2[[5]], main = 'Residuals after redetrending', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
par(mfrow = c(1,1))
res2 = deseasonalized2 - one.sided.filters2[[5]]
###Analyzing the "rough" component
#compare res1 vs. res2
par(mfrow = c(1,2))
plot(res1, main = 'Residuals for method 1', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(res2, main = 'Residuals for method 2', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
par(mfrow = c(1,1))
####analyze the residuals#####
###1. The sample ACF
acf(res1)
acf(res2)
res1.acf = acf(res1)$acf[2:20]
res2.acf = acf(res2)$acf[2:20]
bounds = c(-1.96/sqrt(n), 1.96/sqrt(n))
sum(res1.acf < bounds[2] & res1.acf > bounds[1]) #9/19 are within the bounds
sum(res2.acf < bounds[2] & res2.acf > bounds[1]) #10/19 are within the bounds
#The white noise assumption might be rejected
###2: The Portmanteau test
teststat.1 = numeric(20)
pvals.1 = numeric(20)
teststat.2 = numeric(20)
pvals.2 = numeric(20)
for(i in 1:20){
test1 = Box.test(res1, lag = i, type = 'Ljung')
teststat.1[i] = test1$statistic
pvals.1[i] = test1$p.value
test2 = Box.test(res2, lag = i, type = 'Ljung')
teststat.2[i] = test2$statistic
pvals.2[i] = test2$p.value
}
#Comparing p-values
pvals.1 < 0.05
pvals.2 < 0.05
#The hypothesis of i.i.d. residuals is rejected at level 0.05.
###3: rank test
mu.pi = 1/4*n*(n-1)
sigma.pi = sqrt(1/72*n*(n-1)*(2*n+5))
alpha = 0.05
z = qnorm(1 - alpha/2)
#Find Pi for residual 1
Pi.1 = 0
for(j in 1:(n-1)){
for(i in (j+1):n){
if(res1[i] > res1[j]) Pi.1 = Pi.1 + 1
}
}
P1 = abs(Pi.1 - mu.pi)/sigma.pi
P1 > z #Comparing test statistics with critical value
#Find Pi for residual 2
Pi.2 = 0
for(j in 1:(n-1)){
for(i in (j+1):n){
if(res2[i] > res2[j]) Pi.2 = Pi.2 + 1
}
}
P2 = abs(Pi.2 - mu.pi)/sigma.pi
P2 > z
#Both results in a false at alpha = 0.05,
#Thus the assumption that the residuals are i.i.d. cannot be rejected.
#This says there is no more trend in the data
###### Analyze the "rough" component ######
###res1
lag.plot(res1, lags = 12, layout = c(3,4))
ar1 = vector('list', 20)
aic1 = numeric(20)
for (j in 1:20){
ar1[[j]] = arima(res1, order = c(j,0,0), method = 'ML')
aic1[j] = ar1[[j]]$aic
}
aic1
ma1 = vector('list', 20)
aic2 = numeric(20)
for (j in 1:20){
ma1[[j]] = arima(res1, order = c(0,0,j), method = 'ML')
aic2[j] = ma1[[j]]$aic
}
aic2
arma1 = matrix(NA, ncol = 10, nrow = 10)
for(i in 1:10){
for(j in 1:10){
arma1[i,j] = arima(res1, order = c(i,0,j), method = "ML")$aic
}
}
arma1
aic3 = numeric(100)
pq = numeric(100)
for(i in 1:10){
for (j in 1:10){
aic3[(i-1)*10 + j] = arma1[i,j]
pq[(i-1)*10 + j] = i + j
}
}
limit = c(min(aic1, aic2, aic3), max(aic1, aic2, aic3))
par(mfrow = c(1,3))
plot(aic1, ylim = limit, xlab = 'p', main = 'AIC of the fitted AR(p) mode')
plot(aic2, ylim = limit, xlab = 'q', main = 'AIC of the fitted MA(q) model')
plot(aic3 ~ pq, xlab = 'p+q', ylim = limit, main = 'AIC of the fitted ARMA(p,q) model')
points(x = 9, y = -1238.515, col = 'red', pch = 19)
par(mfrow = c(1,1))
# the red point represent arma(5,4) <- best!
###res2
lag.plot(res2, lags = 12, layout = c(3,4))
ar2 = vector('list', 20)
aic1.2 = numeric(20)
for (j in 1:20){
ar2[[j]] = arima(res2, order = c(j,0,0), method = 'ML')
aic1.2[j] = ar2[[j]]$aic
}
aic1.2
ma2 = vector('list', 20)
aic2.2 = numeric(20)
for (j in 1:20){
ma2[[j]] = arima(res2, order = c(0,0,j), method = 'ML')
aic2.2[j] = ma2[[j]]$aic
}
aic2.2
arma2 = matrix(NA, ncol = 10, nrow = 10)
for(i in 1:10){
for(j in 1:10){
arma2[i,j] = arima(res2, order = c(i,0,j), method = "ML")$aic
}
}
arma2
aic3.2 = numeric(100)
pq.2 = numeric(100)
for(i in 1:10){
for (j in 1:10){
aic3.2[(i-1)*10 + j] = arma2[i,j]
pq.2[(i-1)*10 + j] = i + j
}
}
limit = c(min(aic1.2, aic2.2, aic3.2), max(aic1.2, aic2.2, aic3.2))
par(mfrow = c(1,3))
plot(aic1.2, ylim = limit, xlab = 'p', main = 'AIC of the fitted AR(p) mode')
plot(aic2.2, ylim = limit, xlab = 'q', main = 'AIC of the fitted MA(q) mode')
plot(aic3.2 ~ pq, xlab = 'p+q', ylim = limit, main = 'AIC of the fitted ARMA(p,q) mode')
points(x = 9, y = -1227.302, col = 'red', pch = 19)
par(mfrow = c(1,1))
# the red point represent arma(5,4) <- best!
#### Check if residuals conform to white noise
#Method 1: The sample ACF
resid1 = res1 - fitted(arima(res1, order = c(5, 0, 4), method = "ML"))
resid2 = res2 - fitted(arima(res2, order = c(5, 0, 4), method = 'ML'))
resid1.acf = acf(resid1)
resid2.acf = acf(resid2)
#Check how many within bound
resid1.acf = resid1.acf$acf[2:20] #The first acf value is of lag 0, which we ignore here
resid2.acf = resid2.acf$acf[2:20] #The first acf value is of lag 0, which we ignore here
bounds = c(-1.96/sqrt(n), 1.96/sqrt(n))
sum(resid1.acf < bounds[2] & resid1.acf > bounds[1]) #18/19 are within the bounds
sum(resid2.acf < bounds[2] & resid2.acf > bounds[1]) #18/19 are within the bounds
#Method 2: The Portmanteau test
Q.1 = cumsum(resid1.acf^2) * n
Q.2 = cumsum(resid2.acf^2) * n
Q.1 > qchisq(0.95, df = 1:19)
Q.2 > qchisq(0.95, df = 1:19)
#The hypothesis of i.i.d. residuals is accepted at level 0.05.
#Method 3: Rank test
mu.pi = 1/4*n*(n-1)
sigma.pi = sqrt(1/72*n*(n-1)*(2*n+5))
alpha = 0.05
z = qnorm(1 - alpha/2)
#Find Pi for residual 1
Pi.1 = 0
for(j in 1:(n-1)){
for(i in (j+1):n){
if(resid1[i] > resid1[j]) Pi.1 = Pi.1 + 1
}
}
P1 = abs(Pi.1 - mu.pi)/sigma.pi
P1 > z #Comparing test statistics with critical value
#Find Pi for residual 2
Pi.2 = 0
for(j in 1:(n-1)){
for(i in (j+1):n){
if(resid2[i] > resid2[j]) Pi.2 = Pi.2 + 1
}
}
P2 = abs(Pi.2 - mu.pi)/sigma.pi
P2 > z #Comparing test statistics with critical value
#Both results in a false at alpha = 0.05,
#thus the assumption that the residuals are i.i.d. cannot be rejected.
#This says there is no more trend in the data
par(mfrow = c(1,2))
plot(resid1, main = 'Residuals for method 1', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
plot(resid2, main = 'Residuals for method 2', ylab = 'Residuals', xlab = 'Date', xaxt = 'n', type = 'l')
axis(1, at = seq(1, length(rate), by = 48), labels = date[seq(1, length(rate), by = 48)])
par(mfrow = c(1,1))
###### Predict future values #####
#deseasonalize: moving average
#detrend: one-sided moving average
#residuals: arma(5,4)
seasonality = sk.matrix2[1,]
plot(one.sided.filters2[[5]][265:288], xaxt = 'n',
main = 'One sided moving average filter (last 24 points)', xlab = 'Dates')
axis(1, at = seq(1, 24, by = 12), labels = date[seq(265, 288, by = 12)])
#We decide to use the last 12 points to predict m289
#m13
Y = one.sided.filters2[[5]][277:288]
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model13 = model[[3]]
m13 = predict(model13, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2, t3 = 13^3))
m13
#m14
Y = c(one.sided.filters2[[5]][278:288], m13)
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model14 = model[[3]]
m14 = predict(model14, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2, t3 = 13^3))
#m15
Y = c(one.sided.filters2[[5]][279:288], m13, m14)
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model15.2 = model[[2]]
model15.3 = model[[3]]
m15.2 = predict(model15.2, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2))
m15.2
m15.3 = predict(model15.3, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2, t3 = 13^3))
m15.3
m15 = m15.2
#m15.2
#m16
Y = c(one.sided.filters2[[5]][280:288], m13, m14, m15)
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model16 = model[[2]]
m16 = predict(model16, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2))
m16
##m17
Y = c(one.sided.filters2[[5]][281:288], m13, m14, m15, m16)
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model17 = model[[2]]
m17 = predict(model17, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2))
m17
#m18
Y = c(one.sided.filters2[[5]][282:288], m13, m14, m15, m16, m17)
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model18 = model[[2]]
m18 = predict(model18, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2, t3 = 13^3))
m18
#m19
Y = c(one.sided.filters2[[5]][283:288], m13, m14, m15, m16, m17, m18)
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model19 = model[[2]]
m19 = predict(model19, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2, t3 = 13^3))
m19
#m20
Y = c(one.sided.filters2[[5]][284:288], m13, m14, m15, m16, m17, m18, m19)
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model20 = model[[2]]
m20 = predict(model20, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2, t3 = 13^3))
m20
#m21
Y = c(one.sided.filters2[[5]][285:288], m13, m14, m15, m16, m17, m18, m19, m20)
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model21 = model[[2]]
m21 = predict(model21, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2, t3 = 13^3))
m21
#m22
Y = c(one.sided.filters2[[5]][286:288], m13, m14, m15, m16, m17, m18, m19, m20, m21)
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model22 = model[[2]]
m22 = predict(model22, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2, t3 = 13^3))
m22
#m23
Y = c(one.sided.filters2[[5]][287:288], m13, m14, m15, m16, m17, m18, m19, m20, m21, m22)
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model23 = model[[2]]
m23 = predict(model23, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2, t3 = 13^3))
m23
#m24
Y = c(one.sided.filters2[[5]][288], m13, m14, m15, m16, m17, m18, m19, m20, m21, m22, m23)
matrix = data.frame(y = Y, Intercept = rep(1, 12))
name = names(matrix)
model = vector('list', 5)
R = numeric(5)
AR = numeric(5)
for(p in 1:5){
matrix = cbind(matrix, (1:12)^p) #Each time attach a new column
name = c(name, paste('t', p, sep = '')) #Append a new column name
colnames(matrix) = name
model[[p]] = lm(y ~ . - 1, data = matrix)
R[p] = summary(model[[p]])$r.squared
AR[p] = summary(model[[p]])$adj.r.squared
}
AR
plot(AR)
model24 = model[[2]]
m24 = predict(model24, newdata = data.frame(Intercept = 1, t1 = 13, t2 = 13^2, t3 = 13^3))
m24
trend.predict = c(m13, m14, m15, m16, m17, m18, m19, m20, m21, m22, m23, m24)
plot(trend.predict)
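#A compact sketch of the 12-step recursion above (for illustration only, not a
#replacement): the original chooses the polynomial degree from the adjusted
#R-squared at every step, whereas this sketch fixes the degree at 2.
trend.window = one.sided.filters2[[5]][277:288]
trend.loop = numeric(12)
for (h in 1:12) {
  fit.h = lm(y ~ poly(t, 2, raw = TRUE), data = data.frame(y = trend.window, t = 1:12))
  trend.loop[h] = predict(fit.h, newdata = data.frame(t = 13))
  trend.window = c(trend.window[-1], trend.loop[h]) #slide the 12-point window forward
}
trend.loop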
fit.res = arima (res2, c(5, 0, 4))
predict.res = predict(fit.res, n.ahead = 12)
plot(res2, type = "l", main = 'Residual prediction', ylab = 'Residual')
lines(predict.res$pred, col ='red')
predict = trend.predict + seasonality + predict.res$pred
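#(Back-transform: assuming the series was Box-Cox transformed earlier as
# (y^lambda - 1)/lambda, the line below applies the corresponding inverse.)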
predict.t = (1 + lambda*predict)^(1 / lambda)
#True values vs predicted values
plot(UR.predict$VALUE, xaxt = 'n', ylim = c(0,12))
axis(1, at = seq(1, 12, by = 1), labels = UR[,1][seq(289, 300, by = 1)])
points(as.numeric(predict.t), col = 2, ylim = c(0 ,12), pch=19)
|
library(shiny)
########################
#
# This is R code to run :
# Leslie-Gower competition model with stochastic growth rates
#
# This code always assumes 2 species
#
# 1. The Leslie-Gower model is also sometimes called the multispecies
# Beverton-Holt model. It has been used by Jonathan and others as
# a model of annual plants.
# lg1, lg2: populations of species 1,2
# rlg1, rlg2: reproduction rates
# alpha_ij: competition coefficients, where
# the notation "ij" gives the competitive
# effect of species j on i.
#
# 2. Stochastic reproduction can be modelled in a relatively
# straightforward way.
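#
# For reference, the update rule implemented in the loops below is
# (for species i = 1,2 with j the other species):
#
#   N_i[t+1] = s_i*N_i[t] + r_i[t]*N_i[t] / (1 + alpha_ii*N_i[t] + alpha_ij*N_j[t])
#
# i.e. surviving adults plus Beverton-Holt-type, density-dependent recruitment.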
#Load library MASS For multivariate random variables
library(MASS)
###################################
# 1. Leslie Gower model with fluctuating reproduction
######################################################
#This produces a column vector for each species.
#That is, the dimensions are such that rows = time.
pop_lgf = function (ia,ngens, rs, alphamat,surv,cor,var) {
#par(mar = c(0,0,0,0))
ia=as.matrix(ia)
rs=as.matrix(rs)
alphamat=as.matrix(alphamat)
surv=as.matrix(surv)
var=as.matrix(var)
#Initialize populations
lg1f = matrix(0, ngens + 1, 1) #+1 row so the t+1 update in the loop below stays in bounds
lg2f = lg1f
#Demographic parameters.
#Reproduction rates are multivariate random variables now!
#Means
m1=rs[1]
m2=rs[2]
means12=c(m1,m2)
#Variances
v1=var[1]
v2=var[2]
vs12=c(v1,v2)
#correlation/covariance
rho12=cor
cov12 = matrix(c(v1,(rho12*sqrt(v1)*sqrt(v2)),(rho12*sqrt(v2)*sqrt(v1)),v2),2,2)
#Transform specified means, variance to LOGNORMAL means, variances
MLog=log((means12^2)/sqrt(vs12+means12^2))
VLog=log(vs12/(means12^2)+1);
covLog=log(1+(cov12)/abs(means12%*%t(means12)));
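#(Standard lognormal moment matching: if log X ~ N(mu, sigma^2) then
# E[X] = exp(mu + sigma^2/2) and Var[X] = (exp(sigma^2) - 1)*exp(2*mu + sigma^2);
# solving for mu and sigma^2 given the requested means/variances yields
# MLog, VLog and covLog above.)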
#Now use mvrnorm to generate the entire time series of reproduction.
#repro12 = mvrnorm (ngens, means12, cov12)
repro12 = mvrnorm (ngens, MLog, covLog)
repro12log=exp(repro12)
r1lgf= repro12log[,1]
r2lgf= repro12log[,2]
#Survival rates
s1f = surv[1]
s2f = surv[2]
#Intra/Inter specific competition
alpha_11 = alphamat[1,1]
alpha_22 = alphamat[2,2]
alpha_21 = alphamat[2,1]
alpha_12 = alphamat[1,2]
#Important:
#Invasion is simulated in a single loop. The resident
#and invader designation is determined by the initial conditions (ICs).
#Give each species its initial abundance
lg1f[1] = ia[1]
lg2f[1] = ia[2]
#Population growth
for ( t in 1:ngens) {
lg1f[t+1] = s1f*lg1f[t]+r1lgf[t]*lg1f[t]/(1+lg1f[t]*alpha_11+lg2f[t]*alpha_12)
lg2f[t+1] = s2f*lg2f[t]+r2lgf[t]*lg2f[t]/(1+lg2f[t]*alpha_22+lg1f[t]*alpha_21)
}
#Visualize,
#jpeg( filename="lgnr1f_both_co.jpg", width=5, height=5, units = "in", pointsize=12, quality=100,res=300)
plot(lg2f, t="l",col="red",xlab="Generations", ylab="Population size",ylim=c(0,max(cbind(lg1f,lg2f))))
lines(lg1f, col="green")
#dev.off
}
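#Example call with hypothetical parameter values (illustration only - in the
#app the values come from the Shiny ui, so this stays commented out):
# pop_lgf(ia = c(10, 10), ngens = 100, rs = c(20, 20),
#         alphamat = matrix(c(0.02, 0.01, 0.01, 0.02), 2, 2),
#         surv = c(0.5, 0.5), cor = 0, var = c(10, 10))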
######################################################
# 1. Leslie Gower model with fluctuating reproduction
######################################################
#
# Animate the population growth in space:
#
pop_space_lgf = function (ia,ngens, rs, alphamat,surv,cor,var) {
#par(mar = c(0,0,0,0))
ia=as.matrix(ia)
rs=as.matrix(rs)
alphamat=as.matrix(alphamat)
surv=as.matrix(surv)
var=as.matrix(var)
#For spatial visualization
np=100
#Initialize populations
lg1f = matrix(0, ngens + 1, 1) #+1 row so the t+1 update in the loop below stays in bounds
lg2f = lg1f
#Demographic parameters.
#Reproduction rates are multivariate random variables now!
#Means
m1=rs[1]
m2=rs[2]
means12=c(m1,m2)
#Variances
v1=var[1]
v2=var[2]
vs12=c(v1,v2)
#correlation/covariance
rho12=cor
cov12 = matrix(c(v1,(rho12*sqrt(v1)*sqrt(v2)),(rho12*sqrt(v2)*sqrt(v1)),v2),2,2)
#Transform specified means, variance to LOGNORMAL means, variances
MLog=log((means12^2)/sqrt(vs12+means12^2))
VLog=log(vs12/(means12^2)+1);
covLog=log(1+(cov12)/abs(means12%*%t(means12)));
#Now use mvrnorm to generate the entire time series of reproduction.
#repro12 = mvrnorm (ngens, means12, cov12)
repro12 = mvrnorm (ngens, MLog, covLog)
repro12log=exp(repro12)
r1lgf= repro12log[,1]
r2lgf= repro12log[,2]
#Survival rates
s1f = surv[1]
s2f = surv[2]
#Intra/Inter specific competition
alpha_11 = alphamat[1,1]
alpha_22 = alphamat[2,2]
alpha_21 = alphamat[2,1]
alpha_12 = alphamat[1,2]
#Important:
#Invasion is simulated in a single loop. The resident
#and invader designation is determined by the initial conditions (ICs).
#Give each species its initial abundance
lg1f[1] = ia[1]
lg2f[1] = ia[2]
#Population growth
for ( t in 1:ngens) {
#Spread population 1 out in space:
prop1 = lg1f[t]/(lg1f[t]+lg2f[t]) #Proportion of space owned by 1
pop_space = matrix( as.numeric( runif(np^2) < prop1),np,np) #Randomly assign cells to 1 based on proportion
#Plot and delay each new plot to create appearance of animation
image(pop_space)
#date_time<-Sys.time()
#while((as.numeric(Sys.time()) - as.numeric(date_time))<0.25){} #dummy while loop
#Populations for next time step
lg1f[t+1] = s1f*lg1f[t]+r1lgf[t]*lg1f[t]*1/(1+lg1f[t]*alpha_11+lg2f[t]*alpha_12)
lg2f[t+1] = s2f*lg2f[t]+r2lgf[t]*lg2f[t]*1/(1+lg2f[t]*alpha_22+lg1f[t]*alpha_21)
}
return(cbind(lg1f,lg2f))
}
#################################################
# Shiny server logic
#################################################
# This section takes inputs from the ui and displays the desired
# information using the code from Saavedra et al. above
shinyServer( function(input, output) {
output$pop_lgf = renderPlot({
pop_lgf(input$ia, input$ngens, input$rs, input$alphamat,input$surv,input$cor, input$var )
})
output$pop_space_lgf = renderPlot({
pop_space_lgf (input$ia, input$ngens, input$rs, input$alphamat,input$surv,input$cor, input$var )
})
#Download the population data
datasetInput = reactive( pop_space_lgf (input$ia, input$ngens, input$rs, input$alphamat,input$surv,input$cor, input$var ))
output$downloadData = downloadHandler(
filename= "popLG_data.csv",
content = function(file) {
write.csv(datasetInput(), file, row.names = FALSE)
})
})
|
/leslie_gower/server.R
|
permissive
|
jusinowicz/coexistence_shiny_apps
|
R
| false | false | 6,088 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/options.R
\name{future.options}
\alias{future.options}
\alias{future.cmdargs}
\alias{.future.R}
\alias{future.startup.script}
\alias{R_FUTURE_STARTUP_SCRIPT}
\alias{future.debug}
\alias{R_FUTURE_DEBUG}
\alias{future.demo.mandelbrot.region}
\alias{R_FUTURE_DEMO_MANDELBROT_REGION}
\alias{future.demo.mandelbrot.nrow}
\alias{R_FUTURE_DEMO_MANDELBROT_NROW}
\alias{future.fork.multithreading.enable}
\alias{R_FUTURE_FORK_MULTITHREADING_ENABLE}
\alias{future.globals.maxSize}
\alias{R_FUTURE_GLOBALS_MAXSIZE}
\alias{future.globals.method}
\alias{R_FUTURE_GLOBALS_METHOD}
\alias{future.globals.onMissing}
\alias{R_FUTURE_GLOBALS_ONMISSING}
\alias{future.globals.resolve}
\alias{R_FUTURE_GLOBALS_RESOLVE}
\alias{future.globals.onReference}
\alias{R_FUTURE_GLOBALS_ONREFERENCE}
\alias{future.plan}
\alias{R_FUTURE_PLAN}
\alias{future.onFutureCondition.keepFuture}
\alias{R_FUTURE_ONFUTURECONDITION_KEEPFUTURE}
\alias{future.resolve.recursive}
\alias{R_FUTURE_RESOLVE_RECURSIVE}
\alias{future.globalenv.onMisuse}
\alias{R_FUTURE_GLOBALENV_ONMISUSE}
\alias{future.rng.onMisuse}
\alias{R_FUTURE_RNG_ONMISUSE}
\alias{future.wait.alpha}
\alias{R_FUTURE_WAIT_ALPHA}
\alias{future.wait.interval}
\alias{R_FUTURE_WAIT_INTERVAL}
\alias{future.wait.timeout}
\alias{R_FUTURE_WAIT_TIMEOUT}
\alias{R_FUTURE_RESOLVED_TIMEOUT}
\alias{future.output.windows.reencode}
\alias{R_FUTURE_OUTPUT_WINDOWS_REENCODE}
\alias{future.journal}
\alias{R_FUTURE_JOURNAL}
\title{Options used for futures}
\description{
Below are the \R options and environment variables that are used by the
\pkg{future} package and packages enhancing it.\cr
\cr
\emph{WARNING: Note that the names and the default values of these options may
change in future versions of the package. Please use with care until
further notice.}
}
\section{Settings moved to the 'parallelly' package}{
Several functions have been moved to the \pkg{parallelly} package:
\itemize{
\item \code{\link[parallelly:availableCores]{parallelly::availableCores()}}
\item \code{\link[parallelly:availableWorkers]{parallelly::availableWorkers()}}
\item \code{\link[parallelly:makeClusterMPI]{parallelly::makeClusterMPI()}}
\item \code{\link[parallelly:makeClusterPSOCK]{parallelly::makeClusterPSOCK()}}
\item \code{\link[parallelly:makeClusterPSOCK]{parallelly::makeNodePSOCK()}}
\item \code{\link[parallelly:supportsMulticore]{parallelly::supportsMulticore()}}
}
The options and environment variables controlling those have been adjusted
accordingly to have different prefixes.
For example, option \option{future.fork.enable} has been renamed to
\option{parallelly.fork.enable} and the corresponding environment variable
\env{R_FUTURE_FORK_ENABLE} has been renamed to
\env{R_PARALLELLY_FORK_ENABLE}.
For backward compatibility reasons, the \pkg{parallelly} package will
support both versions for a long foreseeable time.
See the \link[parallelly:parallelly.options]{parallelly::parallelly.options} page for the settings.
}
\section{Options for controlling futures}{
\describe{
\item{\option{future.plan}:}{(character string or future function) Default future strategy plan used unless otherwise specified via \code{\link[=plan]{plan()}}. This will also be the future plan set when calling \code{plan("default")}. If not specified, this option may be set when the \pkg{future} package is \emph{loaded} if command-line option \code{--parallel=ncores} (short \verb{-p ncores}) is specified; if \code{ncores > 1}, then option \option{future.plan} is set to \code{multisession} otherwise \code{sequential} (in addition to option \option{mc.cores} being set to \code{ncores}, if \code{ncores >= 1}). (Default: \code{sequential})}
\item{\option{future.globals.maxSize}:}{(numeric) Maximum allowed total size (in bytes) of global variables identified. Used to prevent too large exports. If set to \code{+Inf}, then the check for large globals is skipped. (Default: \code{500 * 1024 ^ 2} = 500 MiB)}
\item{\option{future.globals.onReference}: (\emph{beta feature - may change})}{(character string) Controls whether the identified globals should be scanned for so called \emph{references} (e.g. external pointers and connections) or not. It is unlikely that another \R process ("worker") can use a global that uses an internal reference of the master \R process - we call such objects \emph{non-exportable globals}.
If this option is \code{"error"}, an informative error message is produced if a non-exportable global is detected.
If \code{"warning"}, a warning is produced, but the processing will continue; it is likely that the future will be resolved with a run-time error unless processed in the master \R process (e.g. \code{plan(sequential)} and \code{plan(multicore)}).
If \code{"ignore"}, no scan is performed.
(Default: \code{"ignore"} but may change)
}
\item{\option{future.resolve.recursive}:}{(integer) An integer specifying the maximum recursive depth to which futures should be resolved. If negative, nothing is resolved. If \code{0}, only the future itself is resolved. If \code{1}, the future and any of its elements that are futures are resolved, and so on. If \code{+Inf}, infinite search depth is used. (Default: \code{0})}
\item{\option{future.rng.onMisuse}: (\emph{beta feature - may change})}{(character string) If random numbers are used in futures, then parallel (L'Ecuyer-CMRG) RNG should be used in order to get statistically sound RNGs. The defaults in the future framework assume that \emph{no} random number generation (RNG) takes place in the future expression because L'Ecuyer-CMRG RNGs come with an unnecessary overhead if not needed. To protect against mistakes, the future framework attempts to detect when random numbers are used even though L'Ecuyer-CMRG RNGs are not in place. If this is detected, and \code{future.rng.onMisuse = "error"}, then an informative error message is produced. If \code{"warning"}, then a warning message is produced. If \code{"ignore"}, no check is performed. (Default: \code{"warning"})}
\item{\option{future.globalenv.onMisuse}: (\emph{beta feature - may change})}{(character string) Assigning variables to the global environment for the purpose of using the variable at a later time makes no sense with futures, because the next future may be evaluated in a different R process. To protect against mistakes, the future framework attempts to detect when variables are added to the global environment. If this is detected, and \code{future.globalenv.onMisuse = "error"}, then an informative error message is produced. If \code{"warning"}, then a warning message is produced. If \code{"ignore"}, no check is performed. (Default: \code{"ignore"})}
\item{\option{future.onFutureCondition.keepFuture}:}{(logical) If \code{TRUE}, a \code{FutureCondition} keeps a copy of the \code{Future} object that triggered the condition. If \code{FALSE}, it is dropped. (Default: \code{TRUE})}
\item{\option{future.wait.timeout}:}{(numeric) Maximum waiting time (in seconds) for a free worker before a timeout error is generated. (Default: \code{30 * 24 * 60 * 60} (= 30 days))}
\item{\option{future.wait.interval}:}{(numeric) Initial interval (in
seconds) between polls. This controls the polling frequency for finding
an available worker when all workers are currently busy. It also controls
the polling frequency of \code{resolve()}. (Default: \code{0.01} = 0.01 seconds)}
\item{\option{future.wait.alpha}:}{(numeric) Positive scale factor used to increase the interval after each poll. (Default: \code{1.01})}
}
}
\section{Options for debugging futures}{
\describe{
\item{\option{future.debug}:}{(logical) If \code{TRUE}, extensive debug messages are generated. (Default: \code{FALSE})}
}
}
\section{Options for controlling package startup}{
\describe{
\item{\option{future.startup.script}:}{(character vector or a logical) Specifies zero or more future startup scripts to be sourced when the \pkg{future} package is \emph{attached}. It is only the first existing script that is sourced. If none of the specified files exist, nothing is sourced - there will be neither a warning nor an error.
If this option is not specified, environment variable \env{R_FUTURE_STARTUP_SCRIPT} is considered, where multiple scripts may be separated by either a colon (\code{:}) or a semicolon (\verb{;}). If neither is set, or either is set to \code{TRUE}, the default is to look for a \file{.future.R} script in the current directory and then in the user's home directory. To disable future startup scripts, set the option or the environment variable to \code{FALSE}. \emph{Importantly}, this option is \emph{always} set to \code{FALSE} if the \pkg{future} package is loaded as part of a future expression being evaluated, e.g. in a background process. In other words, they are sourced in the main \R process but not in future processes. (Default: \code{TRUE} in main \R process and \code{FALSE} in future processes / during future evaluation)}
\item{\option{future.cmdargs}:}{(character vector) Overrides \code{\link[base]{commandArgs}()} when the \pkg{future} package is \emph{loaded}.}
}
}
\section{Options for configuring low-level system behaviors}{
\describe{
\item{\option{future.fork.multithreading.enable} (\emph{beta feature - may change}):}{(logical) Enable or disable \emph{multi-threading} while using \emph{forked} parallel processing. If \code{FALSE}, different multi-thread library settings are overridden such that they run in single-thread mode. Specifically, multi-threading will be disabled for OpenMP (which requires the \pkg{RhpcBLASctl} package) and for \strong{RcppParallel}. If \code{TRUE}, or not set (the default), multi-threading is allowed. Parallelization via multi-threaded processing (done in native code by some packages and external libraries) while at the same time using forked (aka "multicore") parallel processing is known to be unstable. Note that this is not only true when using \code{plan(multicore)} but also when using, for instance, \code{\link[=mclapply]{mclapply}()} of the \pkg{parallel} package. (Default: not set)}
\item{\option{future.output.windows.reencode} (\emph{beta feature - may change}):}{(logical) Enable or disable re-encoding of UTF-8 symbols that were incorrectly encoded while captured. On MS Windows, R cannot capture UTF-8 symbols as-is when they are captured from the standard output. For example, a UTF-8 check mark symbol (\code{"\\u2713"}) would be relayed as \code{"<U+2713>"} (a string with eight ASCII characters). This option will cause \code{value()} to attempt to recover the intended UTF-8 symbols from \verb{<U+nnnn>} string components, if, and only if, the string was captured by a future resolved on MS Windows. (Default: \code{TRUE})}
}
See also \link[parallelly:parallelly.options]{parallelly::parallelly.options}.
}
\section{Options for demos}{
\describe{
\item{\option{future.demo.mandelbrot.region}:}{(integer) Either a named list of \code{\link[=mandelbrot]{mandelbrot()}} arguments or an integer in \{1, 2, 3\} specifying a predefined Mandelbrot region. (Default: \code{1L})}
\item{\option{future.demo.mandelbrot.nrow}:}{(integer) Number of rows and columns of tiles. (Default: \code{3L})}
}
}
\section{Deprecated or for internal prototyping}{
The following options exist only for troubleshooting purposes and must not
be used in production. If used, there is a risk that the results are
non-reproducible if processed elsewhere. To lower the risk of them being
used by mistake, they are marked as deprecated and will produce warnings
if set.
\describe{
\item{\option{future.globals.onMissing}:}{(character string) Action to take when non-existing global variables ("globals" or "unknowns") are identified when the future is created. If \code{"error"}, an error is generated immediately. If \code{"ignore"}, no action is taken and an attempt to evaluate the future expression will be made. The latter is useful when there is a risk for false-positive globals being identified, e.g. when future expression contains non-standard evaluation (NSE). (Default: \code{"ignore"})}
\item{\option{future.globals.method}:}{(character string) Method used to identify globals. For details, see \code{\link[globals]{globalsOf}()}. (Default: \code{"ordered"})}
\item{\option{future.globals.resolve}:}{(logical) If \code{TRUE}, globals that are \code{\link{Future}} objects (typically created as \emph{explicit} futures) will be resolved and have their values (using \code{value()}) collected. Because searching for unresolved futures among globals (including their content) can be expensive, the default is not to do it and instead leave it to the run-time checks that assert proper ownership when resolving futures and collecting their values. (Default: \code{FALSE})}
}
}
\section{Environment variables that set R options}{
All of the above \R \option{future.*} options can be set by corresponding
environment variable \env{R_FUTURE_*} \emph{when the \pkg{future} package is
loaded}. This means that those environment variables must be set before
the \pkg{future} package is loaded in order to have an effect.
For example, if \code{R_FUTURE_RNG_ONMISUSE = "ignore"}, then option
\option{future.rng.onMisuse} is set to \code{"ignore"} (character string).
Similarly, if \code{R_FUTURE_GLOBALS_MAXSIZE = "50000000"}, then option
\option{future.globals.maxSize} is set to \code{50000000} (numeric).
}
\examples{
# Allow at most 5 MB globals per futures
options(future.globals.maxSize = 5e6)
# Be strict; catch all RNG mistakes
options(future.rng.onMisuse = "error")
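# Default to multisession futures when plan() is not set explicitly
# (illustrates the 'future.plan' option described above)
options(future.plan = "multisession")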
}
\seealso{
To set \R options or environment variables when \R starts (even before the \pkg{future} package is loaded), see the \link[base]{Startup} help page. The \href{https://cran.r-project.org/package=startup}{\pkg{startup}} package provides a friendly mechanism for configuring \R's startup process.
}
|
/man/future.options.Rd
|
no_license
|
seonghobae/future
|
R
| false | true | 13,923 |
rd
|
# This tests the sensibility of the filterWindows() function. In particular,
# we want to make sure that the filter is calculated properly, despite the
# manipulations of width and prior count.
# library(csaw); library(testthat); source("test-filter.R")
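# Throughout these tests, a filter statistic of zero means the window's average
# log-CPM equals the background abundance it is compared against (see the
# explicit abundance checks below), i.e. no enrichment over background.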
library(edgeR)
test_that("global filtering works correctly", {
windowed <- SummarizedExperiment(assays=SimpleList(counts=matrix(10, 1, 1)),
rowRanges=GRanges("chrA", IRanges(1, 1)), colData=DataFrame(totals=1e6, ext=100),
metadata=list(final.ext=NA))
# Filter statistic should be zero, as effective length after extension is the same as the bin width.
binned <- SummarizedExperiment(assays=SimpleList(counts=matrix(10, nrow=10, 1)),
rowRanges=GRanges("chrA", IRanges(0:9*100+1, 1:10*100), seqinfo=Seqinfo("chrA", 1000)),
colData=DataFrame(totals=1e6, ext=1), metadata=list(final.ext=NA, spacing=100))
out <- filterWindows(windowed, binned, type="global")
expect_equivalent(out$filter, 0)
expect_identical(names(out$filter), "50%")
# Effective length after extension is exactly 10-fold less than the bin width.
binned <- SummarizedExperiment(assays=SimpleList(counts=matrix(100, nrow=10, 1)),
rowRanges=GRanges("chrA", IRanges(0:9*1000+1, 1:10*1000), seqinfo=Seqinfo("chrA", 10000)),
colData=DataFrame(totals=1e6, ext=1), metadata=list(final.ext=NA, spacing=1000))
out <- filterWindows(windowed, binned, type="global")
expect_equivalent(out$filter, 0)
# Testing out different prior counts.
out <- filterWindows(windowed, binned, type="global", prior.count=3.5)
expect_equivalent(out$filter, 0)
out <- filterWindows(windowed, binned, type="global", prior.count=0.5)
expect_equivalent(out$filter, 0)
# Testing what happens when the median is below the number of recorded bins.
zeroed <- windowed
assay(zeroed)[1] <- 0
binned <- SummarizedExperiment(assays=SimpleList(counts=matrix(10, nrow=10, 1)),
rowRanges=GRanges("chrA", IRanges(0:9*100+1, 1:10*100), seqinfo=Seqinfo("chrA", 10000)),
colData=DataFrame(totals=1e6, ext=1), metadata=list(final.ext=NA, spacing=100))
out <- filterWindows(zeroed, binned, type="global")
expect_equivalent(out$filter, 0) # Background estimate is also computed from all-zeroes.
# Testing what happens when the median is within the recorded bins, but not quite the median of them.
binned <- SummarizedExperiment(assays=SimpleList(counts=matrix(10, nrow=10, 1)),
rowRanges=GRanges("chrA", IRanges(0:9*100+1, 1:10*100), seqinfo=Seqinfo("chrA", 1100)),
colData=DataFrame(totals=1e6, ext=1), metadata=list(final.ext=NA, spacing=100))
out <- filterWindows(windowed, binned, type="global")
expect_equivalent(out$filter, 0)
expect_identical(names(out$filter), "45%")
# Testing what happens when you don't specify the bin.
seqinfo(rowRanges(zeroed)) <- Seqinfo("chrA", 100)
metadata(zeroed)$spacing <- 10
out <- filterWindows(zeroed, type="global")
expect_equivalent(out$filter, 0) # Should be zero, as both median and count are based on zero's.
win2 <- windowed
seqinfo(rowRanges(win2)) <- Seqinfo("chrA", 100)
metadata(win2)$spacing <- 10
out <- filterWindows(win2, type="global")
expect_equal(out$abundances, aveLogCPM(asDGEList(win2)))
expect_equal(out$filter, out$abundances - aveLogCPM(DGEList(matrix(0, 1, ncol(win2)), lib.size=win2$totals)))
metadata(win2)$spacing <- 100
out2 <- filterWindows(win2, type="global")
expect_equivalent(out2$filter, 0)
expect_equivalent(out2$abundances, out$abundances)
# Works correctly on empty inputs.
emp <- filterWindows(zeroed[0,], type="global")
expect_equal(emp$filter, numeric(0))
emp <- filterWindows(zeroed[0,], binned, type="global")
expect_equal(emp$filter, numeric(0))
emp <- filterWindows(zeroed, binned[0,], type="global")
expect_equal(emp$filter, 0)
})
test_that("local filtering works correctly", {
windowed <- SummarizedExperiment(assays=SimpleList(counts=matrix(10, 1, 1)),
rowRanges=GRanges("chrA", IRanges(1, 1)), colData=DataFrame(totals=1e6, ext=100),
metadata=list(final.ext=NA))
# Should be zero, as the count/width for the window is subtracted from the background bin.
binned <- SummarizedExperiment(assays=SimpleList(counts=matrix(20, 1, 1)),
rowRanges=GRanges("chrA", IRanges(1, 200)), colData=DataFrame(totals=1e6, ext=1),
metadata=list(final.ext=NA))
out <- filterWindows(windowed, binned, type="local")
expect_equivalent(out$filter, 0)
# After subtraction, the background is still 10-times wider, but again this should be zero.
binned <- SummarizedExperiment(assays=SimpleList(counts=matrix(110, 1, 1)),
rowRanges=GRanges("chrA", IRanges(1, 1100)), colData=DataFrame(totals=1e6, ext=1),
metadata=list(final.ext=NA))
out <- filterWindows(windowed, binned, type="local")
expect_equivalent(out$filter, 0)
# Testing out different prior counts.
out <- filterWindows(windowed, binned, type="local", prior.count=3.5)
expect_equivalent(out$filter, 0)
out <- filterWindows(windowed, binned, type="local", prior.count=0.5)
expect_equivalent(out$filter, 0)
# Another case of subtraction, this time with a different extension for the background.
binned <- SummarizedExperiment(assays=SimpleList(counts=matrix(110, 1, 1)),
rowRanges=GRanges("chrA", IRanges(1, 1001)), colData=DataFrame(totals=1e6, ext=100),
metadata=list(final.ext=NA))
out <- filterWindows(windowed, binned, type="local")
expect_equivalent(out$filter, 0)
# More subtraction.
binned <- SummarizedExperiment(assays=SimpleList(counts=matrix(100, 1, 1)),
rowRanges=GRanges("chrA", IRanges(1, 1000)), colData=DataFrame(totals=1e6, ext=1),
metadata=list(final.ext=1))
out <- filterWindows(windowed, binned, type="local")
expect_equivalent(out$filter, 0)
# Works correctly on empty inputs.
emp <- filterWindows(windowed[0,], binned[0,], type="local")
expect_equal(emp$filter, numeric(0))
expect_error(filterWindows(windowed, binned[0,], type="local"), "same length")
})
test_that("control-based filtering works correctly", {
windowed <- SummarizedExperiment(assays=SimpleList(counts=matrix(10, 1, 1)),
rowRanges=GRanges("chrA", IRanges(1, 1)), colData=DataFrame(totals=1e6, ext=100),
metadata=list(final.ext=NA))
countered <- windowed
expect_warning(out <- filterWindows(windowed, countered, type="control"), "not specified")
expect_equivalent(out$filter, 0)
expect_warning(out <- filterWindows(windowed, countered, type="control", prior.count=5), "not specified")
expect_equivalent(out$filter, 0)
# Also seeing what happens when the library size of the control changes.
countered2 <- countered
assay(countered2)[1] <- 20
countered2$totals <- 2e6
expect_warning(out <- filterWindows(windowed, countered2, type="control"))
# With normalization; in this case, a trivial scaling.
binned.chip <- SummarizedExperiment(assays=SimpleList(counts=matrix(100, 1, 1)),
rowRanges=GRanges("chrA", IRanges(1, 1000)), colData=DataFrame(totals=1e6, ext=1),
metadata=list(final.ext=NA))
binned.con <- SummarizedExperiment(assays=SimpleList(counts=matrix(100, 1, 1)),
rowRanges=GRanges("chrA", IRanges(1, 1000)), colData=DataFrame(totals=1e6, ext=1),
metadata=list(final.ext=NA))
sc.info <- scaleControlFilter(binned.chip, binned.con)
expect_equal(sc.info$scale, 1)
expect_identical(sc.info$data.totals, windowed$totals)
expect_identical(sc.info$back.totals, countered$totals)
out <- filterWindows(windowed, countered, type="control", scale.info=sc.info)
expect_equivalent(out$filter, 0)
# More effortful normalization, assuming undersampling in control.
countered2 <- countered
assay(countered2)[1] <- 5
binned.con2 <- binned.con
assay(binned.con2)[1] <- 50
sc.info <- scaleControlFilter(binned.chip, binned.con2)
expect_equal(sc.info$scale, 50/100)
expect_identical(sc.info$data.totals, windowed$totals)
expect_identical(sc.info$back.totals, countered2$totals)
out <- filterWindows(windowed, countered2, type="control", scale.info=sc.info)
expect_equivalent(out$filter, 0)
out <- filterWindows(windowed, countered2, type="control", scale.info=sc.info, prior.count=5)
expect_equivalent(out$filter, 0)
# Checking that it is unhappy when the regions are of different size.
countered2 <- SummarizedExperiment(assays=SimpleList(counts=matrix(20, 1, 1)),
rowRanges=GRanges("chrA", IRanges(1, 200)), colData=DataFrame(totals=1e6, ext=1),
metadata=list(final.ext=NA))
expect_warning(out <- filterWindows(windowed, countered2, type="control"))
expect_equal(out$filter, 0)
# Works correctly on empty inputs.
expect_warning(emp <- filterWindows(windowed[0,], countered[0,], type="control"))
expect_equal(emp$filter, numeric(0))
expect_error(filterWindows(windowed, countered[0,], type="control"), "same length")
})
test_that("proportional filtering works as expected", {
multi.win <- SummarizedExperiment(assays=list(counts=matrix(11:20, 10, 1)),
rowRanges=GRanges("chrA", IRanges(1:10, 1:10), seqinfo=Seqinfo("chrA", 1000)),
colData=DataFrame(totals=1e6, ext=100), metadata=list(final.ext=NA, spacing=1))
# works if not all windows are available, assuming the lost windows are lower abundance.
out <- filterWindows(multi.win, type="proportion")$filter
expect_equal(tail(out,1), 1)
expect_true(all(diff(out) > 0))
expect_true(all(out > 0.99))
# Still works upon changes to the chromosome size.
seqinfo(rowRanges(multi.win)) <- Seqinfo("chrA", 10)
out <- filterWindows(multi.win, type="proportion")$filter
expect_equal(out, 1:10/10)
expect_true(all(diff(out) > 0))
# Works correctly on empty inputs.
emp <- filterWindows(multi.win[0,], type="global")
expect_equal(emp$filter, numeric(0))
})
|
/tests/testthat/test-filter.R
|
no_license
|
Shians/csaw
|
R
| false | false | 10,117 |
r
|
|
#' @title Read and format contrast tables from Excel
#'
#' @description Reads contrast tables from an Excel file for use with \code{estimable} of package
#' gmodels.
#'
#' @aliases getContrasts readContrasts
#' @param cname named region in Excel file with contrast table
#' @param excelfile path of Excel file with contrast table
#' @param rows optionally only select some rows of the table
#' @return \code{readContrasts} reads a contrast table; \code{getContrasts}
#' additionally includes column and row name information to label contrast
#' tables with estimable (gmodels)
#' @author Dieter Menne, \email{dieter.menne@@menne-biomed.de}
#' @keywords models
#' @seealso See \code{contrasts.xlsx} in the extdata directory for some
#' rules of thumb on constructing a contrast matrix.
#' @examples
#' library(nlme)
#' library(gmodels)
#' library(RODBC)
#' options(digits=3)
#' set.seed(4711)
#' excelfile = system.file("extdata", "contrasts.xlsx", package = "Dmisc2")
#' d = expand.grid(subject = LETTERS[1:8],
#' peri= c("Wine", "Tea"),
#' post = c("Water", "Kirsch"),
#' interval = c("Pre", "Post"))
#' d$vol = round(rnorm(nrow(d),10,2),1)
#' d.lme = lme(vol~interval+peri+post+peri:interval+interval:post,
#' data=d,random=~1|subject)
#' summary(d.lme)
#' ct = getContrasts("peripostinterval",excelfile)
#' estimable(d.lme,ct,conf.int=0.95)
#' @export
#' @rdname getContrasts
"getContrasts" = function(cname,excelfile,rows = NULL) {
cn = readContrasts(cname,excelfile)
if (!is.null(rows))
cn = cn[rows,]
colnames = cn[,1]
rownames = gsub('#','.',colnames(cn))#[-1]
# Use _ as placeholder for an empty field
vars = do.call("rbind", strsplit(rownames,'\\.'))
vars[vars == '_'] = ''
# upper left corner must contain the names of the variables
varnames = vars[1,]
vars = vars[-1,,drop = FALSE]
rownames(vars) = rownames[-1]
colnames(vars) = varnames
cn = t(as.matrix(cn[,-1]))
rownames(cn) = rownames[-1]
colnames(cn) = colnames
attr(cn,"vars") = data.frame(vars)
attr(cn,"varnames") = varnames
cn
}
#' @export
#' @rdname getContrasts
"readContrasts" =
function(cname,excelfile) {
if (!file.exists(excelfile))
stop(str_c("Contrast file <<",excelfile,">> not found"))
channel = odbcConnectExcel2007(excelfile)
cn = sqlQuery(channel,paste("select * from",cname),as.is = TRUE)
odbcClose(channel)
if (class(cn) != "data.frame")
stop(str_c("Range ",cname," not found in file ",excelfile))
cn[,1] = sub('\\s+$', '', cn[,1], perl = TRUE)
cn
}
|
/R/getContrasts.r
|
no_license
|
jimhester/dmisc2
|
R
| false | false | 2,572 |
r
|
hotspot_wrap <- function(map, peaks, peak_window = 1, minLOD = 5.5,
project_info) {
# This uses global hotspots
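  # With the default window (1) and LOD threshold (5.5) and a non-empty project,
  # read the precomputed global hotspot file; otherwise recompute hotspots from
  # the supplied map and peaks.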
if(peak_window == 1 & minLOD == 5.5 & nrow(project_info))
read_project(project_info, "hotspot")
else {
if(shiny::isTruthy(map) && shiny::isTruthy(peaks)) {
hotspot(map, peaks, peak_window, minLOD)
} else
NULL
}
}
|
/R/hotspot_wrap.R
|
no_license
|
byandell/qtl2shiny
|
R
| false | false | 382 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeModels.R
\name{makeModels}
\alias{makeModels}
\title{Build model matrices for differential expression}
\usage{
makeModels(sampleDepths, testvars, adjustvars = NULL, testIntercept = FALSE)
}
\arguments{
\item{sampleDepths}{Per sample library size adjustments calculated with
\link{sampleDepth}.}
\item{testvars}{A vector or matrix specifying the variables to test. For
example, a factor with the group memberships when testing for differences
across groups. It's length should match the number of columns used from
\code{coverageInfo$coverage}.}
\item{adjustvars}{Optional matrix of adjustment variables (e.g. measured
confounders, output from SVA, etc.) to use in fitting linear models to each
nucleotide. These variables have to be specified by sample and the number of
rows must match the number of columns used. It will also work if it is a
vector of the correct length.}
\item{testIntercept}{If \code{TRUE} then \code{testvars} is ignored and mod0
will contain the column medians and any adjusting variables specified, but
no intercept.}
}
\value{
A list with two components.
\describe{
\item{mod }{ The alternative model matrix.}
\item{mod0 }{ The null model matrix.}
}
}
\description{
Builds the model matrices for testing for differential expression by
comparing a model with a grouping factor versus one without it. It adjusts
for the confounders specified and the median coverage of each sample. The
resulting models can be used in \link{calculateStats}.
}
\examples{
## Collapse the coverage information
collapsedFull <- collapseFullCoverage(list(genomeData$coverage),
verbose = TRUE
)
## Calculate library size adjustments
sampleDepths <- sampleDepth(collapsedFull,
probs = c(0.5), nonzero = TRUE,
verbose = TRUE
)
## Build the models
group <- genomeInfo$pop
adjustvars <- data.frame(genomeInfo$gender)
models <- makeModels(sampleDepths, testvars = group, adjustvars = adjustvars)
names(models)
models
}
\seealso{
\link{sampleDepth}, \link{calculateStats}
}
\author{
Leonardo Collado-Torres
}
|
/man/makeModels.Rd
|
no_license
|
fallinwind/derfinder
|
R
| false | true | 2,103 |
rd
|
#' Sample Seedling Subplots For Seedlings
#'
#' This function takes a list of random x and y coordinates within a plot,
#' builds a box around each point, and subsets a full plot's dataframe to see if
#' there are any seedlings inside.
#'
#' @param subplotxy A data.frame of x and y coordinates for sampling subplots
#' @param fulldf The full plot dataframe, containing all trees and seedlings
#' within your larger plot space.
#' @param subplotsize The size of the subplot in coordinate space, default is 1.
#'
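#' @examples
#' \dontrun{
#' # Hypothetical sketch (object names are assumptions): `census` is a data.frame
#' # of stems with x, y, species and dbh columns, where dbh is NA for seedlings.
#' pts <- data.frame(x = runif(5, 1, 9), y = runif(5, 1, 9))
#' sampleSubplots(pts, fulldf = census, subplotsize = 1)
#' }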
#' @export
sampleSubplots <- function(subplotxy, fulldf, subplotsize=1){
subplotbound <- subplotsize/2
responsetable <- data.frame(x=NA,
y=NA,
species=NA,
numseedlings=NA)
## loop through each row of subplotxy
for(i in 1:nrow(subplotxy)){
## build the bounds of the sampling box
boundaries <- getBoxBoundaries(subplotxy[i, "x"],
subplotxy[i, "y"],
subplotbound)
## subset to any values that are in the subplot
subsettedDf <- fulldf[which(fulldf$x > boundaries[1] & fulldf$x < boundaries[2] & fulldf$y > boundaries[3] & fulldf$y < boundaries[4] & is.na(fulldf$dbh)),]
if(nrow(subsettedDf) > 0){
## get the number of species left inside
species <- unique(subsettedDf$species)
## for each species...
for(j in 1:length(species)){
responsetable <- rbind(responsetable,
c(subplotxy[i, "x"],
subplotxy[i, "y"],
species[j],
sum(is.na(subsettedDf[subsettedDf$species==species[j],
"dbh"]))
)
)
} ## end species for loop
} ##end if there are successful rows
} ## end subplot for loop
## clean up responsetable
responsetable$x <- as.numeric(responsetable$x)
responsetable$y <- as.numeric(responsetable$y)
responsetable$numseedlings <- as.numeric(responsetable$numseedlings)
return(cleanResponse(responsetable,1))
}
|
/R/sampleSubplots.R
|
no_license
|
davis-research/disperseR
|
R
| false | false | 2,145 |
r
|
testlist <- list(a = -640034343L, b = -640034343L, x = integer(0))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610056714-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 127 |
r
|
# str_count() and str_match() used below come from the stringr package
library(stringr)
replaceNAI<-function(INCFile,
OUTFile1,
OUTFile2
){
#######################################################################
#READ INPUT FILE
INCData <- read.csv(INCFile,
header=T,
sep="\t",
stringsAsFactors = F,
row.names = 1)
#######################################################################
#subset PSI values
onlyValues<-INCData[,7:ncol(INCData)]
#subset QUAL values
Qvals <- grep("\\.Q$", colnames(onlyValues))
qualValues <-onlyValues[, Qvals]
psiValues <-onlyValues[,-Qvals]
finalValues<-matrix(0, ncol=ncol(psiValues), nrow=nrow(psiValues))
colnames(finalValues)<-colnames(onlyValues)[-Qvals]
#######################################################################
#count Ns in QUAL values
Ns_comma<-t(apply(qualValues, 1, function(x){ str_count(x, "N,")}))
#######################################################################
#replace
indexes_N<-which(Ns_comma !=0, arr.in=TRUE)
indexes_NewN3<-which(Ns_comma ==3, arr.in=TRUE);
finalValues[indexes_N]<-Ns_comma[indexes_N];head(finalValues)
correctedPsiVal<-psiValues;
head(correctedPsiVal)
correctedPsiVal[indexes_NewN3]<-"NAnew3"
originalNA<-is.na(psiValues)
correctedPsiVal[originalNA]<-"NAold"
#
fullNAS<-cbind(INCData[, 1:6], correctedPsiVal)
fullNAS[1:5,1:6]
#####################################################
#compute and print STATS:
tVal<-length(originalNA)# 235328422
oNA<-length(which(originalNA))#95995460 40.8
new3<-length(indexes_NewN3)/2# 827226 28.39
oNA2<-length(which(correctedPsiVal=="NAold"))#
new3b<-length(which(correctedPsiVal=="NAnew3"))#
m1<-paste(round(oNA/tVal*100),"%")
m2<-paste(round(new3/tVal*100),"%")
m3<-paste(round(new3b/tVal*100),"%")
message(paste("Num total PSI values:",tVal ) )
message(paste("NA original values:", m1 ))
message(paste("NewN3 values:", m3 ))
message(paste("Total NA + NewN3 values:",m2) )
#####################################################
#PRINT OUTPUT FILE
write.table(fullNAS,OUTFile1, sep="\t", col.names = NA )
message(paste("Final corrected table only NewNAs is written in:",OUTFile1) )
#####################################################
#correct IR:
message("Number of events by type: ")
print(table(fullNAS$COMPLEX) )
#IRCqual<-qualValues[fullNAS$COMPLEX=="IR-C" | fullNAS$COMPLEX=="IR-S" ,]
ii<-which(fullNAS$COMPLEX=="IR")
IRCqual<-qualValues[ii,]
# IRCpsi<-fullNAS[fullNAS$COMPLEX=="IR-C" | fullNAS$COMPLEX=="IR-S" ,6:ncol(fullNAS)]
IRCpsi<-fullNAS[ii ,7:ncol(fullNAS)]
###################################################################
pval<-apply(IRCqual, 2, function(x)
{
as.numeric(str_match(
matrix( unlist(strsplit(x, ",")), ncol=6, byrow = T)[,5],"(.*)@")[,2])
})
pvalCorrected<-apply(pval, 2, p.adjust)
pvalCorrected[pvalCorrected<0.05]<-"NAI"
######################################################################
Nis<-t(apply(pvalCorrected, 1, function(x)
{ str_count(x, "NAI")}))
indexes_NAI<-which(Nis !=0, arr.in=TRUE)
#####################################################################
dim(IRCpsi); dim(IRCqual)
rownames(IRCpsi)[1:10]
rownames(IRCqual)[1:10]
correctedIRpsi<-IRCpsi;
#compute and print STATS IR
tIR<-dim(IRCpsi)[1]*dim(IRCpsi)[2]
tNewNAI<-dim(indexes_NAI)[1]
m4<-paste(round(tNewNAI/tIR*100),"%")
message(paste("Num total PSI values:",tVal ) )
correctedIRpsi[indexes_NAI]<-"NAI"
df1<-correctedPsiVal;
df2<-correctedIRpsi
df1[match(rownames(df2) , rownames(df1)), ] <- df2
fullNASIR<-cbind(INCData[, 1:6], df1)
message("pval analyzed (for correction): ",length(pvalCorrected))
message("pval corrected <0.05: ",m4)
write.table(fullNASIR,OUTFile2, sep="\t", col.names = NA )
message(paste("Final corrected table NewNAs+NAI is written in:",OUTFile2) )
}
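#####################################################
# Hypothetical usage sketch (the file names below are placeholders, not part of the original script):
# replaceNAI(INCFile  = "INCLUSION_LEVELS_FULL.tab",
#            OUTFile1 = "INCLUSION_LEVELS_newNA.tab",
#            OUTFile2 = "INCLUSION_LEVELS_newNA_NAI.tab")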
|
/replaceNAI_corrected.R
|
no_license
|
estepi/useful
|
R
| false | false | 3,993 |
r
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/get_dataset.R
\name{get_dataset.geochronologic}
\alias{get_dataset.geochronologic}
\title{Obtain dataset information from an object of class \code{geochronologic}.}
\usage{
\method{get_dataset}{geochronologic}(x, ...)
}
\arguments{
\item{x}{An object of class \code{geochronologic}.}
\item{...}{objects passed from the generic. Not used in the call.}
}
\description{
A function to access the Neotoma API and return datasets corresponding to the parameters defined by the user.
}
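\examples{
# Minimal hypothetical sketch: `gchron` is assumed to be an existing object of
# class geochronologic obtained from a previous neotoma query.
\dontrun{
datasets <- get_dataset(gchron)
}
}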
|
/man/get_dataset.geochronologic.Rd
|
no_license
|
Nitin-Joshi-perk/neotoma
|
R
| false | false | 568 |
rd
|
plotCandidatos <- function(){
colors <- c('rgb(244, 223, 100)', 'rgb(72, 177, 219)')
output$anoPlot <- renderUI({
if(input$cargoPlot %in% cargos1) {
selectInput("anoPlot", "Ano", anos1)
} else{
selectInput("anoPlot", "Ano", anos2)
}
})
output$estadoPlot <- renderUI({
if(input$cargoPlot == "Presidente"){
selectInput("estadoPlot", "Estado", "BR")}
else{
selectInput("estadoPlot", "Estado", estadosdiv )
}
})
output$nomemunicipio <- renderUI({
if(input$estadoPlot == "BR"){
selectInput("nomemunicipio", "Municipio", choices = "all")
} else {
#filtering the data frame by the selected state
columns <- list("UF", "NOME_MUNICIPIO")
df <- cepespdata(year = input$anoPlot,state = input$estadoPlot, position = input$cargoPlot, columns_list = columns)
df <- df[df$UF == input$estadoPlot,]
choices <- df$NOME_MUNICIPIO
selectInput("nomemunicipio", "Municipio", choices = choices)
}
})
output$generoSpawn <- renderUI({
if(input$cargoPlot != "Prefeito" & input$cargoPlot != "Vereador"){
box(plotlyOutput("genero"), title = "Distribuicao dos candidatos por genero", status = "primary")
}
})
output$piespawn <- renderUI({
if(input$cargoPlot %in% c("Presidente", "Governador","Senador")){
box(plotlyOutput("pie"), title = paste(" Percentual de votos por candidato a:", input$cargoPlot), status = "primary")
} else if (input$cargoPlot %in% c("Deputado Federal","Deputado Estadual", "Vereador")){
box(plotlyOutput("pie2"), title = paste("Percentual de votos por partido para candidatos a:", input$cargoPlot), status = "primary")
} else {
box(plotlyOutput("pie3"), title = paste("Percentual de votos por partido para candidatos a:", input$cargoPlot), status = "primary")
}
})
output$tabela <- renderUI({
if(input$cargoPlot %in% c("Presidente", "Governador","Senador")){
box(tableOutput("tabela1"), title = "Resultados eleicao", background = "light-blue", width = 6)
} else if (input$cargoPlot %in% c("Deputado Federal","Deputado Estadual", "Vereador")){
box(tableOutput("tabela2"), title = " Resultados eleicao", background = "light-blue", width = 6)
} else {
box(tableOutput("tabela3"), title = "Resultados eleicao", background = "light-blue", width = 6)
}
})
regagrPlot <- reactive({
if(input$cargoPlot != "Vereador" && input$cargoPlot != "Prefeito"){
regagrPlot <- "Estado"
}
else{
regagrPlot <- "Municipio"
}
return(regagrPlot)
})
columplot <- reactive({
if(input$cargoPlot != "Vereador" && input$cargoPlot != "Prefeito"){
columplot <- list( "NUMERO_CANDIDATO", "UF", "QTDE_VOTOS", "NOME_URNA_CANDIDATO", "SIGLA_PARTIDO")
}
else{
columplot <- list( "NUMERO_CANDIDATO", "UF", "QTDE_VOTOS", "NOME_URNA_CANDIDATO", "SIGLA_PARTIDO", "NOME_MUNICIPIO")
}
return(columplot)
})
#for the second tab
#Downloading candidate data and filtering by state
dfcepesp <- reactive({
# For mayor (prefeito) and councillor (vereador) inputs the gender-distribution plot is not produced, because the data are not
#available through the API; the workaround is to show the composition of men and women in the state
columns <- list("SIGLA_UF","NUMERO_CANDIDATO", "NOME_URNA_CANDIDATO", "NUMERO_PARTIDO","SIGLA_PARTIDO","COMPOSICAO_LEGENDA", "DESCRICAO_SEXO", "DESPESA_MAX_CAMPANHA")
dfcepesp <- as.data.frame(candidates(year = input$anoPlot, position = input$cargoPlot, columns_list = columns))
dfcepesp <- dfcepesp[dfcepesp[8] != -1,]
dfcepesp <- dfcepesp[dfcepesp[1] == input$estadoPlot,]
return(dfcepesp)
})
#Gender plot
output$genero <- renderPlotly({
dadosgenero <- prop.table(table(dfcepesp()[7]))
pd <- plot_ly( labels = names(dadosgenero), values = as.vector(dadosgenero), type = "pie",
textposition = "inside",
textinfo = 'label+percent',
insidetextfont = list(color = 'rgb(1, 14, 20)'),
hoverinfo = 'text',
text = paste( names(dadosgenero), ":", as.vector(dadosgenero)),
marker = list(colors = colors))
})
#_________________________________________________________________
#Plot of vote shares by candidate and party.
#Fetching the data
#PIE chart for president, governor and senator
dfpie <- reactive({
columns <- list( "NUMERO_CANDIDATO", "UF", "QTDE_VOTOS", "NOME_URNA_CANDIDATO", "SIGLA_PARTIDO", "NOME_MUNICIPIO")
dfpie <- cepespdata(year = input$anoPlot, position = input$cargoPlot, regional_aggregation = regagrPlot(), columns_list = columplot())
if(input$estadoPlot == "BR"){
dfpie <- as.data.frame(dfpie)
} else {
dfpie <- as.data.frame(dfpie)
dfpie <- dfpie[dfpie$UF == input$estadoPlot,]}
return(dfpie)
})
#computing the vote share
prop <- reactive({
pp <- vector()
v <- vector()
nomes <- vector()
tabela <- as.data.frame(table(dfpie()$NUMERO_CANDIDATO))
for(i in 1: length(tabela$Var1)){
a <- dfpie()[dfpie()$NUMERO_CANDIDATO == tabela$Var1[i],]
pp[i] <- sum(a$QTDE_VOTOS)/ sum(dfpie()$QTDE_VOTOS)
v <- as.numeric(which(dfpie()$NUMERO_CANDIDATO == tabela$Var1[i])[1])
if(is.na(v) == F){
nomes[i] <- dfpie()$NOME_URNA_CANDIDATO[v]}
}
pp <- round(pp,3)
prop <- as.data.frame(cbind(nomes, pp))
return(prop)
})
#coalition
dadoscolig <- reactive({
a <- vector()
colig <- vector()
partido <- vector()
nomes <- prop()$nomes
for(i in 1: length(dfcepesp()$COMPOSICAO_LEGENDA)){
a <- as.numeric(which(nomes == dfcepesp()$NOME_URNA_CANDIDATO[i])[1])
if(is.na(a) == F){
colig[a] <- dfcepesp()$COMPOSICAO_LEGENDA[i]
partido[a] <- dfcepesp()$SIGLA_PARTIDO[i]
}}
dadoscolig <- as.data.frame(cbind(colig, partido))
return(dadoscolig)
})
#plotting the chart
output$pie <- renderPlotly({
if (length(prop()$nomes) == length(dadoscolig()$partido)){
pie <- plot_ly(labels = prop()$nomes, values = prop()$pp, type = "pie",
textposition = "inside",
textinfo = "label+percent",
insidetextfont = list(color = '#FFFFFF'),
hoverinfo = 'text',
text = ~paste("<b> Coligacao: </b>", dadoscolig()$colig, "<br>", "<b> Partido do candidato: </b>", dadoscolig()$partido, sep = " "),
marker = list(colors = colors,
line= list(color = '#FFFFFF', width=1 )),
showlegend = T)
}
else{
pie <- plot_ly(labels = prop()$nomes, values = prop()$pp, type = "pie",
textposition = "inside",
textinfo = "label+percent",
insidetextfont = list(color = '#FFFFFF'),
hoverinfo = 'text',
marker = list(colors = colors,
line= list(color = '#FFFFFF', width=1 )),
showlegend = T)}
})
#tabela1
output$tabela1 <- renderTable({
dados <- cbind(prop(), dadoscolig())
colnames(dados) <- c("nome candidato", "Proporcao de votos", "Coligacao", "Partido")
tabela1 <- as.data.frame(dados)
})
#______________________________________________
#for federal deputies, state deputies and councillors
prop2 <- reactive({
pp <- vector()
v <- vector()
nomes <- vector()
tabela <- as.data.frame(table(dfpie()$SIGLA_PARTIDO))
for(i in 1: length(tabela$Var1)){
a <- dfpie()[dfpie()$SIGLA_PARTIDO == tabela$Var1[i],]
pp[i] <- sum(a$QTDE_VOTOS)/ sum(dfpie()$QTDE_VOTOS)
v <- as.numeric(which(dfpie()$SIGLA_PARTIDO == tabela$Var1[i])[1])
if(is.na(v) == F){
nomes[i] <- dfpie()$SIGLA_PARTIDO[v]
}
}
pp <- round(pp,3)
prop2 <- as.data.frame(cbind(pp, nomes))
return(prop2)
})
#plot
output$pie2 <- renderPlotly({
pie2 <- plot_ly(labels = prop2()$nomes, values = prop2()$pp, type = "pie",
textposition = "inside",
textinfo = "label+percent",
insidetextfont = list(color = '#FFFFFF'),
hoverinfo = 'text',
marker = list(colors = colors,
line= list(color = '#FFFFFF', width=1 )),
showlegend = T)
})
#tabela2
output$tabela2 <- renderTable({
dados <- prop2()
colnames(dados) <- c("Partido", "Proporcao de votos por partido")
tabela2 <- dados
})
#__________________________________________________________
#For mayor
dfmun <- reactive({
if(input$nomemunicipio != "all"){
dfmun <- dfpie()[dfpie()$UF == input$estadoPlot,]
dfmun <- dfpie()[dfpie()$NOME_MUNICIPIO == input$nomemunicipio,]}
else{
dfmun <- dfpie()}
return(dfmun)
})
#Vote share
# for mayor, cepespdata cannot return the candidate or party name (it returns #NE), so
#the vote share is computed by candidate number
prop3 <- reactive({
pp <- vector()
v <- vector()
tabela <- as.data.frame(table(dfmun()$NUMERO_CANDIDATO))
for(i in 1: length(tabela$Var1)){
a <- dfmun()[dfmun()$NUMERO_CANDIDATO == tabela$Var1[i],]
pp[i] <- sum(a$QTDE_VOTOS)/ sum(dfmun()$QTDE_VOTOS)
}
prop3 <- round(pp,3)
return(prop3)
})
#plotting the chart
output$pie3 <- renderPlotly({
pie3 <- plot_ly(labels = names(table(dfmun()$NUMERO_CANDIDATO)), values = prop3(), type = "pie",
textposition = "inside",
textinfo = "label+percent",
insidetextfont = list(color = '#FFFFFF'),
hoverinfo = 'text',
marker = list(colors = colors,
line= list(color = '#FFFFFF', width=1 )),
showlegend = T)
})
#tabela3
output$tabela3 <- renderTable({
nomes <- names(table(dfmun()$NUMERO_CANDIDATO))
dados <- cbind(nomes, prop3())
colnames(dados) <- c("Numero do candidato", "Proporcao de votos")
tabela3 <- dados
})
output$textinho3 <- renderText({
textinho3 <- HTML(paste(
h2(HTML(' '),HTML(' '), "Guia:"),
p( HTML(' '), HTML(' '), HTML(' '), strong("Grafico 1:"), "Mostra a distribuicao de genero dos candidatos dado o cargo, ano de eleicao, estado e nome do municipio fornecidos pelo usuario.", HTML("<br/>"),
HTML(' '), HTML(' '), HTML(' '), strong("Grafico 2:"), "Permite visualizar o percentual de votos do candidato ou partido selecionados em formato de grafico circular. Permite-se tambem verificar a coligacao do candidato ao passar o cursor no grafico", HTML("<br/>"),
HTML(' '), HTML(' '), HTML(' '), strong("Tabela 1:"), "Mostra, de acordo com as variaveis selecionadas para presidente, governador e senador, o nome do candidato, o partido dele, a proporcao de votos e a coligacao. Para os demais cargos, a tabela apresenta o partido, a coligacao e o percentual de votos por partido.", HTML("<br/>"), HTML("<br/>"),
HTML(' '), HTML(' '), HTML(' '), "Obs: Pode-se fazer download dos graficos passando cursor do mouse no canto superior da tela e selecionando o icone: foto.", HTML("<br/>"),
HTML(' '), HTML(' '), HTML(' '), "Devido a grande quantidade de informacoes a serem processadas esse grafico pode levar algum tempo para ser carregado."
)))
})
}
|
/www/func/PlotCandidatos.R
|
no_license
|
tadeup/gv-code
|
R
| false | false | 11,781 |
r
|
testlist <- list(A = structure(c(2.31584307392677e+77, 6.84360123863729e-301, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613111479-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 257 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bidsio.R
\name{read_confounds.bids_project}
\alias{read_confounds.bids_project}
\title{read confound files}
\usage{
\method{read_confounds}{bids_project}(
x,
subid = ".*",
task = ".*",
session = ".*",
cvars = DEFAULT_CVARS,
npcs = -1,
perc_var = -1,
nest = TRUE
)
}
\arguments{
\item{subid}{(optional) subid regex selector}
\item{task}{(optional) task regex selector}
\item{cvars}{the names of the confound variables to select. If missing, defaults to a set defined by constant \code{DEFAULT_CVARS}.}
\item{npcs}{perform pca reduction on confound matrix and reduce to \code{npcs} dimensions}
\item{perc_var}{perform pca reduction on confound matrix and retain \code{perc_var} percent of total (selected) confound variance}
\item{nest}{nest confound tables by subject/session/run}
}
\description{
read in fmriprep confound tables for one or more subjects
}
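\examples{
# Hypothetical sketch: `proj` is assumed to be a bids_project object for an
# fmriprep-preprocessed dataset; the argument values are illustrative only.
\dontrun{
conf <- read_confounds(proj, subid = "01", task = "rest", npcs = 5)
}
}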
|
/man/read_confounds.bids_project.Rd
|
no_license
|
bbuchsbaum/bidser
|
R
| false | true | 956 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/building_blocks.R
\name{p_paasche}
\alias{p_paasche}
\title{Paasche price index.}
\usage{
p_paasche(pt, pb, qt)
}
\arguments{
\item{pt}{An n-vector (or m x n matrix) of prices in the current period.}
\item{pb}{An n-vector (or m x n matrix) of prices in the baseline period.}
\item{qt}{An n-vector (or m x n matrix) of quantities in the current period.}
}
\value{
Price index between the current and baseline period (number or vector).
}
\description{
The Paasche price index is calculated as
\deqn{ \frac{\sum_i p^t_i q^t_i}{\sum_i p^b_i q^t_i}.}
Arguments can either be vectors or matrices.
}
\examples{
Pt <- matrix(1:6, ncol = 2)
Pb <- Pt*0.7
Qt <- matrix(2:7, ncol = 2)
p_paasche(Pt, Pb, Qt)
p_paasche(Pt[1,], Pb[1,], Qt[1,])
}
|
/man/p_paasche.Rd
|
permissive
|
ErikOSorensen/pppindexr
|
R
| false | true | 801 |
rd
|
library(ecoforecastR)
#making a forecast and having fun
##HERE IS WHAT GOES IN:
#IC=initial conditions, from j.pheno.out$params, see below
#tempcast=max temp forecast from NOAA ensembles
#beta=slope of temp data (assessed from daymet data?)
#q=process error tau_add
#Nmc=# of mcmc runs
#gmin=default value min gcc
#gmax=default value max gcc
##STILL NEED TO SAVE DATA VALUES EACH TIME TO GET GMIN AND GMAX FOR EACH SITE
#the timestep is 16 days:
NT=16
#the number of Monte Carlo ensemble members is 1000:
Nmc=1000
# #we set gcc min and max values, they are different for each run/site and they are here:
# load(file=paste0(as.character(siteID[i]),".data.Rdata"))
# gmin=data$gmin
# gmax=data$gmax
#load gcc data into list:
site.gcc<-list()
site.gcc$BART<-BART
site.gcc$CLBJ<-CLBJ
site.gcc$DELA<-DELA
site.gcc$GRSM<-GRSM
site.gcc$HARV<-HARV
site.gcc$SCBI<-SCBI
site.gcc$STEI<-STEI
site.gcc$UKFS<-UKFS
#getting all IC's for each site:
IC.ens<-list()
for (s in siteID){
IC.ens[[s]]<-rnorm(Nmc,tail(site.gcc[[s]]$gcc_90,1),tail(site.gcc[[s]]$gcc_sd,1))
}
#FORECAST FUNCTION
phenoforecast <- function(IC,tempcast,beta,Q,n=Nmc,gmin,gmax){
N <- matrix(NA,n,NT)
Nprev <- IC
for(t in 1:NT){
mu = Nprev + beta*tempcast[t,] #or [,t] depending on dim
N[,t] <- pmax(pmin(rnorm(n,mu,Q),gmax),gmin) #ensuring we are btw min and max we set
Nprev <- N[,t]
}
return(N)
}
#finding mean temp from NOAA ensembles
#WAIT! do unit conversions first because it's in Kelvin!
#make function to convert from kelvin to celsius (like daymet data we used to calibrate the model)
k.to.c<-function(k){
return(k-273.15)
}
#noaa temp data in celsius
#df1.c<-apply(df1,2,k.to.c)
df1.c <- lapply(df1,k.to.c)
###now we need to group them by site
# df1.BART<-df1.c[1:31,]
# df1.CLBJ<-df1.c[32:62,]
# df1.DELA<-df1.c[63:93,]
# df1.GRSM<-df1.c[94:124,]
# df1.HARV<-df1.c[125:155,]
# df1.SCBI<-df1.c[156:186,]
# df1.STEI<-df1.c[187:217,]
# df1.UKFS<-df1.c[218:248,]
#findmaxtemp<-function(x){
# return(max(x))
#}
#BART.temp.test<-tapply(df1.BART,day,max)
findmaxtemp<-function(x){
try=as.vector(x)
return(tapply(try, rep(1:16, each=24), max))
}
#MUST DO FOR ALL SITES
#temp.max <- matrix(findmaxtemp(df1.BART[1,-1]),ncol=1) #drops the 1st observation (analysis)
#temp.max <- apply(df1.c$BART[,-1],1,findmaxtemp) #days vs ensemble members
#temp.max.mean<-matrix(apply(temp.max,1,mean),ncol=1)
#FINDS MAX TEMP ENSEMBLE MEAN FOR EACH SITE:
#temp.max.mean<-list()
#for (s in siteID){
# temp.max<-apply(df1.c[[s]][,-1],1,findmaxtemp)
# temp.max.mean[[s]]<-matrix(apply(temp.max,1,mean),ncol=1)
#}
temp.max<-list()
temp.max.mean<-list()
for (s in siteID){
temp.max[[s]]<-matrix(apply(df1.c[[s]][,-1],1,findmaxtemp),nrow=NT)
temp.max.mean[[s]]<-matrix(apply(temp.max[[s]],1,mean),ncol=1)
}
## parameters
## initial conditions
#IC <-data$mu_ic ##we don't have this? START @ END OF GCC TIME SERIES AND ITS UNCERTAINTY(sd) FOR EACH SITE
#phiend<-phenoforecast(IC,temp.max,beta,q,Nmc,gmin,gmax)
#next steps: compute confidence intervals, add in uncertainties 1 by one, do for 35 not 16, then set up for all sites,THEN assess where we're at
time=1:NT
#------THE FORECAST LOOP----------
site.pheno<-list()
#forecast loop
for (s in siteID){
load(paste0("MCMC/",s,".Rdata"))
params<-as.matrix(j.pheno.out)
param.mean <- apply(params,2,mean)
beta<-param.mean["betaTemp"]
q<-1/sqrt(param.mean["tau_add"])
#uncertainties for each forecast
prow<-sample.int(nrow(params),Nmc,replace=TRUE)
Qmc<-1/sqrt(params[prow,"tau_add"])
drow<-sample.int(ncol(temp.max[[s]]),Nmc,replace=TRUE)
#forecast step
site.pheno[[s]]<-phenoforecast(IC=IC.ens[[s]],
tempcast=temp.max[[s]][,drow],
beta=params[prow,"betaTemp"],
Q=Qmc,
n=Nmc,
gmin=min(site.gcc[[s]]$gcc_90,na.rm=T),
gmax=max(site.gcc[[s]]$gcc_90,na.rm=T))
}
#tempcast list:
tempcast.l<-list()
tempcast.l[[s]]<-list()
for (s in siteID){
tempcast.l[[s]][[1]]<-temp.max[[s]][,drow]
}
##end forecast loop
#next steps: plotting each site with confidence intervals
##EVERYTHING BELOW THIS LINE IS OUR BART FORECAST PRACTICE
##########################################################
#---------------trying the deterministic---------
if(FALSE){
PhF.BART<-phenoforecast(IC=IC,
tempcast=temp.max.mean$BART,
beta=param.mean["betaTemp"],
Q=0,
n=Nmc,
gmin=gmin,
gmax=gmax)
plot(0,0, xlim=c(0,NT),ylim=range(PhF.BART))
for (p in 1:Nmc){
points(PhF.BART[p,],type="l",col=p)
}
#this will make confidence intervals
time.f<-1:NT
ci.PHF.BART <- apply(as.matrix(PhF.BART),2,quantile,c(0.025,0.5,0.975))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART))
ecoforecastR::ciEnvelope(time.f,ci.PHF.BART[1,],ci.PHF.BART[3,],col=col.alpha("lightBlue",0.6))
#-----------------
#initial condition ensemble created from last gcc observation point & sd
IC.ens<-rnorm(Nmc,tail(BART$gcc_90,1),tail(BART$gcc_sd,1))
PhF.BART.IC<-phenoforecast(IC=IC.ens,
tempcast=temp.max.mean$BART,
beta=param.mean["betaTemp"],
Q=0,
n=Nmc,
gmin=gmin,
gmax=gmax)
time.f<-1:NT
ci.PHF.BART.IC <- apply(as.matrix(PhF.BART.IC),2,quantile,c(0.025,0.5,0.975))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART.IC))
ecoforecastR::ciEnvelope(time.f,ci.PHF.BART.IC[1,],ci.PHF.BART.IC[3,],col=col.alpha("lightBlue",0.6))
#-----------------
#parameter uncertainty for beta
prow <- sample.int(nrow(params),Nmc,replace=TRUE)
PhF.BART.IP<-phenoforecast(IC=IC.ens,
tempcast=temp.max.mean$BART,
beta=params[prow,"betaTemp"],
Q=0,
n=Nmc,
gmin=gmin,
gmax=gmax)
ci.PhF.BART.IP <- apply(as.matrix(PhF.BART.IP),2,quantile,c(0.025,0.5,0.975))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART.IP))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IP[1,],ci.PhF.BART.IP[3,],col=col.alpha("lightBlue",0.6))
#---------------driver uncertainty
drow<-sample.int(ncol(temp.max$BART),Nmc,replace=TRUE)
PhF.BART.IPT<-phenoforecast(IC=IC.ens,
tempcast=temp.max$BART[,drow], #this is not working
beta=params[prow,"betaTemp"],
Q=0,
n=Nmc,
gmin=gmin,
gmax=gmax)
ci.PhF.BART.IPT <- apply(as.matrix(PhF.BART.IPT),2,quantile,c(0.025,0.5,0.975))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART.IPT))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IPT[1,],ci.PhF.BART.IPT[3,],col=col.alpha("lightBlue",0.6))
#----------------process error
Qmc <- 1/sqrt(params[prow,"tau_add"])
PhF.BART.IPTP<-phenoforecast(IC=IC.ens,
tempcast=temp.max$BART[,drow], #this is not working
beta=params[prow,"betaTemp"],
Q=Qmc,
n=Nmc,
gmin=gmin,
gmax=gmax)
ci.PhF.BART.IPTP <- apply(as.matrix(PhF.BART.IPTP),2,quantile,c(0.025,0.5,0.975))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART.IPTP))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IPTP[1,],ci.PhF.BART.IPTP[3,],col=col.alpha("lightBlue",0.6))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART.IPTP))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IPTP[1,],ci.PhF.BART.IPTP[3,],col=col.alpha("lightBlue",0.6))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IP[1,],ci.PhF.BART.IP[3,],col=col.alpha("green",0.6))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IPT[1,],ci.PhF.BART.IPT[3,],col=col.alpha("thistle3",0.6))
ecoforecastR::ciEnvelope(time.f,ci.PHF.BART.IC[1,],ci.PHF.BART.IC[3,],col=col.alpha("red2",0.6))
#ecoforecastR::ciEnvelope(time.f,ci.PhF.BART[1,],ci.PhF.BART[3,],col=col.alpha("thistle3"))
}
|
/Milestone6_phenoforecast.R
|
permissive
|
EcoForecast/PhenoPhriends
|
R
| false | false | 8,268 |
r
|
library(ecoforecastR)
#making a forecast and having fun
##HERE IS WHAT GOES IN:
#IC=initial conditions, from j.pheno.out$params, see below
#tempcast=max temp forecast from NOAA ensembles
#beta=slope of temp data (assessed from daymet data?)
#q=process error tau_add
#Nmc=# of mcmc runs
#gmin=default value min gcc
#gmax=default value max gcc
##STILL NEED TO SAVE DATA VALUES EACH TIME TO GET GMIN AND GMAX FOR EACH SITE
#the timestep is 16 days:
NT=16
#the number of Monte Carlo ensemble members is 1000:
Nmc=1000
# #we set gcc min and max values, they are different for each run/site and they are here:
# load(file=paste0(as.character(siteID[i]),".data.Rdata"))
# gmin=data$gmin
# gmax=data$gmax
#load gcc data into list:
site.gcc<-list()
site.gcc$BART<-BART
site.gcc$CLBJ<-CLBJ
site.gcc$DELA<-DELA
site.gcc$GRSM<-GRSM
site.gcc$HARV<-HARV
site.gcc$SCBI<-SCBI
site.gcc$STEI<-STEI
site.gcc$UKFS<-UKFS
#getting all IC's for each site:
IC.ens<-list()
for (s in siteID){
IC.ens[[s]]<-rnorm(Nmc,tail(site.gcc[[s]]$gcc_90,1),tail(site.gcc[[s]]$gcc_sd,1))
}
#FORECAST FUNCTION
phenoforecast <- function(IC,tempcast,beta,Q,n=Nmc,gmin,gmax){
N <- matrix(NA,n,NT)
Nprev <- IC
for(t in 1:NT){
mu = Nprev + beta*tempcast[t,] #or [,t] depending on dim
N[,t] <- pmax(pmin(rnorm(n,mu,Q),gmax),gmin) #ensuring we are btw min and max we set
Nprev <- N[,t]
}
return(N)
}
#finding mean temp from NOAA ensembles
#WAIT! do unit conversions first because it's in Kelvin!
#make function to convert from kelvin to celsius (like daymet data we used to calibrate the model)
k.to.c<-function(k){
return(k-273.15)
}
#noaa temp data in celsius
#df1.c<-apply(df1,2,k.to.c)
df1.c <- lapply(df1,k.to.c)
###now we need to group them by site
# df1.BART<-df1.c[1:31,]
# df1.CLBJ<-df1.c[32:62,]
# df1.DELA<-df1.c[63:93,]
# df1.GRSM<-df1.c[94:124,]
# df1.HARV<-df1.c[125:155,]
# df1.SCBI<-df1.c[156:186,]
# df1.STEI<-df1.c[187:217,]
# df1.UKFS<-df1.c[218:248,]
#findmaxtemp<-function(x){
# return(max(x))
#}
#BART.temp.test<-tapply(df1.BART,day,max)
findmaxtemp<-function(x){
try=as.vector(x)
return(tapply(try, rep(1:16, each=24), max))
}
#MUST DO FOR ALL SITES
#temp.max <- matrix(findmaxtemp(df1.BART[1,-1]),ncol=1) #drops the 1st observation (analysis)
#temp.max <- apply(df1.c$BART[,-1],1,findmaxtemp) #days vs ensemble members
#temp.max.mean<-matrix(apply(temp.max,1,mean),ncol=1)
#FINDS MAX TEMP ENSEMBLE MEAN FOR EACH SITE:
#temp.max.mean<-list()
#for (s in siteID){
# temp.max<-apply(df1.c[[s]][,-1],1,findmaxtemp)
# temp.max.mean[[s]]<-matrix(apply(temp.max,1,mean),ncol=1)
#}
temp.max<-list()
temp.max.mean<-list()
for (s in siteID){
temp.max[[s]]<-matrix(apply(df1.c[[s]][,-1],1,findmaxtemp),nrow=NT)
temp.max.mean[[s]]<-matrix(apply(temp.max[[s]],1,mean),ncol=1)
}
## parameters
## initial conditions
#IC <-data$mu_ic ##we don't have this? START @ END OF GCC TIME SERIES AND ITS UNCERTAINTY(sd) FOR EACH SITE
#phiend<-phenoforecast(IC,temp.max,beta,q,Nmc,gmin,gmax)
#next steps: compute confidence intervals, add in uncertainties 1 by one, do for 35 not 16, then set up for all sites,THEN assess where we're at
time=1:NT
#------THE FORECAST LOOP----------
site.pheno<-list()
#forecast loop
for (s in siteID){
load(paste0("MCMC/",s,".Rdata"))
params<-as.matrix(j.pheno.out)
param.mean <- apply(params,2,mean)
beta<-param.mean["betaTemp"]
q<-1/sqrt(param.mean["tau_add"])
#uncertainties for each forecast
prow<-sample.int(nrow(params),Nmc,replace=TRUE)
Qmc<-1/sqrt(params[prow,"tau_add"])
drow<-sample.int(ncol(temp.max[[s]]),Nmc,replace=TRUE)
#forecast step
site.pheno[[s]]<-phenoforecast(IC=IC.ens[[s]],
tempcast=temp.max[[s]][,drow],
beta=params[prow,"betaTemp"],
Q=Qmc,
n=Nmc,
gmin=min(site.gcc[[s]]$gcc_90,na.rm=T),
gmax=max(site.gcc[[s]]$gcc_90,na.rm=T))
}
#tempcast list:
tempcast.l<-list()
tempcast.l[[s]]<-list()
for (s in siteID){
tempcast.l[[s]][[1]]<-temp.max[[s]][,drow]
}
##end forecast loop
#next steps: plotting each site with confidence intervals
##EVERYTHING BELOW THIS LINE IS OUR BART FORECAST PRACTICE
##########################################################
#---------------trying the deterministic---------
if(FALSE){
PhF.BART<-phenoforecast(IC=IC,
tempcast=temp.max.mean$BART,
beta=param.mean["betaTemp"],
Q=0,
n=Nmc,
gmin=gmin,
gmax=gmax)
plot(0,0, xlim=c(0,NT),ylim=range(PhF.BART))
for (p in 1:Nmc){
points(PhF.BART[p,],type="l",col=p)
}
#this will make confidence intervals
time.f<-1:NT
ci.PHF.BART <- apply(as.matrix(PhF.BART),2,quantile,c(0.025,0.5,0.975))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART))
ecoforecastR::ciEnvelope(time.f,ci.PHF.BART[1,],ci.PHF.BART[3,],col=col.alpha("lightBlue",0.6))
#-----------------
#initial condition ensemble created from last gcc observation point & sd
IC.ens<-rnorm(Nmc,tail(BART$gcc_90,1),tail(BART$gcc_sd,1))
PhF.BART.IC<-phenoforecast(IC=IC.ens,
tempcast=temp.max.mean$BART,
beta=param.mean["betaTemp"],
Q=0,
n=Nmc,
gmin=gmin,
gmax=gmax)
time.f<-1:NT
ci.PHF.BART.IC <- apply(as.matrix(PhF.BART.IC),2,quantile,c(0.025,0.5,0.975))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART.IC))
ecoforecastR::ciEnvelope(time.f,ci.PHF.BART.IC[1,],ci.PHF.BART.IC[3,],col=col.alpha("lightBlue",0.6))
#-----------------
#parameter uncertainty for beta
prow <- sample.int(nrow(params),Nmc,replace=TRUE)
PhF.BART.IP<-phenoforecast(IC=IC.ens,
tempcast=temp.max.mean$BART,
beta=params[prow,"betaTemp"],
Q=0,
n=Nmc,
gmin=gmin,
gmax=gmax)
ci.PhF.BART.IP <- apply(as.matrix(PhF.BART.IP),2,quantile,c(0.025,0.5,0.975))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART.IP))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IP[1,],ci.PhF.BART.IP[3,],col=col.alpha("lightBlue",0.6))
#---------------driver uncertainty
drow<-sample.int(ncol(temp.max$BART),Nmc,replace=TRUE)
PhF.BART.IPT<-phenoforecast(IC=IC.ens,
tempcast=temp.max$BART[,drow], #this is not working
beta=params[prow,"betaTemp"],
Q=0,
n=Nmc,
gmin=gmin,
gmax=gmax)
ci.PhF.BART.IPT <- apply(as.matrix(PhF.BART.IPT),2,quantile,c(0.025,0.5,0.975))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART.IPT))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IPT[1,],ci.PhF.BART.IPT[3,],col=col.alpha("lightBlue",0.6))
#----------------process error
Qmc <- 1/sqrt(params[prow,"tau_add"])
PhF.BART.IPTP<-phenoforecast(IC=IC.ens,
tempcast=temp.max$BART[,drow], #this is not working
beta=params[prow,"betaTemp"],
Q=Qmc,
n=Nmc,
gmin=gmin,
gmax=gmax)
ci.PhF.BART.IPTP <- apply(as.matrix(PhF.BART.IPTP),2,quantile,c(0.025,0.5,0.975))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART.IPTP))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IPTP[1,],ci.PhF.BART.IPTP[3,],col=col.alpha("lightBlue",0.6))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART.IPTP))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IPTP[1,],ci.PhF.BART.IPTP[3,],col=col.alpha("lightBlue",0.6))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IP[1,],ci.PhF.BART.IP[3,],col=col.alpha("green",0.6))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IPT[1,],ci.PhF.BART.IPT[3,],col=col.alpha("thistle3",0.6))
ecoforecastR::ciEnvelope(time.f,ci.PHF.BART.IC[1,],ci.PHF.BART.IC[3,],col=col.alpha("red2",0.6))
#ecoforecastR::ciEnvelope(time.f,ci.PhF.BART[1,],ci.PhF.BART[3,],col=col.alpha("thistle3"))
}
|
/R/hello.R
|
no_license
|
HenrikBengtsson/rmini
|
R
| false | false | 83 |
r
| ||
# Emily Wapman
# 08/13/2020
# Daily Assignment 08
# load required packages before reading the data
library(tidyverse)   # read_csv(), dplyr verbs, ggplot2
library(zoo)         # rollmean()
library(ggthemes)
url = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv'
covid = read_csv(url)
state.of.interest = "Montana"
gg = covid %>%
filter(state == state.of.interest) %>%
group_by(date) %>%
  summarise(cases = sum(cases, na.rm = TRUE)) %>%
mutate(newCases = cases - lag(cases),
roll7 = rollmean(newCases, 7, fill = NA, align = "right")) %>%
ggplot(aes(x = date)) +
geom_col(aes(y = newCases), col = NA, fill = "#F5B8B5") +
geom_line(aes(y = roll7), col = "darkred", size = 1) +
ggthemes::theme_wsj() +
labs(title = paste("New Reported Cases by Day in", state.of.interest)) +
theme(plot.background = element_rect(fill = "white"),
panel.background = element_rect(fill = "white"),
plot.title = element_text(size = 14, face = 'bold')) +
theme(aspect.ratio = 0.5)
ggsave(gg, file = "~/github/geog176A-daily-exercises/img/exercise08.png")
|
/R/day-08.R
|
no_license
|
ewapman/geog176A-daily-exercises
|
R
| false | false | 977 |
r
|
# Emily Wapman
# 08/13/2020
# Daily Assignment 08
# load required packages before reading the data
library(tidyverse)   # read_csv(), dplyr verbs, ggplot2
library(zoo)         # rollmean()
library(ggthemes)
url = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv'
covid = read_csv(url)
state.of.interest = "Montana"
gg = covid %>%
filter(state == state.of.interest) %>%
group_by(date) %>%
  summarise(cases = sum(cases, na.rm = TRUE)) %>%
mutate(newCases = cases - lag(cases),
roll7 = rollmean(newCases, 7, fill = NA, align = "right")) %>%
ggplot(aes(x = date)) +
geom_col(aes(y = newCases), col = NA, fill = "#F5B8B5") +
geom_line(aes(y = roll7), col = "darkred", size = 1) +
ggthemes::theme_wsj() +
labs(title = paste("New Reported Cases by Day in", state.of.interest)) +
theme(plot.background = element_rect(fill = "white"),
panel.background = element_rect(fill = "white"),
plot.title = element_text(size = 14, face = 'bold')) +
theme(aspect.ratio = 0.5)
ggsave(gg, file = "~/github/geog176A-daily-exercises/img/exercise08.png")
|
library(reshape2)   # for melt() and dcast()
#Load the labels
labels <- read.table("UCI HAR Dataset/activity_labels.txt")
labels[,2] <- as.character(labels[,2])
#get the features
features <- read.table("UCI HAR Dataset/features.txt")
features[,2] <- as.character(features[,2])
#Get the data that is necessary
featuresNeeded <- grep(".*mean.*|.*std.*", features[,2])
featuresNeeded.names <- features[featuresNeeded,2]
featuresNeeded.names = gsub('-mean', 'Mean', featuresNeeded.names)
featuresNeeded.names = gsub('-std', 'Std', featuresNeeded.names)
featuresNeeded.names <- gsub('[-()]', '', featuresNeeded.names)
# Load the datasets
train <- read.table("UCI HAR Dataset/train/X_train.txt")[featuresNeeded]
trainActivities <- read.table("UCI HAR Dataset/train/Y_train.txt")
trainSubjects <- read.table("UCI HAR Dataset/train/subject_train.txt")
train <- cbind(trainSubjects, trainActivities, train)
test <- read.table("UCI HAR Dataset/test/X_test.txt")[featuresNeeded]
testActivities <- read.table("UCI HAR Dataset/test/Y_test.txt")
testSubjects <- read.table("UCI HAR Dataset/test/subject_test.txt")
test <- cbind(testSubjects, testActivities, test)
# merge datasets and add labels
allData <- rbind(train, test)
colnames(allData) <- c("subject", "activity", featuresNeeded.names)
# turn activities & subjects into factors
allData$activity <- factor(allData$activity, levels = labels[,1], labels = labels[,2])
allData$subject <- as.factor(allData$subject)
allData.melted <- melt(allData, id = c("subject", "activity"))
allData.mean <- dcast(allData.melted, subject + activity ~ variable, mean)
write.table(allData.mean, "tidy.txt", row.names = FALSE, quote = FALSE)
|
/run_Analysis.R
|
no_license
|
jkanta/Getting_and_Clearing_Data
|
R
| false | false | 1,680 |
r
|
library(reshape2)   # for melt() and dcast()
#Load the labels
labels <- read.table("UCI HAR Dataset/activity_labels.txt")
labels[,2] <- as.character(labels[,2])
#get the features
features <- read.table("UCI HAR Dataset/features.txt")
features[,2] <- as.character(features[,2])
#Get the data that is necessary
featuresNeeded <- grep(".*mean.*|.*std.*", features[,2])
featuresNeeded.names <- features[featuresNeeded,2]
featuresNeeded.names = gsub('-mean', 'Mean', featuresNeeded.names)
featuresNeeded.names = gsub('-std', 'Std', featuresNeeded.names)
featuresNeeded.names <- gsub('[-()]', '', featuresNeeded.names)
# Load the datasets
train <- read.table("UCI HAR Dataset/train/X_train.txt")[featuresNeeded]
trainActivities <- read.table("UCI HAR Dataset/train/Y_train.txt")
trainSubjects <- read.table("UCI HAR Dataset/train/subject_train.txt")
train <- cbind(trainSubjects, trainActivities, train)
test <- read.table("UCI HAR Dataset/test/X_test.txt")[featuresNeeded]
testActivities <- read.table("UCI HAR Dataset/test/Y_test.txt")
testSubjects <- read.table("UCI HAR Dataset/test/subject_test.txt")
test <- cbind(testSubjects, testActivities, test)
# merge datasets and add labels
allData <- rbind(train, test)
colnames(allData) <- c("subject", "activity", featuresNeeded.names)
# turn activities & subjects into factors
allData$activity <- factor(allData$activity, levels = labels[,1], labels = labels[,2])
allData$subject <- as.factor(allData$subject)
allData.melted <- melt(allData, id = c("subject", "activity"))
allData.mean <- dcast(allData.melted, subject + activity ~ variable, mean)
write.table(allData.mean, "tidy.txt", row.names = FALSE, quote = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CAStab.R
\docType{methods}
\name{names,CASTable-method}
\alias{names,CASTable-method}
\title{Names of a CAS Table}
\usage{
\S4method{names}{CASTable}(x)
}
\arguments{
\item{x}{A CASTable object.}
}
\value{
vector
}
\description{
Returns the list of column names for the in-memory
table that is referenced by the \code{\link{CASTable}} object.
}
\examples{
\dontrun{
names(ct1)
}
}
% Copyright SAS Institute
|
/man/names-CASTable-method.Rd
|
permissive
|
sassoftware/R-swat
|
R
| false | true | 485 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CAStab.R
\docType{methods}
\name{names,CASTable-method}
\alias{names,CASTable-method}
\title{Names of a CAS Table}
\usage{
\S4method{names}{CASTable}(x)
}
\arguments{
\item{x}{A CASTable object.}
}
\value{
vector
}
\description{
Returns the list of column names for the in-memory
table that is referenced by the \code{\link{CASTable}} object.
}
\examples{
\dontrun{
names(ct1)
}
}
% Copyright SAS Institute
|
install.packages("tidyverse")
library(tidyverse)
getwd()
R.version.string
update.packages(ask = FALSE, checkBuilt = TRUE)
# yes          # (console prompt answer typed into the script; not runnable R)
# which git()  # (shell command typed into the script; not runnable R)
library(tidyverse)
some_variable <- c(3,2,1)
some_variable
vector_test_integer <- c(1L, 7L, 8)
vector_test_integer_2 <- c(1L, 7L, 8L)
typeof(vector_test_integer)
typeof(vector_test_integer_2)
latter_three <- c(-1, 0, 1)
length(latter_three)
latter_three / 0
atomic_vector_conversion <- sample(1000, 100, replace = TRUE)
?sample()
atomic_vector_conversion
length(atomic_vector_conversion)
y <- atomic_vector_conversion > 500
y
as.double(y)
#naming
named_vector <- c(one = 1, two = 2, three = 3)
named_vector
later_named_vector <- c(1:3)
later_named_vector
set_names(later_named_vector, c("one", "two", "three"))
?set_names
later_named_vector
new_list <- list(numbers = 1:50, characters = c("Hello", "world", "!"), logical_vec = c(TRUE, FALSE), another_list = list(1:5, 6:10))
head(new_list, n = 3)
?head
str(new_list) # structure focused
#factors
mdbs <- factor(levels = c("AfD", "Buendnis90/Die Gruenen", "CDU", "CSU", "Die Linke", "SPD"))
levels(mdbs)
mdbs[1]
#date and time
library(lubridate)
date <- as.Date("1970-01-02")
date
typeof(date)
unclass(date)
typeof(date)
?unclass
class(date)
datetime <- ymd_hms("1970-01-01 01:00:00")
datetime
#tibble time
library(tidyverse)
new_tibble <- tibble(
a = 1:5,
b = c("Hi", ",", "it's", "me", "!"),
`an invalid name` = TRUE
)
new_tibble
#access directly
new_tibble$a
new_tibble$b
typeof(new_tibble[["a"]])
install.packages('usethis')
library(usethis)
usethis::create_github_token()
usethis::c
|
/session_1_testing.R
|
no_license
|
rebewp/courseR
|
R
| false | false | 1,587 |
r
|
install.packages("tidyverse")
library(tidyverse)
getwd()
R.version.string
update.packages(ask = FALSE, checkBuilt = TRUE)
# yes          # (console prompt answer typed into the script; not runnable R)
# which git()  # (shell command typed into the script; not runnable R)
library(tidyverse)
some_variable <- c(3,2,1)
some_variable
vector_test_integer <- c(1L, 7L, 8)
vector_test_integer_2 <- c(1L, 7L, 8L)
typeof(vector_test_integer)
typeof(vector_test_integer_2)
latter_three <- c(-1, 0, 1)
length(latter_three)
latter_three / 0
atomic_vector_conversion <- sample(1000, 100, replace = TRUE)
?sample()
atomic_vector_conversion
length(atomic_vector_conversion)
y <- atomic_vector_conversion > 500
y
as.double(y)
#naming
named_vector <- c(one = 1, two = 2, three = 3)
named_vector
later_named_vector <- c(1:3)
later_named_vector
set_names(later_named_vector, c("one", "two", "three"))
?set_names
later_named_vector
new_list <- list(numbers = 1:50, characters = c("Hello", "world", "!"), logical_vec = c(TRUE, FALSE), another_list = list(1:5, 6:10))
head(new_list, n = 3)
?head
str(new_list) # structure focused
#factors
mdbs <- factor(levels = c("AfD", "Buendnis90/Die Gruenen", "CDU", "CSU", "Die Linke", "SPD"))
levels(mdbs)
mdbs[1]
#date and time
library(lubridate)
date <- as.Date("1970-01-02")
date
typeof(date)
unclass(date)
typeof(date)
?unclass
class(date)
datetime <- ymd_hms("1970-01-01 01:00:00")
datetime
#tibble time
library(tidyverse)
new_tibble <- tibble(
a = 1:5,
b = c("Hi", ",", "it's", "me", "!"),
`an invalid name` = TRUE
)
new_tibble
#access directly
new_tibble$a
new_tibble$b
typeof(new_tibble[["a"]])
install.packages('usethis')
library(usethis)
usethis::create_github_token()
usethis::c
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/m2eQTL.R
\name{load_meth}
\alias{load_meth}
\title{Takes the path to a DNA methylation matrix and returns a MethylationData object.}
\usage{
load_meth(meth_data_loc)
}
\arguments{
\item{meth_data_loc}{path to a matrix of DNA methylation beta values}
}
\value{
a MethylationData object
}
\description{
Takes the path to a DNA methylation matrix and returns a MethylationData object.
}
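\examples{
\dontrun{
# Hypothetical usage sketch; the file name below is illustrative only.
meth <- load_meth("beta_values.txt")
}
}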
|
/man/load_meth.Rd
|
no_license
|
jeffreyat/M2EFM
|
R
| false | true | 461 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/m2eQTL.R
\name{load_meth}
\alias{load_meth}
\title{Takes the path to a DNA methylation matrix and returns a MethylationData object.}
\usage{
load_meth(meth_data_loc)
}
\arguments{
\item{meth_data_loc}{path to a matrix of DNA methylation beta values}
}
\value{
a MethylationData object
}
\description{
Takes the path to a DNA methylation matrix and returns a MethylationData object.
}
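\examples{
\dontrun{
# Hypothetical usage sketch; the file name below is illustrative only.
meth <- load_meth("beta_values.txt")
}
}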
|
# function1 <-
|
/R/f1.R
|
no_license
|
Nicolas-Schmidt/mediaR
|
R
| false | false | 15 |
r
|
# function1 <-
|
################################################
#
# THE GENETIC MAP COMPARATOR
#
###############################################
# ---- PART1 : Check that the currently-installed version of R is at least the minimum required version.
R_min_version = "3.1"
R_version = paste0(R.Version()$major, ".", R.Version()$minor)
if(compareVersion(R_version, R_min_version) < 0){
stop("You do not have the latest required version of R installed.\n",
"Launch should fail.\n",
"Go to http://cran.r-project.org/ and update your version of R.")
}
# ----- PART2: Install basic required packages if not available/installed.
install_missing_packages = function(pkg, version = NULL, verbose = TRUE){
availpacks = .packages(all.available = TRUE)
#source("http://bioconductor.org/biocLite.R")
missingPackage = FALSE
if(!any(pkg %in% availpacks)){
if(verbose){
message("The following package is missing.\n",
pkg, "\n",
"Installation will be attempted...")
}
missingPackage <- TRUE
}
if(!is.null(version) & !missingPackage){
# version provided and package not missing, so compare.
if( compareVersion(a = as.character(packageVersion(pkg)),
b = version) < 0 ){
if(verbose){
message("Current version of package\n",
pkg, "\t",
packageVersion(pkg), "\n",
"is less than required.
Update will be attempted.")
}
missingPackage <- TRUE
}
}
if(missingPackage){
#biocLite(i, suppressUpdates = TRUE)
print(pkg)
print(paste("---- installing a more recent version of",pkg,sep=""))
install.packages(pkg, repos = "http://cran.r-project.org") }
}
# PART3: --- Define list of package names and required versions.
deppkgs = c(shiny="0.14.2", plotly = "4.5.6", ggplot2 = "2.2.0", DT="0.2", shinythemes="1.1", shinyAce="0.2.1", RColorBrewer="1.1.2", qualV="0.3.2", colourpicker="0.2")
# Loop on package check, install, update
pkg1 = mapply(install_missing_packages,
pkg = names(deppkgs),
version = deppkgs,
MoreArgs = list(verbose = TRUE),
SIMPLIFY = FALSE,
USE.NAMES = TRUE)
################################################################################
# Load packages that must be fully-loaded
################################################################################
for(i in names(deppkgs)){
library(i, character.only = TRUE)
message(i, " package version:\n", packageVersion(i))
}
################################################################################
# In this file, I add all functions / file / parameters that are NOT reactive and that are common to ui.R and server.R
# It is my global environment !
# == Check if libraries are available. install it if not.
#getPckg <- function(pckg) install.packages(pckg, repos = "http://cran.r-project.org")
#for(i in c("shiny","plotly","DT","RColorBrewer","shinyAce","shinythemes","qualV")){
# pckg = try(require(i, character.only = TRUE))
# if(!pckg) {
# getPckg(i)
#}}
# == load Libraries
#library(shiny)
#library(plotly)
#library(DT)
#library(RColorBrewer)
#library(shinyAce)
#library(shinythemes)
#library(qualV)
# == Colors for the App :
my_colors=brewer.pal( 12 , "Set3")[-2]
# == Get the legends
legend1=read.table("LEGEND/legend_sheet1.txt",sep="@")[,2]
legend2=read.table("LEGEND/legend_sheet2.txt",sep="@")[,2]
legend3=read.table("LEGEND/legend_sheet3.txt",sep="@")[,2]
legend4=read.table("LEGEND/legend_sheet4.txt",sep="@")[,2]
legend5=read.table("LEGEND/legend_sheet5.txt",sep="@")[,2]
legend6=read.table("LEGEND/legend_sheet6.txt",sep="@")[,2]
# == Functions
# Donut plot
source("RESSOURCES/donut_function.R")
# == Set the size of the logo of partners
grand=1.5
|
/global.R
|
no_license
|
dengkuistat/GenMap-Comparator
|
R
| false | false | 3,847 |
r
|
################################################
#
# THE GENETIC MAP COMPARATOR
#
###############################################
# ---- PART1 : Check that the currently-installed version of R is at least the minimum required version.
R_min_version = "3.1"
R_version = paste0(R.Version()$major, ".", R.Version()$minor)
if(compareVersion(R_version, R_min_version) < 0){
stop("You do not have the latest required version of R installed.\n",
"Launch should fail.\n",
"Go to http://cran.r-project.org/ and update your version of R.")
}
# ----- PART2: Install basic required packages if not available/installed.
install_missing_packages = function(pkg, version = NULL, verbose = TRUE){
availpacks = .packages(all.available = TRUE)
#source("http://bioconductor.org/biocLite.R")
missingPackage = FALSE
if(!any(pkg %in% availpacks)){
if(verbose){
message("The following package is missing.\n",
pkg, "\n",
"Installation will be attempted...")
}
missingPackage <- TRUE
}
if(!is.null(version) & !missingPackage){
# version provided and package not missing, so compare.
if( compareVersion(a = as.character(packageVersion(pkg)),
b = version) < 0 ){
if(verbose){
message("Current version of package\n",
pkg, "\t",
packageVersion(pkg), "\n",
"is less than required.
Update will be attempted.")
}
missingPackage <- TRUE
}
}
if(missingPackage){
#biocLite(i, suppressUpdates = TRUE)
print(pkg)
print(paste("---- installing a more recent version of",pkg,sep=""))
install.packages(pkg, repos = "http://cran.r-project.org") }
}
# PART3: --- Define list of package names and required versions.
deppkgs = c(shiny="0.14.2", plotly = "4.5.6", ggplot2 = "2.2.0", DT="0.2", shinythemes="1.1", shinyAce="0.2.1", RColorBrewer="1.1.2", qualV="0.3.2", colourpicker="0.2")
# Loop on package check, install, update
pkg1 = mapply(install_missing_packages,
pkg = names(deppkgs),
version = deppkgs,
MoreArgs = list(verbose = TRUE),
SIMPLIFY = FALSE,
USE.NAMES = TRUE)
################################################################################
# Load packages that must be fully-loaded
################################################################################
for(i in names(deppkgs)){
library(i, character.only = TRUE)
message(i, " package version:\n", packageVersion(i))
}
################################################################################
# In this file, I add all functions / file / parameters that are NOT reactive and that are common to ui.R and server.R
# It is my global environment !
# == Check if libraries are available. install it if not.
#getPckg <- function(pckg) install.packages(pckg, repos = "http://cran.r-project.org")
#for(i in c("shiny","plotly","DT","RColorBrewer","shinyAce","shinythemes","qualV")){
# pckg = try(require(i, character.only = TRUE))
# if(!pckg) {
# getPckg(i)
#}}
# == load Libraries
#library(shiny)
#library(plotly)
#library(DT)
#library(RColorBrewer)
#library(shinyAce)
#library(shinythemes)
#library(qualV)
# == Colors for the App :
my_colors=brewer.pal( 12 , "Set3")[-2]
# == Get the legends
legend1=read.table("LEGEND/legend_sheet1.txt",sep="@")[,2]
legend2=read.table("LEGEND/legend_sheet2.txt",sep="@")[,2]
legend3=read.table("LEGEND/legend_sheet3.txt",sep="@")[,2]
legend4=read.table("LEGEND/legend_sheet4.txt",sep="@")[,2]
legend5=read.table("LEGEND/legend_sheet5.txt",sep="@")[,2]
legend6=read.table("LEGEND/legend_sheet6.txt",sep="@")[,2]
# == Functions
# Donut plot
source("RESSOURCES/donut_function.R")
# == Set the size of the logo of partners
grand=1.5
|
dataFile <- "./data/household_power_consumption.txt"
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
#str(subSetData)
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subSetData$Global_active_power)
globalReactivePower <- as.numeric(subSetData$Global_reactive_power)
voltage <- as.numeric(subSetData$Voltage)
subMetering1 <- as.numeric(subSetData$Sub_metering_1)
subMetering2 <- as.numeric(subSetData$Sub_metering_2)
subMetering3 <- as.numeric(subSetData$Sub_metering_3)
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power", cex=0.2)
plot(datetime, voltage, type="l", xlab="datetime", ylab="Voltage")
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=, lwd=2.5, col=c("black", "red", "blue"), bty="o")
plot(datetime, globalReactivePower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
/plot4.R
|
no_license
|
ShraddhaShrestha/Exploratory-Analysis
|
R
| false | false | 1,281 |
r
|
dataFile <- "./data/household_power_consumption.txt"
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
#str(subSetData)
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subSetData$Global_active_power)
globalReactivePower <- as.numeric(subSetData$Global_reactive_power)
voltage <- as.numeric(subSetData$Voltage)
subMetering1 <- as.numeric(subSetData$Sub_metering_1)
subMetering2 <- as.numeric(subSetData$Sub_metering_2)
subMetering3 <- as.numeric(subSetData$Sub_metering_3)
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power", cex=0.2)
plot(datetime, voltage, type="l", xlab="datetime", ylab="Voltage")
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=, lwd=2.5, col=c("black", "red", "blue"), bty="o")
plot(datetime, globalReactivePower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
test_that("dm_select_tbl() selects a part of a larger `dm` as a reduced `dm`?", {
def <-
dm_for_filter() %>%
dm_rm_fk(tf_5, m, tf_6) %>%
dm_rm_fk(tf_2, d, tf_1) %>%
dm_get_def()
dm_for_filter_smaller <- new_dm3(def[def$table %in% c("tf_1", "tf_6"), ])
expect_equivalent_dm(
dm_select_tbl(dm_for_filter(), -tf_2, -tf_3, -tf_4, -tf_5),
dm_for_filter_smaller
)
})
test_that("dm_select_tbl() can reorder the tables in a `dm`", {
reordered_dm_for_filter <-
dm_for_filter() %>%
dm_get_def() %>%
arrange(c(3:1, 6:4)) %>%
new_dm3()
expect_equivalent_dm(
dm_select_tbl(dm_for_filter(), tf_3:tf_1, tf_6:tf_4),
reordered_dm_for_filter
)
})
test_that("dm_select_tbl() remembers all FKs", {
skip_if_src("postgres")
skip_if_src("mssql")
reordered_dm_nycflights_small_cycle <- dm_add_fk(dm_nycflights_small(), flights, origin, airports) %>%
dm_get_def() %>%
filter(!(table %in% c("airlines", "planes"))) %>%
slice(2:1) %>%
new_dm3()
expect_equivalent_dm(
dm_add_fk(dm_nycflights_small(), flights, origin, airports) %>%
dm_select_tbl(airports, flights),
reordered_dm_nycflights_small_cycle
)
})
test_that("dm_rename_tbl() renames a `dm`", {
# FIXME: PR #313: do these test-tibbles here also need to be copied to `my_test_src()`?
dm_rename <-
as_dm(list(a = tibble(x = 1), b = tibble(y = 1))) %>%
dm_add_pk(b, y) %>%
dm_add_fk(a, x, b)
dm_rename_a <-
as_dm(list(c = tibble(x = 1), b = tibble(y = 1))) %>%
dm_add_pk(b, y) %>%
dm_add_fk(c, x, b)
dm_rename_b <-
as_dm(list(a = tibble(x = 1), e = tibble(y = 1))) %>%
dm_add_pk(e, y) %>%
dm_add_fk(a, x, e)
dm_rename_bd <-
as_dm(list(a = tibble(x = 1), d = tibble(y = 1))) %>%
dm_add_pk(d, y) %>%
dm_add_fk(a, x, d)
expect_equivalent_dm(
dm_rename_tbl(dm_rename, c = a),
dm_rename_a
)
expect_equivalent_dm(
dm_rename_tbl(dm_rename, e = b),
dm_rename_b
)
skip("dm argument")
expect_equivalent_dm(
dm_rename_tbl(dm_rename, d = b),
dm_rename_bd
)
})
test_that("errors for selecting and renaming tables work", {
expect_error(
dm_select_tbl(dm_for_filter(), t_new = c(tf_1, tf_2)),
class = "vctrs_error_names_must_be_unique"
)
expect_error(
dm_rename_tbl(dm_for_filter(), t_new = c(tf_1, tf_2)),
class = "vctrs_error_names_must_be_unique"
)
})
|
/tests/testthat/test-select-tbl.R
|
permissive
|
pat-s/dm
|
R
| false | false | 2,406 |
r
|
test_that("dm_select_tbl() selects a part of a larger `dm` as a reduced `dm`?", {
def <-
dm_for_filter() %>%
dm_rm_fk(tf_5, m, tf_6) %>%
dm_rm_fk(tf_2, d, tf_1) %>%
dm_get_def()
dm_for_filter_smaller <- new_dm3(def[def$table %in% c("tf_1", "tf_6"), ])
expect_equivalent_dm(
dm_select_tbl(dm_for_filter(), -tf_2, -tf_3, -tf_4, -tf_5),
dm_for_filter_smaller
)
})
test_that("dm_select_tbl() can reorder the tables in a `dm`", {
reordered_dm_for_filter <-
dm_for_filter() %>%
dm_get_def() %>%
arrange(c(3:1, 6:4)) %>%
new_dm3()
expect_equivalent_dm(
dm_select_tbl(dm_for_filter(), tf_3:tf_1, tf_6:tf_4),
reordered_dm_for_filter
)
})
test_that("dm_select_tbl() remembers all FKs", {
skip_if_src("postgres")
skip_if_src("mssql")
reordered_dm_nycflights_small_cycle <- dm_add_fk(dm_nycflights_small(), flights, origin, airports) %>%
dm_get_def() %>%
filter(!(table %in% c("airlines", "planes"))) %>%
slice(2:1) %>%
new_dm3()
expect_equivalent_dm(
dm_add_fk(dm_nycflights_small(), flights, origin, airports) %>%
dm_select_tbl(airports, flights),
reordered_dm_nycflights_small_cycle
)
})
test_that("dm_rename_tbl() renames a `dm`", {
# FIXME: PR #313: do these test-tibbles here also need to be copied to `my_test_src()`?
dm_rename <-
as_dm(list(a = tibble(x = 1), b = tibble(y = 1))) %>%
dm_add_pk(b, y) %>%
dm_add_fk(a, x, b)
dm_rename_a <-
as_dm(list(c = tibble(x = 1), b = tibble(y = 1))) %>%
dm_add_pk(b, y) %>%
dm_add_fk(c, x, b)
dm_rename_b <-
as_dm(list(a = tibble(x = 1), e = tibble(y = 1))) %>%
dm_add_pk(e, y) %>%
dm_add_fk(a, x, e)
dm_rename_bd <-
as_dm(list(a = tibble(x = 1), d = tibble(y = 1))) %>%
dm_add_pk(d, y) %>%
dm_add_fk(a, x, d)
expect_equivalent_dm(
dm_rename_tbl(dm_rename, c = a),
dm_rename_a
)
expect_equivalent_dm(
dm_rename_tbl(dm_rename, e = b),
dm_rename_b
)
skip("dm argument")
expect_equivalent_dm(
dm_rename_tbl(dm_rename, d = b),
dm_rename_bd
)
})
test_that("errors for selecting and renaming tables work", {
expect_error(
dm_select_tbl(dm_for_filter(), t_new = c(tf_1, tf_2)),
class = "vctrs_error_names_must_be_unique"
)
expect_error(
dm_rename_tbl(dm_for_filter(), t_new = c(tf_1, tf_2)),
class = "vctrs_error_names_must_be_unique"
)
})
|
median2 <- function(x){apply(x, 2, median)}
lower2 <- function(x){apply(x, 2, quantile, 0.025)}
upper2 <- function(x){apply(x, 2, quantile, 0.975)}
sd2 <- function(x){apply(x, 2, sd)}
Summary <- function(x){
  names2 <- c("SimulatedValue", "Median", "Lower95%", "Upper95%", "Individuals", "Series", "SeriesPerID", "n.obs")
n.sim <- x$n.sim[1]
n.ss <- x$n.ss
x$SeriesPerInd <- x$Series/x$Individuals
##PopulationMeans
mIntercept <- as.vector(unlist(lapply(x$Intercept, median2)))
lIntercept <- as.vector(unlist(lapply(x$Intercept, lower2)))
uIntercept <- as.vector(unlist(lapply(x$Intercept,upper2)))
Intercept <- data.frame(x$SimInt, mIntercept, lIntercept, uIntercept, x$Individuals, x$Series, x$SeriesPerInd, unlist(x$n.obs))
colnames(Intercept) <- names2
mSlope <- as.vector(unlist(lapply(x$Slope, median2)))
lSlope <- as.vector(unlist(lapply(x$Slope, lower2)))
uSlope <- as.vector(unlist(lapply(x$Slope,upper2)))
Slope <- data.frame(x$SimSlope, mSlope, lSlope, uSlope, x$Individuals, x$Series, x$SeriesPerInd, unlist(x$n.obs))
colnames(Slope) <- names2
Population.means <- list(Intercept=Intercept, Slope=Slope)
##Individual VCV
aID <- lapply(x$IDVCV, median2)
cID <- lapply(x$IDVCV, lower2)
bID <- lapply(x$IDVCV,upper2)
medianID <- as.data.frame(matrix(unlist(aID),n.ss,4, byrow=TRUE))
UpperID <- as.data.frame(matrix(unlist(bID),n.ss,4, byrow=TRUE))
LowerID <- as.data.frame(matrix(unlist(cID),n.ss,4, byrow=TRUE))
SimVCVInd <- as.data.frame(matrix(unlist(x$SimVCVInd),n.ss,4, byrow=TRUE))
IDIntVar <- cbind(SimVCVInd[,1], medianID[,1], LowerID[,1], UpperID[,1], x$Individuals, x$Series, x$SeriesPerInd, unlist(x$n.obs))
IDCoVar <- cbind(SimVCVInd[,2], medianID[,2], LowerID[,2], UpperID[,2], x$Individuals, x$Series,x$SeriesPerInd, unlist(x$n.obs))
IDSlopeVar <- cbind(SimVCVInd[,4], medianID[,4], LowerID[,4], UpperID[,4], x$Individuals, x$Series, x$SeriesPerInd, unlist(x$n.obs))
colnames(IDIntVar) <- names2
colnames(IDCoVar) <- names2
colnames(IDSlopeVar) <- names2
ID <- list(IntVar=IDIntVar, CoVar=IDCoVar, SlopeVar=IDSlopeVar)
##Series VCV
aSeries <- lapply(x$SeriesVCV, median2)
cSeries <- lapply(x$SeriesVCV, lower2)
bSeries <- lapply(x$SeriesVCV,upper2)
medianSeries <- as.data.frame(matrix(unlist(aSeries),n.ss,4, byrow=TRUE))
UpperSeries <- as.data.frame(matrix(unlist(bSeries),n.ss,4, byrow=TRUE))
LowerSeries <- as.data.frame(matrix(unlist(cSeries),n.ss,4, byrow=TRUE))
SimVCVSeries <- as.data.frame(matrix(unlist(x$SimVCVSeries),n.ss,4, byrow=TRUE))
SeriesIntVar <- cbind(SimVCVSeries[,1], medianSeries[,1], LowerSeries[,1], UpperSeries[,1], x$Individuals, x$Series,x$SeriesPerInd, unlist(x$n.obs))
SeriesCoVar <- cbind(SimVCVSeries[,2], medianSeries[,2], LowerSeries[,2], UpperSeries[,2], x$Individuals, x$Series,x$SeriesPerInd, unlist(x$n.obs))
SeriesSlopeVar <- cbind(SimVCVSeries[,4], medianSeries[,4], LowerSeries[,4], UpperSeries[,4], x$Individuals,x$Series, x$SeriesPerInd, unlist(x$n.obs))
colnames(SeriesIntVar) <- names2
colnames(SeriesCoVar) <- names2
colnames(SeriesSlopeVar) <- names2
Series <- list(IntVar=SeriesIntVar, CoVar=SeriesCoVar,SlopeVar=SeriesSlopeVar)
##Residuals
aResiduals <- as.vector(unlist(lapply(x$Residuals, median2)))
lResiduals <- as.vector(unlist(lapply(x$Residuals, lower2)))
uResiduals <- as.vector(unlist(lapply(x$Residuals,upper2)))
Residuals <- as.data.frame(cbind(x$SimResiduals, aResiduals, lResiduals, uResiduals, x$Individuals, x$Series, x$SeriesPerInd, unlist(x$n.obs)))
colnames(Residuals) <- names2
##Repeatabilities
mRInt <- as.vector(unlist(lapply(x$RInt, median)))
mRSlope <- as.vector(unlist(lapply(x$RSlope, median)))
mRrn <- as.vector(unlist(lapply(x$Rrn, median)))
lRInt <- as.vector(unlist(lapply(x$RInt, quantile, 0.025)))
lRSlope <- as.vector(unlist(lapply(x$RSlope, quantile, 0.025)))
lRrn <- as.vector(unlist(lapply(x$Rrn, quantile, 0.025)))
uRInt <- as.vector(unlist(lapply(x$RInt, quantile, 0.975)))
uRSlope <- as.vector(unlist(lapply(x$RSlope, quantile, 0.975)))
uRrn <- as.vector(unlist(lapply(x$Rrn, quantile, 0.975)))
SimRepInt <- as.vector(SimVCVInd[,1]/(SimVCVSeries[,1] + SimVCVInd[,1]))
SimRepSlope <- as.vector(SimVCVInd[,4]/(SimVCVSeries[,4] + SimVCVInd[,4]))
SimRepRn <- (SimVCVInd[,1] + SimVCVInd[,4] + (2*SimVCVInd[,2]))/((SimVCVSeries[,1] + SimVCVSeries[,4] + (2*SimVCVSeries[,2])) + (SimVCVInd[,1] + SimVCVInd[,4] + (2*SimVCVInd[,2])))
RInt <- as.data.frame(cbind(SimRepInt[1], mRInt, lRInt, uRInt, x$Individuals, x$Series, x$SeriesPerInd, unlist(x$n.obs)))
colnames(RInt) <- names2
RSlope <- as.data.frame(cbind(SimRepSlope[1], mRSlope, lRSlope, uRSlope, x$Individuals, x$Series, x$SeriesPerInd, unlist(x$n.obs)))
colnames(RSlope) <- names2
Rrn <- as.data.frame(cbind(SimRepRn[1], mRrn, lRrn, uRrn, x$Individuals, x$Series, x$SeriesPerInd, unlist(x$n.obs)))
colnames(Rrn) <- names2
Repeatabilities <- list(Intercept=RInt, Slope=RSlope)
summary <- list(Population.means=Population.means, ID=ID, Series=Series, Repeatabilities=Repeatabilities, Residuals=Residuals)
print(summary)
}
|
/MultiRR/R/Summary.r
|
no_license
|
ingted/R-Examples
|
R
| false | false | 5,290 |
r
|
median2 <- function(x){apply(x, 2, median)}
lower2 <- function(x){apply(x, 2, quantile, 0.025)}
upper2 <- function(x){apply(x, 2, quantile, 0.975)}
sd2 <- function(x){apply(x, 2, sd)}
Summary <- function(x){
  names2 <- c("SimulatedValue", "Median", "Lower95%", "Upper95%", "Individuals", "Series", "SeriesPerID", "n.obs")
n.sim <- x$n.sim[1]
n.ss <- x$n.ss
x$SeriesPerInd <- x$Series/x$Individuals
##PopulationMeans
mIntercept <- as.vector(unlist(lapply(x$Intercept, median2)))
lIntercept <- as.vector(unlist(lapply(x$Intercept, lower2)))
uIntercept <- as.vector(unlist(lapply(x$Intercept,upper2)))
Intercept <- data.frame(x$SimInt, mIntercept, lIntercept, uIntercept, x$Individuals, x$Series, x$SeriesPerInd, unlist(x$n.obs))
colnames(Intercept) <- names2
mSlope <- as.vector(unlist(lapply(x$Slope, median2)))
lSlope <- as.vector(unlist(lapply(x$Slope, lower2)))
uSlope <- as.vector(unlist(lapply(x$Slope,upper2)))
Slope <- data.frame(x$SimSlope, mSlope, lSlope, uSlope, x$Individuals, x$Series, x$SeriesPerInd, unlist(x$n.obs))
colnames(Slope) <- names2
Population.means <- list(Intercept=Intercept, Slope=Slope)
##Individual VCV
aID <- lapply(x$IDVCV, median2)
cID <- lapply(x$IDVCV, lower2)
bID <- lapply(x$IDVCV,upper2)
medianID <- as.data.frame(matrix(unlist(aID),n.ss,4, byrow=TRUE))
UpperID <- as.data.frame(matrix(unlist(bID),n.ss,4, byrow=TRUE))
LowerID <- as.data.frame(matrix(unlist(cID),n.ss,4, byrow=TRUE))
SimVCVInd <- as.data.frame(matrix(unlist(x$SimVCVInd),n.ss,4, byrow=TRUE))
IDIntVar <- cbind(SimVCVInd[,1], medianID[,1], LowerID[,1], UpperID[,1], x$Individuals, x$Series, x$SeriesPerInd, unlist(x$n.obs))
IDCoVar <- cbind(SimVCVInd[,2], medianID[,2], LowerID[,2], UpperID[,2], x$Individuals, x$Series,x$SeriesPerInd, unlist(x$n.obs))
IDSlopeVar <- cbind(SimVCVInd[,4], medianID[,4], LowerID[,4], UpperID[,4], x$Individuals, x$Series, x$SeriesPerInd, unlist(x$n.obs))
colnames(IDIntVar) <- names2
colnames(IDCoVar) <- names2
colnames(IDSlopeVar) <- names2
ID <- list(IntVar=IDIntVar, CoVar=IDCoVar, SlopeVar=IDSlopeVar)
##Series VCV
aSeries <- lapply(x$SeriesVCV, median2)
cSeries <- lapply(x$SeriesVCV, lower2)
bSeries <- lapply(x$SeriesVCV,upper2)
medianSeries <- as.data.frame(matrix(unlist(aSeries),n.ss,4, byrow=TRUE))
UpperSeries <- as.data.frame(matrix(unlist(bSeries),n.ss,4, byrow=TRUE))
LowerSeries <- as.data.frame(matrix(unlist(cSeries),n.ss,4, byrow=TRUE))
SimVCVSeries <- as.data.frame(matrix(unlist(x$SimVCVSeries),n.ss,4, byrow=TRUE))
SeriesIntVar <- cbind(SimVCVSeries[,1], medianSeries[,1], LowerSeries[,1], UpperSeries[,1], x$Individuals, x$Series,x$SeriesPerInd, unlist(x$n.obs))
SeriesCoVar <- cbind(SimVCVSeries[,2], medianSeries[,2], LowerSeries[,2], UpperSeries[,2], x$Individuals, x$Series,x$SeriesPerInd, unlist(x$n.obs))
SeriesSlopeVar <- cbind(SimVCVSeries[,4], medianSeries[,4], LowerSeries[,4], UpperSeries[,4], x$Individuals,x$Series, x$SeriesPerInd, unlist(x$n.obs))
colnames(SeriesIntVar) <- names2
colnames(SeriesCoVar) <- names2
colnames(SeriesSlopeVar) <- names2
Series <- list(IntVar=SeriesIntVar, CoVar=SeriesCoVar,SlopeVar=SeriesSlopeVar)
##Residuals
aResiduals <- as.vector(unlist(lapply(x$Residuals, median2)))
lResiduals <- as.vector(unlist(lapply(x$Residuals, lower2)))
uResiduals <- as.vector(unlist(lapply(x$Residuals,upper2)))
Residuals <- as.data.frame(cbind(x$SimResiduals, aResiduals, lResiduals, uResiduals, x$Individuals, x$Series, x$SeriesPerInd, unlist(x$n.obs)))
colnames(Residuals) <- names2
##Repeatabilities
mRInt <- as.vector(unlist(lapply(x$RInt, median)))
mRSlope <- as.vector(unlist(lapply(x$RSlope, median)))
mRrn <- as.vector(unlist(lapply(x$Rrn, median)))
lRInt <- as.vector(unlist(lapply(x$RInt, quantile, 0.025)))
lRSlope <- as.vector(unlist(lapply(x$RSlope, quantile, 0.025)))
lRrn <- as.vector(unlist(lapply(x$Rrn, quantile, 0.025)))
uRInt <- as.vector(unlist(lapply(x$RInt, quantile, 0.975)))
uRSlope <- as.vector(unlist(lapply(x$RSlope, quantile, 0.975)))
uRrn <- as.vector(unlist(lapply(x$Rrn, quantile, 0.975)))
SimRepInt <- as.vector(SimVCVInd[,1]/(SimVCVSeries[,1] + SimVCVInd[,1]))
SimRepSlope <- as.vector(SimVCVInd[,4]/(SimVCVSeries[,4] + SimVCVInd[,4]))
SimRepRn <- (SimVCVInd[,1] + SimVCVInd[,4] + (2*SimVCVInd[,2]))/((SimVCVSeries[,1] + SimVCVSeries[,4] + (2*SimVCVSeries[,2])) + (SimVCVInd[,1] + SimVCVInd[,4] + (2*SimVCVInd[,2])))
RInt <- as.data.frame(cbind(SimRepInt[1], mRInt, lRInt, uRInt, x$Individuals, x$Series, x$SeriesPerInd, unlist(x$n.obs)))
colnames(RInt) <- names2
RSlope <- as.data.frame(cbind(SimRepSlope[1], mRSlope, lRSlope, uRSlope, x$Individuals, x$Series, x$SeriesPerInd, unlist(x$n.obs)))
colnames(RSlope) <- names2
Rrn <- as.data.frame(cbind(SimRepRn[1], mRrn, lRrn, uRrn, x$Individuals, x$Series, x$SeriesPerInd, unlist(x$n.obs)))
colnames(Rrn) <- names2
Repeatabilities <- list(Intercept=RInt, Slope=RSlope)
summary <- list(Population.means=Population.means, ID=ID, Series=Series, Repeatabilities=Repeatabilities, Residuals=Residuals)
print(summary)
}
|
\name{Pull}
\alias{Pull}
\title{Select rows from data frame}
\description{Selects rows from data frame basing on the evaluation of the second argument}
\usage{Pull(df, ...)}
\arguments{
\item{df}{Data frame to select from}
\item{...}{Arguments to with(df, ...)}
}
\details{
If the first argument is not a data frame, function will stop with an error.
Pull() is similar to subset() (but is much simpler and allows non-logical
values) and to dplyr::filter() function.
Please avoid using Pull() in non-interactive mode.
}
\value{Data frame}
% \references{}
\author{Alexey Shipunov}
% \seealso{}
\examples{
`[`(trees, 3, 1) # ... so square bracket is a command
## arguments of `[` are independent; this is why square bracket does not "catch" the context:
trees[trees$Girth < 11 & trees$Height == 65, ] # boring and long
trees[trees$Girth < 11 & sample(0:1, nrow(trees), replace=TRUE), ] # yes, boring, long but flexible
trees[with(trees, Girth < 11 & Height == 65), ] # less boring but still long
## it would be nice to avoid typing "trees" twice:
Pull(trees, Girth < 11 & Height == 65) # shorter
Pull(trees, Girth < 11 & sample(0:1, nrow(trees), replace=TRUE)) # flexibility is still here
Pull(trees, Girth < 11 & sample(0:1, nrow(trees),
replace=TRUE))$Height # if you want also select columns
Pull(trees, grep(81, Height)) # select not only by TRUE/FALSE but also by row index
}
\keyword{manip}
|
/man/Pull.rd
|
no_license
|
cran/shipunov
|
R
| false | false | 1,406 |
rd
|
\name{Pull}
\alias{Pull}
\title{Select rows from data frame}
\description{Selects rows from data frame basing on the evaluation of the second argument}
\usage{Pull(df, ...)}
\arguments{
\item{df}{Data frame to select from}
\item{...}{Arguments to with(df, ...)}
}
\details{
If the first argument is not a data frame, function will stop with an error.
Pull() is similar to subset() (but is much simpler and allows non-logical
values) and to dplyr::filter() function.
Please avoid using Pull() in non-interactive mode.
}
\value{Data frame}
% \references{}
\author{Alexey Shipunov}
% \seealso{}
\examples{
`[`(trees, 3, 1) # ... so square bracket is a command
## arguments of `[` are independent; this is why square bracket does not "catch" the context:
trees[trees$Girth < 11 & trees$Height == 65, ] # boring and long
trees[trees$Girth < 11 & sample(0:1, nrow(trees), replace=TRUE), ] # yes, boring, long but flexible
trees[with(trees, Girth < 11 & Height == 65), ] # less boring but still long
## it would be nice to avoid typing "trees" twice:
Pull(trees, Girth < 11 & Height == 65) # shorter
Pull(trees, Girth < 11 & sample(0:1, nrow(trees), replace=TRUE)) # flexibility is still here
Pull(trees, Girth < 11 & sample(0:1, nrow(trees),
replace=TRUE))$Height # if you want also select columns
Pull(trees, grep(81, Height)) # select not only by TRUE/FALSE but also by row index
}
\keyword{manip}
|
% Generated by roxygen2 (4.1.0.9001): do not edit by hand
% Please edit documentation in R/googlesheet.R
\name{googlesheet}
\alias{googlesheet}
\title{The googlesheet object}
\usage{
googlesheet()
}
\description{
The googlesheet object stores information that \code{googlesheets} requires in
order to communicate with the
\href{https://developers.google.com/google-apps/spreadsheets/}{Google Sheets
API}.
}
\details{
Very little of this is of interest to the user. A googlesheet object
includes the fields:
\itemize{
\item \code{sheet_key} the key of the spreadsheet
\item \code{sheet_title} the title of the spreadsheet
\item \code{n_ws} the number of worksheets contained in the spreadsheet
\item \code{ws_feed} the "worksheets feed" of the spreadsheet
\item \code{sheet_id} the id of the spreadsheet
\item \code{updated} the time of last update (at time of registration)
\item \code{get_date} the time of registration
\item \code{visibility} visibility of spreadsheet (Google's confusing
vocabulary); actually, does not describe a property of spreadsheet itself but
rather whether requests will be made with or without authentication
\item \code{author_name} the name of the owner
\item \code{author_email} the email of the owner
\item \code{links} data.frame of links specific to the spreadsheet
\item \code{ws} a data.frame about the worksheets contained in the
spreadsheet
}
TO DO: this documentation is neither here nor there. Either the object is
self-explanatory and this isn't really needed. Or this needs to get beefed
up. Probably the latter.
}
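\examples{
\dontrun{
# Illustrative sketch only: `ss` is assumed to be a registered googlesheet
# object obtained elsewhere; the fields shown are those listed under Details.
ss$sheet_title
ss$n_ws
}
}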
|
/man/googlesheet.Rd
|
permissive
|
craigcitro/googlesheets
|
R
| false | false | 1,560 |
rd
|
% Generated by roxygen2 (4.1.0.9001): do not edit by hand
% Please edit documentation in R/googlesheet.R
\name{googlesheet}
\alias{googlesheet}
\title{The googlesheet object}
\usage{
googlesheet()
}
\description{
The googlesheet object stores information that \code{googlesheets} requires in
order to communicate with the
\href{https://developers.google.com/google-apps/spreadsheets/}{Google Sheets
API}.
}
\details{
Very little of this is of interest to the user. A googlesheet object
includes the fields:
\itemize{
\item \code{sheet_key} the key of the spreadsheet
\item \code{sheet_title} the title of the spreadsheet
\item \code{n_ws} the number of worksheets contained in the spreadsheet
\item \code{ws_feed} the "worksheets feed" of the spreadsheet
\item \code{sheet_id} the id of the spreadsheet
\item \code{updated} the time of last update (at time of registration)
\item \code{get_date} the time of registration
\item \code{visibility} visibility of spreadsheet (Google's confusing
vocabulary); actually, does not describe a property of spreadsheet itself but
rather whether requests will be made with or without authentication
\item \code{author_name} the name of the owner
\item \code{author_email} the email of the owner
\item \code{links} data.frame of links specific to the spreadsheet
\item \code{ws} a data.frame about the worksheets contained in the
spreadsheet
}
TO DO: this documentation is neither here nor there. Either the object is
self-explanatory and this isn't really needed. Or this needs to get beefed
up. Probably the latter.
}
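\examples{
\dontrun{
# Illustrative sketch only: `ss` is assumed to be a registered googlesheet
# object obtained elsewhere; the fields shown are those listed under Details.
ss$sheet_title
ss$n_ws
}
}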
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reg.linreg.R
\name{reg.linreg}
\alias{reg.linreg}
\title{Linear regression processor}
\usage{
reg.linreg(dataframe,dependent)
}
\arguments{
\item{dataframe}{:a data frame, which includes the dependent variable}
\item{dependent}{:dependent variable}
}
\description{
This function will take a data frame and the dependent variable and fit all possible combinations of models.
The result will be a data frame of models and test statistics for all the models possible. The test statistics currently
computed are the following: R-squared, Adjusted R-squared, Degrees of freedom, Residual standard error, AIC, BIC, Durbin-Watson statistic.
}
\examples{
reg.linreg(mtcars,"mpg")
}
|
/man/reg.linreg.Rd
|
no_license
|
cran/PMmisc
|
R
| false | true | 782 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reg.linreg.R
\name{reg.linreg}
\alias{reg.linreg}
\title{Linear regression processor}
\usage{
reg.linreg(dataframe,dependent)
}
\arguments{
\item{dataframe}{:a data frame, which includes the dependent variable}
\item{dependent}{:dependent variable}
}
\description{
This function will take a data frame and the dependent variable and fit all possible combinations of models.
The result will be a data frame of models and test statistics for all the models possible. The test statistics currently
computed are the following: R-squared, Adjusted R-squared, Degrees of freedom, Residual standard error, AIC, BIC, Durbin-Watson statistic.
}
\examples{
reg.linreg(mtcars,"mpg")
}
|
eye<-function (n,m=n) {
# eye(n) returns the n-by-n identity matrix.
# eye(n,m) returns an n-by-m matrix with 1's on the diagonal and 0's elsewhere.
return(diag(1,n,m));
} # end function
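# Illustrative usage (added sketch, not in the original source):
# eye(3) # 3-by-3 identity matrix
# eye(2, 4) # 2-by-4 matrix with 1's on the diagonal, 0's elsewhere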
|
/DbtTools/RetroMAT/R/eye.R
|
no_license
|
markus-flicke/KD_Projekt_1
|
R
| false | false | 203 |
r
|
eye<-function (n,m=n) {
# eye(n) returns the n-by-n identity matrix.
# eye(n,m) returns an n-by-m matrix with 1's on the diagonal and 0's elsewhere.
return(diag(1,n,m));
} # end function
|
## This function saves a list of functions to set and to get a matrix and its inverse
makeCacheMatrix <- function(x = matrix()) {
m <- NULL #setting a value of a matrix's inverse being cached to zero
set <- function(y) { #setting a matrix and indicating that its inverse
#is not stored
x <<- y
m <<- NULL
}
get <- function() x #extracting a stored matrix
setinv <- function(solve) m <<- solve #setting an inverse of a matrix
getinv <- function() m #getting a stored inverse of a matrix
s<-list(set = set, get = get, #setting a list of pre-defined functions
setinv = setinv,
getinv = getinv)
}
## This function operates with the results of the previous function and returns
## an inverse of the matrix specified as an input for the previous function. If
## its inverse has already been calculated, it returns the cached value.
cacheSolve <- function(x, ...) {
m <- x$getinv() #extracting inversed matrix from cache
if(!is.null(m)) { #checking if the extracted data is not null
message("getting cached data") #if it is not null, printing the message
return(m) #and returning its value, finishing the function
}
data <- x$get() #otherwise, taking out the matrix
m <- solve(data, ...) #computing its inverse
x$setinv(m) #storing its inverse in a cache
m #returning the value of inverse
}
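## Illustrative usage sketch (added for clarity, not part of the original
## assignment file); the matrix values below are arbitrary.
# cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
# cacheSolve(cm) # computes the inverse and stores it in the cache
# cacheSolve(cm) # second call prints "getting cached data" and reuses it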
|
/cachematrix.R
|
no_license
|
stalkernz/ProgrammingAssignment2
|
R
| false | false | 1,736 |
r
|
## This function saves a list of functions to set and to get a matrix and its inverse
makeCacheMatrix <- function(x = matrix()) {
m <- NULL #setting a value of a matrix's inverse being cached to zero
set <- function(y) { #setting a matrix and indicating that its inverse
#is not stored
x <<- y
m <<- NULL
}
get <- function() x #extracting a stored matrix
setinv <- function(solve) m <<- solve #setting an inverse of a matrix
getinv <- function() m #getting a stored inverse of a matrix
s<-list(set = set, get = get, #setting a list of pre-defined functions
setinv = setinv,
getinv = getinv)
}
## This function operates with the results of the previous function and returns
## an inverse of the matrix specified as an input for the previous function. If
## its inverse has already been calculated, it returns the cached value.
cacheSolve <- function(x, ...) {
m <- x$getinv() #extracting inversed matrix from cache
if(!is.null(m)) { #checking if the extracted data is not null
message("getting cached data") #if it is not null, printing the message
return(m) #and returning its value, finishing the function
}
data <- x$get() #otherwise, taking out the matrix
m <- solve(data, ...) #computing its inverse
x$setinv(m) #storing its inverse in a cache
m #returning the value of inverse
}
|
plot1 <- function(){
require(sqldf)
ho = read.csv.sql("household_power_consumption.txt", sep = ";", sql = "select * from file where Date = '2/1/2007' or Date = '2/2/2007'")
ho[ho == "?"]<-NA
dates = as.Date(as.Date(ho$Date, format = "%m/%d/%Y"), format = "%Y-%m-%d")
datetimes = paste(dates, ho$Time)
ho$Date = strptime(datetimes, "%Y-%m-%d %H:%M:%S")
png(filename = "plot1.png")
hist(ho$Global_active_power, col = "red", xlab= "Global Active Power (Kilowatts)", main = "Global Active Power")
dev.off()
}
|
/plot1.R
|
no_license
|
eladiomontero/ExData_Plotting1
|
R
| false | false | 576 |
r
|
plot1 <- function(){
require(sqldf)
ho = read.csv.sql("household_power_consumption.txt", sep = ";", sql = "select * from file where Date = '2/1/2007' or Date = '2/2/2007'")
ho[ho == "?"]<-NA
dates = as.Date(as.Date(ho$Date, format = "%m/%d/%Y"), format = "%Y-%m-%d")
datetimes = paste(dates, ho$Time)
ho$Date = strptime(datetimes, "%Y-%m-%d %H:%M:%S")
png(filename = "plot1.png")
hist(ho$Global_active_power, col = "red", xlab= "Global Active Power (Kilowatts)", main = "Global Active Power")
dev.off()
}
|
structure(list(record_id = 5, name_first = "John Lee", name_last = "Walker",
address = "Hotel Suite\nNew Orleans LA, 70115", telephone = "(405) 321-5555",
email = "left@hippocket.com", dob = structure(-5375, class = "Date"),
age = 59, sex = 1, demographics_complete = 2, height = 193.04,
weight = 104, bmi = 27.9, comments = "Had a hand for trouble and a eye for cash\n\nHe had a gold watch chain and a black mustache",
mugshot = "mugshot-5.jpg", health_complete = 0, race___1 = 1,
race___2 = 0, race___3 = 0, race___4 = 0, race___5 = 0, race___6 = 1,
ethnicity = 2, interpreter_needed = 0, race_and_ethnicity_complete = 2), row.names = c(NA,
-1L), class = "data.frame")
|
/inst/test-data/specific-redcapr/read-batch-simple/filter-character.R
|
permissive
|
OuhscBbmc/REDCapR
|
R
| false | false | 707 |
r
|
structure(list(record_id = 5, name_first = "John Lee", name_last = "Walker",
address = "Hotel Suite\nNew Orleans LA, 70115", telephone = "(405) 321-5555",
email = "left@hippocket.com", dob = structure(-5375, class = "Date"),
age = 59, sex = 1, demographics_complete = 2, height = 193.04,
weight = 104, bmi = 27.9, comments = "Had a hand for trouble and a eye for cash\n\nHe had a gold watch chain and a black mustache",
mugshot = "mugshot-5.jpg", health_complete = 0, race___1 = 1,
race___2 = 0, race___3 = 0, race___4 = 0, race___5 = 0, race___6 = 1,
ethnicity = 2, interpreter_needed = 0, race_and_ethnicity_complete = 2), row.names = c(NA,
-1L), class = "data.frame")
|
#plot_bericht-------------------------------------------------------------------
#' @title plot_bericht
#' @description Produces a segment plot used in standard dating reports of the
#' BOKU tree ring lab based on the overview excel table.
#' @param filename a path to a .xls file used for dating overview in the BOKU
#' tree ring lab.
#' @param encoding encoding of the .xls file
#' @param set_lwd width of the segments
#' @param multi adjusts the height of the output png image
#' @export
#' @examples
#' #use the following line to select the file interactively:
#' \dontrun{
#' plot_bericht(file.choose())
#' }
plot_bericht <- function(filename, encoding = 'latin1', set_lwd = 50,
multi = 0.7) {
data('species', envir = environment())
rownames(species) <- species[ ,'german']
#read in files
header <- gdata::read.xls (filename, sheet = 1, header = FALSE,
encoding = encoding)
header <- header[1:3, 4:5]
df <- gdata::read.xls(filename, sheet = 1, header = TRUE, pattern='Nr.',
encoding=encoding)
columns <- c(2, 3, 4, 5, 7)
df[,columns] <- lapply(df[ ,columns], FUN = function(x) as.character(x))
#test if all species in "Baumart" are specified within the object species
if(any(!(df[ , 3] %in% species[,'german']))){
stop('typing error in "Baumart" or species not implemented')
}
#tests for typing errors in series length
if(!is.integer(df[,6])){
stop('typing error in column "Jahrringanzahl"')
}
#tests for typing errors in column "WK"
if(any(!(df[ ,5] %in% c('keine', 'nein', 'ja')))){
stop('problem in column "WK"')
}
#selects only dated series
df <- df[grep('[0-9]{1,4}', df[,4]), ]
#separate date end and unmeasured years from string
date.end <- as.data.frame(as.numeric(stringr::str_extract(df[ ,4], '-?\\d{1,4}')))
colnames(date.end) <- 'letztes.Jahr'
date.begin <- date.end - df[6] + 1
colnames(date.begin) <- 'erstes.Jahr'
unmeasured <- substr(df[ ,4], stringr::str_locate(df[ ,4], '-?\\d{1,4}')[ ,2]
+ 1, nchar(df[,4]))
replacements <- list(c('\\+', ''), c('min.', ''), c('JR', ''),
c(' ', ''))
unmeasured <- tryCatch(as.double(mgsub(replacements, unmeasured)), warning =
function(w){stop('problem in column "letztes Jahr"')})
unmeasured[is.na(unmeasured)] <- 0
#compiling data set for plotting
dat <- data.frame(df[2:3], date.begin, date.end , df[6], Farbe = NA, df[5],
unmeasured)
dat[ ,6] <- species[dat[, 'Holzart'], 'color']
dat <- dat[order(dat$letztes.Jahr+dat$unmeasured), ]
######plot
makeplot <- function() {
xlim <- c(min(dat[3], na.rm = TRUE) - 10,
max(dat[ , 4] + dat[ , 8], na.rm = TRUE) + 10)
ylim <- c(0, nrow(dat) + 1)
png(paste(header[1, 2], '.png', sep = ""), units = 'in',
height = (ylim[2] + 2) * multi, width = 12, res = 150)
par(mai = c(1, 1, 0.4, 1))
plot(0, xlim = xlim, main = header[1, 2], type = 'l', bty = 'o', yaxt = 'n',
ylim = ylim, ylab = 'Probe', xlab = 'Jahr', lwd = set_lwd, xaxs = 'i',
yaxs = 'i')
grid(ny = NA, col = 'grey50')
axis(2, at = seq_len(nrow(dat)), labels = dat[ ,1], las = 1)
axis(4, at = seq_len(nrow(dat)),
labels = dat[ , 4] + dat[ , 8], las = 1, outer = F)
#plot not measured rings
these_lines <- which(!is.na(dat[ ,8]))
lapply(these_lines, FUN = function(i) {
segments(dat[i, 4], i, dat[i, 4] + dat[i, 8], col = 'grey90',
lend = 1,lwd = set_lwd, xaxs = 'i', yaxs = 'i')
})
#plot segments
lapply(seq_len(nrow(dat)), FUN = function(i) {
lines(as.numeric(dat[i, 3:4]), c(i,i), lwd = set_lwd, lend = 3,
col = dat[i, 6], xaxs = 'i', yaxs = 'i')
#text(xlim[2] + 0.5, i, dat[i, 4] + dat[i, 8], pos = 4)
})
#waldkante
these_lines <- which(dat[ ,7] == 'ja')
lapply(these_lines, FUN = function(i) {
points(dat[i, 4] + dat[i, 8] + 2, i, pch = 20)
lines(as.numeric(c(dat[i, 4] + dat[i, 8] - 1,dat[i, 4] + dat[i, 8])), c(i, i),
lwd = set_lwd, lend = 1, col = 'black', xaxs = 'i', yaxs = 'i')
})
#legend
legende <- species[unique(dat$Holzart),]
legend('bottom', legend = legende[, 'german'], fill = as.character(legende[, 'color']),
ncol = nrow(legende), bty = 'n')
dev.off()
}
try(makeplot())
}
|
/R/plot_bericht.R
|
no_license
|
konradmayer/trlboku
|
R
| false | false | 4,455 |
r
|
#plot_bericht-------------------------------------------------------------------
#' @title plot_bericht
#' @description Produces a segment plot used in standard dating reports of the
#' BOKU tree ring lab based on the overview excel table.
#' @param filename a path to a .xls file used for dating overview in the BOKU
#' tree ring lab.
#' @param encoding encoding of the .xls file
#' @param set_lwd width of the segments
#' @param multi adjusts the height of the output png image
#' @export
#' @examples
#' #use the following line to select the file interactively:
#' \dontrun{
#' plot_bericht(file.choose())
#' }
plot_bericht <- function(filename, encoding = 'latin1', set_lwd = 50,
multi = 0.7) {
data('species', envir = environment())
rownames(species) <- species[ ,'german']
#read in files
header <- gdata::read.xls (filename, sheet = 1, header = FALSE,
encoding = encoding)
header <- header[1:3, 4:5]
df <- gdata::read.xls(filename, sheet = 1, header = TRUE, pattern='Nr.',
encoding=encoding)
columns <- c(2, 3, 4, 5, 7)
df[,columns] <- lapply(df[ ,columns], FUN = function(x) as.character(x))
#test if all species in "Baumart" are specified within the object species
if(any(!(df[ , 3] %in% species[,'german']))){
stop('typing error in "Baumart" or species not implemented')
}
#tests for typing errors in series length
if(!is.integer(df[,6])){
stop('typing error in column "Jahrringanzahl"')
}
#tests for typing errors in column "WK"
if(any(!(df[ ,5] %in% c('keine', 'nein', 'ja')))){
stop('problem in column "WK"')
}
#selects only dated series
df <- df[grep('[0-9]{1,4}', df[,4]), ]
#separate date end and unmeasured years from string
date.end <- as.data.frame(as.numeric(stringr::str_extract(df[ ,4], '-?\\d{1,4}')))
colnames(date.end) <- 'letztes.Jahr'
date.begin <- date.end - df[6] + 1
colnames(date.begin) <- 'erstes.Jahr'
unmeasured <- substr(df[ ,4], stringr::str_locate(df[ ,4], '-?\\d{1,4}')[ ,2]
+ 1, nchar(df[,4]))
replacements <- list(c('\\+', ''), c('min.', ''), c('JR', ''),
c(' ', ''))
unmeasured <- tryCatch(as.double(mgsub(replacements, unmeasured)), warning =
function(w){stop('problem in column "letztes Jahr"')})
unmeasured[is.na(unmeasured)] <- 0
#compiling data set for plotting
dat <- data.frame(df[2:3], date.begin, date.end , df[6], Farbe = NA, df[5],
unmeasured)
dat[ ,6] <- species[dat[, 'Holzart'], 'color']
dat <- dat[order(dat$letztes.Jahr+dat$unmeasured), ]
######plot
makeplot <- function() {
xlim <- c(min(dat[3], na.rm = TRUE) - 10,
max(dat[ , 4] + dat[ , 8], na.rm = TRUE) + 10)
ylim <- c(0, nrow(dat) + 1)
png(paste(header[1, 2], '.png', sep = ""), units = 'in',
height = (ylim[2] + 2) * multi, width = 12, res = 150)
par(mai = c(1, 1, 0.4, 1))
plot(0, xlim = xlim, main = header[1, 2], type = 'l', bty = 'o', yaxt = 'n',
ylim = ylim, ylab = 'Probe', xlab = 'Jahr', lwd = set_lwd, xaxs = 'i',
yaxs = 'i')
grid(ny = NA, col = 'grey50')
axis(2, at = seq_len(nrow(dat)), labels = dat[ ,1], las = 1)
axis(4, at = seq_len(nrow(dat)),
labels = dat[ , 4] + dat[ , 8], las = 1, outer = F)
#plot not measured rings
these_lines <- which(!is.na(dat[ ,8]))
lapply(these_lines, FUN = function(i) {
segments(dat[i, 4], i, dat[i, 4] + dat[i, 8], col = 'grey90',
lend = 1,lwd = set_lwd, xaxs = 'i', yaxs = 'i')
})
#plot segments
lapply(seq_len(nrow(dat)), FUN = function(i) {
lines(as.numeric(dat[i, 3:4]), c(i,i), lwd = set_lwd, lend = 3,
col = dat[i, 6], xaxs = 'i', yaxs = 'i')
#text(xlim[2] + 0.5, i, dat[i, 4] + dat[i, 8], pos = 4)
})
#waldkante
these_lines <- which(dat[ ,7] == 'ja')
lapply(these_lines, FUN = function(i) {
points(dat[i, 4] + dat[i, 8] + 2, i, pch = 20)
lines(as.numeric(c(dat[i, 4] + dat[i, 8] - 1,dat[i, 4] + dat[i, 8])), c(i, i),
lwd = set_lwd, lend = 1, col = 'black', xaxs = 'i', yaxs = 'i')
})
#legend
legende <- species[unique(dat$Holzart),]
legend('bottom', legend = legende[, 'german'], fill = as.character(legende[, 'color']),
ncol = nrow(legende), bty = 'n')
dev.off()
}
try(makeplot())
}
|
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = numeric(0), temp = c(1.79939860374846e+165, 0, 0))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
|
/meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615856383-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 172 |
r
|
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = numeric(0), temp = c(1.79939860374846e+165, 0, 0))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
|
library(raster)
## To stop raster using the tmp dir which is slow and fills up
rasterOptions(tmpdir = "/work/phillips", chunksize = 524288, maxmemory = 134217728)
scaleGL <- function(layername, data, load = ".", save = "."){
if(!(layername %in% names(data))) stop("Layername must be in data")
print(layername)
dat <- data[,which(names(data) == layername)]
min <- min(dat, na.rm = TRUE)
max <- max(dat, na.rm = TRUE)
tif <- raster(load)
dividby10 <- c("bio10_1","bio10_5",
"bio10_6", "bio10_7","bio10_8","bio10_9","bio10_10",
"bio10_11", "PHIHOX", "ORCDRC")
if(layername %in% dividby10){
print("Dividing by 10")
tif <- tif/10
}
print("Capping minimum value")
tif[tif<min] <- min
print("Capping maximum value")
tif[tif>max] <- max
print("Scaling tif")
tif <- scale(tif)
print("Saving tif")
writeRaster(tif, save, format = "GTiff")
# do.call(file.remove, list(list.files(dirname(rasterTmpFile()), full.names = TRUE)))
rm(tif)
}
calculateWeightedMeanGL <- function(folder, savefolder){
# tifs <- c("PHIHOX", "CLYPPT", "SLTPPT", "CECSOL", "ORCDRC") ## "SNDPPT", "TAXNWRB_1"
tifs <- "ORCDRC"
print(tifs)
layers <- c("sl1","sl2", "sl3", "sl4")
weight <- c(0.001, 0.05, 0.1, 0.15)
filenames <- list.files(folder)
for(t in tifs){
print(paste("Calculating weighted mean of", t))
dl <- filenames[grep(t, filenames)]
dl <- dl[grep(paste(layers,collapse="|"), dl)]
print(paste("Creating stack of", file.path(folder, dl)))
s <- raster::stack(file.path(folder, dl))
## writeRaster(s, "hdf8_EVI.TIF")
print("Doing calculations")
weighted.mean(x = s, w = weight, na.rm=FALSE,
filename=file.path(savefolder, paste(t, "_weighted.tif", sep ="")), format = "GTiff")
rm(s)
# do.call(file.remove, list(list.files(dirname(rasterTmpFile()), full.names = TRUE)))
}
}
args <- commandArgs(trailingOnly = TRUE)
folder <- args[1] # data_dir
savefolder <- args[2] # output_dir
date <- args[3]
site_dir <- args[4]
print(folder)
print(savefolder)
print(date)
print(site_dir)
list.files(folder)
# folder <- 'D:/Helens/sWorm'
# savefolder <- folder
# date <- date
# print("Reclassifying Snow Layer")
#
# r <- raster(file.path(folder, "snow_2015_sum.tif"))
#
# print("Changing values")
# r[r > 3] <- 4 # Anything above 3
#
# print("Saving!")
# r <- writeRaster(r, filename=file.path(savefolder, "Snow_newValues.tif"), format="GTiff", overwrite=TRUE)
#
#
#
print("Loading datasets")
#richness <- read.csv(file.path(site_dir, paste('sitesRichness_', date, '.csv', sep="")))
#abundance <- read.csv(file.path(site_dir, paste('sitesAbundance_', date, '.csv', sep="")))
biomass <- read.csv(file.path(site_dir, paste('sitesBiomass_', date, '.csv', sep="")))
# print("Calculating the weighted mean of soil data")
# calculateWeightedMeanGL(folder, savefolder)
# print("Calculating richness layers")
# # Richness
#
# scaleGL(layername = "ORCDRC", data = richness, load = file.path(folder, "ORCDRC_weighted.tif"),
# save = file.path(savefolder, 'ORCDRC_RichnessCutScaled.tif'))
# scaleGL(layername = "PHIHOX", data = richness, load = file.path(folder, "PHIHOX_weighted.tif"),
# save = file.path(savefolder, 'PHIHOX_RichnessCutScaled.tif'))
# scaleGL(layername = "CLYPPT", data = richness, load = file.path(folder, "CLYPPT_weighted.tif"),
# save = file.path(savefolder, 'CLYPPT_RichnessCutScaled.tif'))
# scaleGL(layername = "SLTPPT", data = richness, load = file.path(folder, "SLTPPT_weighted.tif"),
# save = file.path(savefolder, 'SLTPPT_RichnessCutScaled.tif'))
# scaleGL(layername = "CECSOL", data = richness, load = file.path(folder, "CECSOL_weighted.tif"),
# save = file.path(savefolder, 'CECSOL_RichnessCutScaled.tif'))
#
# scaleGL(layername = 'bio10_1', data = richness, load = file.path(folder, 'CHELSA_bio10_1.tif'),
# save = file.path(savefolder, 'CHELSA_bio10_1_RichnessCutScaled.tif'))
# scaleGL(layername = 'bio10_4', data = richness, load = file.path(folder, 'CHELSA_bio10_4.tif'),
# save = file.path(savefolder, 'CHELSA_bio10_4_RichnessCutScaled.tif'))
# scaleGL(layername = 'bio10_7', data = richness, load = file.path(folder, 'CHELSA_bio10_7.tif'),
# save = file.path(savefolder, 'CHELSA_bio10_7_RichnessCutScaled.tif'))
#
# scaleGL(layername = 'bio10_12', data = richness, load = file.path(folder, 'CHELSA_bio10_12.tif'),
# save = file.path(savefolder, 'CHELSA_bio10_12_RichnessCutScaled.tif'))
# scaleGL(layername = 'bio10_15', data = richness, load = file.path(folder, 'CHELSA_bio10_15.tif'),
# save = file.path(savefolder, 'CHELSA_bio10_15_RichnessCutScaled.tif'))
#
# scaleGL(layername = 'Aridity', data = richness, load = file.path(folder, 'ai_yr_TIF.tif'),
# save = file.path(savefolder, 'Aridity_RichnessScaled.tif'))
# scaleGL(layername = 'PETyr', data = richness, load = file.path(folder, 'pet_he_yr_TIF.tif'),
# save = file.path(savefolder, 'PETyr_RichnessScaled.tif'))
# scaleGL(layername = 'PET_SD', data = richness, load = file.path(folder, 'pet_he_SD.tif'),
# save = file.path(savefolder, 'PETSD_RichnessScaled.tif'))
# scaleGL(layername = 'elevation', data = richness, load = file.path(folder, 'elevation.tif'),
# save = file.path(savefolder, 'elevation_RichnessScaled.tif'))
#
# ## Abundance
# print("Calculating abundance layers")
# scaleGL(layername = "ORCDRC", data = abundance, load = file.path(folder, "ORCDRC_weighted.tif"),
# save = file.path(savefolder, 'ORCDRC_AbundanceCutScaled.tif'))
# scaleGL(layername = "PHIHOX", data = abundance, load = file.path(folder, "PHIHOX_weighted.tif"),
# save = file.path(savefolder, 'PHIHOX_AbundanceCutScaled.tif'))
# scaleGL(layername = "CLYPPT", data = abundance, load = file.path(folder, "CLYPPT_weighted.tif"),
# save = file.path(savefolder, 'CLYPPT_AbundanceCutScaled.tif'))
# scaleGL(layername = "SLTPPT", data = abundance, load = file.path(folder, "SLTPPT_weighted.tif"),
# save = file.path(savefolder, 'SLTPPT_AbundanceCutScaled.tif'))
# scaleGL(layername = "CECSOL", data = abundance, load = file.path(folder, "CECSOL_weighted.tif"),
# save = file.path(savefolder, 'CECSOL_AbundanceCutScaled.tif'))
#
# scaleGL(layername = 'bio10_1', data = abundance, load = file.path(folder, 'CHELSA_bio10_1.tif'),
# save = file.path(savefolder, 'CHELSA_bio10_1_AbundanceCutScaled.tif'))
# scaleGL(layername = 'bio10_4', data = abundance, load = file.path(folder, 'CHELSA_bio10_4.tif'),
# save = file.path(savefolder, 'CHELSA_bio10_4_AbundanceCutScaled.tif'))
# scaleGL(layername = 'bio10_7', data = abundance, load = file.path(folder, 'CHELSA_bio10_7.tif'),
# save = file.path(savefolder, 'CHELSA_bio10_7_AbundanceCutScaled.tif'))
# scaleGL(layername = 'bio10_12', data = abundance, load = file.path(folder, 'CHELSA_bio10_12.tif'),
# save = file.path(savefolder, 'CHELSA_bio10_12_AbundanceCutScaled.tif'))
# scaleGL(layername = 'bio10_15', data = abundance, load = file.path(folder, 'CHELSA_bio10_15.tif'),
# save = file.path(savefolder, 'CHELSA_bio10_15_AbundanceCutScaled.tif'))
#
# scaleGL(layername = 'Aridity', data = abundance, load = file.path(folder, 'ai_yr_TIF.tif'),
# save = file.path(savefolder, 'Aridity_AbundanceScaled.tif'))
# scaleGL(layername = 'PETyr', data = abundance, load = file.path(folder, 'pet_he_yr_TIF.tif'),
# save = file.path(savefolder, 'PETyr_AbundanceScaled.tif'))
# scaleGL(layername = 'PET_SD', data = abundance, load = file.path(folder, 'pet_he_SD.tif'),
# save = file.path(savefolder, 'PETSD_AbundanceScaled.tif'))
# scaleGL(layername = 'elevation', data = abundance, load = file.path(folder, 'elevation.tif'),
# save = file.path(savefolder, 'elevation_AbundanceScaled.tif'))
#
# Biomass
print("Calculating biomass layers")
scaleGL(layername = "ORCDRC", data = biomass, load = file.path(folder, "ORCDRC_weighted.tif"),
save = file.path(savefolder, 'ORCDRC_BiomassCutScaled.tif'))
scaleGL(layername = "PHIHOX", data = biomass, load = file.path(folder, "PHIHOX_weighted.tif"),
save = file.path(savefolder, 'PHIHOX_BiomassCutScaled.tif'))
scaleGL(layername = "CLYPPT", data = biomass, load = file.path(folder, "CLYPPT_weighted.tif"),
save = file.path(savefolder, 'CLYPPT_BiomassCutScaled.tif'))
scaleGL(layername = "SLTPPT", data = biomass, load = file.path(folder, "SLTPPT_weighted.tif"),
save = file.path(savefolder, 'SLTPPT_BiomassCutScaled.tif'))
scaleGL(layername = "CECSOL", data = biomass, load = file.path(folder, "CECSOL_weighted.tif"),
save = file.path(savefolder, 'CECSOL_BiomassCutScaled.tif'))
scaleGL(layername = 'bio10_1', data = biomass, load = file.path(folder, 'CHELSA_bio10_1.tif'),
save = file.path(savefolder, 'CHELSA_bio10_1_BiomassCutScaled.tif'))
scaleGL(layername = 'bio10_4', data = biomass, load = file.path(folder, 'CHELSA_bio10_4.tif'),
save = file.path(savefolder, 'CHELSA_bio10_4_BiomassCutScaled.tif'))
scaleGL(layername = 'bio10_7', data = biomass, load = file.path(folder, 'CHELSA_bio10_7.tif'),
save = file.path(savefolder, 'CHELSA_bio10_7_BiomassCutScaled.tif'))
scaleGL(layername = 'bio10_12', data = biomass, load = file.path(folder, 'CHELSA_bio10_12.tif'),
save = file.path(savefolder, 'CHELSA_bio10_12_BiomassCutScaled.tif'))
scaleGL(layername = 'bio10_15', data = biomass, load = file.path(folder, 'CHELSA_bio10_15.tif'),
save = file.path(savefolder, 'CHELSA_bio10_15_BiomassCutScaled.tif'))
scaleGL(layername = 'Aridity', data = biomass, load = file.path(folder, 'ai_yr_TIF.tif'),
save = file.path(savefolder, 'Aridity_BiomassScaled.tif'))
scaleGL(layername = 'PETyr', data = biomass, load = file.path(folder, 'pet_he_yr_TIF.tif'),
save = file.path(savefolder, 'PETyr_BiomassScaled.tif'))
scaleGL(layername = 'PET_SD', data = biomass, load = file.path(folder, 'pet_he_SD.tif'),
save = file.path(savefolder, 'PETSD_BiomassScaled.tif'))
scaleGL(layername = 'elevation', data = biomass, load = file.path(folder, 'elevation.tif'),
save = file.path(savefolder, 'elevation_BiomassScaled.tif'))
print("Done!")
|
/PreparingGlobalLayers/PrepareGlobalLayers_biomass.R
|
permissive
|
MaximilianPi/GlobalEWDiversity
|
R
| false | false | 10,432 |
r
|
library(raster)
## To stop raster using the tmp dir which is slow and fills up
rasterOptions(tmpdir = "/work/phillips", chunksize = 524288, maxmemory = 134217728)
scaleGL <- function(layername, data, load = ".", save = "."){
if(!(layername %in% names(data))) stop("Layername must be in data")
print(layername)
dat <- data[,which(names(data) == layername)]
min <- min(dat, na.rm = TRUE)
max <- max(dat, na.rm = TRUE)
tif <- raster(load)
dividby10 <- c("bio10_1","bio10_5",
"bio10_6", "bio10_7","bio10_8","bio10_9","bio10_10",
"bio10_11", "PHIHOX", "ORCDRC")
if(layername %in% dividby10){
print("Dividing by 10")
tif <- tif/10
}
print("Capping minimum value")
tif[tif<min] <- min
print("Capping maximum value")
tif[tif>max] <- max
print("Scaling tif")
tif <- scale(tif)
print("Saving tif")
writeRaster(tif, save, format = "GTiff")
# do.call(file.remove, list(list.files(dirname(rasterTmpFile()), full.names = TRUE)))
rm(tif)
}
calculateWeightedMeanGL <- function(folder, savefolder){
# tifs <- c("PHIHOX", "CLYPPT", "SLTPPT", "CECSOL", "ORCDRC") ## "SNDPPT", "TAXNWRB_1"
tifs <- "ORCDRC"
print(tifs)
layers <- c("sl1","sl2", "sl3", "sl4")
weight <- c(0.001, 0.05, 0.1, 0.15)
filenames <- list.files(folder)
for(t in tifs){
print(paste("Calculating weighted mean of", t))
dl <- filenames[grep(t, filenames)]
dl <- dl[grep(paste(layers,collapse="|"), dl)]
print(paste("Creating stack of", file.path(folder, dl)))
s <- raster::stack(file.path(folder, dl))
## writeRaster(s, "hdf8_EVI.TIF")
print("Doing calculations")
weighted.mean(x = s, w = weight, na.rm=FALSE,
filename=file.path(savefolder, paste(t, "_weighted.tif", sep ="")), format = "GTiff")
rm(s)
# do.call(file.remove, list(list.files(dirname(rasterTmpFile()), full.names = TRUE)))
}
}
args <- commandArgs(trailingOnly = TRUE)
folder <- args[1] # data_dir
savefolder <- args[2] # output_dir
date <- args[3]
site_dir <- args[4]
print(folder)
print(savefolder)
print(date)
print(site_dir)
list.files(folder)
# folder <- 'D:/Helens/sWorm'
# savefolder <- folder
# date <- date
# print("Reclassifying Snow Layer")
#
# r <- raster(file.path(folder, "snow_2015_sum.tif"))
#
# print("Changing values")
# r[r > 3] <- 4 # Anything above 3
#
# print("Saving!")
# r <- writeRaster(r, filename=file.path(savefolder, "Snow_newValues.tif"), format="GTiff", overwrite=TRUE)
#
#
#
print("Loading datasets")
#richness <- read.csv(file.path(site_dir, paste('sitesRichness_', date, '.csv', sep="")))
#abundance <- read.csv(file.path(site_dir, paste('sitesAbundance_', date, '.csv', sep="")))
biomass <- read.csv(file.path(site_dir, paste('sitesBiomass_', date, '.csv', sep="")))
# print("Calculating the weighted mean of soil data")
# calculateWeightedMeanGL(folder, savefolder)
# print("Calculating richness layers")
# # Richness
#
# scaleGL(layername = "ORCDRC", data = richness, load = file.path(folder, "ORCDRC_weighted.tif"),
# save = file.path(savefolder, 'ORCDRC_RichnessCutScaled.tif'))
# scaleGL(layername = "PHIHOX", data = richness, load = file.path(folder, "PHIHOX_weighted.tif"),
# save = file.path(savefolder, 'PHIHOX_RichnessCutScaled.tif'))
# scaleGL(layername = "CLYPPT", data = richness, load = file.path(folder, "CLYPPT_weighted.tif"),
# save = file.path(savefolder, 'CLYPPT_RichnessCutScaled.tif'))
# scaleGL(layername = "SLTPPT", data = richness, load = file.path(folder, "SLTPPT_weighted.tif"),
# save = file.path(savefolder, 'SLTPPT_RichnessCutScaled.tif'))
# scaleGL(layername = "CECSOL", data = richness, load = file.path(folder, "CECSOL_weighted.tif"),
# save = file.path(savefolder, 'CECSOL_RichnessCutScaled.tif'))
#
# scaleGL(layername = 'bio10_1', data = richness, load = file.path(folder, 'CHELSA_bio10_1.tif'),
# save = file.path(savefolder, 'CHELSA_bio10_1_RichnessCutScaled.tif'))
# scaleGL(layername = 'bio10_4', data = richness, load = file.path(folder, 'CHELSA_bio10_4.tif'),
# save = file.path(savefolder, 'CHELSA_bio10_4_RichnessCutScaled.tif'))
# scaleGL(layername = 'bio10_7', data = richness, load = file.path(folder, 'CHELSA_bio10_7.tif'),
# save = file.path(savefolder, 'CHELSA_bio10_7_RichnessCutScaled.tif'))
#
# scaleGL(layername = 'bio10_12', data = richness, load = file.path(folder, 'CHELSA_bio10_12.tif'),
# save = file.path(savefolder, 'CHELSA_bio10_12_RichnessCutScaled.tif'))
# scaleGL(layername = 'bio10_15', data = richness, load = file.path(folder, 'CHELSA_bio10_15.tif'),
# save = file.path(savefolder, 'CHELSA_bio10_15_RichnessCutScaled.tif'))
#
# scaleGL(layername = 'Aridity', data = richness, load = file.path(folder, 'ai_yr_TIF.tif'),
# save = file.path(savefolder, 'Aridity_RichnessScaled.tif'))
# scaleGL(layername = 'PETyr', data = richness, load = file.path(folder, 'pet_he_yr_TIF.tif'),
# save = file.path(savefolder, 'PETyr_RichnessScaled.tif'))
# scaleGL(layername = 'PET_SD', data = richness, load = file.path(folder, 'pet_he_SD.tif'),
# save = file.path(savefolder, 'PETSD_RichnessScaled.tif'))
# scaleGL(layername = 'elevation', data = richness, load = file.path(folder, 'elevation.tif'),
# save = file.path(savefolder, 'elevation_RichnessScaled.tif'))
#
# ## Abundance
# print("Calculating abundance layers")
# scaleGL(layername = "ORCDRC", data = abundance, load = file.path(folder, "ORCDRC_weighted.tif"),
# save = file.path(savefolder, 'ORCDRC_AbundanceCutScaled.tif'))
# scaleGL(layername = "PHIHOX", data = abundance, load = file.path(folder, "PHIHOX_weighted.tif"),
# save = file.path(savefolder, 'PHIHOX_AbundanceCutScaled.tif'))
# scaleGL(layername = "CLYPPT", data = abundance, load = file.path(folder, "CLYPPT_weighted.tif"),
# save = file.path(savefolder, 'CLYPPT_AbundanceCutScaled.tif'))
# scaleGL(layername = "SLTPPT", data = abundance, load = file.path(folder, "SLTPPT_weighted.tif"),
# save = file.path(savefolder, 'SLTPPT_AbundanceCutScaled.tif'))
# scaleGL(layername = "CECSOL", data = abundance, load = file.path(folder, "CECSOL_weighted.tif"),
# save = file.path(savefolder, 'CECSOL_AbundanceCutScaled.tif'))
#
# scaleGL(layername = 'bio10_1', data = abundance, load = file.path(folder, 'CHELSA_bio10_1.tif'),
# save = file.path(savefolder, 'CHELSA_bio10_1_AbundanceCutScaled.tif'))
# scaleGL(layername = 'bio10_4', data = abundance, load = file.path(folder, 'CHELSA_bio10_4.tif'),
# save = file.path(savefolder, 'CHELSA_bio10_4_AbundanceCutScaled.tif'))
# scaleGL(layername = 'bio10_7', data = abundance, load = file.path(folder, 'CHELSA_bio10_7.tif'),
# save = file.path(savefolder, 'CHELSA_bio10_7_AbundanceCutScaled.tif'))
# scaleGL(layername = 'bio10_12', data = abundance, load = file.path(folder, 'CHELSA_bio10_12.tif'),
# save = file.path(savefolder, 'CHELSA_bio10_12_AbundanceCutScaled.tif'))
# scaleGL(layername = 'bio10_15', data = abundance, load = file.path(folder, 'CHELSA_bio10_15.tif'),
# save = file.path(savefolder, 'CHELSA_bio10_15_AbundanceCutScaled.tif'))
#
# scaleGL(layername = 'Aridity', data = abundance, load = file.path(folder, 'ai_yr_TIF.tif'),
# save = file.path(savefolder, 'Aridity_AbundanceScaled.tif'))
# scaleGL(layername = 'PETyr', data = abundance, load = file.path(folder, 'pet_he_yr_TIF.tif'),
# save = file.path(savefolder, 'PETyr_AbundanceScaled.tif'))
# scaleGL(layername = 'PET_SD', data = abundance, load = file.path(folder, 'pet_he_SD.tif'),
# save = file.path(savefolder, 'PETSD_AbundanceScaled.tif'))
# scaleGL(layername = 'elevation', data = abundance, load = file.path(folder, 'elevation.tif'),
# save = file.path(savefolder, 'elevation_AbundanceScaled.tif'))
#
# Biomass
print("Calculating biomass layers")
scaleGL(layername = "ORCDRC", data = biomass, load = file.path(folder, "ORCDRC_weighted.tif"),
save = file.path(savefolder, 'ORCDRC_BiomassCutScaled.tif'))
scaleGL(layername = "PHIHOX", data = biomass, load = file.path(folder, "PHIHOX_weighted.tif"),
save = file.path(savefolder, 'PHIHOX_BiomassCutScaled.tif'))
scaleGL(layername = "CLYPPT", data = biomass, load = file.path(folder, "CLYPPT_weighted.tif"),
save = file.path(savefolder, 'CLYPPT_BiomassCutScaled.tif'))
scaleGL(layername = "SLTPPT", data = biomass, load = file.path(folder, "SLTPPT_weighted.tif"),
save = file.path(savefolder, 'SLTPPT_BiomassCutScaled.tif'))
scaleGL(layername = "CECSOL", data = biomass, load = file.path(folder, "CECSOL_weighted.tif"),
save = file.path(savefolder, 'CECSOL_BiomassCutScaled.tif'))
scaleGL(layername = 'bio10_1', data = biomass, load = file.path(folder, 'CHELSA_bio10_1.tif'),
save = file.path(savefolder, 'CHELSA_bio10_1_BiomassCutScaled.tif'))
scaleGL(layername = 'bio10_4', data = biomass, load = file.path(folder, 'CHELSA_bio10_4.tif'),
save = file.path(savefolder, 'CHELSA_bio10_4_BiomassCutScaled.tif'))
scaleGL(layername = 'bio10_7', data = biomass, load = file.path(folder, 'CHELSA_bio10_7.tif'),
save = file.path(savefolder, 'CHELSA_bio10_7_BiomassCutScaled.tif'))
scaleGL(layername = 'bio10_12', data = biomass, load = file.path(folder, 'CHELSA_bio10_12.tif'),
save = file.path(savefolder, 'CHELSA_bio10_12_BiomassCutScaled.tif'))
scaleGL(layername = 'bio10_15', data = biomass, load = file.path(folder, 'CHELSA_bio10_15.tif'),
save = file.path(savefolder, 'CHELSA_bio10_15_BiomassCutScaled.tif'))
scaleGL(layername = 'Aridity', data = biomass, load = file.path(folder, 'ai_yr_TIF.tif'),
save = file.path(savefolder, 'Aridity_BiomassScaled.tif'))
scaleGL(layername = 'PETyr', data = biomass, load = file.path(folder, 'pet_he_yr_TIF.tif'),
save = file.path(savefolder, 'PETyr_BiomassScaled.tif'))
scaleGL(layername = 'PET_SD', data = biomass, load = file.path(folder, 'pet_he_SD.tif'),
save = file.path(savefolder, 'PETSD_BiomassScaled.tif'))
scaleGL(layername = 'elevation', data = biomass, load = file.path(folder, 'elevation.tif'),
save = file.path(savefolder, 'elevation_BiomassScaled.tif'))
print("Done!")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bamUtils.R
\name{varbase}
\alias{varbase}
\title{Returns variant bases and ranges from GRanges or GappedAlignments input}
\usage{
varbase(reads, soft = TRUE, verbose = TRUE)
}
\arguments{
\item{reads}{GenomicRanges or GRangesList or GappedAlignments or data.frame/data.table reads to extract variants from}
\item{soft}{boolean Flag to include soft-clipped matches (default == TRUE)}
\item{verbose}{boolean verbose flag (default == TRUE)}
}
\description{
Takes GRanges or GappedAlignments object "reads" and uses cigar, MD, seq fields
to return variant bases and ranges.
Returns a GRangesList (of the same length as the input) of variant base positions with a character vector
$varbase field populated with the variant bases for each GRanges item in grl[[k]],
with the following handling for insertion, deletion, and substitution GRanges:
Substitutions: nchar(gr$varbase) = width(gr) of the corresponding var
Insertions: nchar(gr$varbase) >= 1, width(gr) == 0
Deletions: gr$varbase = '', width(gr) >= 1
Each GRanges also has a $type flag which shows the cigar string code for the event, i.e.
S = soft clip --> varbase represents clipped bases
I = insertion --> varbase represents inserted bases
D = deletion --> varbase is empty
X = mismatch --> varbase represents mismatched bases
}
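\examples{
\dontrun{
## Illustrative sketch added here, not part of the original documentation.
## 'reads.bam' is a hypothetical file; the MD tag and query sequences are
## assumed to be attached so that varbase() can use the cigar/MD/seq fields.
library(GenomicAlignments)
reads <- readGAlignments("reads.bam",
                         param = Rsamtools::ScanBamParam(tag = "MD", what = "seq"))
vb <- varbase(reads, soft = TRUE, verbose = TRUE)
vb[[1]]$varbase # variant bases of the first read
vb[[1]]$type # cigar codes (S/I/D/X) of those variants
}
}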
|
/man/varbase.Rd
|
no_license
|
jimhester/bamUtils
|
R
| false | true | 1,346 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bamUtils.R
\name{varbase}
\alias{varbase}
\title{Returns variant bases and ranges from GRanges or GappedAlignments input}
\usage{
varbase(reads, soft = TRUE, verbose = TRUE)
}
\arguments{
\item{reads}{GenomicRanges or GRangesList or GappedAlignments or data.frame/data.table reads to extract variants from}
\item{soft}{boolean Flag to include soft-clipped matches (default == TRUE)}
\item{verbose}{boolean verbose flag (default == TRUE)}
}
\description{
Takes GRanges or GappedAlignments object "reads" and uses cigar, MD, seq fields
to return variant bases and ranges.
Returns a GRangesList (of the same length as the input) of variant base positions with a character vector
$varbase field populated with the variant bases for each GRanges item in grl[[k]],
with the following handling for insertion, deletion, and substitution GRanges:
Substitutions: nchar(gr$varbase) = width(gr) of the corresponding var
Insertions: nchar(gr$varbase) >= 1, width(gr) == 0
Deletions: gr$varbase = '', width(gr) >= 1
Each GRanges also has a $type flag which shows the cigar string code for the event, i.e.
S = soft clip --> varbase represents clipped bases
I = insertion --> varbase represents inserted bases
D = deletion --> varbase is empty
X = mismatch --> varbase represents mismatched bases
}
|
# installs dependencies, runs R CMD check, runs covr::codecov()
do_package_checks(error_on="never")
if (ci_on_ghactions() && ci_has_env("BUILD_PKGDOWN")) {
# creates pkgdown site and pushes to gh-pages branch
# only for the runner with the "BUILD_PKGDOWN" env var set
do_pkgdown()
}
|
/tic.R
|
permissive
|
vjcitn/alr
|
R
| false | false | 290 |
r
|
# installs dependencies, runs R CMD check, runs covr::codecov()
do_package_checks(error_on="never")
if (ci_on_ghactions() && ci_has_env("BUILD_PKGDOWN")) {
# creates pkgdown site and pushes to gh-pages branch
# only for the runner with the "BUILD_PKGDOWN" env var set
do_pkgdown()
}
|
skip_on_cran()
test_df <-
tibble::tibble(
v1 = factor(c("no")),
v2 = factor(c("yes", "no")),
v3 = factor(c("YeS", "No"), levels = c("YeS", "No"))
)
test_that("assign_dichotomous_value_one for yes/no factors", {
expect_equal(
assign_dichotomous_value_one(
data = test_df, variable = "v2",
summary_type = "dichotomous",
class = "factor",
value = NULL
),
"yes"
)
})
test_that("input checks", {
expect_error(
assign_dichotomous_value(
data = trial, variable = "stage",
summary_type = "dichotomous",
class = "factor", value = NULL
),
"'stage' is dichotomous, but I was unable to determine the level.*"
)
})
|
/tests/testthat/test-assign_dichotomous_value.R
|
permissive
|
raphidoc/gtsummary
|
R
| false | false | 695 |
r
|
skip_on_cran()
test_df <-
tibble::tibble(
v1 = factor(c("no")),
v2 = factor(c("yes", "no")),
v3 = factor(c("YeS", "No"), levels = c("YeS", "No"))
)
test_that("assign_dichotomous_value_one for yes/no factors", {
expect_equal(
assign_dichotomous_value_one(
data = test_df, variable = "v2",
summary_type = "dichotomous",
class = "factor",
value = NULL
),
"yes"
)
})
test_that("input checks", {
expect_error(
assign_dichotomous_value(
data = trial, variable = "stage",
summary_type = "dichotomous",
class = "factor", value = NULL
),
"'stage' is dichotomous, but I was unable to determine the level.*"
)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sjPlotInteractions.R
\name{sjp.int}
\alias{sjp.int}
\title{Plot interaction effects of (generalized) linear (mixed) models}
\usage{
sjp.int(fit, type = "cond", int.term = NULL, int.plot.index = NULL,
diff = FALSE, moderatorValues = "minmax", swapPredictors = FALSE,
plevel = 0.05, title = NULL, fillColor = "grey", fillAlpha = 0.3,
geom.colors = "Set1", geom.size = NULL, axisTitle.x = NULL,
axisTitle.y = NULL, axisLabels.x = NULL, legendTitle = NULL,
legendLabels = NULL, showValueLabels = FALSE, breakTitleAt = 50,
breakLegendLabelsAt = 20, breakLegendTitleAt = 20, axisLimits.x = NULL,
axisLimits.y = NULL, gridBreaksAt = NULL, showCI = FALSE,
valueLabel.digits = 2, facet.grid = FALSE, printPlot = TRUE)
}
\arguments{
\item{fit}{the fitted (generalized) linear (mixed) model object, including interaction terms. Accepted model
classes are
\itemize{
\item linear models (\code{\link{lm}})
\item generalized linear models (\code{\link{glm}})
\item linear mixed effects models (\code{\link[lme4]{lmer}})
\item generalized linear mixed effects models (\code{\link[lme4]{glmer}})
\item non-linear mixed effects models (\code{\link[lme4]{nlmer}})
\item linear mixed effects models (\code{\link[nlme]{lme}}, but only for \code{type = "eff"})
\item generalized least squares models (\code{\link[nlme]{gls}}, but only for \code{type = "eff"})
\item panel data estimators (\code{plm})
}}
\item{type}{interaction plot type. Use one of following values:
\describe{
\item{\code{type = "cond"}}{(default) plots the mere \emph{change} of the moderating effect on the response value (conditional effect). See 'Details'.}
\item{\code{type = "eff"}}{plots the overall moderation effect on the response value. See 'Details'.}
\item{\code{type = "emm"}}{plots the estimated marginal means (least square means). If this type is chosen, not all function arguments are applicable. See 'Details'.}
}}
\item{int.term}{select interaction term of \code{fit} (as character), which should be plotted
when using \code{type = "eff"}. By default, this argument can be ignored
(i.e. \code{int.term = NULL}). See 'Details'.}
\item{int.plot.index}{numeric vector with index numbers that indicate which
interaction terms should be plotted in case the \code{fit} has more than
one interaction. By default, this values is \code{NULL}, hence all interactions
are plotted.}
\item{diff}{if \code{FALSE} (default), the minimum and maximum interaction effects of the moderating variable
is shown (one line each). if \code{TRUE}, only the difference between minimum and maximum interaction effect
is shown (single line). Only applies to \code{type = "cond"}.}
\item{moderatorValues}{indicates which values of the moderator variable should be used when plotting the effects of the
independent variable on the dependent variable.
\describe{
\item{\code{"minmax"}}{(default) minimum and maximum values (lower and upper bounds) of the moderator are used to plot the interaction between independent variable and moderator.}
\item{\code{"meansd"}}{uses the mean value of the moderator as well as one standard deviation below and above mean value to plot the effect of the moderator on the independent variable (following the convention suggested by Cohen and Cohen and popularized by Aiken and West, i.e. using the mean, the value one standard deviation above, and the value one standard deviation below the mean as values of the moderator, see \href{http://www.theanalysisfactor.com/3-tips-interpreting-moderation/}{Grace-Martin K: 3 Tips to Make Interpreting Moderation Effects Easier}).}
\item{\code{"zeromax"}}{is similar to the \code{"minmax"} option, however, \code{0} is always used as minimum value for the moderator. This may be useful for predictors that don't have an empirical zero-value, but absence of moderation should be simulated by using 0 as minimum.}
\item{\code{"quart"}}{calculates and uses the quartiles (lower, median and upper) of the moderator value.}
}}
\item{swapPredictors}{if \code{TRUE}, the predictor on the x-axis and the moderator value in an interaction are
swapped. For \code{type = "eff"}, the first interaction term is used as moderator and the second term
is plotted at the x-axis. For \code{type = "cond"}, the interaction's predictor with less unique values is
printed along the x-axis. Default is \code{FALSE}, so the second predictor in an interaction, respectively
the predictor with more unique values is printed along the x-axis.}
\item{plevel}{indicates at which p-value an interaction term is considered as \emph{significant},
i.e. at which p-level an interaction term will be considered for plotting. Default is
0.05 (5 percent), hence, non-significant interactions are excluded by default. This
argument does not apply to \code{type = "eff"}.}
\item{title}{a default title used for the plots. Should be a character vector
of same length as interaction plots to be plotted. Default value is \code{NULL}, which means that each plot's title
includes the dependent variable as well as the names of the interaction terms.}
\item{fillColor}{fill color of the shaded area between the minimum and maximum lines. Default is \code{"grey"}.
Either set \code{fillColor} to \code{NULL} or use 0 for \code{fillAlpha} if you want to hide the shaded area.}
\item{fillAlpha}{alpha value (transparency) of the shaded area between the minimum and maximum lines. Default is 0.3.
Use either 0 or set \code{fillColor} to \code{NULL} if you want to hide the shaded area.}
\item{geom.colors}{vector of color values. First value is the color of the line indicating the lower bound of
the interaction term (moderator value). Second value is the color of the line indicating the upper bound of
the interaction term (moderator value). Third value, if applicable, is the color of the line indicating the
mean value of the interaction term (moderator value). Third value is only used when
\code{moderatorValues = "meansd"}. Or, if \code{diff = TRUE}, only one color value for the
line indicating the upper difference between lower and upper bound of interaction terms.}
\item{geom.size}{size resp. width of the geoms (bar width, line thickness or point size, depending on \code{type} argument).
Note that bar and bin widths mostly need smaller values than dot sizes (i.e. if \code{type = "dots"}).
By default, \code{geom.size = NULL}, which means that this argument is automatically
adjusted depending on the plot type.}
\item{axisTitle.x}{a default title used for the x-axis. Should be a character vector
of same length as interaction plots to be plotted. Default value is \code{NULL},
which means that each plot's x-axis uses the predictor's name as title.}
\item{axisTitle.y}{a default title used for the y-axis. Default value is \code{NULL},
which means that each plot's y-axis uses the dependent variable's name as title.}
\item{axisLabels.x}{character vector with value labels of the repeated measure variable
that are used for labelling the x-axis.}
\item{legendTitle}{title of the diagram's legend. A character vector of same length as
amount of interaction plots to be plotted (i.e. one vector element for each
plot's legend title).}
\item{legendLabels}{labels for the guide/legend. Either a character vector of same length as
amount of legend labels of the plot, or a \code{list} of character vectors, if more than one
interaction plot is plotted (i.e. one vector of legend labels for each interaction plot).
Default is \code{NULL}, so the name of the predictor with min/max-effect is used
as legend label.}
\item{showValueLabels}{if \code{TRUE}, value labels are plotted along the lines. Default is \code{FALSE}.}
\item{breakTitleAt}{determines how many chars of the plot title are displayed in
one line and when a line break is inserted into the title.}
\item{breakLegendLabelsAt}{determines how many chars of the legend labels are
displayed in one line and when a line break is inserted.}
\item{breakLegendTitleAt}{determines how many chars of the legend's title
are displayed in one line and when a line break is inserted.}
\item{axisLimits.y}{numeric vector of length two, defining lower and upper axis limits
of the y scale. By default, this argument is set to \code{NULL}, i.e. the
y-axis ranges from 0 to required maximum.}
\item{gridBreaksAt}{set breaks for the axis, i.e. at every \code{gridBreaksAt}'th
position a major grid is being printed.}
\item{showCI}{may be a numeric or logical value. If \code{showCI} is logical and
\code{TRUE}, a 95\% confidence region will be plotted. If \code{showCI}
is numeric, it must be a number between 0 and 1, indicating the proportion
for the confidence region (e.g. \code{showCI = 0.9} plots a 90\% CI).
Only applies to \code{type = "emm"} or \code{type = "eff"}.}
\item{valueLabel.digits}{the amount of digits of the displayed value labels. Defaults to 2.}
\item{facet.grid}{\code{TRUE} for faceted plots instead of an integrated single plot.}
\item{printPlot}{logical, if \code{TRUE} (default), plots the results as graph. Use \code{FALSE} if you don't
want to plot any graphs. In either case, the ggplot-object will be returned as value.}
}
\value{
(Invisibly) returns the ggplot-objects with the complete plot-list (\code{plot.list})
as well as the data frames that were used for setting up the ggplot-objects (\code{df.list}).
}
\description{
Plot regression (predicted values) or probability lines (predicted probabilities) of
significant interaction terms to better understand effects
of moderations in regression models. This function accepts following fitted model classes:
\itemize{
\item linear models (\code{\link{lm}})
\item generalized linear models (\code{\link{glm}})
\item linear mixed effects models (\code{\link[lme4]{lmer}})
\item generalized linear mixed effects models (\code{\link[lme4]{glmer}})
\item non-linear mixed effects models (\code{\link[lme4]{nlmer}})
\item linear mixed effects models (\code{\link[nlme]{lme}}, but only for \code{type = "eff"})
\item generalized least squares models (\code{\link[nlme]{gls}}, but only for \code{type = "eff"})
\item panel data estimators (\code{plm})
}
Note that besides the interaction terms, the single predictors of each interaction (main effects)
must also be included in the fitted model. Thus, \code{lm(dep ~ pred1 * pred2)} will work,
but \code{lm(dep ~ pred1:pred2)} won't!
}
\details{
\describe{
\item{\code{type = "cond"}}{plots the effective \emph{change} or \emph{impact}
(conditional effect) on a dependent variable of a moderation effect, as
described in \href{http://www.theanalysisfactor.com/clarifications-on-interpreting-interactions-in-regression/}{Grace-Martin},
i.e. the difference of the moderation effect on the dependent variable in \emph{presence}
and \emph{absence} of the moderating effect (\emph{simple slope} plot or
\emph{conditional effect}, see \href{http://imaging.mrc-cbu.cam.ac.uk/statswiki/FAQ/SobelTest?action=AttachFile&do=get&target=process.pdf}{Hayes 2012}).
Hence, this plot type may be used especially for \emph{binary or dummy coded}
moderator values (see also \href{http://jee3.web.rice.edu/interaction-overconfidence.pdf}{Esarey and Summer 2015}).
This type \emph{does not} show the overall effect of interactions on the result of Y. Use
\code{type = "eff"} for effect displays similar to the \code{\link[effects]{effect}}-function
from the \pkg{effects}-package.
}
\item{\code{type = "eff"}}{plots the overall effects (marginal effects) of the interaction, with all remaining
covariates set to the mean. Effects are calculated using the \code{\link[effects]{effect}}-
function from the \pkg{effects}-package. \cr \cr
Following arguments \emph{do not} apply to this function: \code{diff}, \code{axisLabels.x}.
}
\item{\code{type = "emm"}}{plots the estimated marginal means of repeated measures designs,
like two-way repeated measures AN(C)OVA. In detail, this type plots estimated marginal means
(also called \emph{least square means} or \emph{marginal means}) of (significant) interaction terms.
The fitted models may be linear (mixed effects)
models of class \code{\link{lm}} or \code{\link[lme4]{merMod}}. This function may be used, for example,
to plot differences in interventions between control and treatment groups over multiple time points.
\itemize{
\item Following parameters apply to this plot type: \code{showCI}, \code{valueLabel.digits} and \code{axisLabels.x}.
\item Following arguments \emph{do not} apply to this function: \code{int.term}, \code{int.plot.index}, \code{diff}, \code{moderatorValues}, \code{fillColor}, \code{fillAlpha}.
}
}
}
The argument \code{int.term} only applies to \code{type = "eff"} and can be used
to select a specific interaction term of the model that should be plotted. The function
then calls \code{effect(int.term, fit)} to compute effects for this specific interaction
term only. This approach is recommended when the fitted model contains many observations
and/or variables, which may slow down the effect-computation dramatically. In such cases,
consider computing effects for selected interaction terms only with \code{int.term}.
See 'Examples'.
}
\note{
Note that besides the interaction terms, the single predictors of each interaction (main effects)
must also be included in the fitted model. Thus, \code{lm(dep ~ pred1 * pred2)} will work,
but \code{lm(dep ~ pred1:pred2)} won't! \cr \cr
For \code{type = "emm"}, all interaction terms have to be \code{\link{factor}}s!
Furthermore, for \code{type = "eff"}, predictors of interactions that are introduced first into the model
are used as grouping variable, while the latter predictor is printed along the x-axis
(i.e. lm(y~a+b+a:b) means that "a" is used as grouping variable and "b" is plotted along the x-axis).
}
\examples{
# Note that the data sets used in this example may not be perfectly suitable for
# fitting linear models. I just used them because they are part of the R-software.
# fit "dummy" model. Note that moderator should enter
# first the model, followed by predictor. Else, use
# argument "swapPredictors" to change predictor on
# x-axis with moderator
fit <- lm(weight ~ Diet * Time, data = ChickWeight)
# show summary to see significant interactions
summary(fit)
# plot regression line of interaction terms, including value labels
sjp.int(fit, type = "eff", showValueLabels = TRUE)
# load sample data set
library(sjmisc)
data(efc)
# create data frame with variables that should be included
# in the model
mydf <- data.frame(usage = efc$tot_sc_e,
sex = efc$c161sex,
education = efc$c172code,
burden = efc$neg_c_7,
dependency = efc$e42dep)
# convert gender predictor to factor
mydf$sex <- relevel(factor(mydf$sex), ref = "2")
# fit "dummy" model
fit <- lm(usage ~ .*., data = mydf)
summary(fit)
# plot interactions. note that type = "cond" only considers
# significant interactions by default. use "plevel" to
# adjust p-level sensitivity
sjp.int(fit, type = "cond")
# plot only selected interaction term for
# type = "eff"
sjp.int(fit, type = "eff", int.term = "sex*education")
# plot interactions, using mean and sd as moderator
# values to calculate interaction effect
sjp.int(fit, type = "eff", moderatorValues = "meansd")
sjp.int(fit, type = "cond", moderatorValues = "meansd")
# plot interactions, including those with p-value up to 0.1
sjp.int(fit,
type = "cond",
plevel = 0.1)
# -------------------------------
# Predictors for negative impact of care.
# Data from the EUROFAMCARE sample dataset
# -------------------------------
library(sjmisc)
data(efc)
# create binary response
y <- ifelse(efc$neg_c_7 < median(stats::na.omit(efc$neg_c_7)), 0, 1)
# create data frame for fitted model
mydf <- data.frame(y = as.factor(y),
sex = as.factor(efc$c161sex),
barthel = as.numeric(efc$barthtot))
# fit model
fit <- glm(y ~ sex * barthel,
data = mydf,
family = binomial(link = "logit"))
# plot interaction, increase p-level sensitivity
sjp.int(fit,
type = "eff",
legendLabels = get_labels(efc$c161sex),
plevel = 0.1)
sjp.int(fit,
type = "cond",
legendLabels = get_labels(efc$c161sex),
plevel = 0.1)
\dontrun{
# -------------------------------
# Plot estimated marginal means
# -------------------------------
# load sample data set
library(sjmisc)
data(efc)
# create data frame with variables that should be included
# in the model
mydf <- data.frame(burden = efc$neg_c_7,
sex = efc$c161sex,
education = efc$c172code)
# convert gender predictor to factor
mydf$sex <- factor(mydf$sex)
mydf$education <- factor(mydf$education)
# name factor levels and dependent variable
levels(mydf$sex) <- c("female", "male")
levels(mydf$education) <- c("low", "mid", "high")
mydf$burden <- set_label(mydf$burden, "care burden")
# fit "dummy" model
fit <- lm(burden ~ .*., data = mydf)
summary(fit)
# plot marginal means of interactions, no interaction found
sjp.int(fit, type = "emm")
# plot marginal means of interactions, including those with p-value up to 1
sjp.int(fit, type = "emm", plevel = 1)
# swap predictors
sjp.int(fit,
type = "emm",
plevel = 1,
swapPredictors = TRUE)
# -------------------------------
# Plot effects
# -------------------------------
# add continuous variable
mydf$barthel <- efc$barthtot
# re-fit model with continuous variable
fit <- lm(burden ~ .*., data = mydf)
# plot effects
sjp.int(fit, type = "eff", showCI = TRUE)
# plot effects, faceted
sjp.int(fit,
type = "eff",
int.plot.index = 3,
showCI = TRUE,
facet.grid = TRUE)}
}
\references{
\itemize{
\item Aiken and West (1991). Multiple Regression: Testing and Interpreting Interactions.
\item Brambor T, Clark WR and Golder M (2006) Understanding Interaction Models: Improving Empirical Analyses. Political Analysis 14: 63-82 \href{https://files.nyu.edu/mrg217/public/pa_final.pdf}{download}
\item Esarey J, Sumner JL (2015) Marginal Effects in Interaction Models: Determining and Controlling the False Positive Rate. \href{http://jee3.web.rice.edu/interaction-overconfidence.pdf}{download}
\item Fox J (2003) Effect displays in R for generalised linear models. Journal of Statistical Software 8:15, 1–27, \href{http://www.jstatsoft.org/v08/i15/}{<http://www.jstatsoft.org/v08/i15/>}
\item Hayes AF (2012) PROCESS: A versatile computational tool for observed variable mediation, moderation, and conditional process modeling [White paper] \href{http://imaging.mrc-cbu.cam.ac.uk/statswiki/FAQ/SobelTest?action=AttachFile&do=get&target=process.pdf}{download}
\item \href{http://www.theanalysisfactor.com/interpreting-interactions-in-regression/}{Grace-Martin K: Interpreting Interactions in Regression}
\item \href{http://www.theanalysisfactor.com/clarifications-on-interpreting-interactions-in-regression/}{Grace-Martin K: Clarifications on Interpreting Interactions in Regression}
\item \href{http://www.theanalysisfactor.com/3-tips-interpreting-moderation/}{Grace-Martin K: 3 Tips to Make Interpreting Moderation Effects Easier}
\item \href{http://www.theanalysisfactor.com/using-adjusted-means-to-interpret-moderators-in-analysis-of-covariance/}{Grace-Martin K: Using Adjusted Means to Interpret Moderators in Analysis of Covariance.}
}
}
\seealso{
\href{http://www.strengejacke.de/sjPlot/sjp.int/}{sjPlot manual: sjp.int}
}
|
/man/sjp.int.Rd
|
no_license
|
ashenkin/devel
|
R
| false | true | 20,423 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sjPlotInteractions.R
\name{sjp.int}
\alias{sjp.int}
\title{Plot interaction effects of (generalized) linear (mixed) models}
\usage{
sjp.int(fit, type = "cond", int.term = NULL, int.plot.index = NULL,
diff = FALSE, moderatorValues = "minmax", swapPredictors = FALSE,
plevel = 0.05, title = NULL, fillColor = "grey", fillAlpha = 0.3,
geom.colors = "Set1", geom.size = NULL, axisTitle.x = NULL,
axisTitle.y = NULL, axisLabels.x = NULL, legendTitle = NULL,
legendLabels = NULL, showValueLabels = FALSE, breakTitleAt = 50,
breakLegendLabelsAt = 20, breakLegendTitleAt = 20, axisLimits.x = NULL,
axisLimits.y = NULL, gridBreaksAt = NULL, showCI = FALSE,
valueLabel.digits = 2, facet.grid = FALSE, printPlot = TRUE)
}
\arguments{
\item{fit}{the fitted (generalized) linear (mixed) model object, including interaction terms. Accepted model
classes are
\itemize{
\item linear models (\code{\link{lm}})
\item generalized linear models (\code{\link{glm}})
\item linear mixed effects models (\code{\link[lme4]{lmer}})
\item generalized linear mixed effects models (\code{\link[lme4]{glmer}})
\item non-linear mixed effects models (\code{\link[lme4]{nlmer}})
\item linear mixed effects models (\code{\link[nlme]{lme}}, but only for \code{type = "eff"})
\item generalized least squares models (\code{\link[nlme]{gls}}, but only for \code{type = "eff"})
\item panel data estimators (\code{plm})
}}
\item{type}{interaction plot type. Use one of following values:
\describe{
\item{\code{type = "cond"}}{(default) plots the mere \emph{change} of the moderating effect on the response value (conditional effect). See 'Details'.}
\item{\code{type = "eff"}}{plots the overall moderation effect on the response value. See 'Details'.}
\item{\code{type = "emm"}}{plots the estimated marginal means (least square means). If this type is chosen, not all function arguments are applicable. See 'Details'.}
}}
\item{int.term}{select interaction term of \code{fit} (as character), which should be plotted
when using \code{type = "eff"}. By default, this argument can be ignored
(i.e. \code{int.term = NULL}). See 'Details'.}
\item{int.plot.index}{numeric vector with index numbers that indicate which
interaction terms should be plotted in case the \code{fit} has more than
one interaction. By default, this values is \code{NULL}, hence all interactions
are plotted.}
\item{diff}{if \code{FALSE} (default), the minimum and maximum interaction effects of the moderating variable
is shown (one line each). if \code{TRUE}, only the difference between minimum and maximum interaction effect
is shown (single line). Only applies to \code{type = "cond"}.}
\item{moderatorValues}{indicates which values of the moderator variable should be used when plotting the effects of the
independent variable on the dependent variable.
\describe{
\item{\code{"minmax"}}{(default) minimum and maximum values (lower and upper bounds) of the moderator are used to plot the interaction between independent variable and moderator.}
\item{\code{"meansd"}}{uses the mean value of the moderator as well as one standard deviation below and above mean value to plot the effect of the moderator on the independent variable (following the convention suggested by Cohen and Cohen and popularized by Aiken and West, i.e. using the mean, the value one standard deviation above, and the value one standard deviation below the mean as values of the moderator, see \href{http://www.theanalysisfactor.com/3-tips-interpreting-moderation/}{Grace-Martin K: 3 Tips to Make Interpreting Moderation Effects Easier}).}
\item{\code{"zeromax"}}{is similar to the \code{"minmax"} option, however, \code{0} is always used as minimum value for the moderator. This may be useful for predictors that don't have an empirical zero-value, but absence of moderation should be simulated by using 0 as minimum.}
\item{\code{"quart"}}{calculates and uses the quartiles (lower, median and upper) of the moderator value.}
}}
\item{swapPredictors}{if \code{TRUE}, the predictor on the x-axis and the moderator value in an interaction are
swapped. For \code{type = "eff"}, the first interaction term is used as moderator and the second term
is plotted at the x-axis. For \code{type = "cond"}, the interaction's predictor with less unique values is
printed along the x-axis. Default is \code{FALSE}, so the second predictor in an interaction, respectively
the predictor with more unique values is printed along the x-axis.}
\item{plevel}{indicates at which p-value an interaction term is considered as \emph{significant},
i.e. at which p-level an interaction term will be considered for plotting. Default is
0.05 (5 percent), hence, non-significant interactions are excluded by default. This
argument does not apply to \code{type = "eff"}.}
\item{title}{a default title used for the plots. Should be a character vector
of same length as interaction plots to be plotted. Default value is \code{NULL}, which means that each plot's title
includes the dependent variable as well as the names of the interaction terms.}
\item{fillColor}{fill color of the shaded area between the minimum and maximum lines. Default is \code{"grey"}.
Either set \code{fillColor} to \code{NULL} or use 0 for \code{fillAlpha} if you want to hide the shaded area.}
\item{fillAlpha}{alpha value (transparency) of the shaded area between the minimum and maximum lines. Default is 0.3.
Use either 0 or set \code{fillColor} to \code{NULL} if you want to hide the shaded area.}
\item{geom.colors}{vector of color values. First value is the color of the line indicating the lower bound of
the interaction term (moderator value). Second value is the color of the line indicating the upper bound of
the interaction term (moderator value). Third value, if applicable, is the color of the line indicating the
mean value of the interaction term (moderator value). Third value is only used when
\code{moderatorValues = "meansd"}. Or, if \code{diff = TRUE}, only one color value for the
line indicating the upper difference between lower and upper bound of interaction terms.}
\item{geom.size}{size resp. width of the geoms (bar width, line thickness or point size, depending on \code{type} argument).
Note that bar and bin widths mostly need smaller values than dot sizes (i.e. if \code{type = "dots"}).
By default, \code{geom.size = NULL}, which means that this argument is automatically
adjusted depending on the plot type.}
\item{axisTitle.x}{a default title used for the x-axis. Should be a character vector
of same length as interaction plots to be plotted. Default value is \code{NULL},
which means that each plot's x-axis uses the predictor's name as title.}
\item{axisTitle.y}{a default title used for the y-axis. Default value is \code{NULL},
which means that each plot's y-axis uses the dependent variable's name as title.}
\item{axisLabels.x}{character vector with value labels of the repeated measure variable
that are used for labelling the x-axis.}
\item{legendTitle}{title of the diagram's legend. A character vector of same length as
amount of interaction plots to be plotted (i.e. one vector element for each
plot's legend title).}
\item{legendLabels}{labels for the guide/legend. Either a character vector of same length as
amount of legend labels of the plot, or a \code{list} of character vectors, if more than one
interaction plot is plotted (i.e. one vector of legend labels for each interaction plot).
Default is \code{NULL}, so the name of the predictor with min/max-effect is used
as legend label.}
\item{showValueLabels}{if \code{TRUE}, value labels are plotted along the lines. Default is \code{FALSE}.}
\item{breakTitleAt}{determines how many chars of the plot title are displayed in
one line and when a line break is inserted into the title.}
\item{breakLegendLabelsAt}{determines how many chars of the legend labels are
displayed in one line and when a line break is inserted.}
\item{breakLegendTitleAt}{determines how many chars of the legend's title
are displayed in one line and when a line break is inserted.}
\item{axisLimits.y}{numeric vector of length two, defining lower and upper axis limits
of the y scale. By default, this argument is set to \code{NULL}, i.e. the
y-axis ranges from 0 to required maximum.}
\item{gridBreaksAt}{set breaks for the axis, i.e. at every \code{gridBreaksAt}'th
position a major grid is being printed.}
\item{showCI}{may be a numeric or logical value. If \code{showCI} is logical and
\code{TRUE}, a 95\% confidence region will be plotted. If \code{showCI}
if numeric, must be a number between 0 and 1, indicating the proportion
for the confidence region (e.g. \code{showCI = 0.9} plots a 90\% CI).
Only applies to \code{type = "emm"} or \code{type = "eff"}.}
\item{valueLabel.digits}{the amount of digits of the displayed value labels. Defaults to 2.}
\item{facet.grid}{\code{TRUE} for faceted plots instead of an integrated single plot.}
\item{printPlot}{logical, if \code{TRUE} (default), plots the results as graph. Use \code{FALSE} if you don't
want to plot any graphs. In either case, the ggplot-object will be returned as value.}
}
\value{
(Invisibly) returns the ggplot-objects with the complete plot-list (\code{plot.list})
as well as the data frames that were used for setting up the ggplot-objects (\code{df.list}).
}
\description{
Plot regression (predicted values) or probability lines (predicted probabilities) of
significant interaction terms to better understand effects
of moderations in regression models. This function accepts following fitted model classes:
\itemize{
\item linear models (\code{\link{lm}})
\item generalized linear models (\code{\link{glm}})
\item linear mixed effects models (\code{\link[lme4]{lmer}})
\item generalized linear mixed effects models (\code{\link[lme4]{glmer}})
\item non-linear mixed effects models (\code{\link[lme4]{nlmer}})
\item linear mixed effects models (\code{\link[nlme]{lme}}, but only for \code{type = "eff"})
\item generalized least squares models (\code{\link[nlme]{gls}}, but only for \code{type = "eff"})
\item panel data estimators (\code{plm})
}
Note that beside interaction terms, also the single predictors of each interaction (main effects)
must be included in the fitted model as well. Thus, \code{lm(dep ~ pred1 * pred2)} will work,
but \code{lm(dep ~ pred1:pred2)} won't!
}
\details{
\describe{
\item{\code{type = "cond"}}{plots the effective \emph{change} or \emph{impact}
(conditional effect) on a dependent variable of a moderation effect, as
described in \href{http://www.theanalysisfactor.com/clarifications-on-interpreting-interactions-in-regression/}{Grace-Martin},
i.e. the difference of the moderation effect on the dependent variable in \emph{presence}
and \emph{absence} of the moderating effect (\emph{simple slope} plot or
\emph{conditional effect}, see \href{http://imaging.mrc-cbu.cam.ac.uk/statswiki/FAQ/SobelTest?action=AttachFile&do=get&target=process.pdf}{Hayes 2012}).
Hence, this plot type may be used especially for \emph{binary or dummy coded}
moderator values (see also \href{http://jee3.web.rice.edu/interaction-overconfidence.pdf}{Esarey and Sumner 2015}).
This type \emph{does not} show the overall effect of interactions on the result of Y. Use
\code{type = "eff"} for effect displays similar to the \code{\link[effects]{effect}}-function
from the \pkg{effects}-package.
}
\item{\code{type = "eff"}}{plots the overall effects (marginal effects) of the interaction, with all remaining
covariates set to the mean. Effects are calculated using the \code{\link[effects]{effect}}-
function from the \pkg{effects}-package. \cr \cr
Following arguments \emph{do not} apply to this function: \code{diff}, \code{axisLabels.x}.
}
\item{\code{type = "emm"}}{plots the estimated marginal means of repeated measures designs,
like two-way repeated measures AN(C)OVA. In detail, this type plots estimated marginal means
(also called \emph{least square means} or \emph{marginal means}) of (significant) interaction terms.
The fitted models may be linear (mixed effects)
models of class \code{\link{lm}} or \code{\link[lme4]{merMod}}. This function may be used, for example,
to plot differences in interventions between control and treatment groups over multiple time points.
\itemize{
\item Following parameters apply to this plot type: \code{showCI}, \code{valueLabel.digits} and \code{axisLabels.x}.
\item Following arguments \emph{do not} apply to this function: \code{int.term}, \code{int.plot.index}, \code{diff}, \code{moderatorValues}, \code{fillColor}, \code{fillAlpha}.
}
}
}
The argument \code{int.term} only applies to \code{type = "eff"} and can be used
to select a specific interaction term of the model that should be plotted. The function
then calls \code{effect(int.term, fit)} to compute effects for this specific interaction
term only. This approach is recommended, when the fitted model contains many observations
and/or variables, which may slow down the effect-computation dramatically. In such cases,
consider computing effects for selected interaction terms only with \code{int.term}.
See 'Examples'.
}
\note{
Note that beside interaction terms, also the single predictors of each interaction (main effects)
must be included in the fitted model as well. Thus, \code{lm(dep ~ pred1 * pred2)} will work,
but \code{lm(dep ~ pred1:pred2)} won't! \cr \cr
For \code{type = "emm"}, all interaction terms have to be \code{\link{factor}}s!
Furthermore, for \code{type = "eff"}, predictors of interactions that are introduced first into the model
are used as grouping variable, while the latter predictor is printed along the x-axis
(i.e. lm(y~a+b+a:b) means that "a" is used as grouping variable and "b" is plotted along the x-axis).
}
\examples{
# Note that the data sets used in this example may not be perfectly suitable for
# fitting linear models. I just used them because they are part of the R-software.
# fit "dummy" model. Note that moderator should enter
# first the model, followed by predictor. Else, use
# argument "swapPredictors" to change predictor on
# x-axis with moderator
fit <- lm(weight ~ Diet * Time, data = ChickWeight)
# show summary to see significant interactions
summary(fit)
# plot regression line of interaction terms, including value labels
sjp.int(fit, type = "eff", showValueLabels = TRUE)
# load sample data set
library(sjmisc)
data(efc)
# create data frame with variables that should be included
# in the model
mydf <- data.frame(usage = efc$tot_sc_e,
sex = efc$c161sex,
education = efc$c172code,
burden = efc$neg_c_7,
dependency = efc$e42dep)
# convert gender predictor to factor
mydf$sex <- relevel(factor(mydf$sex), ref = "2")
# fit "dummy" model
fit <- lm(usage ~ .*., data = mydf)
summary(fit)
# plot interactions. note that type = "cond" only considers
# significant interactions by default. use "plevel" to
# adjust p-level sensitivity
sjp.int(fit, type = "cond")
# plot only selected interaction term for
# type = "eff"
sjp.int(fit, type = "eff", int.term = "sex*education")
# plot interactions, using mean and sd as moderator
# values to calculate interaction effect
sjp.int(fit, type = "eff", moderatorValues = "meansd")
sjp.int(fit, type = "cond", moderatorValues = "meansd")
# plot interactions, including those with p-value up to 0.1
sjp.int(fit,
type = "cond",
plevel = 0.1)
# -------------------------------
# Predictors for negative impact of care.
# Data from the EUROFAMCARE sample dataset
# -------------------------------
library(sjmisc)
data(efc)
# create binary response
y <- ifelse(efc$neg_c_7 < median(stats::na.omit(efc$neg_c_7)), 0, 1)
# create data frame for fitted model
mydf <- data.frame(y = as.factor(y),
sex = as.factor(efc$c161sex),
barthel = as.numeric(efc$barthtot))
# fit model
fit <- glm(y ~ sex * barthel,
data = mydf,
family = binomial(link = "logit"))
# plot interaction, increase p-level sensitivity
sjp.int(fit,
type = "eff",
legendLabels = get_labels(efc$c161sex),
plevel = 0.1)
sjp.int(fit,
type = "cond",
legendLabels = get_labels(efc$c161sex),
plevel = 0.1)
\dontrun{
# -------------------------------
# Plot estimated marginal means
# -------------------------------
# load sample data set
library(sjmisc)
data(efc)
# create data frame with variables that should be included
# in the model
mydf <- data.frame(burden = efc$neg_c_7,
sex = efc$c161sex,
education = efc$c172code)
# convert gender predictor to factor
mydf$sex <- factor(mydf$sex)
mydf$education <- factor(mydf$education)
# name factor levels and dependent variable
levels(mydf$sex) <- c("female", "male")
levels(mydf$education) <- c("low", "mid", "high")
mydf$burden <- set_label(mydf$burden, "care burden")
# fit "dummy" model
fit <- lm(burden ~ .*., data = mydf)
summary(fit)
# plot marginal means of interactions, no interaction found
sjp.int(fit, type = "emm")
# plot marginal means of interactions, including those with p-value up to 1
sjp.int(fit, type = "emm", plevel = 1)
# swap predictors
sjp.int(fit,
type = "emm",
plevel = 1,
swapPredictors = TRUE)
# -------------------------------
# Plot effects
# -------------------------------
# add continuous variable
mydf$barthel <- efc$barthtot
# re-fit model with continuous variable
fit <- lm(burden ~ .*., data = mydf)
# plot effects
sjp.int(fit, type = "eff", showCI = TRUE)
# plot effects, faceted
sjp.int(fit,
type = "eff",
int.plot.index = 3,
showCI = TRUE,
facet.grid = TRUE)}
}
\references{
\itemize{
\item Aiken and West (1991). Multiple Regression: Testing and Interpreting Interactions.
\item Brambor T, Clark WR and Golder M (2006) Understanding Interaction Models: Improving Empirical Analyses. Political Analysis 14: 63-82 \href{https://files.nyu.edu/mrg217/public/pa_final.pdf}{download}
\item Esarey J, Sumner JL (2015) Marginal Effects in Interaction Models: Determining and Controlling the False Positive Rate. \href{http://jee3.web.rice.edu/interaction-overconfidence.pdf}{download}
\item Fox J (2003) Effect displays in R for generalised linear models. Journal of Statistical Software 8:15, 1–27, \href{http://www.jstatsoft.org/v08/i15/}{<http://www.jstatsoft.org/v08/i15/>}
\item Hayes AF (2012) PROCESS: A versatile computational tool for observed variable mediation, moderation, and conditional process modeling [White paper] \href{http://imaging.mrc-cbu.cam.ac.uk/statswiki/FAQ/SobelTest?action=AttachFile&do=get&target=process.pdf}{download}
\item \href{http://www.theanalysisfactor.com/interpreting-interactions-in-regression/}{Grace-Martin K: Interpreting Interactions in Regression}
\item \href{http://www.theanalysisfactor.com/clarifications-on-interpreting-interactions-in-regression/}{Grace-Martin K: Clarifications on Interpreting Interactions in Regression}
\item \href{http://www.theanalysisfactor.com/3-tips-interpreting-moderation/}{Grace-Martin K: 3 Tips to Make Interpreting Moderation Effects Easier}
\item \href{http://www.theanalysisfactor.com/using-adjusted-means-to-interpret-moderators-in-analysis-of-covariance/}{Grace-Martin K: Using Adjusted Means to Interpret Moderators in Analysis of Covariance.}
}
}
\seealso{
\href{http://www.strengejacke.de/sjPlot/sjp.int/}{sjPlot manual: sjp.int}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/time_helper_functions.R
\name{bom_ymd_to_date}
\alias{bom_ymd_to_date}
\title{Convert integer year, month, day values to Date objects.}
\usage{
bom_ymd_to_date(year, month, day)
}
\arguments{
\item{year}{Either a vector of four digit year numbers, or a data frame or matrix with
column names year, month, day (all lower case).}
\item{month}{A vector of month numbers with the same length as \code{year}. Ignored if
\code{year} is a matrix or data frame.}
\item{day}{A vector of day numbers with the same length as \code{year}. Ignored if
\code{year} is a matrix or data frame.}
}
\value{
A vector of \code{Date} objects.
}
\description{
This is a helper for other package functions. If year is a data frame or
matrix it is assumed to have columns year, month and day. Otherwise three
equal-length vectors are expected.
}
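% Hypothetical usage sketch added for illustration only; the calls below are an
% assumption (they are not taken from the package sources), hence \dontrun.
\examples{
\dontrun{
# vector form: one date per element
bom_ymd_to_date(c(2019, 2020), c(1, 12), c(31, 1))
# data frame form: columns must be named year, month, day
bom_ymd_to_date(data.frame(year = 2020, month = 2, day = 29))
}
}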
|
/man/bom_ymd_to_date.Rd
|
permissive
|
mbedward/CERMBweather
|
R
| false | true | 901 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/time_helper_functions.R
\name{bom_ymd_to_date}
\alias{bom_ymd_to_date}
\title{Convert integer year, month, day values to Date objects.}
\usage{
bom_ymd_to_date(year, month, day)
}
\arguments{
\item{year}{Either a vector of four digit year numbers, or a data frame or matrix with
column names year, month, day (all lower case).}
\item{month}{A vector of month numbers with the same length as \code{year}. Ignored if
\code{year} is a matrix or data frame.}
\item{day}{A vector of day numbers with the same length as \code{year}. Ignored if
\code{year} is a matrix or data frame.}
}
\value{
A vector of \code{Date} objects.
}
\description{
This is a helper for other package functions. If year is a data frame or
matrix it is assumed to have columns year, month and day. Otherwise three
equal-length vectors are expected.
}
|
# removing all the objects
rm(list=ls())
# Loading the prudential dataset
LifeInsurance_data <- read.csv("/Users/mounikabandam/Documents/Stevens/CS 513 Knowledge Dis and Data mining/Project/Data/train.csv",na.strings=c("",NA))
# taking only 10000 records to run the neural network because of performance issues
LifeInsurance_data <- LifeInsurance_data[-c(10001:59381),]
# converting the categorized columns to factor Columns
# class(LifeInsurance_data$Response) #integer
# str(LifeInsurance_data) # as the data in the dataset as num, int... making the categorical values as factors.
# cols <- c("Product_Info_1", "Product_Info_2", "Product_Info_3", "Product_Info_5", "Product_Info_6", "Product_Info_7", "Employment_Info_2", "Employment_Info_3", "Employment_Info_5", "InsuredInfo_1", "InsuredInfo_2", "InsuredInfo_3", "InsuredInfo_4", "InsuredInfo_5", "InsuredInfo_6", "InsuredInfo_7", "Insurance_History_1", "Insurance_History_2", "Insurance_History_3", "Insurance_History_4", "Insurance_History_7", "Insurance_History_8", "Insurance_History_9", "Family_Hist_1", "Medical_History_2", "Medical_History_3", "Medical_History_4", "Medical_History_5", "Medical_History_6", "Medical_History_7", "Medical_History_8", "Medical_History_9", "Medical_History_11", "Medical_History_12", "Medical_History_13", "Medical_History_14", "Medical_History_16", "Medical_History_17", "Medical_History_18", "Medical_History_19", "Medical_History_20", "Medical_History_21", "Medical_History_22", "Medical_History_23", "Medical_History_25", "Medical_History_26", "Medical_History_27", "Medical_History_28", "Medical_History_29", "Medical_History_30", "Medical_History_31", "Medical_History_33", "Medical_History_34", "Medical_History_35", "Medical_History_36", "Medical_History_37", "Medical_History_38", "Medical_History_39", "Medical_History_40", "Medical_History_41","Response")
# LifeInsurance_data[cols] <- lapply(LifeInsurance_data[cols], factor)
# Removing the dummy variables as they cause problems when running the model
LifeInsurance_data <- LifeInsurance_data[,-c(80:127)]
# Removng columns which are not required for the model(reason mentioned in DataCleaning file)
LifeInsurance_data <- LifeInsurance_data[,c(-1,-3,-30,-35,-36,-37,-38,-48,-53,-62,-70)]
# code for handling the missing values in traing data
LifeInsurance_data$Employment_Info_1[is.na(LifeInsurance_data$Employment_Info_1)]<-mean(LifeInsurance_data$Employment_Info_1, na.rm = TRUE)
LifeInsurance_data$Employment_Info_4[is.na(LifeInsurance_data$Employment_Info_4)]<-mean(LifeInsurance_data$Employment_Info_4, na.rm = TRUE)
LifeInsurance_data$Medical_History_1[is.na(LifeInsurance_data$Medical_History_1)]<-median(LifeInsurance_data$Medical_History_1, na.rm = TRUE)
LifeInsurance_data$Employment_Info_6[is.na(LifeInsurance_data$Employment_Info_6)]<-mean(LifeInsurance_data$Employment_Info_6, na.rm = TRUE)
# making multi output ANN
one <- ifelse(LifeInsurance_data$Response==1,1,0)
two <- ifelse(LifeInsurance_data$Response==2,1,0)
three <- ifelse(LifeInsurance_data$Response==3,1,0)
four <- ifelse(LifeInsurance_data$Response==4,1,0)
five <- ifelse(LifeInsurance_data$Response==5,1,0)
six <- ifelse(LifeInsurance_data$Response==6,1,0)
seven <- ifelse(LifeInsurance_data$Response==7,1,0)
eight <- ifelse(LifeInsurance_data$Response==8,1,0)
LifeInsurance_data2 <- data.frame(LifeInsurance_data,one,two,three,four,five,six,seven,eight)
# LifeInsurance_data2 <- LifeInsurance_data2[,-c(69)]
# Getting the index values starting from 1 and then every fifth record (1,6,11,16,...)
index<-seq(from=1,to=nrow(LifeInsurance_data2),by=5)
# Splitting the data into training and test
# Store every fifth record in a "test" dataset starting with the first record
test<-LifeInsurance_data2[index,]
# Store the rest in the "training" dataset
training<-LifeInsurance_data2[-index,]
# install and loading the neuralnet package
# install.packages("neuralnet")
library("neuralnet")
# neural net algorithm application
n <- names(LifeInsurance_data2)
f <- as.formula(paste("one + two + three + four + five+ six + seven + eight ~", paste(n[!n %in% c("one","two","three","four","five","six","seven","eight","Response")], collapse = " + ")))
# k <- as.formula(paste("~ Response +", paste(n[!n %in% "Response"], collapse = " + ")))
#data_matrix <- model.matrix(k, data = training)
prudentialANN <- neuralnet(f, data = training, hidden=1, threshold=0.01)
# print(prudentialANN)
plot(prudentialANN)
# data_matrix_test <- model.matrix(k, data = test)
prudentialANNresult <- compute(prudentialANN, test[,-c(69:77)] )
# getting the result in another dataframe
prudentialANN_result <- as.data.frame(prudentialANNresult$net.result)
colnames(prudentialANN_result) <- c("one","two","three","four","five","six","seven","eight")
# making the values single-digit (0/1) indicators for the calculation of the error rate
prudentialANN_result$one<-ifelse(prudentialANN_result$one>=.1,1,0)
prudentialANN_result$two<-ifelse(prudentialANN_result$two>=.1,1,0)
prudentialANN_result$three<-ifelse(prudentialANN_result$three>=.1,1,0)
prudentialANN_result$four<-ifelse(prudentialANN_result$four>=.1,1,0)
prudentialANN_result$five<-ifelse(prudentialANN_result$five>=.1,1,0)
prudentialANN_result$six<-ifelse(prudentialANN_result$six>=.1,1,0)
prudentialANN_result$seven<-ifelse(prudentialANN_result$seven>=.1,1,0)
prudentialANN_result$eight<-ifelse(prudentialANN_result$eight>=.1,1,0)
# neural net is not working for the dataset; this can be concluded from the following result
prudentialANN_result
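# ---------------------------------------------------------------------------
# Hypothetical follow-up (added for illustration, not part of the original
# script): collapse the eight 0/1 output columns into a single predicted
# class (1-8) and compare it with the observed Response in the test set.
# Assumes the objects created above (prudentialANNresult, test) still exist.
# ---------------------------------------------------------------------------
predicted_class <- apply(as.data.frame(prudentialANNresult$net.result), 1, which.max)
# confusion table of actual vs. predicted risk class
table(actual = test$Response, predicted = predicted_class)
# overall accuracy on the test set
mean(predicted_class == test$Response)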
|
/code/Neuralnet_Prudential.R
|
permissive
|
mbandam/CS513-KDD
|
R
| false | false | 5,498 |
r
|
# removing all the objects
rm(list=ls())
# Loading the prudential dataset
LifeInsurance_data <- read.csv("/Users/mounikabandam/Documents/Stevens/CS 513 Knowledge Dis and Data mining/Project/Data/train.csv",na.strings=c("",NA))
# taking only 10000 records to run the neural network because of performance issues
LifeInsurance_data <- LifeInsurance_data[-c(10001:59381),]
# converting the categorized columns to factor Columns
# class(LifeInsurance_data$Response) #integer
# str(LifeInsurance_data) # as the data in the dataset as num, int... making the categorical values as factors.
# cols <- c("Product_Info_1", "Product_Info_2", "Product_Info_3", "Product_Info_5", "Product_Info_6", "Product_Info_7", "Employment_Info_2", "Employment_Info_3", "Employment_Info_5", "InsuredInfo_1", "InsuredInfo_2", "InsuredInfo_3", "InsuredInfo_4", "InsuredInfo_5", "InsuredInfo_6", "InsuredInfo_7", "Insurance_History_1", "Insurance_History_2", "Insurance_History_3", "Insurance_History_4", "Insurance_History_7", "Insurance_History_8", "Insurance_History_9", "Family_Hist_1", "Medical_History_2", "Medical_History_3", "Medical_History_4", "Medical_History_5", "Medical_History_6", "Medical_History_7", "Medical_History_8", "Medical_History_9", "Medical_History_11", "Medical_History_12", "Medical_History_13", "Medical_History_14", "Medical_History_16", "Medical_History_17", "Medical_History_18", "Medical_History_19", "Medical_History_20", "Medical_History_21", "Medical_History_22", "Medical_History_23", "Medical_History_25", "Medical_History_26", "Medical_History_27", "Medical_History_28", "Medical_History_29", "Medical_History_30", "Medical_History_31", "Medical_History_33", "Medical_History_34", "Medical_History_35", "Medical_History_36", "Medical_History_37", "Medical_History_38", "Medical_History_39", "Medical_History_40", "Medical_History_41","Response")
# LifeInsurance_data[cols] <- lapply(LifeInsurance_data[cols], factor)
# Removing the dummy variables as they cause problems when running the model
LifeInsurance_data <- LifeInsurance_data[,-c(80:127)]
# Removng columns which are not required for the model(reason mentioned in DataCleaning file)
LifeInsurance_data <- LifeInsurance_data[,c(-1,-3,-30,-35,-36,-37,-38,-48,-53,-62,-70)]
# code for handling the missing values in traing data
LifeInsurance_data$Employment_Info_1[is.na(LifeInsurance_data$Employment_Info_1)]<-mean(LifeInsurance_data$Employment_Info_1, na.rm = TRUE)
LifeInsurance_data$Employment_Info_4[is.na(LifeInsurance_data$Employment_Info_4)]<-mean(LifeInsurance_data$Employment_Info_4, na.rm = TRUE)
LifeInsurance_data$Medical_History_1[is.na(LifeInsurance_data$Medical_History_1)]<-median(LifeInsurance_data$Medical_History_1, na.rm = TRUE)
LifeInsurance_data$Employment_Info_6[is.na(LifeInsurance_data$Employment_Info_6)]<-mean(LifeInsurance_data$Employment_Info_6, na.rm = TRUE)
# making multi output ANN
one <- ifelse(LifeInsurance_data$Response==1,1,0)
two <- ifelse(LifeInsurance_data$Response==2,1,0)
three <- ifelse(LifeInsurance_data$Response==3,1,0)
four <- ifelse(LifeInsurance_data$Response==4,1,0)
five <- ifelse(LifeInsurance_data$Response==5,1,0)
six <- ifelse(LifeInsurance_data$Response==6,1,0)
seven <- ifelse(LifeInsurance_data$Response==7,1,0)
eight <- ifelse(LifeInsurance_data$Response==8,1,0)
LifeInsurance_data2 <- data.frame(LifeInsurance_data,one,two,three,four,five,six,seven,eight)
# LifeInsurance_data2 <- LifeInsurance_data2[,-c(69)]
# Getting the index values starting from 1 and then every fifth record (1,6,11,16,...)
index<-seq(from=1,to=nrow(LifeInsurance_data2),by=5)
# Splitting the data into training and test
# Store every fifth record in a "test" dataset starting with the first record
test<-LifeInsurance_data2[index,]
# Store the rest in the "training" dataset
training<-LifeInsurance_data2[-index,]
# install and loading the neuralnet package
# install.packages("neuralnet")
library("neuralnet")
# neural net algorithm application
n <- names(LifeInsurance_data2)
f <- as.formula(paste("one + two + three + four + five+ six + seven + eight ~", paste(n[!n %in% c("one","two","three","four","five","six","seven","eight","Response")], collapse = " + ")))
# k <- as.formula(paste("~ Response +", paste(n[!n %in% "Response"], collapse = " + ")))
#data_matrix <- model.matrix(k, data = training)
prudentialANN <- neuralnet(f, data = training, hidden=1, threshold=0.01)
# print(prudentialANN)
plot(prudentialANN)
# data_matrix_test <- model.matrix(k, data = test)
prudentialANNresult <- compute(prudentialANN, test[,-c(69:77)] )
# getting the result in another dataframe
prudentialANN_result <- as.data.frame(prudentialANNresult$net.result)
colnames(prudentialANN_result) <- c("one","two","three","four","five","six","seven","eight")
# making the values single-digit (0/1) indicators for the calculation of the error rate
prudentialANN_result$one<-ifelse(prudentialANN_result$one>=.1,1,0)
prudentialANN_result$two<-ifelse(prudentialANN_result$two>=.1,1,0)
prudentialANN_result$three<-ifelse(prudentialANN_result$three>=.1,1,0)
prudentialANN_result$four<-ifelse(prudentialANN_result$four>=.1,1,0)
prudentialANN_result$five<-ifelse(prudentialANN_result$five>=.1,1,0)
prudentialANN_result$six<-ifelse(prudentialANN_result$six>=.1,1,0)
prudentialANN_result$seven<-ifelse(prudentialANN_result$seven>=.1,1,0)
prudentialANN_result$eight<-ifelse(prudentialANN_result$eight>=.1,1,0)
# neural net is not working for the dataset; this can be concluded from the following result
prudentialANN_result
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nmea-util.R
\name{nmea_date_time}
\alias{nmea_date_time}
\alias{nmea_longitude}
\alias{nmea_latitude}
\title{NMEA utility functions}
\usage{
nmea_date_time(date, time)
nmea_longitude(degree_spec, hemisphere)
nmea_latitude(degree_spec, hemisphere)
}
\arguments{
\item{date}{A Date object or NMEA datestamp (DDMMYY)}
\item{time}{A hms object or NMEA time (HHMMSS)}
\item{degree_spec}{A longitude/latitude as it appears in the NMEA sentence
(e.g., "4916.45"). This is in the form (dddmm.mm) where there are always
two minute digits to the left of the decimal point.}
\item{hemisphere}{One of "S", "N", "W", or "E".}
}
\value{
\itemize{
\item \code{nmea_date_time}: A POSIXct datetime (UTC)
\item \code{nmea_longitude} and \code{nmea_latitude}: A numeric longitude/latitude
}
}
\description{
Some values, like datetime, longitude, and latitude, are distributed
across fields.
}
\examples{
nmea_date_time("191194", "225446")
nmea_longitude("12311.12", "W")
nmea_latitude("4916.45", "N")
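# Hypothetical cross-check of the dddmm.mm layout described above (added for
# illustration): 49 degrees plus 16.45 minutes is roughly 49.274 decimal degrees.
49 + 16.45 / 60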
}
|
/man/nmea_date_time.Rd
|
permissive
|
paleolimbot/nmea
|
R
| false | true | 1,069 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nmea-util.R
\name{nmea_date_time}
\alias{nmea_date_time}
\alias{nmea_longitude}
\alias{nmea_latitude}
\title{NMEA utility functions}
\usage{
nmea_date_time(date, time)
nmea_longitude(degree_spec, hemisphere)
nmea_latitude(degree_spec, hemisphere)
}
\arguments{
\item{date}{A Date object or NMEA datestamp (DDMMYY)}
\item{time}{A hms object or NMEA time (HHMMSS)}
\item{degree_spec}{A longitude/latitude as it appears in the NMEA sentence
(e.g., "4916.45"). This is in the form (dddmm.mm) where there are always
two minute digits to the left of the decimal point.}
\item{hemisphere}{One of "S", "N", "W", or "E".}
}
\value{
\itemize{
\item \code{nmea_date_time}: A POSIXct datetime (UTC)
\item \code{nmea_longitude} and \code{nmea_latitude}: A numeric longitude/latitude
}
}
\description{
Some values, like datetime, longitude, and latitude, are distributed
across fields.
}
\examples{
nmea_date_time("191194", "225446")
nmea_longitude("12311.12", "W")
nmea_latitude("4916.45", "N")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/watson.alchemy.R
\name{watson.alchemy.test}
\alias{watson.alchemy.test}
\title{WatsonR - Alchemy Language Test}
\usage{
watson.alchemy.test(creds)
}
\arguments{
\item{creds}{json file containing the alchemy api key to use for this call}
}
\value{
NOTHING - just prints the response (hopefully 200 & API response) on screen
}
\description{
Alchemy Language Functions - Light Test
}
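% Hypothetical usage sketch added for illustration; the credentials file name is
% an assumption and the call needs a live API key, hence \dontrun.
\examples{
\dontrun{
watson.alchemy.test("alchemy_credentials.json")
}
}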
|
/man/watson.alchemy.test.Rd
|
no_license
|
rustyoldrake/WatsonR
|
R
| false | true | 459 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/watson.alchemy.R
\name{watson.alchemy.test}
\alias{watson.alchemy.test}
\title{WatsonR - Alchemy Language Test}
\usage{
watson.alchemy.test(creds)
}
\arguments{
\item{creds}{json file containing the alchemy api key to use for this call}
}
\value{
NOTHING - just prints the response (hopefully 200 & API response) on screen
}
\description{
Alchemy Language Functions - Light Test
}
|
source('Scripts/R/paths.R')
source('Scripts/R/go.enrichment.R')
source('Scripts/R/util.R')
require(ggplot2)
load(PATHS$EQTM.ME.DATA)
load(PATHS$EXPR.GENE.ANNOT.DATA)
load(PATHS$EXPR.RANGES.DATA)
load(PATHS$METH.RANGES.DATA)
load(PATHS$HERV.EQTM.OVERLAP.DATA)
cis.pos.pairs <- eqtm.me$cis$ntest
trans.pos.pairs <- eqtm.me$trans$ntests
cis.pairs <- eqtm.me$cis$eqtls
cis.pairs$snps <- as.character(cis.pairs$snps)
cis.pairs$gene <- as.character(cis.pairs$gene)
cis.cpgs <- unique(cis.pairs$snps)
cis.probes <- unique(cis.pairs$gene)
cis.genes <- unique(na.omit(probe2gene[cis.probes]))
probes.distances <- distanceToNearest(expr.ranges, snp.ranges)
pos.cis.probes <- names(expr.ranges[mcols(probes.distances)$distance < 5e5])
pos.cis.genes <- unique(probe2gene[pos.cis.probes[pos.cis.probes %in% names(probe2gene)]])
cis.gene.enrichment <- go.enrichment(cis.genes, pos.cis.genes, gsc, c('BP'))
trans.pairs <- eqtm.me$trans$eqtls
trans.pairs$snps <- as.character(trans.pairs$snps)
trans.pairs$gene <- as.character(trans.pairs$gene)
trans.snps <- unique(as.character(trans.pairs$snps))
trans.probes <- unique(as.character(trans.pairs$gene))
trans.genes <- unique(na.omit(probe2gene[trans.probes]))
hervS2.cis.either.pairs <- hervS2.eqtm.overlap$cis.either
hervS2.cis.either.cpgs <- as.character(unique(hervS2.cis.either.pairs$snps))
hervS2.cis.either.probes <- as.character(unique(hervS2.cis.either.pairs$gene))
hervS2.cis.either.genes <- unique(na.omit(probe2gene[hervS2.cis.either.probes]))
hervS2.trans.either.pairs <- hervS2.eqtm.overlap$trans.either
hervS2.trans.either.cpgs <- as.character(unique(hervS2.trans.either.pairs$snps))
hervS2.trans.either.probes <- as.character(unique(hervS2.trans.either.pairs$gene))
hervS2.trans.either.genes <- unique(na.omit(probe2gene[hervS2.trans.either.probes]))
hervS2.either.genes <- unique(c(hervS2.cis.either.genes, hervS2.trans.either.genes))
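# ----------------------------------------------------------------------------
# Hypothetical summary step (added for illustration, not part of the original
# script): tabulate how many pairs, CpGs/probes and genes each analysis yielded.
# Assumes the objects computed above are still in the workspace.
# ----------------------------------------------------------------------------
eqtm.summary <- data.frame(
  level = c('cis pairs', 'cis CpGs', 'cis genes',
            'trans pairs', 'trans probes', 'trans genes'),
  n = c(nrow(cis.pairs), length(cis.cpgs), length(cis.genes),
        nrow(trans.pairs), length(trans.probes), length(trans.genes)))
eqtm.summary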
|
/R/eqtm.stats.R
|
no_license
|
TheSeoman/Scripts
|
R
| false | false | 1,895 |
r
|
source('Scripts/R/paths.R')
source('Scripts/R/go.enrichment.R')
source('Scripts/R/util.R')
require(ggplot2)
load(PATHS$EQTM.ME.DATA)
load(PATHS$EXPR.GENE.ANNOT.DATA)
load(PATHS$EXPR.RANGES.DATA)
load(PATHS$METH.RANGES.DATA)
load(PATHS$HERV.EQTM.OVERLAP.DATA)
cis.pos.pairs <- eqtm.me$cis$ntest
trans.pos.pairs <- eqtm.me$trans$ntests
cis.pairs <- eqtm.me$cis$eqtls
cis.pairs$snps <- as.character(cis.pairs$snps)
cis.pairs$gene <- as.character(cis.pairs$gene)
cis.cpgs <- unique(cis.pairs$snps)
cis.probes <- unique(cis.pairs$gene)
cis.genes <- unique(na.omit(probe2gene[cis.probes]))
probes.distances <- distanceToNearest(expr.ranges, snp.ranges)
pos.cis.probes <- names(expr.ranges[mcols(probes.distances)$distance < 5e5])
pos.cis.genes <- unique(probe2gene[pos.cis.probes[pos.cis.probes %in% names(probe2gene)]])
cis.gene.enrichment <- go.enrichment(cis.genes, pos.cis.genes, gsc, c('BP'))
trans.pairs <- eqtm.me$trans$eqtls
trans.pairs$snps <- as.character(trans.pairs$snps)
trans.pairs$gene <- as.character(trans.pairs$gene)
trans.snps <- unique(as.character(trans.pairs$snps))
trans.probes <- unique(as.character(trans.pairs$gene))
trans.genes <- unique(na.omit(probe2gene[trans.probes]))
hervS2.cis.either.pairs <- hervS2.eqtm.overlap$cis.either
hervS2.cis.either.cpgs <- as.character(unique(hervS2.cis.either.pairs$snps))
hervS2.cis.either.probes <- as.character(unique(hervS2.cis.either.pairs$gene))
hervS2.cis.either.genes <- unique(na.omit(probe2gene[hervS2.cis.either.probes]))
hervS2.trans.either.pairs <- hervS2.eqtm.overlap$trans.either
hervS2.trans.either.cpgs <- as.character(unique(hervS2.trans.either.pairs$snps))
hervS2.trans.either.probes <- as.character(unique(hervS2.trans.either.pairs$gene))
hervS2.trans.either.genes <- unique(na.omit(probe2gene[hervS2.trans.either.probes]))
hervS2.either.genes <- unique(c(hervS2.cis.either.genes, hervS2.trans.either.genes))
|
## These functions together allow us to avoid taxing computations by caching the results of the initial
## matrix inversion and reusing when required.
## The function below creates a special vector, captures the input matrix and sets up the default methods to handle
## the input matrix and its inverse
# input here must be a square matrix that can be successfully inverted
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinv <- function(inv) m <<- inv
getinv <- function() m
list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## This function reads the input matrix and checks if the inverse has been previously calculated. If so,
## it returns the cached value. If not, it computes the inverse.
cacheSolve <- function(x, ...) {
    # get the cached inverse of the matrix, if it exists
m <- x$getinv()
# check if the inverse has already been computed. If yes, return cached matrix and tell me so!
if(!is.null(m)) {
message("We already did that! Getting cached data")
return(m)
}
# if not, compute inverse and return it
data <- x$get()
m <- solve(data, ...)
x$setinv(m)
m
}
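## ---------------------------------------------------------------------------
## Hypothetical quick test (added for illustration; kept as comments so that
## sourcing this file still only defines the two functions above):
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
## cacheSolve(m)   # first call computes and caches the inverse
## cacheSolve(m)   # second call prints the "cached data" message and reuses it
## ---------------------------------------------------------------------------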
|
/cachematrix.R
|
no_license
|
raulza/ProgrammingAssignment2
|
R
| false | false | 1,200 |
r
|
## These functions together allow us to avoid taxing computations by caching the results of the initial
## matrix inversion and reusing when required.
## The function below creates a special vector, captures the input matrix and sets up the default methods to handle
## the input matrix and its inverse
# input here must be a square matrix that can be successfully inverted
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinv <- function(inv) m <<- inv
getinv <- function() m
list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## This function reads the input matrix and checks if the inverse has been previously calculated. If so,
## it returns the cached value. If not, it computes the inverse.
cacheSolve <- function(x, ...) {
    # get the cached inverse of the matrix, if it exists
m <- x$getinv()
# check if the inverse has already been computed. If yes, return cached matrix and tell me so!
if(!is.null(m)) {
message("We already did that! Getting cached data")
return(m)
}
# if not, compute inverse and return it
data <- x$get()
m <- solve(data, ...)
x$setinv(m)
m
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/merge_samples.R
\name{merge_samples}
\alias{merge_samples}
\alias{merge_samples,Multiwave-method}
\title{Merge Sampled Data based on IDs}
\usage{
merge_samples(x, phase, wave, id = NULL, sampled_ind = "already_sampled_ind")
}
\arguments{
\item{x}{an object of class \code{"Multiwave"}.}
\item{phase}{A numeric value specifying the phase of the
Multiwave object that
the specified wave is in. Cannot be phase 1.}
\item{wave}{A numeric value specifying the wave of the Multiwave
object that the merge should be
performed in. This wave must have a valid dataframe in the
\code{"sampled data"} slot. The previous wave, taken as the final
wave of the previous phase if \code{wave} = 1, must have a valid
dataframe in the \code{"data"} slot.}
\item{id}{A character value specifying the name of the column holding unit
ids. Taken from wave, phase, or overall metadata (searched for in that
order) if \code{NULL}. Defaults to \code{NULL}.}
\item{sampled_ind}{a character value specifying the name of the column that
should hold the indicator of whether each unit has already been sampled in
the current phase.}
}
\value{
A Multiwave object with the merged dataframe in the
\code{"data"} slot of the specified wave.
}
\description{
In an object of class \code{"Multiwave"}, \code{merge_samples} creates
a dataframe in the \code{"data"} slot of the specified wave by merging
the dataframe in the \code{"sampled data"} slot with the dataframe in
the \code{"data"} slot of the previous wave.
}
\details{
If a column name in the \code{"sampled data"} matches a column name in
the \code{"data"} slot of the previous wave, these columns will be
merged into one column with the same name in the output dataframe.
For ids that have non-missing values in both columns of the merge,
the value from
\code{"sampled_data"} will overwrite the previous value and a warning
will be printed. All ids present in the \code{"data"} from the previous
wave but missing from \code{"sampled_data"} will be given NA values
for the newly merged variables.
Columns in \code{"sampled_data"} that do not match names of the
\code{"data"} from the previous wave will be added as new columns in
the output dataframe. All ids that do not appear in
\code{"sampled_data"} will receive NA values for these new variables.
}
\examples{
library(datasets)
iris <- data.frame(iris, id = 1:150)
MySurvey <- new_multiwave(phases = 2, waves = c(1, 3))
get_data(MySurvey, phase = 1, slot = "data") <-
data.frame(dplyr::select(iris, -Sepal.Width))
get_data(MySurvey, phase = 2, wave = 1, slot = "sampled_data") <-
dplyr::select(iris, id, Sepal.Width)[1:40, ]
MySurvey <- merge_samples(MySurvey, phase = 2, wave = 1, id = "id")
}
|
/man/merge_samples.Rd
|
no_license
|
jennybc/optimall
|
R
| false | true | 2,760 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/merge_samples.R
\name{merge_samples}
\alias{merge_samples}
\alias{merge_samples,Multiwave-method}
\title{Merge Sampled Data based on IDs}
\usage{
merge_samples(x, phase, wave, id = NULL, sampled_ind = "already_sampled_ind")
}
\arguments{
\item{x}{an object of class \code{"Multiwave"}.}
\item{phase}{A numeric value specifying the phase of the
Multiwave object that
the specified wave is in. Cannot be phase 1.}
\item{wave}{A numeric value specifying the wave of the Multiwave
object that the merge should be
performed in. This wave must have a valid dataframe in the
\code{"sampled data"} slot. The previous wave, taken as the final
wave of the previous phase if \code{wave} = 1, must have a valid
dataframe in the \code{"data"} slot.}
\item{id}{A character value specifying the name of the column holding unit
ids. Taken from wave, phase, or overall metadata (searched for in that
order) if \code{NULL}. Defaults to \code{NULL}.}
\item{sampled_ind}{a character value specifying the name of the column that
should hold the indicator of whether each unit has already been sampled in
the current phase.}
}
\value{
A Multiwave object with the merged dataframe in the
\code{"data"} slot of the specified wave.
}
\description{
In an object of class \code{"Multiwave"}, \code{merge_samples} creates
a dataframe in the \code{"data"} slot of the specified wave by merging
the dataframe in the \code{"sampled data"} slot with the dataframe in
the \code{"data"} slot of the previous wave.
}
\details{
If a column name in the \code{"sampled data"} matches a column name in
the \code{"data"} slot of the previous wave, these columns will be
merged into one column with the same name in the output dataframe.
For ids that have non-missing values in both columns of the merge,
the value from
\code{"sampled_data"} will overwrite the previous value and a warning
will be printed. All ids present in the \code{"data"} from the previous
wave but missing from \code{"sampled_data"} will be given NA values
for the newly merged variables.
Columns in \code{"sampled_data"} that do not match names of the
\code{"data"} from the previous wave will be added as new columns in
the output dataframe. All ids that do not appear in
\code{"sampled_data"} will receive NA values for these new variables.
}
\examples{
library(datasets)
iris <- data.frame(iris, id = 1:150)
MySurvey <- new_multiwave(phases = 2, waves = c(1, 3))
get_data(MySurvey, phase = 1, slot = "data") <-
data.frame(dplyr::select(iris, -Sepal.Width))
get_data(MySurvey, phase = 2, wave = 1, slot = "sampled_data") <-
dplyr::select(iris, id, Sepal.Width)[1:40, ]
MySurvey <- merge_samples(MySurvey, phase = 2, wave = 1, id = "id")
}
|
get_grouped_score <- function(server_score,
returner_score){
# --> Game Point
if(server_score == 'AD'){
return('Game Point')
}
# --> Break Point
if(returner_score == 'AD'){
return('Break Point')
}
server_score = as.numeric(levels(server_score))[server_score]
returner_score = as.numeric(levels(returner_score))[returner_score]
# --> Game Point
if(( (server_score == 40) | (server_score == 6) ) & (server_score > returner_score) ){
return('Game Point')
}
if( (server_score > 6) & ( (server_score - returner_score) == 1 ) ){
return('Game Point')
}
# --> Break Point
if( ( (returner_score == 40) | (returner_score == 6) ) & (returner_score > server_score) ){
return('Break Point')
}
if( (returner_score > 6) & ( (returner_score - server_score) == 1 ) ){
return('Break Point')
}
# --> Even / 1st point
if(server_score == returner_score) {
if(server_score ==0 ){
return('First Point')
} else{
return('Even')
}
}
if( (server_score > 6) & (returner_score > 6) & ( abs(server_score - returner_score) == 2) ){
return('First Point')
}
#--> Ahead
if(server_score > returner_score){
return('Ahead')
}
# -->Behind
if(server_score < returner_score){
return('Behind')
}
return(NULL)
}
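# -----------------------------------------------------------------------------
# Hypothetical quick checks (added for illustration; kept as comments so that
# sourcing this file still only defines the function). The function expects
# factor inputs, hence the factor() wrappers:
# get_grouped_score(factor('40'), factor('30'))   # "Game Point"
# get_grouped_score(factor('30'), factor('40'))   # "Break Point"
# get_grouped_score(factor('AD'), factor('40'))   # "Game Point"
# get_grouped_score(factor('0'),  factor('0'))    # "First Point"
# -----------------------------------------------------------------------------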
|
/prototypes/serve_speeds/src/grouped_score.R
|
no_license
|
petertea96/tennis_analytics
|
R
| false | false | 1,362 |
r
|
get_grouped_score <- function(server_score,
returner_score){
# --> Game Point
if(server_score == 'AD'){
return('Game Point')
}
# --> Break Point
if(returner_score == 'AD'){
return('Break Point')
}
server_score = as.numeric(levels(server_score))[server_score]
returner_score = as.numeric(levels(returner_score))[returner_score]
# --> Game Point
if(( (server_score == 40) | (server_score == 6) ) & (server_score > returner_score) ){
return('Game Point')
}
if( (server_score > 6) & ( (server_score - returner_score) == 1 ) ){
return('Game Point')
}
# --> Break Point
if( ( (returner_score == 40) | (returner_score == 6) ) & (returner_score > server_score) ){
return('Break Point')
}
if( (returner_score > 6) & ( (returner_score - server_score) == 1 ) ){
return('Break Point')
}
# --> Even / 1st point
if(server_score == returner_score) {
if(server_score ==0 ){
return('First Point')
} else{
return('Even')
}
}
if( (server_score > 6) & (returner_score > 6) & ( abs(server_score - returner_score) == 2) ){
return('First Point')
}
#--> Ahead
if(server_score > returner_score){
return('Ahead')
}
# -->Behind
if(server_score < returner_score){
return('Behind')
}
return(NULL)
}
|
#----------------------------------------------------------------------
# Purpose: Condition an Airline dataset by filtering out NAs where the
# departure delay in the input dataset is unknown.
#
# Then treat anything longer than minutesOfDelayWeTolerate
# as delayed.
#----------------------------------------------------------------------
# setwd("/Users/tomk/0xdata/ws/h2o/R/tests/testdir_demos")
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../findNSourceUtils.R')
options(echo=TRUE)
heading("BEGIN TEST")
conn <- new("H2OClient", ip=myIP, port=myPort)
filePath <- "../../../smalldata/airlines/allyears2k_headers.zip"
air.hex = h2o.uploadFile(conn, filePath, "air.hex")
dim(air.hex)
colnames(air.hex)
numCols = ncol(air.hex)
x_cols = c("Month", "DayofMonth", "DayOfWeek", "CRSDepTime", "CRSArrTime", "UniqueCarrier", "CRSElapsedTime", "Origin", "Dest", "Distance")
y_col = "SynthDepDelayed"
noDepDelayedNAs.hex = air.hex[!is.na(air.hex$DepDelay)]
dim(noDepDelayedNAs.hex)
minutesOfDelayWeTolerate = 15
noDepDelayedNAs.hex[,numCols+1] = noDepDelayedNAs.hex$DepDelay > minutesOfDelayWeTolerate
noDepDelayedNAs.hex[,numCols+1] = as.factor(noDepDelayedNAs.hex[,numCols+1])
cn = colnames(noDepDelayedNAs.hex)
cn[numCols+1] = y_col
colnames(noDepDelayedNAs.hex) = cn
air.gbm = h2o.gbm(x = x_cols, y = y_col, data = noDepDelayedNAs.hex)
air.gbm
PASS_BANNER()
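#----------------------------------------------------------------------
# Hypothetical illustration (added by the editor, not part of the original
# test): the same NA-filtering and delay-threshold labelling shown on a tiny
# plain data.frame, so the logic can be inspected without an H2O cluster.
#----------------------------------------------------------------------
toy = data.frame(DepDelay = c(NA, 3, 20, 45))
toy = toy[!is.na(toy$DepDelay), , drop = FALSE]
toy$SynthDepDelayed = as.factor(toy$DepDelay > minutesOfDelayWeTolerate)
toy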
|
/R/tests/testdir_demos/runit_demo_prep_airlines.R
|
permissive
|
vkuznet/h2o
|
R
| false | false | 1,432 |
r
|
#----------------------------------------------------------------------
# Purpose: Condition an Airline dataset by filtering out NAs where the
# departure delay in the input dataset is unknown.
#
# Then treat anything longer than minutesOfDelayWeTolerate
# as delayed.
#----------------------------------------------------------------------
# setwd("/Users/tomk/0xdata/ws/h2o/R/tests/testdir_demos")
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../findNSourceUtils.R')
options(echo=TRUE)
heading("BEGIN TEST")
conn <- new("H2OClient", ip=myIP, port=myPort)
filePath <- "../../../smalldata/airlines/allyears2k_headers.zip"
air.hex = h2o.uploadFile(conn, filePath, "air.hex")
dim(air.hex)
colnames(air.hex)
numCols = ncol(air.hex)
x_cols = c("Month", "DayofMonth", "DayOfWeek", "CRSDepTime", "CRSArrTime", "UniqueCarrier", "CRSElapsedTime", "Origin", "Dest", "Distance")
y_col = "SynthDepDelayed"
noDepDelayedNAs.hex = air.hex[!is.na(air.hex$DepDelay)]
dim(noDepDelayedNAs.hex)
minutesOfDelayWeTolerate = 15
noDepDelayedNAs.hex[,numCols+1] = noDepDelayedNAs.hex$DepDelay > minutesOfDelayWeTolerate
noDepDelayedNAs.hex[,numCols+1] = as.factor(noDepDelayedNAs.hex[,numCols+1])
cn = colnames(noDepDelayedNAs.hex)
cn[numCols+1] = y_col
colnames(noDepDelayedNAs.hex) = cn
air.gbm = h2o.gbm(x = x_cols, y = y_col, data = noDepDelayedNAs.hex)
air.gbm
PASS_BANNER()
|
#' Print Objects
#'
#' Print the results of a multiple comparison test (\code{multicomp.test}).
#'
#'
#' @param x an object of class "MCT" from \code{multicomp.test}.
#' @param digits the number of significant digits to print numeric data.
#' @param \dots not used for method, required for other methods.
#' @return The object \code{x} is returned invisibly.
#' @note The printed output contains a description of the test, critical
#' values, the variables in the test, and two tables: the paired comparisons
#' and associations among the groups. The table of the paired comparisons shows
#' the groups in the comparison, the estimate of the difference between the
#' group means, the standard error of the difference, lower and upper
#' confidence intervals, and a flag that indicates if the confidence interval
#' excludes 0, which indicates whether the difference is significantly different
#' from 0 at the user-specified value. The table of associations shows the
#' group, the mean value of the response, the number of observations in the
#' group, and any number of columns named "A," "B," and so forth that represent
#' possible associations of the groups where an "X" is present in the group.
#' @export
#' @method print MCT
print.MCT <- function(x, digits=4, ...) {
## print function for objects of class MCT
cat("\t", x$title, "\n")
if(x$cv.method == "lsd")
cat("Pairwise")
else
cat("Overall")
cat(" error rate: ", x$alpha, "\nCritical value: ", round(x$crit.value, digits),
" by the ", x$cv.method, " method\n\n", sep="")
cat("Response variable: ", x$response, "\nGroup variable: ", x$groups,
"\n\n", sep="")
cat("Table of paired comparisons, ", round(1 - x$alpha, 4) * 100,
" percent confidence intervals\n excluding 0 are flagged by *.\n", sep="")
pmat <- format(signif(x$table, digits))
pmat <- cbind(pmat, flag=ifelse(x$table[,3]*x$table[,4] > 0, "*", " "))
print(pmat, quote=FALSE)
cat("\nTable of associations among groups\n")
pmat=cbind(Mean=signif(x$means, digits), Size=x$sizes, x$assoc)
print(pmat, quote=FALSE)
cat("\n")
invisible(x)
}
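## ---------------------------------------------------------------------------
## Hypothetical smoke test (added for illustration; kept as comments because
## this file only defines the print method). The field names mirror those read
## by print.MCT above; the numbers are made up.
## mct <- structure(list(title = "Example comparison", cv.method = "lsd",
##                       alpha = 0.05, crit.value = 2, response = "yield",
##                       groups = "treatment",
##                       table = matrix(c(1.2, 0.4, 0.4, 2), nrow = 1,
##                                      dimnames = list("A-B",
##                                        c("estimate", "stderr", "lower", "upper"))),
##                       means = c(A = 5.1, B = 3.9), sizes = c(10, 10),
##                       assoc = cbind(A = c("X", " "), B = c(" ", "X"))),
##                  class = "MCT")
## print(mct)
## ---------------------------------------------------------------------------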
|
/R/print.MCT.R
|
permissive
|
gvanzin/smwrStats
|
R
| false | false | 2,088 |
r
|
#' Print Objects
#'
#' Print the results of a multiple comparison test (\code{multicomp.test}).
#'
#'
#' @param x an object of class "MCT" from \code{multicomp.test}.
#' @param digits the number of significant digits to print numeric data.
#' @param \dots not used for method, required for other methods.
#' @return The object \code{x} is returned invisibly.
#' @note The printed output contains a description of the test, critical
#' values, the variables in the test, and two tables: the paired comparisons
#' and associations among the groups. The table of the paired comparisons shows
#' the groups in the comparison, the estimate of the difference between the
#' group means, the standard error of the difference, lower and upper
#' confidence intervals, and a flag that indicates if the confidence interval
#' excludes 0, which indicates whether the difference is significantly different
#' from 0 at the user-specified value. The table of associations shows the
#' group, the mean value of the response, the number of observations in the
#' group, and any number of columns named "A," "B," and so forth that represent
#' possible associations of the groups where an "X" is present in the group.
#' @export
#' @method print MCT
print.MCT <- function(x, digits=4, ...) {
## print function for objects of class MCT
cat("\t", x$title, "\n")
if(x$cv.method == "lsd")
cat("Pairwise")
else
cat("Overall")
cat(" error rate: ", x$alpha, "\nCritical value: ", round(x$crit.value, digits),
" by the ", x$cv.method, " method\n\n", sep="")
cat("Response variable: ", x$response, "\nGroup variable: ", x$groups,
"\n\n", sep="")
cat("Table of paired comparisons, ", round(1 - x$alpha, 4) * 100,
" percent confidence intervals\n excluding 0 are flagged by *.\n", sep="")
pmat <- format(signif(x$table, digits))
pmat <- cbind(pmat, flag=ifelse(x$table[,3]*x$table[,4] > 0, "*", " "))
print(pmat, quote=FALSE)
cat("\nTable of associations among groups\n")
pmat=cbind(Mean=signif(x$means, digits), Size=x$sizes, x$assoc)
print(pmat, quote=FALSE)
cat("\n")
invisible(x)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SpectrumParser.R
\name{SpectrumParser}
\alias{SpectrumParser}
\alias{importSpectrum}
\title{Spectrum parser}
\usage{
importSpectrum(object, file, ...)
}
\arguments{
\item{object}{object extending \code{SpectrumParser}.}
\item{file}{\code{character(1)} with the name of the file from which spectrum
data should be imported.}
}
\value{
a \code{Spectra} object - TODO needs to be discussed!
}
\description{
Classes extending the base \code{SpectrumParser} object are supposed to read
spectrum data from a certain input format and return a standardized output
format.
\code{importSpectrum} reads spectrum data from a file, extracts all fields and
maps them to the corresponding \emph{standard} fields using the parser's schema
definition.
}
\author{
Johannes Rainer
}
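A hedged usage sketch; the concrete parser class and file name below are invented for illustration and are not part of the documented API.
# Hypothetical sketch: 'MzMlParser' stands in for whatever concrete class
# extends SpectrumParser in MSnio; the file path is made up.
library(MSnio)
prs <- new("MzMlParser")
sp  <- importSpectrum(prs, file = "spectrum_01.mzML")
sp   # expected to be a Spectra object, as described above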
|
/man/SpectrumParser.Rd
|
no_license
|
jorainer/MSnio
|
R
| false | true | 845 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SpectrumParser.R
\name{SpectrumParser}
\alias{SpectrumParser}
\alias{importSpectrum}
\title{Spectrum parser}
\usage{
importSpectrum(object, file, ...)
}
\arguments{
\item{object}{object extending \code{SpectrumParser}.}
\item{file}{\code{character(1)} with the name of the file from which spectrum
data should be imported.}
}
\value{
a \code{Spectra} object - TODO needs to be discussed!
}
\description{
Classes extending the base \code{SpectrumParser} object are supposed to read
spectrum data from a certain input format and return a standardized output
format.
\code{importSpectrum} reads spectrum data from a file, extracts all fields and
maps them to the corresponding \emph{standard} fields using the parser's schema
definition.
}
\author{
Johannes Rainer
}
|
DeRNAseq <- function(ct, grps, paired = FALSE, mthds = 0, min.count = 6, min.present = 2, num.cluster=1, just.stat = TRUE,
norm.count = c('DESeq', 'TMM', 'RLE', 'QQ', 'UpperQuantile', 'Median', 'TotalCount'),
norm.logged = c('Loess', 'VST', 'Rlog', 'QQ', 'UpperQuantile', 'Median'), force.norm = FALSE) {
require(DEGandMore);
data(DeMethodMeta);
######################################################################################################
# Internal function
normalizeCnt <- function(cnt, mthds) NormWrapper(cnt, paste('Norm', mthds, sep=''));
normalizeLog <- function(cnt, mthds) {
if (tolower(mthds[1]) %in% c('rlog', 'vst')) NormWrapper(cnt, paste('Norm', mthds, sep='')) else {
cnt[cnt==0] <- 1/3;
logged <- log2(cnt);
NormWrapper(logged, mthd=paste('Norm', mthds, sep=''));
}
}
######################################################################################################
################################################################
# Method names
if (identical(mthds, 0)) mthds <- DeRNAseqMethods(0) else
if (identical(mthds, 1)) mthds <- DeRNAseqMethods(1) else
if (identical(mthds, 2)) mthds <- DeRNAseqMethods(2) else
if (identical(mthds, 3)) mthds <- DeRNAseqMethods(3) else
mthds <- mthds[mthds %in% DeRNAseqMethods()];
if (length(mthds) == 0) mthds <- DeRNAseqMethods();
################################################################
################################################################################################################
# Make sure the 2 groups to be compared are well-defined
grps <- lapply(grps[1:2], function(s) s[s %in% colnames(ct) | (s>0 & s<=ncol(ct))]);
  if (length(grps[[1]])<2 | length(grps[[2]])<2) stop('Not enough samples in one or both groups; at least 2 are required.\n');
if (is.null(names(grps))) names(grps) <- c('A', 'B');
if (is.na(names(grps)[1])) names(grps)[1] <- 'A';
  if (is.na(names(grps)[2])) names(grps)[2] <- 'B';
ct <- as.matrix(ct[, c(grps[[1]], grps[[2]])]);
################################################################################################################
###########################################################################################################
# Only test genes with required number of total read count in all tested samples
if (min.count == 0) min.present <- 0;
  d0 <- ct[rowSums(ct) >= min.count, c(grps[[1]], grps[[2]]), drop=FALSE];
  d0 <- d0[apply(d0, 1, function(c) length(c[c>0]))>=min.present, , drop=FALSE];
  if (nrow(d0) < 1) stop('No genes have total read count of at least ', min.count, '; reduce the cutoff.\n');
###########################################################################################################
#####################################################################################################################
# Prepare normalized data if some methods require it
norm <- DeMethodMeta[mthds, 'Normalization'];
logd <- DeMethodMeta[mthds, 'Logged'];
if (length(norm[norm=='No' & logd=='No'])>0 | force.norm) {
norm1 <- paste('Norm', norm.count[1], sep='');
if (!(norm1 %in% NormMethods())) stop('Normalization method not available: ', sub('Norm', '', norm.count), '\n');
d1 <- NormWrapper(d0, norm1);
};
if (length(norm[logd == 'Yes'])>0 | force.norm) {
norm2 <- paste('Norm', norm.logged[1], sep='');
if (!(norm2 %in% NormMethods())) stop('Normalization method not available: ', sub('Norm', '', norm.logged), '\n');
if (norm.logged[1] %in% c('VST', 'Rlog')) {
d2 <- NormWrapper(d0, norm2);
} else {
d2 <- d0;
d2[d2==0] <- 1/3;
d2 <- log2(d2);
d2 <- NormWrapper(d2, norm2);
}
};
#####################################################################################################################
####################################################################
# Prepare inputs
d <- lapply(mthds, function(m) {
lg <- DeMethodMeta[m, 'Logged']=='Yes';
nm <- DeMethodMeta[m, 'Normalization']=='Yes';
if (!lg & nm) d <- d0 else if (!lg) d <- d1 else d <- d2;
list(method=m, data=d, group=grps, paired=paired, logged=lg);
});
names(d) <- mthds;
####################################################################
cat("Running DE analysis with methods: ", paste(mthds, collapse='; '), '\n');
###################################################################################################
###################################################################################################
if (length(mthds)>1 & num.cluster>1 & require(snow)) {
####################################################################
runDe <- function(d) {
require(DEGandMore);
DeWrapper(d$data, d$group, d$method, d$paired, d$logged)$results;
}
####################################################################
d <- d[rev(order(DeMethodMeta[names(d), 'Speed']))];
nm <- names(d);
cl <- snow::makeCluster(num.cluster, type='SOCK');
stat <- snow::clusterApplyLB(cl, d[nm!='DePlgem'], runDe);
snow::stopCluster(cl);
names(stat) <- names(d)[nm!='DePlgem'];
if ('DePlgem' %in% nm) stat$DePlgem <- runDe(d[['DePlgem']]);
} else {
stat <- lapply(d, function(d) DeWrapper(d$data, d$group, d$method, d$paired, d$logged)$results);
}
stat <- stat[mthds];
###################################################################################################
###################################################################################################
if (length(logd[logd=='Yes']) > 0) {
un <- 2^d2;
un <- un * (mean(d0, na.rm=TRUE)/mean(un, na.rm=TRUE));
m1 <- rowMeans(un[, grps[[1]], drop=FALSE]);
m2 <- rowMeans(un[, grps[[2]], drop=FALSE]);
tb <- cbind(m1, m2, m2-m1);
stat[logd=='Yes'] <- lapply(stat[logd=='Yes'], function(s) {
s$stat[, 1:3] <- tb;
s;
});
}
if (just.stat) stat <- lapply(stat, function(s) s$stat[, 1:6]);
normalized <- list();
if (length(norm[norm=='No' & logd=='No'])>0 | force.norm) normalized$count <- d1;
if (length(logd[logd=='Yes'])>0 | force.norm) normalized$logged <- d2;
input <- list(original=ct, filtered=d0, normalized=normalized, methods=mthds, groups=grps, paired=paired,
minimal.count=min.count, number.cluster=num.cluster, normalization=c(norm.count[1], norm.logged[1]));
list(input=input, output=stat);
}
##########################################################################################################
##########################################################################################################
# Method groups for DE analysis of RNA-seq data
DeRNAseqMethods <- function(group=NA) {
require(DEGandMore);
data(DeMethodMeta);
########################################################
# Method categories
default <- rownames(DeMethodMeta)[DeMethodMeta$Default=='Yes'];
speed <- list(
fast = rownames(DeMethodMeta)[DeMethodMeta$Speed=='Fast'],
medium = rownames(DeMethodMeta)[DeMethodMeta$Speed=='Medium'],
slow = rownames(DeMethodMeta)[DeMethodMeta$Speed=='Slow'],
slower = rownames(DeMethodMeta)[DeMethodMeta$Speed=='Slower']
);
########################################################
group <- group[1];
if (is.na(group)) group <- -1;
if (group==0) default else
if (group==1) as.vector(unlist(speed[1])) else
if (group==2) as.vector(unlist(speed[1:2])) else
if (group==3) as.vector(unlist(speed[1:3])) else
unique(as.vector(unlist(speed)));
}
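A hedged end-to-end sketch of calling DeRNAseq(); the count matrix is simulated, and mthds = 1 selects the fast method group defined by DeRNAseqMethods() above.
# Illustrative only: simulated counts, two groups of three samples each.
library(DEGandMore)
set.seed(1)
cnt <- matrix(rpois(6000, lambda = 20), nrow = 1000,
              dimnames = list(paste0("gene", 1:1000), paste0("s", 1:6)))
grp <- list(A = c("s1", "s2", "s3"), B = c("s4", "s5", "s6"))
res <- DeRNAseq(cnt, grp, mthds = 1, num.cluster = 1)
head(res$output[[1]])   # per-gene statistics from the first fast method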
|
/R/DeRNAseq.R
|
no_license
|
zhezhangsh/DEGandMore
|
R
| false | false | 7,648 |
r
|
DeRNAseq <- function(ct, grps, paired = FALSE, mthds = 0, min.count = 6, min.present = 2, num.cluster=1, just.stat = TRUE,
norm.count = c('DESeq', 'TMM', 'RLE', 'QQ', 'UpperQuantile', 'Median', 'TotalCount'),
norm.logged = c('Loess', 'VST', 'Rlog', 'QQ', 'UpperQuantile', 'Median'), force.norm = FALSE) {
require(DEGandMore);
data(DeMethodMeta);
######################################################################################################
# Internal function
normalizeCnt <- function(cnt, mthds) NormWrapper(cnt, paste('Norm', mthds, sep=''));
normalizeLog <- function(cnt, mthds) {
if (tolower(mthds[1]) %in% c('rlog', 'vst')) NormWrapper(cnt, paste('Norm', mthds, sep='')) else {
cnt[cnt==0] <- 1/3;
logged <- log2(cnt);
NormWrapper(logged, mthd=paste('Norm', mthds, sep=''));
}
}
######################################################################################################
################################################################
# Method names
if (identical(mthds, 0)) mthds <- DeRNAseqMethods(0) else
if (identical(mthds, 1)) mthds <- DeRNAseqMethods(1) else
if (identical(mthds, 2)) mthds <- DeRNAseqMethods(2) else
if (identical(mthds, 3)) mthds <- DeRNAseqMethods(3) else
mthds <- mthds[mthds %in% DeRNAseqMethods()];
if (length(mthds) == 0) mthds <- DeRNAseqMethods();
################################################################
################################################################################################################
# Make sure the 2 groups to be compared are well-defined
grps <- lapply(grps[1:2], function(s) s[s %in% colnames(ct) | (s>0 & s<=ncol(ct))]);
  if (length(grps[[1]])<2 | length(grps[[2]])<2) stop('Not enough samples in one or both groups; at least 2 are required.\n');
if (is.null(names(grps))) names(grps) <- c('A', 'B');
if (is.na(names(grps)[1])) names(grps)[1] <- 'A';
  if (is.na(names(grps)[2])) names(grps)[2] <- 'B';
ct <- as.matrix(ct[, c(grps[[1]], grps[[2]])]);
################################################################################################################
###########################################################################################################
# Only test genes with required number of total read count in all tested samples
if (min.count == 0) min.present <- 0;
  d0 <- ct[rowSums(ct) >= min.count, c(grps[[1]], grps[[2]]), drop=FALSE];
  d0 <- d0[apply(d0, 1, function(c) length(c[c>0]))>=min.present, , drop=FALSE];
  if (nrow(d0) < 1) stop('No genes have total read count of at least ', min.count, '; reduce the cutoff.\n');
###########################################################################################################
#####################################################################################################################
# Prepare normalized data if some methods require it
norm <- DeMethodMeta[mthds, 'Normalization'];
logd <- DeMethodMeta[mthds, 'Logged'];
if (length(norm[norm=='No' & logd=='No'])>0 | force.norm) {
norm1 <- paste('Norm', norm.count[1], sep='');
if (!(norm1 %in% NormMethods())) stop('Normalization method not available: ', sub('Norm', '', norm.count), '\n');
d1 <- NormWrapper(d0, norm1);
};
if (length(norm[logd == 'Yes'])>0 | force.norm) {
norm2 <- paste('Norm', norm.logged[1], sep='');
if (!(norm2 %in% NormMethods())) stop('Normalization method not available: ', sub('Norm', '', norm.logged), '\n');
if (norm.logged[1] %in% c('VST', 'Rlog')) {
d2 <- NormWrapper(d0, norm2);
} else {
d2 <- d0;
d2[d2==0] <- 1/3;
d2 <- log2(d2);
d2 <- NormWrapper(d2, norm2);
}
};
#####################################################################################################################
####################################################################
# Prepare inputs
d <- lapply(mthds, function(m) {
lg <- DeMethodMeta[m, 'Logged']=='Yes';
nm <- DeMethodMeta[m, 'Normalization']=='Yes';
if (!lg & nm) d <- d0 else if (!lg) d <- d1 else d <- d2;
list(method=m, data=d, group=grps, paired=paired, logged=lg);
});
names(d) <- mthds;
####################################################################
cat("Running DE analysis with methods: ", paste(mthds, collapse='; '), '\n');
###################################################################################################
###################################################################################################
if (length(mthds)>1 & num.cluster>1 & require(snow)) {
####################################################################
runDe <- function(d) {
require(DEGandMore);
DeWrapper(d$data, d$group, d$method, d$paired, d$logged)$results;
}
####################################################################
d <- d[rev(order(DeMethodMeta[names(d), 'Speed']))];
nm <- names(d);
cl <- snow::makeCluster(num.cluster, type='SOCK');
stat <- snow::clusterApplyLB(cl, d[nm!='DePlgem'], runDe);
snow::stopCluster(cl);
names(stat) <- names(d)[nm!='DePlgem'];
if ('DePlgem' %in% nm) stat$DePlgem <- runDe(d[['DePlgem']]);
} else {
stat <- lapply(d, function(d) DeWrapper(d$data, d$group, d$method, d$paired, d$logged)$results);
}
stat <- stat[mthds];
###################################################################################################
###################################################################################################
if (length(logd[logd=='Yes']) > 0) {
un <- 2^d2;
un <- un * (mean(d0, na.rm=TRUE)/mean(un, na.rm=TRUE));
m1 <- rowMeans(un[, grps[[1]], drop=FALSE]);
m2 <- rowMeans(un[, grps[[2]], drop=FALSE]);
tb <- cbind(m1, m2, m2-m1);
stat[logd=='Yes'] <- lapply(stat[logd=='Yes'], function(s) {
s$stat[, 1:3] <- tb;
s;
});
}
if (just.stat) stat <- lapply(stat, function(s) s$stat[, 1:6]);
normalized <- list();
if (length(norm[norm=='No' & logd=='No'])>0 | force.norm) normalized$count <- d1;
if (length(logd[logd=='Yes'])>0 | force.norm) normalized$logged <- d2;
input <- list(original=ct, filtered=d0, normalized=normalized, methods=mthds, groups=grps, paired=paired,
minimal.count=min.count, number.cluster=num.cluster, normalization=c(norm.count[1], norm.logged[1]));
list(input=input, output=stat);
}
##########################################################################################################
##########################################################################################################
# Method groups for DE analysis of RNA-seq data
DeRNAseqMethods <- function(group=NA) {
require(DEGandMore);
data(DeMethodMeta);
########################################################
# Method categories
default <- rownames(DeMethodMeta)[DeMethodMeta$Default=='Yes'];
speed <- list(
fast = rownames(DeMethodMeta)[DeMethodMeta$Speed=='Fast'],
medium = rownames(DeMethodMeta)[DeMethodMeta$Speed=='Medium'],
slow = rownames(DeMethodMeta)[DeMethodMeta$Speed=='Slow'],
slower = rownames(DeMethodMeta)[DeMethodMeta$Speed=='Slower']
);
########################################################
group <- group[1];
if (is.na(group)) group <- -1;
if (group==0) default else
if (group==1) as.vector(unlist(speed[1])) else
if (group==2) as.vector(unlist(speed[1:2])) else
if (group==3) as.vector(unlist(speed[1:3])) else
unique(as.vector(unlist(speed)));
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gh_rate_limit.R
\name{gh_rate_limit}
\alias{gh_rate_limit}
\title{Return GitHub user's current rate limits}
\usage{
gh_rate_limit(
response = NULL,
.token = NULL,
.api_url = NULL,
.send_headers = NULL
)
}
\arguments{
\item{response}{\code{gh_response} object from a previous \code{gh} call, rate
limit values are determined from values in the response header.
Optional argument, if missing a call to "GET /rate_limit" will be made.}
\item{.token}{Authentication token. Defaults to \code{GITHUB_PAT} or
\code{GITHUB_TOKEN} environment variables, in this order if any is set.
See \code{\link[=gh_token]{gh_token()}} if you need more flexibility, e.g. different tokens
for different GitHub Enterprise deployments.}
\item{.api_url}{GitHub API URL (default: \url{https://api.github.com}). Used
if \code{endpoint} just contains a path. Defaults to \code{GITHUB_API_URL}
environment variable if set.}
\item{.send_headers}{Named character vector of header field values
(except \code{Authorization}, which is handled via \code{.token}). This can be
used to override or augment the default \code{User-Agent} header:
\code{"https://github.com/r-lib/gh"}.}
}
\value{
A \code{list} object containing the overall \code{limit}, \code{remaining} limit, and the
limit \code{reset} time.
}
\description{
Reports the current rate limit status for the authenticated user,
either pulls this information from a previous successful request
or directly from the GitHub API.
}
\details{
Further details on GitHub's API rate limit policies are available at
\url{https://docs.github.com/v3/#rate-limiting}.
}
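A short usage sketch; it assumes a valid token is available through the GITHUB_PAT environment variable, and the repository queried is only an example.
# Reuse the rate-limit headers from a previous gh() response.
library(gh)
res    <- gh("GET /repos/{owner}/{repo}", owner = "r-lib", repo = "gh")
limits <- gh_rate_limit(res)
limits$remaining   # requests left before the reset time in limits$reset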
|
/man/gh_rate_limit.Rd
|
permissive
|
krlmlr/gh
|
R
| false | true | 1,671 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gh_rate_limit.R
\name{gh_rate_limit}
\alias{gh_rate_limit}
\title{Return GitHub user's current rate limits}
\usage{
gh_rate_limit(
response = NULL,
.token = NULL,
.api_url = NULL,
.send_headers = NULL
)
}
\arguments{
\item{response}{\code{gh_response} object from a previous \code{gh} call, rate
limit values are determined from values in the response header.
Optional argument, if missing a call to "GET /rate_limit" will be made.}
\item{.token}{Authentication token. Defaults to \code{GITHUB_PAT} or
\code{GITHUB_TOKEN} environment variables, in this order if any is set.
See \code{\link[=gh_token]{gh_token()}} if you need more flexibility, e.g. different tokens
for different GitHub Enterprise deployments.}
\item{.api_url}{GitHub API URL (default: \url{https://api.github.com}). Used
if \code{endpoint} just contains a path. Defaults to \code{GITHUB_API_URL}
environment variable if set.}
\item{.send_headers}{Named character vector of header field values
(except \code{Authorization}, which is handled via \code{.token}). This can be
used to override or augment the default \code{User-Agent} header:
\code{"https://github.com/r-lib/gh"}.}
}
\value{
A \code{list} object containing the overall \code{limit}, \code{remaining} limit, and the
limit \code{reset} time.
}
\description{
Reports the current rate limit status for the authenticated user,
either pulls this information from a previous successful request
or directly from the GitHub API.
}
\details{
Further details on GitHub's API rate limit policies are available at
\url{https://docs.github.com/v3/#rate-limiting}.
}
|
## read txt file from the data directory
## while specifying the types of the columns
powerData <- read.table(file ="./data/household_power_consumption.txt", header=TRUE,sep=";",na.strings="?")
## subset only the data from the dates 2007-02-01 and 2007-02-02
powerDataUse <- powerData[powerData$Date %in% c("1/2/2007","2/2/2007") ,]
## convert the Data and Time columns to Date and Time formats
powerDataUse$DateTime <- strptime(paste(powerDataUse$Date, powerDataUse$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
## create plot and print to png file
png(file="plot2.png",width=400,height=350,res=72)
plot(powerDataUse$DateTime, powerDataUse$Global_active_power,type="l",xlab="",ylab="Global Active Power (kilowatts)")
dev.off()
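If the column types should also be fixed at read time, colClasses can be supplied explicitly; a sketch assuming the usual nine-column layout of this UCI dataset (two character columns followed by seven numeric ones).
# Optional variant of the read.table() call above with explicit column types.
powerData <- read.table("./data/household_power_consumption.txt", header = TRUE,
                        sep = ";", na.strings = "?",
                        colClasses = c(rep("character", 2), rep("numeric", 7)))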
|
/plot2.R
|
no_license
|
nadinush/ExData_Plotting1
|
R
| false | false | 723 |
r
|
## read txt file from the data directory
## while specifying the types of the columns
powerData <- read.table(file ="./data/household_power_consumption.txt", header=TRUE,sep=";",na.strings="?")
## subset only the data from the dates 2007-02-01 and 2007-02-02
powerDataUse <- powerData[powerData$Date %in% c("1/2/2007","2/2/2007") ,]
## convert the Data and Time columns to Date and Time formats
powerDataUse$DateTime <- strptime(paste(powerDataUse$Date, powerDataUse$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
## create plot and print to png file
png(file="plot2.png",width=400,height=350,res=72)
plot(powerDataUse$DateTime, powerDataUse$Global_active_power,type="l",xlab="",ylab="Global Active Power (kilowatts)")
dev.off()
|
#Workspace (basics).
#All of the examples below should be used in the R workspace.
#All of the examples below are built-in R functions.
#1. ls() is a function that shows the variables that have been declared.
#2. rm(VARIABLE NAME) is a function that removes the variable passed as an argument; it can also remove several variables, or all of the objects given in a list.
#Ex: rm(VARIABLE NAME 1, VARIABLE NAME 2) or rm(LIST OF VARIABLES) or rm(list = ls()).
#3. class(OBJECT) is a function that returns the type of that object.
#4. is.DATA_TYPE(OBJECT) is a function that returns a logical value (TRUE or FALSE).
#Ex: is.integer(4), or var <- 4 followed by is.numeric(var), or is.character("Oi").
#5. as.DATA_TYPE(OBJECT) is a function that converts between data types whenever the conversion is possible.
#Ex: as.integer("4") or as.integer(4.3) or as.integer(4); every one of these results is 4.
#Ex: as.logical(1), as.logical("True") or as.logical("T") give TRUE; as.logical(0) gives FALSE.
#6. getwd() returns the directory you are currently working in.
#7. setwd("New directory path") changes the folder your R project will work in. (The slashes must be forward slashes: /)
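A self-contained console session illustrating the functions described above; the values in the comments are what base R returns.
x <- 4L; y <- "hi"
ls()               # "x" "y"
class(x)           # "integer"
is.numeric(x)      # TRUE
as.integer(4.3)    # 4
as.logical("T")    # TRUE
rm(y); ls()        # "x"
getwd()            # prints the current working directory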
|
/trabalhando_com_workspace_do_R.R
|
no_license
|
sheyslong/Estudo-sobre-Linguagem-R
|
R
| false | false | 1,228 |
r
|
#Workspace (basics).
#All of the examples below should be used in the R workspace.
#All of the examples below are built-in R functions.
#1. ls() is a function that shows the variables that have been declared.
#2. rm(VARIABLE NAME) is a function that removes the variable passed as an argument; it can also remove several variables, or all of the objects given in a list.
#Ex: rm(VARIABLE NAME 1, VARIABLE NAME 2) or rm(LIST OF VARIABLES) or rm(list = ls()).
#3. class(OBJECT) is a function that returns the type of that object.
#4. is.DATA_TYPE(OBJECT) is a function that returns a logical value (TRUE or FALSE).
#Ex: is.integer(4), or var <- 4 followed by is.numeric(var), or is.character("Oi").
#5. as.DATA_TYPE(OBJECT) is a function that converts between data types whenever the conversion is possible.
#Ex: as.integer("4") or as.integer(4.3) or as.integer(4); every one of these results is 4.
#Ex: as.logical(1), as.logical("True") or as.logical("T") give TRUE; as.logical(0) gives FALSE.
#6. getwd() returns the directory you are currently working in.
#7. setwd("New directory path") changes the folder your R project will work in. (The slashes must be forward slashes: /)
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/misc.R
\name{normalize.freqs}
\alias{normalize.freqs}
\title{Normalizes allele frequencies such that their sum is 1}
\usage{
normalize.freqs(freqs)
}
\arguments{
\item{freqs}{list of per locus allele frequencies}
}
\value{
list
}
\description{
Normalizes allele frequencies such that their sum is 1
}
\examples{
data(freqsNLsgmplus)
fr0 <- normalize.freqs(freqsNLsgmplus)
stopifnot(all.equal(sapply(fr0,sum),setNames(rep(1,length(fr0)),names(fr0))))
}
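For intuition, a tiny hand-made illustration to go with the bundled example; the locus and allele labels are invented.
# Frequencies that sum to 0.9 are rescaled so that they sum to exactly 1.
library(DNAprofiles)
fr <- list(L1 = c(a = 0.2, b = 0.3, c = 0.4))
normalize.freqs(fr)$L1   # 0.2222, 0.3333, 0.4444 (sums to 1)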
|
/man/normalize.freqs.Rd
|
no_license
|
cran/DNAprofiles
|
R
| false | false | 539 |
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/misc.R
\name{normalize.freqs}
\alias{normalize.freqs}
\title{Normalizes allele frequencies such that their sum is 1}
\usage{
normalize.freqs(freqs)
}
\arguments{
\item{freqs}{list of per locus allele frequencies}
}
\value{
list
}
\description{
Normalizes allele frequencies such that their sum is 1
}
\examples{
data(freqsNLsgmplus)
fr0 <- normalize.freqs(freqsNLsgmplus)
stopifnot(all.equal(sapply(fr0,sum),setNames(rep(1,length(fr0)),names(fr0))))
}
|
library(phreeqc)
### Name: phrGetSelectedOutput
### Title: Returns the contents of the selected output as a list of data
### frames.
### Aliases: phrGetSelectedOutput
### ** Examples
# Load database and run ex2
phrLoadDatabaseString(phreeqc.dat)
phrRunString(ex2)
# display a summary of the results
df <- phrGetSelectedOutput()
summary(df$n1)
|
/data/genthat_extracted_code/phreeqc/examples/phrGetSelectedOutput.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 355 |
r
|
library(phreeqc)
### Name: phrGetSelectedOutput
### Title: Returns the contents of the selected output as a list of data
### frames.
### Aliases: phrGetSelectedOutput
### ** Examples
# Load database and run ex2
phrLoadDatabaseString(phreeqc.dat)
phrRunString(ex2)
# display a summary of the results
df <- phrGetSelectedOutput()
summary(df$n1)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ConceptSet.R
\name{resolveConceptSet}
\alias{resolveConceptSet}
\title{Resolve a concept set to the included standard concept IDs}
\usage{
resolveConceptSet(conceptSetDefinition, baseUrl, vocabularySourceKey = NULL)
}
\arguments{
\item{conceptSetDefinition}{A concept set definition, for example as obtained through the \code{\link{getConceptSetDefinition}} function,
or taken from a cohort definition.}
\item{baseUrl}{The base URL for the WebApi instance, for example:
"http://server.org:80/WebAPI".}
\item{vocabularySourceKey}{The source key of the Vocabulary. By default, the priority Vocabulary is
used.}
}
\value{
A vector of standard concept ids.
}
\description{
Resolve a concept set to the included standard concept IDs
}
\details{
Resolve a concept set to the included standard concept IDs
}
\examples{
\dontrun{
conceptSetDefinition <- getConceptSetDefinition(conceptSetId = 282,
baseUrl = "http://server.org:80/WebAPI")
conceptIds <- resolveConceptSet(conceptSetDefinition = conceptSetDefinition,
baseUrl = "http://server.org:80/WebAPI")
}
}
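The returned concept IDs are plain integers, so they can be dropped straight into downstream filters; a hedged continuation of the \dontrun example above (the table and column names are illustrative only).
# Hypothetical follow-on: build a SQL IN-list from the resolved concept IDs.
sql <- sprintf("SELECT person_id FROM condition_occurrence WHERE condition_concept_id IN (%s)",
               paste(conceptIds, collapse = ", "))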
|
/man/resolveConceptSet.Rd
|
permissive
|
OHDSI/ROhdsiWebApi
|
R
| false | true | 1,215 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ConceptSet.R
\name{resolveConceptSet}
\alias{resolveConceptSet}
\title{Resolve a concept set to the included standard concept IDs}
\usage{
resolveConceptSet(conceptSetDefinition, baseUrl, vocabularySourceKey = NULL)
}
\arguments{
\item{conceptSetDefinition}{A concept set definition, for example as obtained through the \code{\link{getConceptSetDefinition}} function,
or taken from a cohort definition.}
\item{baseUrl}{The base URL for the WebApi instance, for example:
"http://server.org:80/WebAPI".}
\item{vocabularySourceKey}{The source key of the Vocabulary. By default, the priority Vocabulary is
used.}
}
\value{
A vector of standard concept ids.
}
\description{
Resolve a concept set to the included standard concept IDs
}
\details{
Resolve a concept set to the included standard concept IDs
}
\examples{
\dontrun{
conceptSetDefinition <- getConceptSetDefinition(conceptSetId = 282,
baseUrl = "http://server.org:80/WebAPI")
conceptIds <- resolveConceptSet(conceptSetDefinition = conceptSetDefinition,
baseUrl = "http://server.org:80/WebAPI")
}
}
|
library(shiny)
source("word_prediction.R")
### Function `predict_w4(input_text, ngram4.dt)`
### predicts the next word from a trigram input.
### arguments: 1) input_text, char;
### 2) ngram4.dt, data.table of 4grams and their frequencies.
### 1) processes the input text,
### 2) makes a data.table from the last 3 words of the input text,
### 3) selects from an existing data.table of 4grams
### those that match the input trigram, and
### 4) returns a list `tops` of the 4th words ordered by most frequent.
load("tot.freqs_100_10w.RData")
### That loads the data.table of 4grams and frequencies
### `tot.freqs`
fix_apo <- function(word){
## fix the apostrophe in contractions.
wordN <- ifelse(grepl("'",word),gsub("'", "\\'",word,fixed=T),word)
wordN
}
na2commons <- function(word){
## `word` is a list of words.
commons <- c("the", "be", "to", "of", "and", "a")
if(length(word)==1){
if(is.na(word) | grepl("^na$",word, ignore.case=T))
word <- commons[round(runif(1,1,6),0)]
} else{
for(i in 1:length(word))
if(is.na(word[i]) | grepl("^na$",word[i], ignore.case=T))
word[i] <- commons[i]
}
word
}
insert_choice <- function(word, end_space){
## amends the input text with the chosen word.
## `text1` is the input text field (see file 'ui.R').
## `end_space` is boolean, and is defined in the shinyServer function.
paste("$('#text1').val($('#text1').val() + '",
ifelse(end_space, ""," "),
word, " ", "').trigger('change'); var input =
$('#text1'); input[0].selectionStart =
input[0].selectionEnd = input.val().length;",
sep='')
}
babble<-function(intext,N=1,top=TRUE){
phrase <- ""
for(i in 1:N){
ifelse(top,
wordnext <- na2commons(predict_w4(intext,tot.freqs)[1]),
wordnext <- na2commons(predict_w4(intext,tot.freqs)[round(runif(1,1,3),0)])
)
phrase <- ifelse(phrase == "", wordnext, paste(phrase,wordnext))
                intext <- paste(intext, wordnext)  # append only the newly predicted word
}
phrase
}
clear <- "$('#text1').val('');
var input = $('#text1');
input[0].selectionStart = input[0].selectionEnd = input.val().length;"
shinyServer(
function(input, output, session) {
intext <- reactive({input$text1})
word <- reactive(predict_w4(intext(),tot.freqs)[1:3])
worda <- reactive( na2commons(word()) )
end_space <- reactive( grepl(" $", intext()) )
output$topPanel <- renderUI({
tags$script(src="http://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js")
button1Click <- insert_choice(fix_apo(worda()[1]),end_space())
button2Click <- insert_choice(fix_apo(worda()[2]),end_space())
button3Click <- insert_choice(fix_apo(worda()[3]),end_space())
tags$div(
tags$button(type="button", id="word1but", worda()[1],
class="btn action-button shiny-bound-input",
onclick=button1Click)
,tags$button(type="button", id="word2but", worda()[2],
class="btn action-button shiny-bound-input",
onclick=button2Click)
,tags$button(type="button", id="word3but", worda()[3],
class="btn action-button shiny-bound-input",
onclick=button3Click)
)
})
output$bottomPanel <- renderUI({
buttonRClick <- insert_choice(fix_apo(babble(intext(),
input$num_bab,input$rand_bab)),end_space())
tags$div(
tags$button(type="button", id="randombut", "Babble",
class="btn action-button shiny-bound-input",
onclick=buttonRClick)
,tags$button(type="button", id="clearbut", "Clear",
class="btn action-button shiny-bound-input",
onclick=clear)
)
})
}
)
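For context, the server above expects a text input named text1 plus the two uiOutput slots it renders; a minimal ui.R that would pair with it might look like the following sketch (the widget labels and defaults are guesses; only the input/output IDs are taken from server.R).
# Hypothetical ui.R counterpart to the server above.
library(shiny)
shinyUI(fluidPage(
  titlePanel("Next-word prediction"),
  textInput("text1", label = "Type a phrase:", width = "100%"),
  uiOutput("topPanel"),                                  # three suggested-word buttons
  numericInput("num_bab", "Number of words to babble:", value = 1, min = 1),
  checkboxInput("rand_bab", "Always take the top prediction", value = TRUE),
  uiOutput("bottomPanel")                                # Babble / Clear buttons
))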
|
/next-word-predict/server.R
|
no_license
|
GopalaKrishnaChaitanyaY/Next_Word_Predictor
|
R
| false | false | 4,133 |
r
|
library(shiny)
source("word_prediction.R")
### Function `predict_w4(input_text, ngram4.dt)`
### predicts the next word from a trigram input.
### arguments: 1) input_text, char;
### 2) ngram4.dt, data.table of 4grams and their frequencies.
### 1) processes the input text,
### 2) makes a data.table from the last 3 words of the input text,
### 3) selects from an existing data.table of 4grams
### those that match the input trigram, and
### 4) returns a list `tops` of the 4th words ordered by most frequent.
load("tot.freqs_100_10w.RData")
### That loads the data.table of 4grams and frequencies
### `tot.freqs`
fix_apo <- function(word){
## fix the apostrophe in contractions.
wordN <- ifelse(grepl("'",word),gsub("'", "\\'",word,fixed=T),word)
wordN
}
na2commons <- function(word){
## `word` is a list of words.
commons <- c("the", "be", "to", "of", "and", "a")
if(length(word)==1){
if(is.na(word) | grepl("^na$",word, ignore.case=T))
word <- commons[round(runif(1,1,6),0)]
} else{
for(i in 1:length(word))
if(is.na(word[i]) | grepl("^na$",word[i], ignore.case=T))
word[i] <- commons[i]
}
word
}
insert_choice <- function(word, end_space){
## amends the input text with the chosen word.
## `text1` is the input text field (see file 'ui.R').
## `end_space` is boolean, and is defined in the shinyServer function.
paste("$('#text1').val($('#text1').val() + '",
ifelse(end_space, ""," "),
word, " ", "').trigger('change'); var input =
$('#text1'); input[0].selectionStart =
input[0].selectionEnd = input.val().length;",
sep='')
}
babble<-function(intext,N=1,top=TRUE){
phrase <- ""
for(i in 1:N){
ifelse(top,
wordnext <- na2commons(predict_w4(intext,tot.freqs)[1]),
wordnext <- na2commons(predict_w4(intext,tot.freqs)[round(runif(1,1,3),0)])
)
phrase <- ifelse(phrase == "", wordnext, paste(phrase,wordnext))
                intext <- paste(intext, wordnext)  # append only the newly predicted word
}
phrase
}
clear <- "$('#text1').val('');
var input = $('#text1');
input[0].selectionStart = input[0].selectionEnd = input.val().length;"
shinyServer(
function(input, output, session) {
intext <- reactive({input$text1})
word <- reactive(predict_w4(intext(),tot.freqs)[1:3])
worda <- reactive( na2commons(word()) )
end_space <- reactive( grepl(" $", intext()) )
output$topPanel <- renderUI({
tags$script(src="http://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js")
button1Click <- insert_choice(fix_apo(worda()[1]),end_space())
button2Click <- insert_choice(fix_apo(worda()[2]),end_space())
button3Click <- insert_choice(fix_apo(worda()[3]),end_space())
tags$div(
tags$button(type="button", id="word1but", worda()[1],
class="btn action-button shiny-bound-input",
onclick=button1Click)
,tags$button(type="button", id="word2but", worda()[2],
class="btn action-button shiny-bound-input",
onclick=button2Click)
,tags$button(type="button", id="word3but", worda()[3],
class="btn action-button shiny-bound-input",
onclick=button3Click)
)
})
output$bottomPanel <- renderUI({
buttonRClick <- insert_choice(fix_apo(babble(intext(),
input$num_bab,input$rand_bab)),end_space())
tags$div(
tags$button(type="button", id="randombut", "Babble",
class="btn action-button shiny-bound-input",
onclick=buttonRClick)
,tags$button(type="button", id="clearbut", "Clear",
class="btn action-button shiny-bound-input",
onclick=clear)
)
})
}
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualizeMorphing.R
\name{visualizeMorphing}
\alias{visualizeMorphing}
\title{Fancy visualization of morphing.}
\usage{
visualizeMorphing(x, y, point.matching = NULL, alphas = c(0.25, 0.5, 0.75),
arrows = TRUE, in.one.plot = TRUE, point.colour = NULL)
}
\arguments{
\item{x}{[\code{Network}]\cr
First network.}
\item{y}{[\code{Network}]\cr
Second network.}
\item{point.matching}{[\code{matrix}]\cr
Point matching which shall be used for morphing. If \code{NULL}, an optimal
point matching is generated via function \code{\link{getOptimalPointMatching}}.
Default is \code{NULL}.}
\item{alphas}{[\code{numeric}]\cr
Vector of coefficients 'alpha' for convex combinations.}
\item{arrows}{[\code{logical(1)}]\cr
Draw arrows originating in the points of \code{x} and ending in the
points matched in \code{y}. Default is \code{TRUE}.}
\item{in.one.plot}{[\code{logical(1)}]\cr
Currently the function offers two different types of plot. If \code{in.one.plot}
is \code{TRUE}, which is the default value, the morphing is depicted in one plot.
This is in particular useful for small instances. If set to \code{FALSE},
a matrix of plots is generated via \code{\link[ggplot2]{facet_grid}}. One
plot for each alpha value in \code{alphas}.}
\item{point.colour}{[\code{character(1)}]\cr
Which colour to use for the non-depot points?
Default is \code{NULL}. In this case the points are coloured by membership.
Only considered if \code{in.one.plot} is \code{FALSE}.}
}
\value{
[\code{\link[ggplot2]{ggplot}}]
}
\description{
Takes two instances of equal size and some alpha values. Computes the point matching
and morphings for the alpha values and visualizes the transition of points
of the first instance towards their matched counterparts of the second instance
with two different methods.
}
\seealso{
\code{\link{morphInstances}}
}
|
/man/visualizeMorphing.Rd
|
no_license
|
ani601/netgen
|
R
| false | true | 1,905 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualizeMorphing.R
\name{visualizeMorphing}
\alias{visualizeMorphing}
\title{Fancy visualization of morphing.}
\usage{
visualizeMorphing(x, y, point.matching = NULL, alphas = c(0.25, 0.5, 0.75),
arrows = TRUE, in.one.plot = TRUE, point.colour = NULL)
}
\arguments{
\item{x}{[\code{Network}]\cr
First network.}
\item{y}{[\code{Network}]\cr
Second network.}
\item{point.matching}{[\code{matrix}]\cr
Point matching which shall be used for morphing. If \code{NULL}, an optimal
point matching is generated via function \code{\link{getOptimalPointMatching}}.
Default is \code{NULL}.}
\item{alphas}{[\code{numeric}]\cr
Vector of coefficients 'alpha' for convex combinations.}
\item{arrows}{[\code{logical(1)}]\cr
Draw arrows originating in the points of \code{x} and ending in the
points matched in \code{y}. Default is \code{TRUE}.}
\item{in.one.plot}{[\code{logical(1)}]\cr
Currently the function offers two different types of plot. If \code{in.one.plot}
is \code{TRUE}, which is the default value, the morphing is depicted in one plot.
This is in particular useful for small instances. If set to \code{FALSE},
a matrix of plots is generated via \code{\link[ggplot2]{facet_grid}}. One
plot for each alpha value in \code{alphas}.}
\item{point.colour}{[\code{character(1)}]\cr
Which colour to use for the non-depot points?
Default is \code{NULL}. In this case the points are coloured by membership.
Only considered if \code{in.one.plot} is \code{FALSE}.}
}
\value{
[\code{\link[ggplot2]{ggplot}}]
}
\description{
Takes two instances of equal size and some alpha values. Computes the point matching
and morphings for the alpha values and visualizes the transition of points
of the first instance towards their matched counterparts of the second instance
with two different methods.
}
\seealso{
\code{\link{morphInstances}}
}
|
/plot3.R
|
no_license
|
levilynn/ExData_Plotting1
|
R
| false | false | 1,102 |
r
| ||
# libraries ----------
librarian::shelf(
tidyverse, lubridate, ggsci, ggrepel, janitor, glue, here, ggtext, patchwork
)
source(here("Section 3 - Lockdown modeling", "src", "extract_cfr.R"))
# # use maharashtra pi schedule? use kerala cfr schedule? ----------
mh <- TRUE
if (mh == TRUE) {
tmp_outname <- "fig4_death_plot.pdf"
plt_title <- "Predicted number of daily COVID-19 deaths under moderate lockdown effect"
} else {
tmp_outname <- "figS4_death_plot.pdf"
plt_title <- "Predicted number of daily COVID-19 deaths under strong lockdown effect"
}
# load data -----------
obs <- read_csv("https://api.covid19india.org/csv/latest/case_time_series.csv",
col_types = cols()) %>%
clean_names() %>%
rename(
daily_cases = daily_confirmed,
daily_deaths = daily_deceased,
total_cases = total_confirmed,
total_deaths = total_deceased
) %>%
select(-date) %>%
rename(date = date_ymd) %>%
filter(date >= "2021-02-15") %>%
mutate(cfr = daily_deaths/daily_cases)
scenarios <- c("2021-03-01", "2021-03-15", "2021-03-30",
"2021-04-15", "2021-04-30", "no_intervention")
for (i in seq_along(scenarios)) {
if (mh == TRUE) {
tmp_filename <- glue("{scenarios[i]}_mh_smooth1_data.txt")
} else {
tmp_filename <- glue("{scenarios[i]}_smooth1_data.txt")
}
if (i == 1) {
p <- read_tsv(here("Section 3 - Lockdown modeling", "data",
tmp_filename),
col_types = cols()) %>%
mutate(scenario = scenarios[i])
} else {
p <- bind_rows(p,
read_tsv(here("Section 3 - Lockdown modeling", "data",
tmp_filename),
col_types = cols()) %>%
mutate(scenario = scenarios[i]))
}
}
p <- p %>%
mutate(
scenario = paste(
trimws(format(as.Date(scenario), '%B')),
trimws(format(as.Date(scenario), '%e'))
)
) %>%
mutate(
scenario = case_when(
scenario == "NA NA" ~ "No intervention",
T ~ scenario
)
) %>%
drop_na(incidence)
# extract CFR schedule and get plot defaults -----------
d <- extract_cfr()
# plt_def <- get_plt_def(mh = mh, kl = kl)
# prepare data -----------
clean_prep <- function(x, var) {
none <- obs %>% clean_scenario_cfr(p = x, use_cfr = {{ var}}, stop_obs = "2021-05-15", scen = "No intervention")
mar_15 <- obs %>% clean_scenario_cfr(p = x, use_cfr = {{ var }}, stop_obs = "2021-03-15", scen = "March 15")
mar_30 <- obs %>% clean_scenario_cfr(p = x, use_cfr = {{ var }}, stop_obs = "2021-03-30", scen = "March 30")
apr_15 <- obs %>% clean_scenario_cfr(p = x, use_cfr = {{ var }}, stop_obs = "2021-04-15", scen = "April 15")
total <- obs %>%
filter(date <= "2021-05-15") %>%
select(date, daily_deaths) %>%
rename(incidence = daily_deaths) %>%
add_column(scenario = "Observed") %>%
add_row(mar_15) %>%
add_row(mar_30) %>%
add_row(apr_15)
total.smoothed <- total %>%
filter(date <= "2021-05-15") %>%
nest(data = c(date, incidence)) %>%
mutate(m = purrr::map(data, loess, formula = incidence ~ as.numeric(date), span = 0.5),
fitted = purrr::map(m, `[[`, "fitted")) %>%
select(-m) %>%
unnest(cols = c(data, fitted))
total_smoothed_plot <- total.smoothed %>%
filter(scenario == "Observed") %>%
filter(date <= "2021-05-15") %>%
add_row(total.smoothed %>%
filter(scenario == "March 15",
date >= "2021-03-09")) %>%
add_row(total.smoothed %>%
filter(scenario == "March 30",
date >= "2021-03-24")) %>%
add_row(total.smoothed %>%
filter(scenario == "April 15",
date >= "2021-04-08")) %>%
mutate(scenario = factor(scenario, levels = c("Observed", "March 15", "March 30",
"April 15")))%>%
filter(date <= "2021-05-15")
return(total_smoothed_plot)
}
# tsp_india <- total_smoothed_plot
tsp_india <- clean_prep(x = p, var = cfr_t7)
tsp_mh <- clean_prep(x = p, var = cfr_mh_t7)
tsp_kl <- clean_prep(x = p, var = cfr_kl_t7)
# plot ----------
death_plt <- function(dat, title,
tmp_nudge = 500, tmp_repel_y = c(250, rep(4000, 3))) {
deaths_p <- dat %>%
ggplot() +
geom_line(aes(x = date, y = fitted, color = scenario), size = 1) +
scale_colour_lancet() +
xlab("Date") +
ylab("Daily deaths") +
geom_vline(data = dat %>%
group_by(scenario) %>%
filter(date == min(date)) %>%
dplyr::ungroup() %>%
select(scenario, date) %>%
filter(!(scenario %in% c("Observed", "No intervention"))),
aes(xintercept = date, color = scenario),
linetype = 'dashed') +
geom_label_repel(data = dat %>%
group_by(scenario) %>%
filter(fitted == max(fitted)) %>%
dplyr::ungroup() %>%
select(scenario, date, fitted) %>%
filter(!(scenario %in% c("Observed", "No intervention"))),
aes(x = date,
y = fitted,
label = paste0(formatC(round(fitted), format="f", big.mark=",", digits=0), " deaths"),
color = scenario,
family = "Lato"),
nudge_y = tmp_nudge,
nudge_x = -10,
size = 4,
show.legend = FALSE,
segment.size = 1) +
geom_text_repel(data = dat %>%
group_by(scenario) %>%
filter(date == min(date)) %>%
dplyr::ungroup() %>%
select(scenario, date, fitted) %>%
mutate(text = c("Observed data", "March 15\nlockdown", "March 30\nlockdown",
"April 15\nlockdown"),
x = as.Date(c("2021-03-01", "2021-03-09", "2021-03-24", "2021-04-08")),
y = tmp_repel_y),
aes(x = x,
y = y,
label = text,
color = scenario,
family = "Lato"),
nudge_x = -5,
size = 4,
show.legend = FALSE,
segment.size = 0) +
guides(color = guide_legend(nrow = 1)) +
labs(title = title,
y = "Daily deaths",
x = "",
color = "Date of intervention") +
scale_y_continuous(labels = scales::comma) +
scale_x_date(date_labels = "%B") +
theme_classic() +
theme(
text = element_text(family = "Lato"),
axis.text.x = element_text(size = 11, vjust = 0.5),
axis.text.y = element_text(size = 11),
axis.title.x = element_text(size = 11, face = "bold"),
axis.title.y = element_text(size = 11, face = "bold"),
legend.title = element_text(size = 11, face = "bold"),
legend.text = element_text(size = 11, face = "bold"),
legend.position = "none",
plot.title = element_text(size = 14, face = "bold"),
plot.subtitle = element_text(size = 11, hjust = 0, color = "gray40"),
plot.caption = element_markdown(size = 10, hjust = 0)
)
}
deaths_p <- death_plt(dat = tsp_india, title = "Moderate CFR")
deaths_p_mh <- death_plt(dat = tsp_mh, title = "High CFR")
deaths_p_kl <- death_plt(dat = tsp_kl, title = "Low CFR")
patched <- deaths_p_mh / deaths_p / deaths_p_kl
full_plt <- patched +
plot_annotation(
title = plt_title,
subtitle = "February 15, 2021 to May 15, 2021",
caption = glue("**Notes:** Observations and prediction period until May 15, 2021. ",
"Figures in boxes show peak number of deaths for each intervention.<br>",
"**Abbrev:** CFR, case-fatality rate<br>",
"**\uA9 COV-IND-19 Study Group**"),
tag_levels = c("A")
) &
theme(
text = element_text(family = "Lato"),
plot.title = element_text(size = 18, face = "bold"),
plot.subtitle = element_text(size = 14, hjust = 0, color = "gray40"),
plot.caption = element_markdown(size = 12, hjust = 0),
plot.tag.position = c(0, 1),
plot.tag = element_text(size = 18, hjust = 0, vjust = 1, family = "Lato", face = "bold")
)
# save output ----------
ggsave(filename = here("Section 3 - Lockdown modeling", "fig", tmp_outname),
plot = full_plt,
height = 12,
width = 10,
units = "in", device = cairo_pdf)
|
/old versions/Initial Submission/Section 3 - Lockdown modeling/fig4_death_plt.R
|
permissive
|
umich-cphds/covid_india_wave2
|
R
| false | false | 8,751 |
r
|
# libraries ----------
librarian::shelf(
tidyverse, lubridate, ggsci, ggrepel, janitor, glue, here, ggtext, patchwork
)
source(here("Section 3 - Lockdown modeling", "src", "extract_cfr.R"))
# # use maharashtra pi schedule? use kerala cfr schedule? ----------
mh <- TRUE
if (mh == TRUE) {
tmp_outname <- "fig4_death_plot.pdf"
plt_title <- "Predicted number of daily COVID-19 deaths under moderate lockdown effect"
} else {
tmp_outname <- "figS4_death_plot.pdf"
plt_title <- "Predicted number of daily COVID-19 deaths under strong lockdown effect"
}
# load data -----------
obs <- read_csv("https://api.covid19india.org/csv/latest/case_time_series.csv",
col_types = cols()) %>%
clean_names() %>%
rename(
daily_cases = daily_confirmed,
daily_deaths = daily_deceased,
total_cases = total_confirmed,
total_deaths = total_deceased
) %>%
select(-date) %>%
rename(date = date_ymd) %>%
filter(date >= "2021-02-15") %>%
mutate(cfr = daily_deaths/daily_cases)
scenarios <- c("2021-03-01", "2021-03-15", "2021-03-30",
"2021-04-15", "2021-04-30", "no_intervention")
for (i in seq_along(scenarios)) {
if (mh == TRUE) {
tmp_filename <- glue("{scenarios[i]}_mh_smooth1_data.txt")
} else {
tmp_filename <- glue("{scenarios[i]}_smooth1_data.txt")
}
if (i == 1) {
p <- read_tsv(here("Section 3 - Lockdown modeling", "data",
tmp_filename),
col_types = cols()) %>%
mutate(scenario = scenarios[i])
} else {
p <- bind_rows(p,
read_tsv(here("Section 3 - Lockdown modeling", "data",
tmp_filename),
col_types = cols()) %>%
mutate(scenario = scenarios[i]))
}
}
p <- p %>%
mutate(
scenario = paste(
trimws(format(as.Date(scenario), '%B')),
trimws(format(as.Date(scenario), '%e'))
)
) %>%
mutate(
scenario = case_when(
scenario == "NA NA" ~ "No intervention",
T ~ scenario
)
) %>%
drop_na(incidence)
# extract CFR schedule and get plot defaults -----------
d <- extract_cfr()
# plt_def <- get_plt_def(mh = mh, kl = kl)
# prepare data -----------
clean_prep <- function(x, var) {
none <- obs %>% clean_scenario_cfr(p = x, use_cfr = {{ var}}, stop_obs = "2021-05-15", scen = "No intervention")
mar_15 <- obs %>% clean_scenario_cfr(p = x, use_cfr = {{ var }}, stop_obs = "2021-03-15", scen = "March 15")
mar_30 <- obs %>% clean_scenario_cfr(p = x, use_cfr = {{ var }}, stop_obs = "2021-03-30", scen = "March 30")
apr_15 <- obs %>% clean_scenario_cfr(p = x, use_cfr = {{ var }}, stop_obs = "2021-04-15", scen = "April 15")
total <- obs %>%
filter(date <= "2021-05-15") %>%
select(date, daily_deaths) %>%
rename(incidence = daily_deaths) %>%
add_column(scenario = "Observed") %>%
add_row(mar_15) %>%
add_row(mar_30) %>%
add_row(apr_15)
total.smoothed <- total %>%
filter(date <= "2021-05-15") %>%
nest(data = c(date, incidence)) %>%
mutate(m = purrr::map(data, loess, formula = incidence ~ as.numeric(date), span = 0.5),
fitted = purrr::map(m, `[[`, "fitted")) %>%
select(-m) %>%
unnest(cols = c(data, fitted))
total_smoothed_plot <- total.smoothed %>%
filter(scenario == "Observed") %>%
filter(date <= "2021-05-15") %>%
add_row(total.smoothed %>%
filter(scenario == "March 15",
date >= "2021-03-09")) %>%
add_row(total.smoothed %>%
filter(scenario == "March 30",
date >= "2021-03-24")) %>%
add_row(total.smoothed %>%
filter(scenario == "April 15",
date >= "2021-04-08")) %>%
mutate(scenario = factor(scenario, levels = c("Observed", "March 15", "March 30",
"April 15")))%>%
filter(date <= "2021-05-15")
return(total_smoothed_plot)
}
# tsp_india <- total_smoothed_plot
tsp_india <- clean_prep(x = p, var = cfr_t7)
tsp_mh <- clean_prep(x = p, var = cfr_mh_t7)
tsp_kl <- clean_prep(x = p, var = cfr_kl_t7)
# plot ----------
death_plt <- function(dat, title,
tmp_nudge = 500, tmp_repel_y = c(250, rep(4000, 3))) {
deaths_p <- dat %>%
ggplot() +
geom_line(aes(x = date, y = fitted, color = scenario), size = 1) +
scale_colour_lancet() +
xlab("Date") +
ylab("Daily deaths") +
geom_vline(data = dat %>%
group_by(scenario) %>%
filter(date == min(date)) %>%
dplyr::ungroup() %>%
select(scenario, date) %>%
filter(!(scenario %in% c("Observed", "No intervention"))),
aes(xintercept = date, color = scenario),
linetype = 'dashed') +
geom_label_repel(data = dat %>%
group_by(scenario) %>%
filter(fitted == max(fitted)) %>%
dplyr::ungroup() %>%
select(scenario, date, fitted) %>%
filter(!(scenario %in% c("Observed", "No intervention"))),
aes(x = date,
y = fitted,
label = paste0(formatC(round(fitted), format="f", big.mark=",", digits=0), " deaths"),
color = scenario,
family = "Lato"),
nudge_y = tmp_nudge,
nudge_x = -10,
size = 4,
show.legend = FALSE,
segment.size = 1) +
geom_text_repel(data = dat %>%
group_by(scenario) %>%
filter(date == min(date)) %>%
dplyr::ungroup() %>%
select(scenario, date, fitted) %>%
mutate(text = c("Observed data", "March 15\nlockdown", "March 30\nlockdown",
"April 15\nlockdown"),
x = as.Date(c("2021-03-01", "2021-03-09", "2021-03-24", "2021-04-08")),
y = tmp_repel_y),
aes(x = x,
y = y,
label = text,
color = scenario,
family = "Lato"),
nudge_x = -5,
size = 4,
show.legend = FALSE,
segment.size = 0) +
guides(color = guide_legend(nrow = 1)) +
labs(title = title,
y = "Daily deaths",
x = "",
color = "Date of intervention") +
scale_y_continuous(labels = scales::comma) +
scale_x_date(date_labels = "%B") +
theme_classic() +
theme(
text = element_text(family = "Lato"),
axis.text.x = element_text(size = 11, vjust = 0.5),
axis.text.y = element_text(size = 11),
axis.title.x = element_text(size = 11, face = "bold"),
axis.title.y = element_text(size = 11, face = "bold"),
legend.title = element_text(size = 11, face = "bold"),
legend.text = element_text(size = 11, face = "bold"),
legend.position = "none",
plot.title = element_text(size = 14, face = "bold"),
plot.subtitle = element_text(size = 11, hjust = 0, color = "gray40"),
plot.caption = element_markdown(size = 10, hjust = 0)
)
}
deaths_p <- death_plt(dat = tsp_india, title = "Moderate CFR")
deaths_p_mh <- death_plt(dat = tsp_mh, title = "High CFR")
deaths_p_kl <- death_plt(dat = tsp_kl, title = "Low CFR")
patched <- deaths_p_mh / deaths_p / deaths_p_kl
full_plt <- patched +
plot_annotation(
title = plt_title,
subtitle = "February 15, 2021 to May 15, 2021",
caption = glue("**Notes:** Observations and prediction period until May 15, 2021. ",
"Figures in boxes show peak number of deaths for each intervention.<br>",
"**Abbrev:** CFR, case-fatality rate<br>",
"**\uA9 COV-IND-19 Study Group**"),
tag_levels = c("A")
) &
theme(
text = element_text(family = "Lato"),
plot.title = element_text(size = 18, face = "bold"),
plot.subtitle = element_text(size = 14, hjust = 0, color = "gray40"),
plot.caption = element_markdown(size = 12, hjust = 0),
plot.tag.position = c(0, 1),
plot.tag = element_text(size = 18, hjust = 0, vjust = 1, family = "Lato", face = "bold")
)
# save output ----------
ggsave(filename = here("Section 3 - Lockdown modeling", "fig", tmp_outname),
plot = full_plt,
height = 12,
width = 10,
units = "in", device = cairo_pdf)
|
testlist <- list(doy = numeric(0), latitude = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433657e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -3.52611543667973e+43, -1.49815227045093e+197, -2.61605817623304e+76, -2.05047590487847e-254, 1.86807199670821e+112, 6.10244991284866e-304, -3.23758639955247e+211, -4.11958095563334e+154, -7.46403404334263e+158, 8657593228606415, 8.34327535556312e+270, -1.14194149621191e+46, -2.93592439989456e+150, -8.9070509982283e+295, 1.83628735201614e+223, 5.95835080989286e-136, 2.07507571253324e-322, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
|
/meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615831467-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 770 |
r
|
testlist <- list(doy = numeric(0), latitude = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433657e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -3.52611543667973e+43, -1.49815227045093e+197, -2.61605817623304e+76, -2.05047590487847e-254, 1.86807199670821e+112, 6.10244991284866e-304, -3.23758639955247e+211, -4.11958095563334e+154, -7.46403404334263e+158, 8657593228606415, 8.34327535556312e+270, -1.14194149621191e+46, -2.93592439989456e+150, -8.9070509982283e+295, 1.83628735201614e+223, 5.95835080989286e-136, 2.07507571253324e-322, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
|
#grid.newpage()
# Packages used below; in the original project these may already be attached elsewhere.
library(grid); library(gridExtra); library(stringr); library(lubridate)
todays_date <- str_replace_all(today(), "-|20", "")
file_name<-paste("graphs/reflections_dashboard_",todays_date,".pdf", sep="")
pdf(file_name, width=8.5,height=11,title="KIPP Chicago SL Relflections")
#### Grid Layout ####
la=grid.layout(8, 2, heights=unit.c(unit(1.5, "lines"),
unit(1.5, "lines"),
unit(1, "lines"),
unit(3, "inches") -unit(3,"lines"),
unit(1, "lines"),
unit(4, "inches") -unit(3,"lines"),
unit(1, "lines"),
unit(4, "inches") -unit(3,"lines")
),
widths=unit(8/2, "inches")
)
# Top level viewport that is 8.5"x11 inchees divided into text and
top.vp <- viewport(layout=la)
#### Child Viewports ####
#need to name child viewports
plottitle <- viewport(layout.pos.col = 1:2, layout.pos.row = 1,
name = "plottitle")
plotkey <- viewport(layout.pos.col = 1:2, layout.pos.row = 2,
name = "plotkey")
title11 <- viewport(layout.pos.col = 1, layout.pos.row = 3,
name = "title11")
title212 <- viewport(layout.pos.col = 1:2, layout.pos.row = 5,
name = "title212")
title312 <- viewport(layout.pos.col = 1:2, layout.pos.row = 7,
name = "title312")
title12 <- viewport(layout.pos.col = 2, layout.pos.row = 3,
name = "title12")
plotAtt <- viewport(layout.pos.col = 1, layout.pos.row = 4,
name = "plotAtt")
plotEnroll <- viewport(layout.pos.col = 2, layout.pos.row = 4,
name = "plotEnroll")
plotMAP <- viewport(layout.pos.col = 1:2, layout.pos.row = 6,
name = "plotXfers")
plotXfers <- viewport(layout.pos.col = 1:2, layout.pos.row = 8,
name = "plotMAP")
#plotISAT <- viewport(layout.pos.col = 1, layout.pos.row = 6,
# name = "plotISAT")
## viewport for Assessment tables in each row
vpMAPplots <- function(vpname="vpMAPPlotRegion"){
viewport(x=unit(3,"lines"),
y=1,
height=1,
width=unit.c(unit(1,"npc")-unit(3,"lines")),
just=c("left", "top"),
name=vpname
)
}
#### Tree ####
# Combine plots into tree
splot <- vpTree(top.vp, vpList(plottitle, plotkey,title11, title212, title312,title12, plotAtt, plotEnroll, plotMAP, plotXfers))
#push vpTree
grid.newpage()
pushViewport(splot)
#### Attend ####
seekViewport("plotAtt")
grid.rect()
grid.draw(att.gtbl)
seekViewport("title11")
grid.rect()
grid.text("Attendence", gp=gpar(fontsize=11, fontface="bold"))
#### Enrollment ####
seekViewport("plotEnroll")
grid.rect()
grid.draw(Enroll.g)
grid.text(" Orange line",
x=0,
y=unit(1, "lines"),
hjust=0,
gp=gpar(fontsize=6, fontface="italic", col="#E27425"),
name="enrollorange")
text.cursor<-convertWidth(grobWidth("enrollorange"), "npc")
grid.text(" indicates each grade's SY2012-13 budgeted enrollment.",
gp=gpar(col="#8D8685", fontsize=6, fontface="italic"),
x=text.cursor,
y=unit(1, "lines"),
name="enrolltext",
hjust=0)
seekViewport("title12")
grid.rect()
grid.text("Enrollment", gp=gpar(fontsize=11, fontface="bold"))
#### Transfers ####
seekViewport("plotXfers")
grid.rect()
transfer.g<-arrangeGrob(transfer.plot, transfer.gtbl, nrow=1)
grid.draw(transfer.g)
grid.text("Bars and numbers show cumulative transfers.",
gp=gpar(col="#8D8685", fontsize=6, fontface="italic"),
x=unit(4, "lines"),
y=unit(1, "lines"),
name="xfertext",
hjust=0)
seekViewport("title212")
grid.rect()
grid.text("Transfers", gp=gpar(fontsize=11, fontface="bold"))
##### Title ####
seekViewport("plottitle")
grid.rect()
grid.text("DRAFT--KIPP Chicago School Leader Reflection Dashboard--DRAFT", gp=gpar(fontsize=12, fontface="bold"))
#### Key ####
seekViewport("plotkey")
grid.rect()
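# The key is assembled from a sequence of colored text grobs: text.cursor accumulates each
# grob's width (converted to npc units) so every new segment starts where the previous one ended.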
grid.text(" Key: ", # NB: the spaces before "K" after the "y" in "Key"
gp=gpar(fontsize=10, fontface="bold"),
x=0,#unit(1, "lines"),
name="key",
hjust=0)
text.cursor<-convertWidth(grobWidth("key"), "npc")
grid.text("KIPP Chicago schools indicated by color: ",
gp=gpar(col="#8D8685", fontsize=10, fontface="italic"),
x=text.cursor,
name="text1",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("text1"), "npc")
grid.text("KAPS ",
gp=gpar(col="purple", fontsize=10),
x=text.cursor,
name="kaps",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("kaps"), "npc")
grid.text(" KAMS ",
gp=gpar(col="#439539", fontsize=10),
x=text.cursor,
name="kams",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("kams"), "npc")
grid.text(" KCCP",
gp=gpar(col="#60A2D7", fontsize=10),
x=text.cursor,
name="kccp",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("kccp"), "npc")
grid.text(". Comparative metrics indicated by ",
gp=gpar(col="#8D8685", fontsize=10, fontface="italic"),
x=text.cursor,
name="text2",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("text2"), "npc")
grid.text("orange",
gp=gpar(col="#E27425", fontsize=10),
x=text.cursor,
name="orange",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("orange"), "npc")
grid.text(".",
gp=gpar(col="#8D8685", fontsize=10, fontface="italic"),
x=text.cursor,
name="period",
hjust=0)
#### Page 2 ####
grid.newpage()
#### Grid Layout ####
la2=grid.layout(6, 2, heights=unit.c(unit(1.5, "lines"),
unit(1.5, "lines"),
unit(1, "lines"),
unit(6.25, "inches") -unit(3,"lines"),
unit(1, "lines"),
unit(4.75, "inches") -unit(4,"lines")
),
widths=unit(8/2, "inches")
)
# Top-level viewport that is 8.5" x 11" (in inches), divided into title text and plot regions
top2.vp <- viewport(layout=la2)
#### Child Viewports ####
#need to name child viewports
page2title <- viewport(layout.pos.col = 1:2, layout.pos.row = 1,
name = "page2title")
page2key <- viewport(layout.pos.col = 1:2, layout.pos.row = 2,
name = "page2key")
title2.1 <- viewport(layout.pos.col = 1:2, layout.pos.row = 3,
name = "title2.1")
title2.2 <- viewport(layout.pos.col = 1:2, layout.pos.row = 5,
name = "title2.2")
plotMAP2 <- viewport(layout.pos.col = 1:2, layout.pos.row = 4,
name = "plotMAP2")
plotISAT2 <- viewport(layout.pos.col = 1:2, layout.pos.row = 6,
name = "plotISAT2")
#### Tree ####
# Combine plots into tree
splot2 <- vpTree(top2.vp, vpList(page2title, page2key,title2.1, title2.2, plotMAP2,plotISAT2))
#push vpTree
#grid.show.layout(la2)
pushViewport(splot2)
##### Title Page 2 ####
seekViewport("page2title")
grid.rect()
grid.text("DRAFT--KIPP Chicago School Leader Reflection Dashboard--DRAFT", gp=gpar(fontsize=12, fontface="bold"))
#### Key Page 2 ####
seekViewport("page2key")
grid.rect()
grid.text(" Key: ", # NB: the spaces before "K" after the "y" in "Key"
gp=gpar(fontsize=10, fontface="bold"),
x=0,#unit(1, "lines"),
name="key",
hjust=0)
text.cursor<-convertWidth(grobWidth("key"), "npc")
grid.text("KIPP Chicago schools indicated by color: ",
gp=gpar(col="#8D8685", fontsize=10, fontface="italic"),
x=text.cursor,
name="text1",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("text1"), "npc")
grid.text("KAPS ",
gp=gpar(col="purple", fontsize=10),
x=text.cursor,
name="kaps",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("kaps"), "npc")
grid.text(" KAMS ",
gp=gpar(col="#439539", fontsize=10),
x=text.cursor,
name="kams",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("kams"), "npc")
grid.text(" KCCP",
gp=gpar(col="#60A2D7", fontsize=10),
x=text.cursor,
name="kccp",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("kccp"), "npc")
grid.text(". Comparative metrics indicated by ",
gp=gpar(col="#8D8685", fontsize=10, fontface="italic"),
x=text.cursor,
name="text2",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("text2"), "npc")
grid.text("orange",
gp=gpar(col="#E27425", fontsize=10),
x=text.cursor,
name="orange",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("orange"), "npc")
grid.text(".",
gp=gpar(col="#8D8685", fontsize=10, fontface="italic"),
x=text.cursor,
name="period",
hjust=0)
#### MAP Page 2 ####
# Section Title
seekViewport("title2.1")
grid.rect()
grid.text("Assessements",
gp=gpar(fontsize=11,
fontface="bold"
)
)
seekViewport("plotMAP2")
pushViewport(viewport(x=0,
y=1,
height=.5,
width=1,
just=c("left", "top"),
name="mapgrowth2"
)
)
grid.rect()
grid.text("MAP\n% Above Typical Growth",
x = unit(1.2,"lines"),
rot = 90,
gp=gpar(fontsize=8, fontface="bold"))
grid.text("Ranked against KIPP Network",
x = unit(4,"lines"),
rot = 90,
gp=gpar(fontsize=6, fontface="italic", col="#8D8685"))
grid.text("(Fall-to-Spring for K, 2, & 5; Spring-to-Spring for 1 & 6-8)",
x = unit(6,"lines"),
rot = 90,
gp=gpar(fontsize=5, fontface="italic", col="#8D8685"))
# viewport for MAP tables in each row
vpMAPplots <- function(vpname="vpMAPPlotRegion"){
viewport(x=unit(3,"lines"),
y=1,
height=1,
width=unit.c(unit(1,"npc")-unit(3,"lines")),
just=c("left", "top"),
name=vpname
)
}
pushViewport(vpMAPplots("vpMapPlotGrowth2"))
grid.rect()
grid.draw(map.bar.plot)
grid.text(" Dark orange lines ",
x=0,
y=unit(1, "lines"),
hjust=0,
gp=gpar(fontsize=6, fontface="italic", col="#E27425"),
name="mapnatorange")
text.cursor<-convertWidth(grobWidth("mapnatorange"), "npc")
grid.text(" indicate the national percent above typical growth (50%); ",
gp=gpar(col="#8D8685", fontsize=6, fontface="italic"),
x=text.cursor,
y=unit(1, "lines"),
name="maptextnat",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("maptextnat"), "npc")
grid.text("bronze lines ",
gp=gpar(col="#C49A6C", fontsize=6, fontface="italic"),
x=text.cursor,
y=unit(1, "lines"),
name="mapkippyellow",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("mapkippyellow"), "npc")
grid.text(" indicate the KIPP Network percent above typical growth.",
gp=gpar(col="#8D8685", fontsize=6, fontface="italic"),
x=text.cursor,
y=unit(1, "lines"),
name="maptextkipp",
hjust=0)
#### MAP Growth Magnitude 2 ####
seekViewport("plotMAP2")
pushViewport(viewport(x=0,
y=.5,
height=.5,
width=1,
just=c("left", "top"),
name="mapmag2"
)
)
grid.rect()
grid.text("MAP\nAverage Magnitude of Growth",
x = unit(1.2,"lines"),
rot = 90,
gp=gpar(fontsize=8, fontface="bold"))
grid.text("Ranked against KIPP Network",
x = unit(4,"lines"),
rot = 90,
gp=gpar(fontsize=6, fontface="italic", col="#8D8685"))
grid.text("(Fall-to-Spring for K, 2, & 5; Spring-to-Spring for 1 & 6-8)",
x = unit(6,"lines"),
rot = 90,
gp=gpar(fontsize=5, fontface="italic", col="#8D8685"))
pushViewport(vpMAPplots("vpMapPlotMag2"))
grid.rect()
grid.draw(map.mag.plot)
grid.text(" Dark orange lines ",
x=0,
y=unit(1, "lines"),
hjust=0,
gp=gpar(fontsize=6, fontface="italic", col="#E27425"),
name="magnatorange")
text.cursor<-convertWidth(grobWidth("magnatorange"), "npc")
grid.text(" indicate the national average magnitude of growth (1 = Actual Growth/Expected Growth); ",
gp=gpar(col="#8D8685", fontsize=6, fontface="italic"),
x=text.cursor,
y=unit(1, "lines"),
name="magtextnat",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("magtextnat"), "npc")
grid.text("bronze lines ",
gp=gpar(col="#C49A6C", fontsize=6, fontface="italic"),
x=text.cursor,
y=unit(1, "lines"),
name="magkippyellow",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("magkippyellow"), "npc")
grid.text(" indicate the KIPP Network average magnitude of growth.",
gp=gpar(col="#8D8685", fontsize=6, fontface="italic"),
x=text.cursor,
y=unit(1, "lines"),
name="magtextkipp",
hjust=0)
#### ISAT Page 2 ####
seekViewport("plotISAT2")
pushViewport(viewport(x=0, y=.5, height=.5, width=1, just=c("left", "top"), name="isat2"))
grid.rect()
pushViewport(vpMAPplots("vpISAT2"))
grid.rect()
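# Align the panel heights of the ISAT plot and its companion table (unit.pmax keeps the larger of
# each pair of heights) so the two grobs line up when arranged side by side.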
gPlot <- ggplotGrob(ISAT.58.Comp.plot)
gTbl <- ggplotGrob(ISAT.58.Comp.tbl)
maxHeight = grid::unit.pmax(gPlot$heights[2:3], gTbl$heights[2:3])
gPlot$heights[2:3] <- as.list(maxHeight)
gTbl$heights[2:3] <- as.list(maxHeight)
isat.g<-arrangeGrob(gPlot, gTbl, ncol=2, widths=c(1,2))
grid.draw(isat.g)
grid.text(" Orange lines & numbers",
x=0,
y=unit(1, "lines"),
hjust=0,
gp=gpar(fontsize=6, fontface="italic", col="#E27425"),
name="cpsorange")
text.cursor<-convertWidth(grobWidth("cpsorange"), "npc")
grid.text(" indicates CPS performance for each grade over all schools. ",
gp=gpar(col="#8D8685", fontsize=6, fontface="italic"),
x=text.cursor,
y=unit(1, "lines"),
name="cpstext",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("cpstext"), "npc")
grid.text("Gray dots ",
gp=gpar(col="#8D8685", fontsize=6, fontface="bold.italic"),
x=text.cursor,
y=unit(1, "lines"),
name="graydots",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("graydots"), "npc")
grid.text("indicate KIPP Chicago high/low performance from SY2006-7 to SY2012-13.",
gp=gpar(col="#8D8685", fontsize=6, fontface="italic"),
x=text.cursor,
y=unit(1, "lines"),
name="cpstext2",
hjust=0)
seekViewport("isat2")
grid.text("ISAT\n% Meets/Exceeds", x = unit(1.2,"lines"), rot = 90, gp=gpar(fontsize=8, fontface="bold"))
isat.extext<-"Orange indicates CPS performance"
grid.text("New cut scores applied retroactively",
x = unit(4,"lines"),
rot = 90,
gp=gpar(fontsize=6, fontface="italic", col="#8D8685"))
dev.off()
|
/School_Leader_Reflections/src/buildDB_v3.R
|
no_license
|
kippchicago/Data_Analysis
|
R
| false | false | 15,743 |
r
|
#grid.newpage()
todays_date<-str_replace_all(today(), "-|20","")
file_name<-paste("graphs/reflections_dashboard_",todays_date,".pdf", sep="")
pdf(file_name, width=8.5,height=11,title="KIPP Chicago SL Reflections")
#### Grid Layout ####
la=grid.layout(8, 2, heights=unit.c(unit(1.5, "lines"),
unit(1.5, "lines"),
unit(1, "lines"),
unit(3, "inches") -unit(3,"lines"),
unit(1, "lines"),
unit(4, "inches") -unit(3,"lines"),
unit(1, "lines"),
unit(4, "inches") -unit(3,"lines")
),
widths=unit(8/2, "inches")
)
# Top-level viewport that is 8.5" x 11" (in inches), divided into title text and plot regions
top.vp <- viewport(layout=la)
#### Child Viewports ####
#need to name child viewports
plottitle <- viewport(layout.pos.col = 1:2, layout.pos.row = 1,
name = "plottitle")
plotkey <- viewport(layout.pos.col = 1:2, layout.pos.row = 2,
name = "plotkey")
title11 <- viewport(layout.pos.col = 1, layout.pos.row = 3,
name = "title11")
title212 <- viewport(layout.pos.col = 1:2, layout.pos.row = 5,
name = "title212")
title312 <- viewport(layout.pos.col = 1:2, layout.pos.row = 7,
name = "title312")
title12 <- viewport(layout.pos.col = 2, layout.pos.row = 3,
name = "title12")
plotAtt <- viewport(layout.pos.col = 1, layout.pos.row = 4,
name = "plotAtt")
plotEnroll <- viewport(layout.pos.col = 2, layout.pos.row = 4,
name = "plotEnroll")
plotMAP <- viewport(layout.pos.col = 1:2, layout.pos.row = 6,
name = "plotXfers")
plotXfers <- viewport(layout.pos.col = 1:2, layout.pos.row = 8,
name = "plotMAP")
#plotISAT <- viewport(layout.pos.col = 1, layout.pos.row = 6,
# name = "plotISAT")
## viewport for Assessment tables in each row
vpMAPplots <- function(vpname="vpMAPPlotRegion"){
viewport(x=unit(3,"lines"),
y=1,
height=1,
width=unit.c(unit(1,"npc")-unit(3,"lines")),
just=c("left", "top"),
name=vpname
)
}
#### Tree ####
# Combine plots into tree
splot <- vpTree(top.vp, vpList(plottitle, plotkey,title11, title212, title312,title12, plotAtt, plotEnroll, plotMAP, plotXfers))
#push vpTree
grid.newpage()
pushViewport(splot)
#### Attend ####
seekViewport("plotAtt")
grid.rect()
grid.draw(att.gtbl)
seekViewport("title11")
grid.rect()
grid.text("Attendence", gp=gpar(fontsize=11, fontface="bold"))
#### Enrollment ####
seekViewport("plotEnroll")
grid.rect()
grid.draw(Enroll.g)
grid.text(" Orange line",
x=0,
y=unit(1, "lines"),
hjust=0,
gp=gpar(fontsize=6, fontface="italic", col="#E27425"),
name="enrollorange")
text.cursor<-convertWidth(grobWidth("enrollorange"), "npc")
grid.text(" indicates each grade's SY2012-13 budgeted enrollment.",
gp=gpar(col="#8D8685", fontsize=6, fontface="italic"),
x=text.cursor,
y=unit(1, "lines"),
name="enrolltext",
hjust=0)
seekViewport("title12")
grid.rect()
grid.text("Enrollment", gp=gpar(fontsize=11, fontface="bold"))
#### Transfers ####
seekViewport("plotXfers")
grid.rect()
transfer.g<-arrangeGrob(transfer.plot, transfer.gtbl, nrow=1)
grid.draw(transfer.g)
grid.text("Bars and numbers show cumulative transfers.",
gp=gpar(col="#8D8685", fontsize=6, fontface="italic"),
x=unit(4, "lines"),
y=unit(1, "lines"),
name="xfertext",
hjust=0)
seekViewport("title212")
grid.rect()
grid.text("Transfers", gp=gpar(fontsize=11, fontface="bold"))
##### Title ####
seekViewport("plottitle")
grid.rect()
grid.text("DRAFT--KIPP Chicago School Leader Reflection Dashboard--DRAFT", gp=gpar(fontsize=12, fontface="bold"))
#### Key ####
seekViewport("plotkey")
grid.rect()
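# The key is assembled from a sequence of colored text grobs: text.cursor accumulates each
# grob's width (converted to npc units) so every new segment starts where the previous one ended.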
grid.text(" Key: ", # NB: the spaces before "K" after the "y" in "Key"
gp=gpar(fontsize=10, fontface="bold"),
x=0,#unit(1, "lines"),
name="key",
hjust=0)
text.cursor<-convertWidth(grobWidth("key"), "npc")
grid.text("KIPP Chicago schools indicated by color: ",
gp=gpar(col="#8D8685", fontsize=10, fontface="italic"),
x=text.cursor,
name="text1",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("text1"), "npc")
grid.text("KAPS ",
gp=gpar(col="purple", fontsize=10),
x=text.cursor,
name="kaps",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("kaps"), "npc")
grid.text(" KAMS ",
gp=gpar(col="#439539", fontsize=10),
x=text.cursor,
name="kams",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("kams"), "npc")
grid.text(" KCCP",
gp=gpar(col="#60A2D7", fontsize=10),
x=text.cursor,
name="kccp",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("kccp"), "npc")
grid.text(". Comparative metrics indicated by ",
gp=gpar(col="#8D8685", fontsize=10, fontface="italic"),
x=text.cursor,
name="text2",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("text2"), "npc")
grid.text("orange",
gp=gpar(col="#E27425", fontsize=10),
x=text.cursor,
name="orange",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("orange"), "npc")
grid.text(".",
gp=gpar(col="#8D8685", fontsize=10, fontface="italic"),
x=text.cursor,
name="period",
hjust=0)
#### Page 2 ####
grid.newpage()
#### Grid Layout ####
la2=grid.layout(6, 2, heights=unit.c(unit(1.5, "lines"),
unit(1.5, "lines"),
unit(1, "lines"),
unit(6.25, "inches") -unit(3,"lines"),
unit(1, "lines"),
unit(4.75, "inches") -unit(4,"lines")
),
widths=unit(8/2, "inches")
)
# Top-level viewport that is 8.5" x 11" (in inches), divided into title text and plot regions
top2.vp <- viewport(layout=la2)
#### Child Viewports ####
#need to name child viewports
page2title <- viewport(layout.pos.col = 1:2, layout.pos.row = 1,
name = "page2title")
page2key <- viewport(layout.pos.col = 1:2, layout.pos.row = 2,
name = "page2key")
title2.1 <- viewport(layout.pos.col = 1:2, layout.pos.row = 3,
name = "title2.1")
title2.2 <- viewport(layout.pos.col = 1:2, layout.pos.row = 5,
name = "title2.2")
plotMAP2 <- viewport(layout.pos.col = 1:2, layout.pos.row = 4,
name = "plotMAP2")
plotISAT2 <- viewport(layout.pos.col = 1:2, layout.pos.row = 6,
name = "plotISAT2")
#### Tree ####
# Combine plots into tree
splot2 <- vpTree(top2.vp, vpList(page2title, page2key,title2.1, title2.2, plotMAP2,plotISAT2))
#push vpTree
#grid.show.layout(la2)
pushViewport(splot2)
##### Title Page 2 ####
seekViewport("page2title")
grid.rect()
grid.text("DRAFT--KIPP Chicago School Leader Reflection Dashboard--DRAFT", gp=gpar(fontsize=12, fontface="bold"))
#### Key Page 2 ####
seekViewport("page2key")
grid.rect()
grid.text(" Key: ", # NB: the spaces before "K" after the "y" in "Key"
gp=gpar(fontsize=10, fontface="bold"),
x=0,#unit(1, "lines"),
name="key",
hjust=0)
text.cursor<-convertWidth(grobWidth("key"), "npc")
grid.text("KIPP Chicago schools indicated by color: ",
gp=gpar(col="#8D8685", fontsize=10, fontface="italic"),
x=text.cursor,
name="text1",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("text1"), "npc")
grid.text("KAPS ",
gp=gpar(col="purple", fontsize=10),
x=text.cursor,
name="kaps",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("kaps"), "npc")
grid.text(" KAMS ",
gp=gpar(col="#439539", fontsize=10),
x=text.cursor,
name="kams",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("kams"), "npc")
grid.text(" KCCP",
gp=gpar(col="#60A2D7", fontsize=10),
x=text.cursor,
name="kccp",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("kccp"), "npc")
grid.text(". Comparative metrics indicated by ",
gp=gpar(col="#8D8685", fontsize=10, fontface="italic"),
x=text.cursor,
name="text2",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("text2"), "npc")
grid.text("orange",
gp=gpar(col="#E27425", fontsize=10),
x=text.cursor,
name="orange",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("orange"), "npc")
grid.text(".",
gp=gpar(col="#8D8685", fontsize=10, fontface="italic"),
x=text.cursor,
name="period",
hjust=0)
#### MAP Page 2 ####
# Section Title
seekViewport("title2.1")
grid.rect()
grid.text("Assessements",
gp=gpar(fontsize=11,
fontface="bold"
)
)
seekViewport("plotMAP2")
pushViewport(viewport(x=0,
y=1,
height=.5,
width=1,
just=c("left", "top"),
name="mapgrowth2"
)
)
grid.rect()
grid.text("MAP\n% Above Typical Growth",
x = unit(1.2,"lines"),
rot = 90,
gp=gpar(fontsize=8, fontface="bold"))
grid.text("Ranked against KIPP Network",
x = unit(4,"lines"),
rot = 90,
gp=gpar(fontsize=6, fontface="italic", col="#8D8685"))
grid.text("(Fall-to-Spring for K, 2, & 5; Spring-to-Spring for 1 & 6-8)",
x = unit(6,"lines"),
rot = 90,
gp=gpar(fontsize=5, fontface="italic", col="#8D8685"))
# viewport for MAP tables in each row
vpMAPplots <- function(vpname="vpMAPPlotRegion"){
viewport(x=unit(3,"lines"),
y=1,
height=1,
width=unit.c(unit(1,"npc")-unit(3,"lines")),
just=c("left", "top"),
name=vpname
)
}
pushViewport(vpMAPplots("vpMapPlotGrowth2"))
grid.rect()
grid.draw(map.bar.plot)
grid.text(" Dark orange lines ",
x=0,
y=unit(1, "lines"),
hjust=0,
gp=gpar(fontsize=6, fontface="italic", col="#E27425"),
name="mapnatorange")
text.cursor<-convertWidth(grobWidth("mapnatorange"), "npc")
grid.text(" indicate the national percent above typical growth (50%); ",
gp=gpar(col="#8D8685", fontsize=6, fontface="italic"),
x=text.cursor,
y=unit(1, "lines"),
name="maptextnat",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("maptextnat"), "npc")
grid.text("bronze lines ",
gp=gpar(col="#C49A6C", fontsize=6, fontface="italic"),
x=text.cursor,
y=unit(1, "lines"),
name="mapkippyellow",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("mapkippyellow"), "npc")
grid.text(" indicate the KIPP Network percent above typical growth.",
gp=gpar(col="#8D8685", fontsize=6, fontface="italic"),
x=text.cursor,
y=unit(1, "lines"),
name="maptextkipp",
hjust=0)
#### MAP Growth Magnitude 2 ####
seekViewport("plotMAP2")
pushViewport(viewport(x=0,
y=.5,
height=.5,
width=1,
just=c("left", "top"),
name="mapmag2"
)
)
grid.rect()
grid.text("MAP\nAverage Magnitude of Growth",
x = unit(1.2,"lines"),
rot = 90,
gp=gpar(fontsize=8, fontface="bold"))
grid.text("Ranked against KIPP Network",
x = unit(4,"lines"),
rot = 90,
gp=gpar(fontsize=6, fontface="italic", col="#8D8685"))
grid.text("(Fall-to-Spring for K, 2, & 5; Spring-to-Spring for 1 & 6-8)",
x = unit(6,"lines"),
rot = 90,
gp=gpar(fontsize=5, fontface="italic", col="#8D8685"))
pushViewport(vpMAPplots("vpMapPlotMag2"))
grid.rect()
grid.draw(map.mag.plot)
grid.text(" Dark orange lines ",
x=0,
y=unit(1, "lines"),
hjust=0,
gp=gpar(fontsize=6, fontface="italic", col="#E27425"),
name="magnatorange")
text.cursor<-convertWidth(grobWidth("magnatorange"), "npc")
grid.text(" indicate the national average magnitude of growth (1 = Actual Growth/Expected Growth); ",
gp=gpar(col="#8D8685", fontsize=6, fontface="italic"),
x=text.cursor,
y=unit(1, "lines"),
name="magtextnat",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("magtextnat"), "npc")
grid.text("bronze lines ",
gp=gpar(col="#C49A6C", fontsize=6, fontface="italic"),
x=text.cursor,
y=unit(1, "lines"),
name="magkippyellow",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("magkippyellow"), "npc")
grid.text(" indicate the KIPP Network average magnitude of growth.",
gp=gpar(col="#8D8685", fontsize=6, fontface="italic"),
x=text.cursor,
y=unit(1, "lines"),
name="magtextkipp",
hjust=0)
#### ISAT Page 2 ####
seekViewport("plotISAT2")
pushViewport(viewport(x=0, y=.5, height=.5, width=1, just=c("left", "top"), name="isat2"))
grid.rect()
pushViewport(vpMAPplots("vpISAT2"))
grid.rect()
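# Align the panel heights of the ISAT plot and its companion table (unit.pmax keeps the larger of
# each pair of heights) so the two grobs line up when arranged side by side.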
gPlot <- ggplotGrob(ISAT.58.Comp.plot)
gTbl <- ggplotGrob(ISAT.58.Comp.tbl)
maxHeight = grid::unit.pmax(gPlot$heights[2:3], gTbl$heights[2:3])
gPlot$heights[2:3] <- as.list(maxHeight)
gTbl$heights[2:3] <- as.list(maxHeight)
isat.g<-arrangeGrob(gPlot, gTbl, ncol=2, widths=c(1,2))
grid.draw(isat.g)
grid.text(" Orange lines & numbers",
x=0,
y=unit(1, "lines"),
hjust=0,
gp=gpar(fontsize=6, fontface="italic", col="#E27425"),
name="cpsorange")
text.cursor<-convertWidth(grobWidth("cpsorange"), "npc")
grid.text(" indicates CPS performance for each grade over all schools. ",
gp=gpar(col="#8D8685", fontsize=6, fontface="italic"),
x=text.cursor,
y=unit(1, "lines"),
name="cpstext",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("cpstext"), "npc")
grid.text("Gray dots ",
gp=gpar(col="#8D8685", fontsize=6, fontface="bold.italic"),
x=text.cursor,
y=unit(1, "lines"),
name="graydots",
hjust=0)
text.cursor<-text.cursor + convertWidth(grobWidth("graydots"), "npc")
grid.text("indicate KIPP Chicago high/low performance from SY2006-7 to SY2012-13.",
gp=gpar(col="#8D8685", fontsize=6, fontface="italic"),
x=text.cursor,
y=unit(1, "lines"),
name="cpstext2",
hjust=0)
seekViewport("isat2")
grid.text("ISAT\n% Meets/Exceeds", x = unit(1.2,"lines"), rot = 90, gp=gpar(fontsize=8, fontface="bold"))
isat.extext<-"Orange indicates CPS performance"
grid.text("New cut scores applied retroactively",
x = unit(4,"lines"),
rot = 90,
gp=gpar(fontsize=6, fontface="italic", col="#8D8685"))
dev.off()
|
### PROJECT 1 Covid19 - Statistics
# @authors: Ànnia, Rebecca, Rocío, Victor.
############################################################
############ READING DATA FROM EXCEL/CSV COVID ############
############################################################
install.packages("readr")
install.packages("tidyverse")
library(readr);
library(tidyverse);
#setwd("~/AMAZON") !!!! Rebecca
#data <- read.csv("https://mvtec-group2.s3-eu-west-1.amazonaws.com/rawdata/A_covidDaily.csv")
# Add a try catch
data <- read.csv("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv")
data <- rename(data, code = iso_code)
head(data) # show the first rows of each column
names(data) # show the column names
# is R reading data correctly?
# does the data frame have the correct number of ROWS and COLUMNS?
dim(data)
# type of the data object
class(data)
#check table
#View(data)
#open access by name to columns
#attach(data)
#?attach
#are all columns of expected types?
sapply(data, class)
# keep the columns that have the fewest NA's
apply(is.na(data), 2, mean)
# shrink the data frame by dropping columns --> https://www.listendata.com/2015/06/r-keep-drop-columns-from-data-frame.html
data <- subset(data, select = c(code, continent, location, date, total_cases, total_cases_per_million, new_cases, reproduction_rate, total_deaths, total_deaths_per_million, new_deaths, hospital_beds_per_thousand, total_tests, total_tests_per_thousand, population, population_density, median_age, gdp_per_capita))
## dates ##
class(data$date) # --> must be a Date, not a character
library(lubridate)
#data$date <- ymd(data$date)
data$date <- as.Date(data$date, format="%Y-%m-%d")
class(data$date)
# how many NA's are there per column?
apply(is.na(data), 2, sum) # 2 = columns
############################################################
############ READING DATA OBESITY ############
############################################################
ob <- read.csv("obesitat.csv")
sapply(ob, class)
# renaming columns
ob <- rename(ob, country = Entity, code = Code, year = Year, obesity = Prevalence.of.obesity..both.sexes....WHO..2019.)
# taking 2016 year values only
ob <- filter(ob, year==2016)
# removing some rows that don't serve
ob <- ob[!(ob$country=="Africa" | ob$country=="Americas" | ob$country=="Eastern Mediterranean" | ob$country=="Europe" | ob$country=="Global" | ob$country=="South-East Asia" | ob$country=="Western Pacific"),]
head(ob)
ob <- subset(ob, select = -c(year))
############################################################
############ READING DATA CAUSES DEATH ############
############################################################
dataDeaths <- read.csv("mortality_causes.csv")
#head(dataDeaths)
#View(dataDeaths)
# renaming columns and rows
dataDeaths <- rename(dataDeaths, location = Country, causes = Causes, numDeathsOther = Both.sexes)
dataDeaths$location [dataDeaths$location == "Cabo Verde"] <- "Cape Verde"
dataDeaths$location [dataDeaths$location == "United States of America"] <- "United States"
dataDeaths$location [dataDeaths$location == "Republic of Moldova"] <- "Moldova"
dataDeaths$location [dataDeaths$location == "Bolivia (Plurinational State of)"] <- "Bolivia"
#sapply(dataDeaths, class)
# removing column year
dataDeaths <- dataDeaths[,-2]
head(dataDeaths)
library(reshape2)
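# dcast reshapes the death counts from long to wide: one row per country, one column per cause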
dataDeaths <- dcast(dataDeaths,location~causes)
head(dataDeaths)
#class(dataDeathsOk)
dataDeaths <- rename(dataDeaths, cardiovascular_deaths = "Cardiovascular diseases", pulmonary_deaths = "Chronic obstructive pulmonary disease", diabetes_deaths = "Diabetes mellitus", cancer_deaths = "Malignant neoplasms")
names(dataDeaths)
# top 10 countries
top10Deaths <- dataDeaths %>%
filter(location %in% c("Cape Verde", "South Africa", "Djibouti", "Sao Tome and Principe", "Libya", "Gabon", "Eswatini", "Equatorial Guinea", "Morocco", "Namibia",
"Qatar", "Bahrain", "Kuwait", "Armenia", "Israel", "Oman", "Maldives", "Singapore", "Georgia", "United Arab Emirates",
"Andorra", "San Marino", "Vatican", "Luxembourg", "Montenegro", "Belgium", "Spain", "Czechia", "Moldova", "Switzerland",
"Panama", "Costa Rica", "Dominican Republic", "Bahamas", "Honduras", "Mexico", "Belize", "Canada", "Guatemala",
"Australia", "New Zealand", "Marshall Islands", "Papua New Guinea", "Fiji", "Solomon Islands", "Vanuatu", "Samoa",
"Chile", "Peru", "Argentina", "Colombia", "Bolivia", "Ecuador", "Suriname", "Paraguay", "Guyana","Brazil","United States"))
head(top10Deaths)
top10Deaths <- rename(top10Deaths, country = location)
############################################################
############ READING DATA REBECCA ############
############################################################
dataSecurity <- read.csv("healthSecurity.csv")
head(dataSecurity)
############################################################
############ READING DATA TEMPERATURE ############
############################################################
dataTemp <- read.csv("temperatura.csv")
dataTemp <- subset(dataTemp, select = -c(X))
dataTemp <- rename(dataTemp, country = Country)
write.csv(dataTemp, file = "B-top10DataTemperature.csv")
############################################################
############ JOIN COVID + ALL EXTRA DATA ############
############################################################
# info countries extra - Karina#
library(readxl) # needed to read the file, which is an xlsx
pp <- read_excel("country-info.xlsx")
head(pp)
sapply(pp, class)
# select the relevant columns
pp <- subset(pp, select = c(COUNTRY, Government_Type, Corruption_preception))
# could also be done with pp <- select(pp, COUNTRY, Government_Type, Corruption_preception)
# rename the columns
pp <- rename(pp, country = COUNTRY, gov = Government_Type, corruption = Corruption_preception)
# obesity + extra country info
obExtra <- left_join(ob, pp, by = "country")
head(obExtra)
extra <- left_join(top10Deaths, dataSecurity, by = "country")
#extra <- left_join(extra, dataTemp, by = "country")
extra <- left_join(obExtra, extra, by = "country")
# top 10 countries extra
extra <- extra %>%
filter(country %in% c("Cape Verde", "South Africa", "Djibouti", "Sao Tome and Principe", "Libya", "Gabon", "Eswatini", "Equatorial Guinea", "Morocco", "Namibia",
"Qatar", "Bahrain", "Kuwait", "Armenia", "Israel", "Oman", "Maldives", "Singapore", "Georgia", "United Arab Emirates",
"Andorra", "San Marino", "Vatican", "Luxembourg", "Montenegro", "Belgium", "Spain", "Czechia", "Moldova", "Switzerland",
"Panama", "Costa Rica", "Dominican Republic", "Bahamas", "Honduras", "Mexico", "Belize", "Canada", "Guatemala",
"Australia", "New Zealand", "Marshall Islands", "Papua New Guinea", "Fiji", "Solomon Islands", "Vanuatu", "Samoa",
"Chile", "Peru", "Argentina", "Colombia", "Bolivia", "Ecuador", "Suriname", "Paraguay", "Guyana","Brazil","United States"))
# extra + covid data
dataOk <- left_join(data, extra, by = "code")
dataOk <- subset(dataOk, select = -c(country))
# dataOk = extra + covid
dataOk <- dataOk %>%
filter(location %in% c("Cape Verde", "South Africa", "Djibouti", "Sao Tome and Principe", "Libya", "Gabon", "Eswatini", "Equatorial Guinea", "Morocco", "Namibia",
"Qatar", "Bahrain", "Kuwait", "Armenia", "Israel", "Oman", "Maldives", "Singapore", "Georgia", "United Arab Emirates",
"Andorra", "San Marino", "Vatican", "Luxembourg", "Montenegro", "Belgium", "Spain", "Czechia", "Moldova", "Switzerland",
"Panama", "Costa Rica", "Dominican Republic", "Bahamas", "Honduras", "Mexico", "Belize", "Canada", "Guatemala",
"Australia", "New Zealand", "Marshall Islands", "Papua New Guinea", "Fiji", "Solomon Islands", "Vanuatu", "Samoa",
"Chile", "Peru", "Argentina", "Colombia", "Bolivia", "Ecuador", "Suriname", "Paraguay", "Guyana","Brazil","United States"))
write.csv(dataOk, file = "B-top10Data.csv")
|
/Rscripts/1_preprocessing.R
|
no_license
|
arixha/MVTEC-Stats-Project1
|
R
| false | false | 8,246 |
r
|
### PROJECT 1 Covid19 - Statistics
# @authors: Ànnia, Rebecca, Rocío, Victor.
############################################################
############ READING DATA FROM EXCEL/CSV COVID ############
############################################################
install.packages("readr")
install.packages("tidyverse")
library(readr);
library(tidyverse);
#setwd("~/AMAZON") !!!! Rebecca
#data <- read.csv("https://mvtec-group2.s3-eu-west-1.amazonaws.com/rawdata/A_covidDaily.csv")
# Add a try catch
data <- read.csv("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv")
data <- rename(data, code = iso_code)
head(data) # show the first rows of each column
names(data) # show the column names
# is R reading data correctly?
# does the data frame have the correct number of ROWS and COLUMNS?
dim(data)
# type of the data object
class(data)
#check table
#View(data)
#open access by name to columns
#attach(data)
#?attach
#are all columns of expected types?
sapply(data, class)
# keep the columns that have the fewest NA's
apply(is.na(data), 2, mean)
# shrink the data frame by dropping columns --> https://www.listendata.com/2015/06/r-keep-drop-columns-from-data-frame.html
data <- subset(data, select = c(code, continent, location, date, total_cases, total_cases_per_million, new_cases, reproduction_rate, total_deaths, total_deaths_per_million, new_deaths, hospital_beds_per_thousand, total_tests, total_tests_per_thousand, population, population_density, median_age, gdp_per_capita))
## dates ##
class(data$date) # --> must be a Date, not a character
library(lubridate)
#data$date <- ymd(data$date)
data$date <- as.Date(data$date, format="%Y-%m-%d")
class(data$date)
# how many NA's are there per column?
apply(is.na(data), 2, sum) # 2 = columns
############################################################
############ READING DATA OBESITY ############
############################################################
ob <- read.csv("obesitat.csv")
sapply(ob, class)
# renaming columns
ob <- rename(ob, country = Entity, code = Code, year = Year, obesity = Prevalence.of.obesity..both.sexes....WHO..2019.)
# taking 2016 year values only
ob <- filter(ob, year==2016)
# removing some rows that don't serve
ob <- ob[!(ob$country=="Africa" | ob$country=="Americas" | ob$country=="Eastern Mediterranean" | ob$country=="Europe" | ob$country=="Global" | ob$country=="South-East Asia" | ob$country=="Western Pacific"),]
head(ob)
ob <- subset(ob, select = -c(year))
############################################################
############ READING DATA CAUSES DEATH ############
############################################################
dataDeaths <- read.csv("mortality_causes.csv")
#head(dataDeaths)
#View(dataDeaths)
# renaming columns and rows
dataDeaths <- rename(dataDeaths, location = Country, causes = Causes, numDeathsOther = Both.sexes)
dataDeaths$location [dataDeaths$location == "Cabo Verde"] <- "Cape Verde"
dataDeaths$location [dataDeaths$location == "United States of America"] <- "United States"
dataDeaths$location [dataDeaths$location == "Republic of Moldova"] <- "Moldova"
dataDeaths$location [dataDeaths$location == "Bolivia (Plurinational State of)"] <- "Bolivia"
#sapply(dataDeaths, class)
# removing column year
dataDeaths <- dataDeaths[,-2]
head(dataDeaths)
library(reshape2)
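# dcast reshapes the death counts from long to wide: one row per country, one column per cause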
dataDeaths <- dcast(dataDeaths,location~causes)
head(dataDeaths)
#class(dataDeathsOk)
dataDeaths <- rename(dataDeaths, cardiovascular_deaths = "Cardiovascular diseases", pulmonary_deaths = "Chronic obstructive pulmonary disease", diabetes_deaths = "Diabetes mellitus", cancer_deaths = "Malignant neoplasms")
names(dataDeaths)
# top 10 countries
top10Deaths <- dataDeaths %>%
filter(location %in% c("Cape Verde", "South Africa", "Djibouti", "Sao Tome and Principe", "Libya", "Gabon", "Eswatini", "Equatorial Guinea", "Morocco", "Namibia",
"Qatar", "Bahrain", "Kuwait", "Armenia", "Israel", "Oman", "Maldives", "Singapore", "Georgia", "United Arab Emirates",
"Andorra", "San Marino", "Vatican", "Luxembourg", "Montenegro", "Belgium", "Spain", "Czechia", "Moldova", "Switzerland",
"Panama", "Costa Rica", "Dominican Republic", "Bahamas", "Honduras", "Mexico", "Belize", "Canada", "Guatemala",
"Australia", "New Zealand", "Marshall Islands", "Papua New Guinea", "Fiji", "Solomon Islands", "Vanuatu", "Samoa",
"Chile", "Peru", "Argentina", "Colombia", "Bolivia", "Ecuador", "Suriname", "Paraguay", "Guyana","Brazil","United States"))
head(top10Deaths)
top10Deaths <- rename(top10Deaths, country = location)
############################################################
############ READING DATA REBECCA ############
############################################################
dataSecurity <- read.csv("healthSecurity.csv")
head(dataSecurity)
############################################################
############ READING DATA TEMPERATURE ############
############################################################
dataTemp <- read.csv("temperatura.csv")
dataTemp <- subset(dataTemp, select = -c(X))
dataTemp <- rename(dataTemp, country = Country)
write.csv(dataTemp, file = "B-top10DataTemperature.csv")
############################################################
############ JOIN COVID + ALL EXTRA DATA ############
############################################################
# info countries extra - Karina#
library(readxl) # needed to read the file, which is an xlsx
pp <- read_excel("country-info.xlsx")
head(pp)
sapply(pp, class)
# select the relevant columns
pp <- subset(pp, select = c(COUNTRY, Government_Type, Corruption_preception))
# could also be done with pp <- select(pp, COUNTRY, Government_Type, Corruption_preception)
# rename the columns
pp <- rename(pp, country = COUNTRY, gov = Government_Type, corruption = Corruption_preception)
# obesity + extra country info
obExtra <- left_join(ob, pp, by = "country")
head(obExtra)
extra <- left_join(top10Deaths, dataSecurity, by = "country")
#extra <- left_join(extra, dataTemp, by = "country")
extra <- left_join(obExtra, extra, by = "country")
# top 10 countries extra
extra <- extra %>%
filter(country %in% c("Cape Verde", "South Africa", "Djibouti", "Sao Tome and Principe", "Libya", "Gabon", "Eswatini", "Equatorial Guinea", "Morocco", "Namibia",
"Qatar", "Bahrain", "Kuwait", "Armenia", "Israel", "Oman", "Maldives", "Singapore", "Georgia", "United Arab Emirates",
"Andorra", "San Marino", "Vatican", "Luxembourg", "Montenegro", "Belgium", "Spain", "Czechia", "Moldova", "Switzerland",
"Panama", "Costa Rica", "Dominican Republic", "Bahamas", "Honduras", "Mexico", "Belize", "Canada", "Guatemala",
"Australia", "New Zealand", "Marshall Islands", "Papua New Guinea", "Fiji", "Solomon Islands", "Vanuatu", "Samoa",
"Chile", "Peru", "Argentina", "Colombia", "Bolivia", "Ecuador", "Suriname", "Paraguay", "Guyana","Brazil","United States"))
# extra + covid data
dataOk <- left_join(data, extra, by = "code")
dataOk <- subset(dataOk, select = -c(country))
# dataOk = extra + covid
dataOk <- dataOk %>%
filter(location %in% c("Cape Verde", "South Africa", "Djibouti", "Sao Tome and Principe", "Libya", "Gabon", "Eswatini", "Equatorial Guinea", "Morocco", "Namibia",
"Qatar", "Bahrain", "Kuwait", "Armenia", "Israel", "Oman", "Maldives", "Singapore", "Georgia", "United Arab Emirates",
"Andorra", "San Marino", "Vatican", "Luxembourg", "Montenegro", "Belgium", "Spain", "Czechia", "Moldova", "Switzerland",
"Panama", "Costa Rica", "Dominican Republic", "Bahamas", "Honduras", "Mexico", "Belize", "Canada", "Guatemala",
"Australia", "New Zealand", "Marshall Islands", "Papua New Guinea", "Fiji", "Solomon Islands", "Vanuatu", "Samoa",
"Chile", "Peru", "Argentina", "Colombia", "Bolivia", "Ecuador", "Suriname", "Paraguay", "Guyana","Brazil","United States"))
write.csv(dataOk, file = "B-top10Data.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/antsVersions.R
\name{antsVersions}
\alias{antsVersions}
\title{antsVersions}
\format{A data frame listing the following variables.
\describe{
\item{\code{Dependency}}{ Name of software dependency. }
\item{\code{GitTag}}{ The git tag. This can also be used to trace other dependencies, e.g. the ITK version used by the current ANTs version. }
}}
\description{
A data frame defining the git tag for the current
atropos version of ANTs.
}
\references{
\url{https://github.com/stnava/ANTs}
}
|
/man/antsVersions.Rd
|
permissive
|
muschellij2/atropos
|
R
| false | true | 573 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/antsVersions.R
\name{antsVersions}
\alias{antsVersions}
\title{antsVersions}
\format{A data frame listing the following variables.
\describe{
\item{\code{Dependency}}{ Name of software dependency. }
\item{\code{GitTag}}{ The git tag. This can also be used to trace other dependencies, e.g. the ITK version used by the current ANTs version. }
}}
\description{
A data frame defining the git tag for the current
atropos version of ANTs.
}
\references{
\url{https://github.com/stnava/ANTs}
}
|
married_name_change <- function(df, n_errors, lname, sex, dob = NULL, age = NULL){
df_s <- df[df[[sex]] == "f",]
if(!is.null(dob)){
# interval(start = dob, end = givendate) /
# duration(num = 1, units = "years")
} else if(!is.null(age)){
#filter age
df_s <- df_s[df_s[[age]] > 20,]
}
if (n_errors > nrow(df_s)){
warning("Not enough samples found for simulating married last name change.")
n_errors <- nrow(df_s)
}
# browser()
candidate_ids <- sample(df_s$id, n_errors)
old_lnames <- df[df$id %in% candidate_ids,][[lname]]
new_names <- lnames_all %>%
sample(length(candidate_ids))
df[df$id %in% candidate_ids, lname] <- new_names
# error_record <- attr(df, "error_record")
df <- update_error_record(df,
df[df$id %in% candidate_ids, ][["id"]],
lname,
"married_name_change",
old_lnames,
new_names)
df
}
add_duplicates <- function(df_pairs, n_errors){
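  # Take ids that already received an error in df_secondary and append their untouched rows
  # from df_original, so the secondary file ends up with duplicate records of those people.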
df_original <- df_pairs$df_original
df_secondary <- df_pairs$df_secondary
ids <-
attributes(df_secondary) %>%
pluck("error_record") %>%
sample_n(nrow(.)) %>%
pull(id) %>%
unique()
ids <- sample(ids, n_errors)
df_secondary_new <-
df_original %>%
filter(id %in% ids) %>%
bind_rows(df_secondary)
# browser()
attr(df_secondary_new, "error_record") <-
attr(df_secondary, "error_record")
df_secondary_new <-
update_error_record(df_secondary_new,
ids,
"all_fields",
"duplicate",
"original",
"original")
df_pairs$df_secondary <- df_secondary_new
attr(df_pairs$df_secondary, "error_record") <-
attr(df_secondary_new, "error_record")
df_pairs
}
#give an id_field you would like to use
# duplicates <- function(df, n_errors, id_col){
#
# # stopifnot(length(col_names) == 1)
#
# if(n_errors > nrow(df)){
# warning("Nor enough samples found for generating duplicates")
# n_errors <- nrow(df)
# }
#
# if (nrow(df) == 1){
# candidate_ids <- df$id
# dup_df <- df
# old_vals <- dup_df[[id_col]]
# new_vals <- list(df[[id_col]] %>%
# indel() %>%
# repl()) %>%
# transpose()
# } else{
# candidate_ids <- sample(df$id, n_errors)
# dup_df <- df[df$id %in% candidate_ids,]
# old_vals <- dup_df[[id_col]]
# new_vals <- list(
# sample(df[[id_col]][!df$id %in% candidate_ids], n_errors) %>%
# indel() %>%
# repl()) %>%
# transpose()
# }
#
# new_vals <- unlist(new_vals)
# if(all(str_length(str_extract(new_vals, one_or_more(DIGIT))) == str_length(new_vals))){
# new_vals <- as.integer(new_vals)
# }
#
# dup_df[, id_col] <- new_vals
#
# error_record <- attr(df, "error_record")
#
# df <- bind_rows(dup_df, df) %>% arrange(id)
#
# attr(df, "error_record") <- error_record
#
# # browser()
# df <- update_error_record(df,
# candidate_ids,
# id_col,
# "duplicates",
# old_vals,
# new_vals)
# df
# }
twins_generate <- function(df, n_errors, fname, id_col = NULL, sex = NULL){
# fname <- col_names
if(n_errors > nrow(df)){
warning("Nor enough samples found for generating duplicates")
n_errors <- nrow(df)
}
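  # Build a first-name lookup (name, sex, length); for each selected record, search_name() draws a
  # replacement first name with the same length and first letter (a plausible twin's name).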
fnames_lookup <-
tibble(fname = fnames_male, sex = "m") %>%
bind_rows(tibble(fname = fnames_female, sex = "f") %>% sample_n(3000)) %>%
sample_n(nrow(.)) %>%
mutate(fname_len = str_length(fname))
search_name <- function(name){
fnames_lookup %>%
filter(fname_len == str_length(name),
str_sub(fname, 1, 1) == str_sub(name, 1, 1)) %>%
sample_n(1) %>%
select(-fname_len)
}
if (nrow(df) == 1){
candidate_ids <- df$id
twins_df <- df[df$id %in% candidate_ids,]
} else{
candidate_ids <- sort(sample(df$id, n_errors))
twins_df <- df[df$id %in% candidate_ids,]
}
twins_df_cp <- twins_df
fnames_old <- twins_df[[fname]]
twins_df_new <- map_df(fnames_old, search_name)
twins_df[[fname]] <- twins_df_new$fname
twins_df$id <- str_c("123", twins_df$id, "789") %>% as.integer()
if(!is.null(sex)){
twins_df[[sex]] <- twins_df_new$sex
}
if(!is.null(id_col)){
twins_df[[id_col]] <- repl(twins_df[[id_col]])
}
error_record <- attr(df, "error_record")
df <- df %>% bind_rows(twins_df) %>% arrange(id)
attr(df, "error_record") <- error_record
df <- update_error_record(df,
candidate_ids,
fname,
"twins",
twins_df_cp$fname,
twins_df[[fname]])
df
}
twins_identify <- function(){
}
|
/R/6_file_based_errors.R
|
no_license
|
pinformatics/rlErrorGeneratoR
|
R
| false | false | 4,943 |
r
|
married_name_change <- function(df, n_errors, lname, sex, dob = NULL, age = NULL){
df_s <- df[df[[sex]] == "f",]
if(!is.null(dob)){
# interval(start = dob, end = givendate) /
# duration(num = 1, units = "years")
} else if(!is.null(age)){
#filter age
df_s <- df_s[df_s[[age]] > 20,]
}
if (n_errors > nrow(df_s)){
warning("Not enough samples found for simulating married last name change.")
n_errors <- nrow(df_s)
}
# browser()
candidate_ids <- sample(df_s$id, n_errors)
old_lnames <- df[df$id %in% candidate_ids,][[lname]]
new_names <- lnames_all %>%
sample(length(candidate_ids))
df[df$id %in% candidate_ids, lname] <- new_names
# error_record <- attr(df, "error_record")
df <- update_error_record(df,
df[df$id %in% candidate_ids, ][["id"]],
lname,
"married_name_change",
old_lnames,
new_names)
df
}
add_duplicates <- function(df_pairs, n_errors){
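  # Take ids that already received an error in df_secondary and append their untouched rows
  # from df_original, so the secondary file ends up with duplicate records of those people.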
df_original <- df_pairs$df_original
df_secondary <- df_pairs$df_secondary
ids <-
attributes(df_secondary) %>%
pluck("error_record") %>%
sample_n(nrow(.)) %>%
pull(id) %>%
unique()
ids <- sample(ids, n_errors)
df_secondary_new <-
df_original %>%
filter(id %in% ids) %>%
bind_rows(df_secondary)
# browser()
attr(df_secondary_new, "error_record") <-
attr(df_secondary, "error_record")
df_secondary_new <-
update_error_record(df_secondary_new,
ids,
"all_fields",
"duplicate",
"original",
"original")
df_pairs$df_secondary <- df_secondary_new
attr(df_pairs$df_secondary, "error_record") <-
attr(df_secondary_new, "error_record")
df_pairs
}
#give an id_field you would like to use
# duplicates <- function(df, n_errors, id_col){
#
# # stopifnot(length(col_names) == 1)
#
# if(n_errors > nrow(df)){
# warning("Nor enough samples found for generating duplicates")
# n_errors <- nrow(df)
# }
#
# if (nrow(df) == 1){
# candidate_ids <- df$id
# dup_df <- df
# old_vals <- dup_df[[id_col]]
# new_vals <- list(df[[id_col]] %>%
# indel() %>%
# repl()) %>%
# transpose()
# } else{
# candidate_ids <- sample(df$id, n_errors)
# dup_df <- df[df$id %in% candidate_ids,]
# old_vals <- dup_df[[id_col]]
# new_vals <- list(
# sample(df[[id_col]][!df$id %in% candidate_ids], n_errors) %>%
# indel() %>%
# repl()) %>%
# transpose()
# }
#
# new_vals <- unlist(new_vals)
# if(all(str_length(str_extract(new_vals, one_or_more(DIGIT))) == str_length(new_vals))){
# new_vals <- as.integer(new_vals)
# }
#
# dup_df[, id_col] <- new_vals
#
# error_record <- attr(df, "error_record")
#
# df <- bind_rows(dup_df, df) %>% arrange(id)
#
# attr(df, "error_record") <- error_record
#
# # browser()
# df <- update_error_record(df,
# candidate_ids,
# id_col,
# "duplicates",
# old_vals,
# new_vals)
# df
# }
twins_generate <- function(df, n_errors, fname, id_col = NULL, sex = NULL){
# fname <- col_names
if(n_errors > nrow(df)){
warning("Nor enough samples found for generating duplicates")
n_errors <- nrow(df)
}
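  # Build a first-name lookup (name, sex, length); for each selected record, search_name() draws a
  # replacement first name with the same length and first letter (a plausible twin's name).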
fnames_lookup <-
tibble(fname = fnames_male, sex = "m") %>%
bind_rows(tibble(fname = fnames_female, sex = "f") %>% sample_n(3000)) %>%
sample_n(nrow(.)) %>%
mutate(fname_len = str_length(fname))
search_name <- function(name){
fnames_lookup %>%
filter(fname_len == str_length(name),
str_sub(fname, 1, 1) == str_sub(name, 1, 1)) %>%
sample_n(1) %>%
select(-fname_len)
}
if (nrow(df) == 1){
candidate_ids <- df$id
twins_df <- df[df$id %in% candidate_ids,]
} else{
candidate_ids <- sort(sample(df$id, n_errors))
twins_df <- df[df$id %in% candidate_ids,]
}
twins_df_cp <- twins_df
fnames_old <- twins_df[[fname]]
twins_df_new <- map_df(fnames_old, search_name)
twins_df[[fname]] <- twins_df_new$fname
twins_df$id <- str_c("123", twins_df$id, "789") %>% as.integer()
if(!is.null(sex)){
twins_df[[sex]] <- twins_df_new$sex
}
if(!is.null(id_col)){
twins_df[[id_col]] <- repl(twins_df[[id_col]])
}
error_record <- attr(df, "error_record")
df <- df %>% bind_rows(twins_df) %>% arrange(id)
attr(df, "error_record") <- error_record
df <- update_error_record(df,
candidate_ids,
fname,
"twins",
twins_df_cp$fname,
twins_df[[fname]])
df
}
twins_identify <- function(){
}
|
rm(list = ls())
library(Daniel)
library(dplyr)
library(nnet)
CalcCImultinom <- function(fit)
{
s <- summary(fit)
coef <- s$coefficients
ses <- s$standard.errors
ci.1 <- coef[1,2] + c(-1, 1)*1.96*ses[1, 2]
ci.2 <- coef[2,2] + c(-1, 1)*1.96*ses[2, 2]
return(rbind(ci.1,ci.2))
}
#key
# A, B,C,D,E,F - betaE[2] = 1.25, 1.5, 1.75, 2, 2.25, 2.5
# A,B,C, D, E,F - betaU = 2,3,4,5,6,7
patt <- "CA"
beta0 <- c(-6, -5)
betaE <- c(log(1), log(1.75))
betaU <- c(log(1.5), log(2))
sigmaU <- 1
n.sample <- 50000
n.sim <- 1000
AllY <- matrix(nr = n.sim, nc = 3)
sace.diff1 <- sace.diff2 <- ace.diff1 <- ace.diff2 <-
sace.or1 <- sace.or2 <- ace.or1 <- ace.or2 <-
or.approx1 <- or.approx2 <- or.approx.true1 <- or.approx.true2 <-
pop.never.s1 <- pop.never.s2 <- vector(length = n.sim)
ci1 <- ci2 <- matrix(nr = n.sim, nc = 2)
for (j in 1:n.sim)
{
CatIndex(j)
# Simulate genetic score
U <- rnorm(n.sample, 0, sd = sigmaU)
#### Calculate probabilities for each subtype with and without the exposure ####
e1E0 <- exp(beta0[1] + betaU[1]*U)
e1E1 <- exp(beta0[1] + betaE[1] + betaU[1]*U)
e2E0 <- exp(beta0[2] + betaU[2]*U)
e2E1 <- exp(beta0[2] + betaE[2] + betaU[2]*U)
prE0Y1 <- e1E0/(1 + e1E0 + e2E0)
prE0Y2 <- e2E0/(1 + e1E0 + e2E0)
prE1Y1 <- e1E1/(1 + e1E1 + e2E1)
prE1Y2 <- e2E1/(1 + e1E1 + e2E1)
probsE0 <- cbind(prE0Y1, prE0Y2, 1 - prE0Y1 - prE0Y2)
probsE1 <- cbind(prE1Y1, prE1Y2, 1 - prE1Y1 - prE1Y2)
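  # probsE0 / probsE1 are the multinomial-logit (softmax) probabilities of subtype 1, subtype 2,
  # and no disease for each subject, under no exposure and under exposure respectively.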
# Simulate subtypes #
Yctrl <- Ytrt <- vector(length = n.sample)
X <- rbinom(n = n.sample, 1, 0.5)
for (i in 1:n.sample)
{
Yctrl[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE0[i, ])
Ytrt[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE1[i, ])
}
Y <- (1-X)*Yctrl + X*Ytrt
AllY[j, ] <- table(Y)
Y1ctrl <- Yctrl==1
Y1trt <- Ytrt==1
Y2ctrl <- Yctrl==2
Y2trt <- Ytrt==2
pop.never.s1[j] <- mean(Y1ctrl==0 & Y1trt==0)
pop.never.s2[j] <- mean(Y2ctrl==0 & Y2trt==0)
# estimate causal parameters
sace.diff1[j] <- mean((Y1trt - Y1ctrl)[Y2ctrl==0 & Y2trt==0])
sace.diff2[j]<- mean((Y2trt - Y2ctrl)[Y1ctrl==0 & Y1trt==0])
  ace.diff1[j] <- mean(Y1trt[Y2trt==0 & X==1]) - mean(Y1ctrl[Y2ctrl==0 & X==0])
  ace.diff2[j] <- mean(Y2trt[Y1trt==0 & X==1]) - mean(Y2ctrl[Y1ctrl==0 & X==0])
# Ypo <- c(Yctrl, Ytrt)
# Upo <- rep(U,2)
# Xpo <- rep(x = c(0,1), each = n.sample)
# fit.full.po <- multinom(Ypo~ Xpo + Upo)
# fit.po <- multinom(Ypo~ Xpo)
fit <- multinom(Y~ X)
cis <- CalcCImultinom(fit)
ci1[j, ] <- cis[1, ]
ci2[j, ] <- cis[2, ]
Y1only <- Y[Y<2]
X1only <- X[Y<2]
U1only <-U[Y<2]
Y2only <- Y[Y!=1]
X2only <- X[Y!=1]
U2only <-U[Y!=1]
Y2only[Y2only>0] <- 1
vec.for.or.1only <- c(sum((1 - Y1only) * (1 - X1only)) , sum(Y1only * (1 - X1only)),
sum((1 - Y1only) * X1only), sum(Y1only*X1only))
vec.for.or.2only <- c(sum((1 - Y2only) * (1 - X2only)) , sum(Y2only * (1 - X2only)),
sum((1 - Y2only) * X2only), sum(Y2only*X2only))
ace.or1[j] <- CalcOR(vec.for.or.1only)
ace.or2[j] <- CalcOR(vec.for.or.2only)
Y1only.sace <- Y[Ytrt <2 & Yctrl < 2]
X1only.sace <- X[Ytrt <2 & Yctrl < 2]
U1only.sace <-U[Ytrt <2 & Yctrl < 2]
Y2only.sace <- Y[Ytrt!=1 & Y1ctrl!=1]
X2only.sace <- X[Ytrt!=1 & Y1ctrl!=1]
U2only.sace <-U[Ytrt!=1 & Y1ctrl!=1]
Y2only.sace[Y2only.sace>0] <- 1
vec.for.or.sace1 <- c(sum((1 - Y1only.sace) * (1 - X1only.sace)) , sum(Y1only.sace * (1 - X1only.sace)),
sum((1 - Y1only.sace) * X1only.sace), sum(Y1only.sace*X1only.sace))
vec.for.or.sace2 <- c(sum((1 - Y2only.sace) * (1 - X2only.sace)) , sum(Y2only.sace * (1 - X2only.sace)),
sum((1 - Y2only.sace) * X2only.sace), sum(Y2only.sace*X2only.sace))
sace.or1[j] <- CalcOR(vec.for.or.sace1)
sace.or2[j] <- CalcOR(vec.for.or.sace2)
Y1 <- Y==1
Y2 <- Y==2
fit.logistic.Y1 <- glm(Y1 ~ X, family = "binomial")
fit.logistic.true.Y1 <- glm(Y1 ~ X + U, family = "binomial")
fit.logistic.Y2 <- glm(Y2 ~ X, family = "binomial")
fit.logistic.true.Y2 <- glm(Y2 ~ X + U, family = "binomial")
or.approx1[j] <- exp(coef(fit.logistic.Y1)[2])
or.approx.true1[j] <- exp(coef(fit.logistic.true.Y1)[2])
or.approx2[j] <- exp(coef(fit.logistic.Y2)[2])
or.approx.true2[j] <- exp(coef(fit.logistic.true.Y2)[2])
}
save.image(paste0("CMPEn50krareScen8",patt,".RData"))
|
/Simulations/Scripts/R/Rare/Scenario 8/CMPEn50KrareScen8CA.R
|
no_license
|
yadevi/CausalMPE
|
R
| false | false | 4,218 |
r
|
rm(list = ls())
library(Daniel)
library(dplyr)
library(nnet)
CalcCImultinom <- function(fit)
{
s <- summary(fit)
coef <- s$coefficients
ses <- s$standard.errors
ci.1 <- coef[1,2] + c(-1, 1)*1.96*ses[1, 2]
ci.2 <- coef[2,2] + c(-1, 1)*1.96*ses[2, 2]
return(rbind(ci.1,ci.2))
}
#key
# A, B,C,D,E,F - betaE[2] = 1.25, 1.5, 1.75, 2, 2.25, 2.5
# A,B,C, D, E,F - betaU = 2,3,4,5,6,7
patt <- "CA"
beta0 <- c(-6, -5)
betaE <- c(log(1), log(1.75))
betaU <- c(log(1.5), log(2))
sigmaU <- 1
n.sample <- 50000
n.sim <- 1000
AllY <- matrix(nr = n.sim, nc = 3)
sace.diff1 <- sace.diff2 <- ace.diff1 <- ace.diff2 <-
sace.or1 <- sace.or2 <- ace.or1 <- ace.or2 <-
or.approx1 <- or.approx2 <- or.approx.true1 <- or.approx.true2 <-
pop.never.s1 <- pop.never.s2 <- vector(length = n.sim)
ci1 <- ci2 <- matrix(nr = n.sim, nc = 2)
for (j in 1:n.sim)
{
CatIndex(j)
# Simulate genetic score
U <- rnorm(n.sample, 0, sd = sigmaU)
#### Calculate probabilities for each subtype with and without the exposure ####
e1E0 <- exp(beta0[1] + betaU[1]*U)
e1E1 <- exp(beta0[1] + betaE[1] + betaU[1]*U)
e2E0 <- exp(beta0[2] + betaU[2]*U)
e2E1 <- exp(beta0[2] + betaE[2] + betaU[2]*U)
prE0Y1 <- e1E0/(1 + e1E0 + e2E0)
prE0Y2 <- e2E0/(1 + e1E0 + e2E0)
prE1Y1 <- e1E1/(1 + e1E1 + e2E1)
prE1Y2 <- e2E1/(1 + e1E1 + e2E1)
probsE0 <- cbind(prE0Y1, prE0Y2, 1 - prE0Y1 - prE0Y2)
probsE1 <- cbind(prE1Y1, prE1Y2, 1 - prE1Y1 - prE1Y2)
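  # probsE0 / probsE1 are the multinomial-logit (softmax) probabilities of subtype 1, subtype 2,
  # and no disease for each subject, under no exposure and under exposure respectively.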
# Simulate subtypes #
Yctrl <- Ytrt <- vector(length = n.sample)
X <- rbinom(n = n.sample, 1, 0.5)
for (i in 1:n.sample)
{
Yctrl[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE0[i, ])
Ytrt[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE1[i, ])
}
Y <- (1-X)*Yctrl + X*Ytrt
AllY[j, ] <- table(Y)
Y1ctrl <- Yctrl==1
Y1trt <- Ytrt==1
Y2ctrl <- Yctrl==2
Y2trt <- Ytrt==2
pop.never.s1[j] <- mean(Y1ctrl==0 & Y1trt==0)
pop.never.s2[j] <- mean(Y2ctrl==0 & Y2trt==0)
# estimate causal parameters
sace.diff1[j] <- mean((Y1trt - Y1ctrl)[Y2ctrl==0 & Y2trt==0])
sace.diff2[j]<- mean((Y2trt - Y2ctrl)[Y1ctrl==0 & Y1trt==0])
  ace.diff1[j] <- mean(Y1trt[Y2trt==0 & X==1]) - mean(Y1ctrl[Y2ctrl==0 & X==0])
  ace.diff2[j] <- mean(Y2trt[Y1trt==0 & X==1]) - mean(Y2ctrl[Y1ctrl==0 & X==0])
# Ypo <- c(Yctrl, Ytrt)
# Upo <- rep(U,2)
# Xpo <- rep(x = c(0,1), each = n.sample)
# fit.full.po <- multinom(Ypo~ Xpo + Upo)
# fit.po <- multinom(Ypo~ Xpo)
fit <- multinom(Y~ X)
cis <- CalcCImultinom(fit)
ci1[j, ] <- cis[1, ]
ci2[j, ] <- cis[2, ]
Y1only <- Y[Y<2]
X1only <- X[Y<2]
U1only <-U[Y<2]
Y2only <- Y[Y!=1]
X2only <- X[Y!=1]
U2only <-U[Y!=1]
Y2only[Y2only>0] <- 1
vec.for.or.1only <- c(sum((1 - Y1only) * (1 - X1only)) , sum(Y1only * (1 - X1only)),
sum((1 - Y1only) * X1only), sum(Y1only*X1only))
vec.for.or.2only <- c(sum((1 - Y2only) * (1 - X2only)) , sum(Y2only * (1 - X2only)),
sum((1 - Y2only) * X2only), sum(Y2only*X2only))
ace.or1[j] <- CalcOR(vec.for.or.1only)
ace.or2[j] <- CalcOR(vec.for.or.2only)
Y1only.sace <- Y[Ytrt <2 & Yctrl < 2]
X1only.sace <- X[Ytrt <2 & Yctrl < 2]
U1only.sace <-U[Ytrt <2 & Yctrl < 2]
Y2only.sace <- Y[Ytrt!=1 & Y1ctrl!=1]
X2only.sace <- X[Ytrt!=1 & Y1ctrl!=1]
U2only.sace <-U[Ytrt!=1 & Y1ctrl!=1]
Y2only.sace[Y2only.sace>0] <- 1
vec.for.or.sace1 <- c(sum((1 - Y1only.sace) * (1 - X1only.sace)) , sum(Y1only.sace * (1 - X1only.sace)),
sum((1 - Y1only.sace) * X1only.sace), sum(Y1only.sace*X1only.sace))
vec.for.or.sace2 <- c(sum((1 - Y2only.sace) * (1 - X2only.sace)) , sum(Y2only.sace * (1 - X2only.sace)),
sum((1 - Y2only.sace) * X2only.sace), sum(Y2only.sace*X2only.sace))
sace.or1[j] <- CalcOR(vec.for.or.sace1)
sace.or2[j] <- CalcOR(vec.for.or.sace2)
Y1 <- Y==1
Y2 <- Y==2
fit.logistic.Y1 <- glm(Y1 ~ X, family = "binomial")
fit.logistic.true.Y1 <- glm(Y1 ~ X + U, family = "binomial")
fit.logistic.Y2 <- glm(Y2 ~ X, family = "binomial")
fit.logistic.true.Y2 <- glm(Y2 ~ X + U, family = "binomial")
or.approx1[j] <- exp(coef(fit.logistic.Y1)[2])
or.approx.true1[j] <- exp(coef(fit.logistic.true.Y1)[2])
or.approx2[j] <- exp(coef(fit.logistic.Y2)[2])
or.approx.true2[j] <- exp(coef(fit.logistic.true.Y2)[2])
}
save.image(paste0("CMPEn50krareScen8",patt,".RData"))
|
# Laura Niss
# Lab2
# reading histograms
# note: while we always talk about binomial(n, p) in class, in my R code this is
# equivalent to binomial(s, p); I'm using "s" to be consistent with the R documentation
s=15
n=100
p=.25
set.seed(1)
data <- rbinom(n, s, p)
data_proportion <- data/s
hist(data, breaks = 10, freq = FALSE, main = "Histogram of Successes",
xlab = "Count of successes",
ylab = "Component Density", col = "green")
hist(data, breaks = 10, freq = TRUE, main = "Histogram of Successes",
xlab = "Count of successes",
ylab = "Count of Trials", col = "green")
hist(data_proportion, breaks = 10, freq = FALSE, main = "Histogram of Successes",
xlab = "Proportion of successes",
ylab = "Component Density", col = "green")
hist(data_proportion, breaks = 10, freq = TRUE, main = "Histogram of Successes",
xlab = "Proportion of successes",
ylab = "Count of Trials", col = "green")
# In general, I would recommend "freq=TRUE" and graphing the proportions.
############################
# Let's look at how the sample mean converges to the expectation (law of large numbers),
# and how variance changes with sample size
# X~Binomial, E[X]=np, Var[X]=np(1-p)
# change n, s, and p to see how large samples compare to small samples and how a
# large number of trials compares to a small number. When both are large, what does
# the distribution look like?
s <- 30
p <- .5
n <- 10
set.seed(3)
binom_sample <- rbinom(n, s, p)
hist(binom_sample, breaks = 8, freq = TRUE, col = "green")
t_mean <- s*p # true mean
t_mean
mean(binom_sample)
t_var <- s*p*(1-p) # true variance
t_var
var(binom_sample)
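# --- Added sketch (not part of the original lab): a quick look at the law of
# --- large numbers. As n grows, the sample mean and variance of rbinom(n, s, p)
# --- settle down near the true values s*p and s*p*(1-p).
set.seed(3)
for (nn in c(10, 100, 1000, 10000)) {
  samp_nn <- rbinom(nn, s, p)
  cat("n =", nn, " sample mean =", round(mean(samp_nn), 3),
      " sample var =", round(var(samp_nn), 3),
      " (true mean =", s*p, ", true var =", s*p*(1-p), ")\n")
}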
# Compare to normal dist. with same mean and variance
# rnorm(n, mean = 0, sd = 1)
set.seed(3)
norm_sample <- rnorm(n, t_mean, sqrt(t_var))
hist(norm_sample, breaks = 8, freq = TRUE, col = "green")
# extra: compare sample mean and variance of our norm_sample to the true mean and variance.
# how does sample size affect this?
# Try setting xlim, ylim to compare histograms better
# Question: how does the normal sample differ from the binomial?
# Question: how big do you think s compared to p needs to be for a good normal approximation
# of the binomial?
# Ex:
n <- 10
s <- 10
p <- .9999999
set.seed(3)
binom_ex <- rbinom(n, s, p)
hist(binom_ex, breaks = 8, freq = TRUE, col = "green") # binomial
hist(rnorm(n, s*p, sqrt(s*p*(1-p))), breaks = 8, freq = TRUE, col = "green") # norm approx
hist(rep(10, 10), breaks = 8, freq = TRUE, col = "green")# choose 10 each time with prob 1
# which one is better at approximating our binomial dist?
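# --- Added sketch (not part of the original lab): one common rule of thumb is
# --- that the normal curve approximates binomial(s, p) well when both s*p and
# --- s*(1-p) are at least about 10. Compare a case that fails the rule (s*p = 1)
# --- with one that passes it (s*p = 25); the numbers here are illustrative only.
set.seed(3)
par(mfrow = c(2, 2))
for (pp in c(0.02, 0.5)) {
  ss <- 50
  binom_samp <- rbinom(1000, ss, pp)
  norm_samp <- rnorm(1000, ss*pp, sqrt(ss*pp*(1-pp)))
  hist(binom_samp, breaks = 20, freq = TRUE, col = "green",
       main = paste0("binomial, s*p = ", ss*pp))
  hist(norm_samp, breaks = 20, freq = TRUE, col = "green",
       main = paste0("normal approx, s*p = ", ss*pp))
}
par(mfrow = c(1, 1))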
############################
# Challenge: create an example of Simpson's paradox using rmultinom(n, size, prob);
# one possible sketch is added after the usage example below
# Ex. of rmultinom usage
n <- 1000
s <- 33
p <- c(.2, .3, .4, .1) # must sum to 1!
set.seed(3)
multi_sample <- rmultinom(n, s, p)
df <- data.frame(multi_sample)
multi_sample[,1]
mean_cat1 <- mean(multi_sample[1,])
mean_prop_in_cat1 <- mean_cat1 / s
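# --- Added sketch for the challenge above (one possible construction, not the
# --- only answer): use rmultinom to draw 2x2 counts in two strata whose
# --- treatment/control mix differs. Cell order within each stratum:
# --- (treated success, treated failure, control success, control failure).
set.seed(42)
pA <- c(0.18, 0.02, 0.60, 0.20) # stratum A: mostly controls, high success rates
pB <- c(0.24, 0.56, 0.02, 0.18) # stratum B: mostly treated, low success rates
countsA <- rmultinom(1, 1000, pA)[, 1]
countsB <- rmultinom(1, 1000, pB)[, 1]
rateA <- c(trt = countsA[1]/(countsA[1] + countsA[2]),
           ctrl = countsA[3]/(countsA[3] + countsA[4]))
rateB <- c(trt = countsB[1]/(countsB[1] + countsB[2]),
           ctrl = countsB[3]/(countsB[3] + countsB[4]))
pooled <- countsA + countsB
ratePooled <- c(trt = pooled[1]/(pooled[1] + pooled[2]),
                ctrl = pooled[3]/(pooled[3] + pooled[4]))
rateA       # treatment beats control within stratum A
rateB       # treatment beats control within stratum B
ratePooled  # but control looks better once the strata are pooled: Simpson's paradox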
|
/lab2_code_Laura.R
|
no_license
|
laurakn/Stats408
|
R
| false | false | 2,953 |
r
|