content (large_string, lengths 0-6.46M) | path (large_string, lengths 3-331) | license_type (large_string, 2 classes) | repo_name (large_string, lengths 5-125) | language (large_string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 4-6.46M) | extension (large_string, 75 classes) | text (string, lengths 0-6.46M) |
---|---|---|---|---|---|---|---|---|---|
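A minimal sketch of how rows with this schema could be filtered once loaded into R (the data.frame name rows and the loading step are assumptions, not part of this dump):

# keep hand-written R sources only, largest files first
r_sources <- subset(rows, extension == "r" & !is_vendor & !is_generated)
r_sources <- r_sources[order(-r_sources$length_bytes), c("repo_name", "path", "length_bytes")]
head(r_sources)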
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/propagate_flag.R
\name{propagate_flag}
\alias{propagate_flag}
\title{Derive flags for an aggregate using different methods}
\usage{
propagate_flag(flags, method = "", codelist = NULL, flag_weights = 0,
threshold = 0.5)
}
\arguments{
\item{flags}{A data.frame or a matrix containing the flags of the series (one column per period)
without row identifiers (e.g. country code).}
\item{method}{A string containing the method used to derive the flag for the aggregate. It can take the value
"hierarchy", "frequency" or "weighted".}
\item{codelist}{A string or character vector defining the list of acceptable flags in case the method "hierarchy"
is chosen. If the string equals "estat" or "sdmx", the predefined standard Eurostat or SDMX codelist
is used; otherwise the characters in the string define the hierarchical order.}
\item{flag_weights}{A data.frame or a matrix containing the corresponding weights of the series (one column per
period) without row identifiers (e.g. country code). It has the same size and dimension as the \code{flags} parameter.}
\item{threshold}{The threshold above which the sum of weights has to be in order for the aggregate to receive a flag.
The default value is 0.5, but it can be changed to any value.}
}
\value{
\code{propagate_flag} returns a list with the same length as the number of periods (columns) in the \code{flags}
parameter. If the method is "hierarchy" or "frequency", only the derived flag(s) is returned. If the method is
"weighted", it returns the flag(s) and the sum of weights if it is above the threshold; otherwise the list contains
\code{NA} where the sum of weights is below the threshold.
}
\description{
A wrapper function to apply the different flag derivation methods and provide a structured return value independent
of the method used.
}
\examples{
flags <- tidyr::spread(test_data[, c(1:3)], key = time, value = flags)
weights <- tidyr::spread(test_data[, c(1, 3:4)], key = time, value = values)
propagate_flag(flags[, c(2:ncol(flags))],"hierarchy","puebscd")
propagate_flag(flags[, c(2:ncol(flags))],"hierarchy","estat")
propagate_flag(flags[, c(2:ncol(flags))],"frequency")
flags<-flags[, c(2:ncol(flags))]
weights<-weights[, c(2:ncol(weights))]
propagate_flag(flags,"weighted",flag_weights=weights)
propagate_flag(flags,"weighted",flag_weights=weights,threshold=0.1)
}
\seealso{
\code{\link{flag_hierarchy}}, \code{\link{flag_frequency}}, \code{\link{flag_weighted}}
}
|
/man/propagate_flag.Rd
|
no_license
|
eurostat/flagr
|
R
| false | true | 2,507 |
rd
|
|
#### IMPORT ####
train <- read_csv(file = "data/raw/train.csv")
test <- read_csv(file = "data/raw/test.csv")
|
/Store_Item_Demand_Forecasting_Challenge/src/data/data.R
|
no_license
|
Mattias99/Kaggle
|
R
| false | false | 108 |
r
|
|
source( "masternegloglikereduced1.R" )
source("eudicottree.R" )
library( "expm" )
source( "Qmatrixwoodherb3.R" )
source("Pruning2.R")
bichrom.dataset<-read.table( "eudicotvals.txt",header=FALSE,sep=",",stringsAsFactors=FALSE)
last.state=50
uniform.samples<-read.csv("sample355.csv",header=FALSE)
a<- as.numeric(t(uniform.samples))
p.0<-rep(1,2*(last.state+1))/(2*(last.state+1))
results<-rep(0,10)
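# minimize the negative log-likelihood with Nelder-Mead, starting from the sampled values;
# try() keeps the run alive if the optimization fails, and the failure is recorded as NAs.
# results[1:9] store exp() of the fitted parameters (apparently optimized on a log scale),
# results[10] the attained negative log-likelihood.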
mle<-try(optim(par=a,fn=negloglikelihood.wh, method= "Nelder-Mead", bichrom.phy=angiosperm.tree, bichrom.data=bichrom.dataset,max.chromosome=last.state,pi.0=p.0),silent=TRUE)
print(mle)
if(class(mle)=="try-error"){results<-rep(NA,10)}else{
results[1:9]<-exp(mle$par)
results[10]<-mle$value}
write.table(results,file="results355.csv",sep=",")
|
/Reduced model optimizations/explorelikereduced355.R
|
no_license
|
roszenil/Bichromdryad
|
R
| false | false | 750 |
r
|
source( "masternegloglikereduced1.R" )
source("eudicottree.R" )
library( "expm" )
source( "Qmatrixwoodherb3.R" )
source("Pruning2.R")
bichrom.dataset<-read.table( "eudicotvals.txt",header=FALSE,sep=",",stringsAsFactors=FALSE)
last.state=50
uniform.samples<-read.csv("sample355.csv",header=FALSE)
a<- as.numeric(t(uniform.samples))
p.0<-rep(1,2*(last.state+1))/(2*(last.state+1))
results<-rep(0,10)
mle<-try(optim(par=a,fn=negloglikelihood.wh, method= "Nelder-Mead", bichrom.phy=angiosperm.tree, bichrom.data=bichrom.dataset,max.chromosome=last.state,pi.0=p.0),silent=TRUE)
print(mle)
if(class(mle)=="try-error"){results<-rep(NA,10)}else{
results[1:9]<-exp(mle$par)
results[10]<-mle$value}
write.table(results,file="results355.csv",sep=",")
|
# Title : TODO
# Objective : TODO
# Created by: abhisgup
# Created on: 4/18/2021
|
/archive/basic_script.R
|
no_license
|
defabhishek/sandbox
|
R
| false | false | 86 |
r
|
|
library(fCertificates)
### Name: DiscountPlusCertificate
### Title: DiscountPlus Certificate valuation using pricing by duplication
### Aliases: DiscountPlusCertificate
### Keywords: math
### ** Examples
##
DiscountPlusCertificate(S=42, X=42, B=30, Time=1, r=0.035, r_d=0, sigma=0.3, ratio=1)
## payoff diagram
S <- seq(0, 100)
p <- DiscountPlusCertificate(S, X=42, B=30, Time=1, r=0.035, r_d=0, sigma=0.3, ratio=1)
p2 <- DiscountPlusCertificate(S, X=42, B=30, Time=0, r=0.035, r_d=0, sigma=0.3, ratio=1)
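# p is the certificate value one year before maturity, p2 (Time=0) is presumably the
# payoff profile at maturity itself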
plot(S, p, type="l", col="red", , ylim=range(p, p2, na.rm=TRUE),
xlab="underlying price", ylab="payoff", main="Barrier Discount")
lines(S, p2, col="blue")
abline(v=c(30, 42), lty=2, col="gray80")
|
/data/genthat_extracted_code/fCertificates/examples/DiscountPlusCertificate.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 713 |
r
|
|
stmv_predictionarea_polygons = function(p, sloc, global_sppoly=NULL, windowsize.half=0, stmv_au_buffer_links=0, stmv_au_distance_reference="none" ) {
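# two modes: if global_sppoly is NULL, a local lattice of grid polygons is built around
# sloc; otherwise the areal units of global_sppoly that neighbour the focal unit are
# selected (optionally buffered by extra neighbour links and distance-filtered)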
pa_coord_names = p$stmv_variables$LOCS[1:2]
if (is.null( global_sppoly )) {
# essentially identical to stmv_predictionarea_lattice, except that sppoly is added before the time dimension
pa = stmv_predictionarea_space( p=p, sloc=sloc, windowsize.half=windowsize.half )
if (is.null(pa)) return(NULL)
## now add polygon representation of spatial lattice
dx = p$pres
dy = p$pres
x_r = range(pa[, pa_coord_names[1]])
x_c = range(pa[, pa_coord_names[2]])
nr = trunc( diff(x_r)/dx ) + 1L
nc = trunc( diff(x_c)/dy ) + 1L
# check: dr = diff(x_r)/(nr-1) == dx ;; dc = diff(x_c)/(nc-1) # == dy
# default behaviour .. lattice grid
# data_subset = NULL
sppoly = sf::st_as_sf( pa, coords=pa_coord_names, crs=st_crs( p$aegis_proj4string_planar_km) )
if (0) {
# not using raster -- incomplete
sppoly = (
st_make_grid( sppoly, cellsize=areal_units_resolution_km, what="polygons", square=TRUE )
%>% st_as_sf( crs=st_crs( p$aegis_proj4string_planar_km ))
)
spdf0 = sf::st_as_sf( pa, coords=pa_coord_names, crs=st_crs( p$aegis_proj4string_planar_km ) )
sppoly = st_as_sf( st_make_grid( spdf0, n=c(nr, nc), what="polygons", square=TRUE ) )
sppoly$internal_id = 1:nrow(sppoly)
spdf0$internal_id = st_points_in_polygons( spdf0, sppoly, varname="internal_id" )
o = match( sppoly$internal_id,spdf0$internal_id )
sppoly = st_as_sf( st_make_grid( Z, cellsize=areal_units_resolution_km, what="polygons", square=TRUE ) )
sppoly$AUID = as.character( 1:nrow(sppoly) ) # row index
row.names(sppoly) = sppoly$AUID
require(raster)
raster_template = raster::raster(extent(Z), res=areal_units_resolution_km, crs=projection(Z) ) # +1 to increase the area
sppoly = raster::rasterize( Z, raster_template, field=Z$z ) #NOTE :: TODO : move to stars::st_rasterize
sppoly = as(sppoly, "SpatialPixelsDataFrame")
sppoly = as( as(sppoly, "SpatialPolygonsDataFrame"), "sf")
raster_template = NULL
}
sppoly = as( as( raster::raster( sppoly, nrows=nr, ncols=nc ), "SpatialPolygonsDataFrame" ), "sf" )
if (exists("i", sppoly) ) {
sppoly$AUID = sppoly$i
} else {
sppoly$AUID = 1:nrow(sppoly)
}
sppoly$AUID = as.character( sppoly$AUID )
# poly* functions operate on Spatial* data
NB_graph = poly2nb(sppoly, row.names=sppoly$AUID, queen=TRUE) # slow .. ~1hr?
NB_graph.remove = which(card(NB_graph) == 0)
if ( length(NB_graph.remove) > 0 ) {
# remove isolated locations and recreate sppoly .. alternatively add links to NB_graph
NB_graph.keep = which(card(NB_graph) > 0)
NB_graph = nb_remove( NB_graph, NB_graph.remove )
sppoly = sppoly[NB_graph.keep,]
row.names(sppoly) = sppoly$AUID
sppoly = sp::spChFIDs( sppoly, row.names(sppoly) ) #fix id's
# sppoly = sppoly[ order(sppoly$AUID), ]
}
attr(sppoly, "NB_graph") = NB_graph # adding neighbourhood as an attribute to sppoly
NB_graph =NULL
NB_graph.remove =NULL
attr( pa, "sppoly" ) = sppoly
if ( exists("TIME", p$stmv_variables) ) pa = stmv_predictionarea_time( p=p, pa=pa )
rownames(pa) = NULL
return(pa)
}
# ------------
if ( !is.null( global_sppoly )) {
au = as( global_sppoly, "sf")
au$AUID = 1:nrow(au)
nbfocal = st_points_in_polygons(
pts = st_transform( st_as_sf( t(sloc), coords=c(1,2), crs=st_crs(p$aegis_proj4string_planar_km) ), crs=st_crs(au) ),
polys = au[, "AUID"],
varname="AUID"
)
if (is.na(nbfocal)) {
message( "no data" )
return (NULL)
}
NB_graph = attr(au, "NB_graph") # full matrix
nbnames = attr( NB_graph, "region.id")
nnAUID = nbnames[NB_graph[[which( nbnames == nbfocal )]]] ## nearest neighbours
tokeep = unique( c( nnAUID, nbfocal ) )
if ( stmv_au_buffer_links > 0 ) {
# no of additional neighbourhood links ... 0 == nearest neighbours, 1 == nn + next nearest neighbours, etc
for (i in 1:stmv_au_buffer_links ) {
new = NULL
for ( foc in nnAUID ) {
new = c( new, nbnames[NB_graph[[which( nbnames == foc )]]] ) ## nearest neighbours
}
tokeep = unique( c(tokeep, new) )
nnAUID = tokeep
}
}
if ( windowsize.half > 0 ) {
if (stmv_au_distance_reference=="none") {
# nothing to do
}
if (stmv_au_distance_reference=="centroid") {
# distance based filtering based on centroids
aucoo = coordinates( au )
inrange = which( (abs(aucoo[,1] - sloc[1]) <= windowsize.half) & (abs(aucoo[,2] - sloc[2]) <= windowsize.half) )
todrop = setdiff( nbnames, nbnames[inrange] )
tokeep = setdiff( tokeep, todrop)
}
if (stmv_au_distance_reference=="inside_or_touches_boundary") {
# distance based filtering based on centroids
ausf = as( au, "sf")
foc = st_buffer( ausf[ which(ausf$AUID==nbfocal), ], dist= windowsize.half )
inrange = which( unlist( list2DF( st_intersects( ausf, foc ) ) ) ==1 )
todrop = setdiff( nbnames, nbnames[inrange] )
tokeep = setdiff( tokeep, todrop)
}
if (stmv_au_distance_reference=="completely_inside_boundary") {
# distance based filtering based on centroids
ausf = as( au, "sf")
foc = st_buffer( ausf[ which(ausf$AUID==nbfocal), ], dist= windowsize.half )
inrange = which( unlist( list2DF( st_contains( ausf, foc ) ) ) ==1 )
todrop = setdiff( nbnames, nbnames[inrange] )
tokeep = setdiff( tokeep, todrop)
}
}
sppoly = au[ order( match( tokeep, au$AUID ) ), ]
row.names(sppoly) = sppoly$AUID
pa = data.frame( coordinates( sppoly ) )
names(pa) = pa_coord_names
pa$AUID = sppoly$AUID
pa$i = match( sppoly$AUID, global_sppoly$AUID )
# prediction covariates i.e., independent stmv_variables/ covariates
pvars = c(pa_coord_names, "i")
if (p$nloccov > 0) {
# .. not necessary except when covars are modelled locally
for (ci in 1:p$nloccov) {
vn = p$stmv_variables$local_cov[ci]
pu = NULL
pu = stmv_attach( p$storage_backend, p$ptr$Pcov[[vn]] )
nts = ncol(pu)
if ( nts== 1 ) {
pvars = c( pvars, vn )
pa[,vn] = pu[pa$i] # i.e., a static variable
}
}
}
pa = as.data.frame(pa[, ..pvars])
# completed, reconstruction of spatial vars
# Time next
if ( exists("TIME", p$stmv_variables) ) pa = stmv_predictionarea_time( p=p, pa=pa )
# poly* functions operate on Spatial* data
# NB_graph = attr(au, "NB_graph") # full matrix
# nbnames = attr( NB_graph, "region.id")
nnAUID = nbnames[NB_graph[[which( nbnames == nbfocal )]]] ## nearest neighbours
tokeep = unique( c( nnAUID, nbfocal ) )
NB_graph.remove = which( ! (nbnames %in% sppoly$AUID ) )
if ( length(NB_graph.remove) > 0 ) {
# remove isolated locations and recreate sppoly .. alternatively add links to NB_graph
NB_graph = nb_remove( NB_graph, NB_graph.remove )
sppoly = sp::spChFIDs( sppoly, row.names(sppoly) ) #fix id's
# sppoly = sppoly[ order(sppoly$AUID), ]
}
attr(sppoly, "NB_graph") = NB_graph # adding neighbourhood as an attribute to sppoly
NB_graph =NULL
NB_graph.remove =NULL
attr( pa, "sppoly" ) = sppoly
rownames(pa) = NULL
return(pa)
}
if (0) {
jj = which( card(NB_graph) == 0)
jj = match( tokeep, au$AUID )
plot(sppoly)
plot(sppoly[jj,], add=T, col="red")
dev.new()
edit(NB_graph, polys=sppoly)
card(NB_graph) # last check if any more isolated areas
}
}
|
/R/stmv_predictionarea_polygons.R
|
permissive
|
jae0/stmv
|
R
| false | false | 8,055 |
r
|
|
# Note: This demo is based on code by Frank Bergmann
# http://frank-fbergmann.blogspot.co.uk/2012/07/r-bindings-for-libsbml-550.html
# load an SBML file
data(enzymaticreaction)
# create a _p_SBMLDocument object
doc = readSBMLFromString(enzymaticreaction)
# now create a _p_Model object
mod = SBMLDocument_getModel(doc)
# get a _p_Species object from the model
s <- Model_getSpecies(mod ,0)
# no CV terms will be added if there is no metaid to reference,
# so set a metaId if not already set
if (SBase_isSetMetaId(s) == FALSE)
SBase_setMetaId(s, "__meta2501")
# create CVTerm objects
cv <- CVTerm("BIOLOGICAL_QUALIFIER")
CVTerm_setBiologicalQualifierType(cv, "BQB_IS_VERSION_OF")
CVTerm_addResource(cv, "http://www.geneontology.org/#GO:0005892")
cv2 <- CVTerm("BIOLOGICAL_QUALIFIER")
CVTerm_setBiologicalQualifierType(cv2, "BQB_IS")
CVTerm_addResource(cv2, "http://www.geneontology.org/#GO:0005895")
cv1 <- CVTerm("BIOLOGICAL_QUALIFIER")
CVTerm_setBiologicalQualifierType(cv1, "BQB_IS_VERSION_OF")
CVTerm_addResource(cv1, "http://www.ebi.ac.uk/interpro/#IPR002394")
#now add the CV terms cv, cv2, cv1 to the _p_Species object s
SBase_addCVTerm(s, cv)
SBase_addCVTerm(s, cv2)
SBase_addCVTerm(s, cv1)
# The SBML document with CV terms added can be written using
# writeSBML(doc, "nameOfOutputFile.xml")
|
/package/demo/addCVTerms.R
|
no_license
|
jwcasement/libsbmlwrapper
|
R
| false | false | 1,312 |
r
|
|
#' @importFrom stats sd predict complete.cases median quantile as.formula model.frame
#' @importFrom stats binomial prcomp model.matrix terms poly cov var optimize
#' @importFrom stats mahalanobis runif cor na.pass
#' @importFrom purrr map map_dbl map_lgl map_chr map_df map2_df map_dfc reduce
#' @importFrom purrr map_dfr map_if
#' @importFrom ipred ipredbagg
#' @importFrom tibble as_tibble add_column is_tibble tibble
#' @importFrom dplyr filter group_by count ungroup do select_vars tbl_vars mutate
#' @importFrom dplyr tibble bind_rows slice right_join rename select full_join
#' @importFrom dplyr arrange desc bind_cols sample_n sample_frac mutate_at
#' @importFrom Matrix Matrix
#' @importFrom rlang quos call2 sym quo_get_expr quo_text expr f_lhs f_rhs
#' @importFrom rlang is_empty is_quosure as_character na_dbl syms !! names2
#' @importFrom rlang quo quo_squash exec na_dbl
#' @importFrom gower gower_topn
#' @importFrom lubridate year yday week decimal_date quarter semester wday month
#' @importFrom lubridate is.Date
#' @importFrom utils stack globalVariables packageVersion object.size install.packages
#' @importFrom tidyselect everything
#' @importFrom withr with_seed
#' @importFrom splines ns bs
# ------------------------------------------------------------------------------
utils::globalVariables(
c(
".", ".orig_order", # roles.R
"type", "new_type", # misc.R
"variable", # novel.R
"estimate", # lowerimpute.R
".row" # integer.R
)
)
# ------------------------------------------------------------------------------
# nocov start
.onLoad <- function(libname, pkgname) {
# This package has specific methods for the `tunable` generic. That generic
# is defined in the `tune` package. As of R 4.0, we need to register them.
recipe_exports <- getNamespaceExports(ns = "recipes")
tunable_steps <- grep("tunable.step", recipe_exports, fixed = TRUE, value = TRUE)
for (i in tunable_steps) {
s3_register("dplyr::tune", i)
}
}
s3_register <- function(generic, class, method = NULL) {
stopifnot(is.character(generic), length(generic) == 1)
stopifnot(is.character(class), length(class) == 1)
pieces <- strsplit(generic, "::")[[1]]
stopifnot(length(pieces) == 2)
package <- pieces[[1]]
generic <- pieces[[2]]
caller <- parent.frame()
get_method_env <- function() {
top <- topenv(caller)
if (isNamespace(top)) {
asNamespace(environmentName(top))
} else {
caller
}
}
get_method <- function(method, env) {
if (is.null(method)) {
get(paste0(generic, ".", class), envir = get_method_env())
} else {
method
}
}
method_fn <- get_method(method)
stopifnot(is.function(method_fn))
# Always register hook in case package is later unloaded & reloaded
setHook(
packageEvent(package, "onLoad"),
function(...) {
ns <- asNamespace(package)
# Refresh the method, it might have been updated by `devtools::load_all()`
method_fn <- get_method(method)
registerS3method(generic, class, method_fn, envir = ns)
}
)
# Avoid registration failures during loading (pkgload or regular)
if (!isNamespaceLoaded(package)) {
return(invisible())
}
envir <- asNamespace(package)
# Only register if generic can be accessed
if (exists(generic, envir)) {
registerS3method(generic, class, method_fn, envir = envir)
}
invisible()
}
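# Usage sketch (hypothetical generic and class, for illustration only):
#   s3_register("somepkg::some_generic", "my_step_class")
# registers the method lazily via an onLoad hook, so somepkg can stay an optional
# (Suggests-level) dependency of this package.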
# nocov end
|
/R/0_imports.R
|
no_license
|
EmilHvitfeldt/recipes
|
R
| false | false | 3,671 |
r
|
|
###################################################################################
#' Single Ensemble: Epsilon Greedy
#'
#' Eps. Greedy - Select the model with the greatest success with probability 1-epsilon (greedy decision),
#' select a random model with probability epsilon (explorative decision).
#' That means, the algorithm is completely greedy with epsilon=0, and completely explorative with epsilon=1\cr
#' The default reward function is \code{spotConfig$seq.ensemble.feed.func<-\link{spotFeedback.reward.bern}}.\cr
#' The default value of epsilon is set by \code{spotConfig$seq.greed.epsilon<-0.5}.
#'
#' This is a "single ensemble", meaning that in every sequential step only one model in the ensemble is trained and evaluated.
#' The target is to actively "learn" which of the models are most suitable, based on their individual success.\cr
#' The models used are specified in the \code{spotConfig} list, for instance:\cr
#' \code{spotConfig$seq.ensemble.predictors = c(spotPredictRandomForest, spotPredictEarth, spotPredictForrester, spotPredictDace)}\cr
#' To specify the settings of each individual model, set:\cr
#' \code{seq.ensemble.settings = list(list(setting=1),list(setting=2),list(setting=3),list(setting=4))}\cr
#' Any parameters set in each of the corresponding lists (here: 4 individual lists) will overwrite settings in the main \code{spotConfig} list,
#' when the concerned model function is called.
#'
#' @param rawB unmerged data
#' @param mergedB merged data
#' @param design new design points which should be predicted
#' @param spotConfig global list of all options, needed to provide data for calling functions
#' @param fit if an existing model ensemble fit is supplied, the models will not be built based on
#' data, but only evaluated with the existing fits (on the design data). To build the model,
#' this parameter has to be NULL. If it is not NULL the parameters mergedB and rawB will not be
#' used at all in the function.
#'
#' @return returns the list \code{spotConfig}
#'
#' @references - Joannes Vermorel and Mehryar Mohri. 2005. Multi-armed bandit algorithms and empirical evaluation.
#' In Proceedings of the 16th European conference on Machine Learning (ECML'05), Joao Gama, Rui Camacho,
#' Pavel B. Brazdil, Alipio Mario Jorge, and Luis Torgo (Eds.). Springer-Verlag, Berlin, Heidelberg, 437-448.\cr
#' - M. Friese, M. Zaefferer, T. Bartz-Beielstein, O. Flasch, P. Koch, W. Konen, and
#' B. Naujoks. Ensemble based optimization and tuning algorithms. In F. Hoffmann
#' and E. Huellermeier, editors, Proceedings 21. Workshop Computational Intelligence,
#' p. 119-134. Universitaetsverlag Karlsruhe, 2011.
#'
#' @export
###################################################################################
spotEnsembleSingleEpsGreedy <- function(rawB,mergedB,design,spotConfig,fit=NULL){
spotWriteLines(spotConfig$io.verbosity,3,"spotEnsembleSingleEpsGreedy started");
models=spotConfig$seq.ensemble.predictors
########################################################
# BUILD AND PREDICT
########################################################
if(is.null(fit)){
##############################DEFAULT HANDLING###################
if(is.null(spotConfig$seq.ensemble.feed.func))
spotConfig$seq.ensemble.feed.func<-spotFeedback.reward.bern;
if(is.null(spotConfig$seq.greed.epsilon))
spotConfig$seq.greed.epsilon<-0.5;
#################################################################
K=length(models)
eps<-spotConfig$seq.greed.epsilon
if(is.null(spotConfig$seq.greed.r)){#first initialization step
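# seq.greed.r counts successes (positive rewards) per model, seq.greed.n how often each
# model has been chosen; their ratio below (meanReward) is the empirical success rate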
spotConfig$seq.greed.r<-rep(0,K)
spotConfig$seq.greed.n<-rep(0,K)
}
if(!is.null(spotConfig$seq.greed.imax)){ #calculate reward if a model was chosen in the last step
imax<-spotConfig$seq.greed.imax
reward<- spotConfig$seq.ensemble.feed.func(spotConfig,mergedB,rawB)
if(reward>0){#success
spotConfig$seq.greed.r[imax]=spotConfig$seq.greed.r[imax]+1
}
spotConfig$seq.greed.n[imax]<-spotConfig$seq.greed.n[imax]+1
}
meanReward<-spotConfig$seq.greed.r/spotConfig$seq.greed.n;
meanReward[which(is.na(meanReward))]<-0 #n is initialized with zero, so in the beginning mean values will be NaN
if(eps==1){ #if epsilon is one: completely random choice, (not excluding "best" model)
imax<-sample(1:K,1)
}else if(runif(1)<eps){
imax<-sample((1:K)[-imax],1)#exploration decision
}else{
imax<-sample(which(meanReward==max(meanReward)),1)#greedy decision, sample if there is more than one maximum in reward vector
}
tmpConf <- append(spotConfig$seq.ensemble.settings[[imax]],spotConfig)
conf <- eval(call(models[[imax]],rawB#evaluated chosen model
, mergedB
, design
, tmpConf))
spotConfig$seq.modelFit <- conf$seq.modelFit
spotConfig$seq.greed.imax<-imax#save index, to associate reward with model
spotConfig$seq.greed.history<-c(spotConfig$seq.greed.history,imax)
}else{
########################################################
# PREDICT ONLY
########################################################
jj<-spotConfig$seq.greed.imax
tmpConf <- append(spotConfig$seq.ensemble.settings[[jj]],spotConfig)
conf <- eval(call(models[[jj]], NULL
, NULL
, design
, tmpConf
, fit
));
}
spotWriteLines(spotConfig$io.verbosity,3,"spotEnsembleSingleEpsGreedy finished");
spotConfig$seq.largeDesignY<-conf$seq.largeDesignY#usual output
spotConfig
}
|
/SPOT/R/spotEnsembleSingleEpsGreedy.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 5,438 |
r
|
|
library(pheno2geno)
### Name: markerPlacementPlot
### Title: Plot number of markers selected.
### Aliases: markerPlacementPlot
### Keywords: manip
### ** Examples
data(testCross)
data(testPopulation)
markerPlacementPlot(testPopulation,placeUsing="qtl",cross=testCross)
markerPlacementPlot(testPopulation,placeUsing="correlation",cross=testCross)
|
/data/genthat_extracted_code/pheno2geno/examples/markerPlacementPlot.rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 357 |
r
|
|
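# distance to measure (DTM): for each point of Grid, take the k0 = ceiling(m0 * n)
# nearest points of X, average their squared distances and return the square root;
# knnx.dist() is assumed to come from the FNN package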
dtm<-
function(X, Grid, m0){
if (!is.numeric(X) && !is.data.frame(X)) stop("X should be a matrix of coordinates")
if (!is.numeric(Grid) && !is.data.frame(Grid)) stop("Grid should be a matrix of coordinates")
if (!is.vector(m0) || length(m0)!=1) stop("m0 should be a number between 0 and 1")
X=as.matrix(X)
Grid=as.matrix(Grid)
k0=ceiling(m0*dim(X)[1])
distances=knnx.dist(X, Grid, k=k0, algorithm=c("kd_tree"))
d2=apply(distances^2, 1, sum)
dOut=sqrt(d2/k0)
return(dOut)
}
|
/R/dtm.R
|
no_license
|
miriamperkins/TDA
|
R
| false | false | 491 |
r
|
|
library(metadynminer)
### Name: linesonfes
### Title: Plot lines for Nudged Elastic Band projected onto free energy
### surface
### Aliases: linesonfes
### ** Examples
tfes<-fes(acealanme, imax=5000)
minima<-fesminima(tfes)
nebAD<-neb(minima, min1="A", min2="D", nsteps=20)
plot(minima)
linesonfes(nebAD)
|
/data/genthat_extracted_code/metadynminer/examples/linesonfes.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 314 |
r
|
|
#clear current environment
rm(list = ls(all.names = TRUE))
#install packages as needed--uncomment the lines below if you do not have these packages installed
#install.packages("RColorBrewer")
#install.packages("ggplot2")
#install.packages("dplyr")
#load packages that have been installed (always run these commands)
library(RColorBrewer)
library(ggplot2)
library(dplyr)
library(readxl)
library(reshape2)
library(tidyr)
#Import Excel data using import dataset on the right side of the screen, copy the way to read it from the console and then rename the file to something simpler
X3T3_Blimp1_WT_c_myc <- read_excel("~/Desktop/3T3 Blimp1 WT +c-myc.xls")
View(X3T3_Blimp1_WT_c_myc)
qPCR_raw <- X3T3_Blimp1_WT_c_myc
#Check that the file is correct
print(qPCR_raw)
#Look at current column names
colnames(qPCR_raw)
#rename column names
colnames(qPCR_raw) <- c("Well", "Sample.Name", "Target.Name", "Task", "Reporter", "Quencher", "RQ", "RQ Min", "RQ Max", "Cт")
colnames(qPCR_raw)
#cut out the first 6 rows and the last 5 rows that have no data (random things given by the instrument output file)
qPCR_c = qPCR_raw [7: nrow(qPCR_raw),]
head(qPCR_c)
qPCR_c1 = qPCR_c [1: (nrow(qPCR_c)-5),]
#make a new table with Sample Name, RQ, RQ Min and RQ Max
colnames(qPCR_c1)
qPCR_c2 = qPCR_c1 [, 1:10]
qPCR_cut <- select(qPCR_c2, "Sample.Name", "Target.Name", 'RQ', "RQ Min", "RQ Max")
print(qPCR_cut)
#look at the class of each of the variables
sapply(qPCR_cut, class)
#Convert RQ to a numeric and Sample Name to a factor so that it can be later sorted if necessary
qPCR_cut$RQ <- as.numeric(as.character(qPCR_cut$RQ))
qPCR_cut$Sample.Name <- as.factor(qPCR_cut$Sample.Name)
#look to see that the variable types were changed
sapply(qPCR_cut, class)
#filter the file based on Target Gene
qPCR_cut %>% filter(grepl('mAp', Target.Name)) -> qPCR_map
qPCR_cut %>% filter(grepl('GAPDH', Target.Name)) -> qPCR_GAPDH
qPCR_cut %>% filter(grepl('Blimp1', Target.Name)) -> qPCR_Blimp
qPCR_cut %>% filter(grepl('c-myc', Target.Name)) -> qPCR_c_myc
#Group by the Sample Name and Target Gene and then take avg of RQ for each group (accounting for the drop of anything with NA--Reference Gene RQs)
qPCR_map %>%
group_by(Sample.Name, Target.Name) %>%
summarise(avgRQ = mean(RQ, na.rm = TRUE)) -> Averagemap
#qPCR_GAPDH %>%
#group_by(Sample.Name, Target.Name) %>%
#summarise(avgRQ = mean(RQ, na.rm = TRUE)) -> AverageGAPDH
qPCR_Blimp %>%
group_by(Sample.Name, Target.Name) %>%
summarise(avgRQ = mean(RQ, na.rm = TRUE)) -> AverageBlimp
qPCR_c_myc %>%
group_by(Sample.Name, Target.Name) %>%
summarise(avgRQ = mean(RQ, na.rm = TRUE)) -> Averagec_myc
#Merge back together
Merged = rbind(Averagemap,AverageBlimp, Averagec_myc)
#Put Averages and RQ Min and Max from original plot on one graph
C <- merge(Merged, qPCR_cut, by = "Sample.Name")
#Remove NAs (so that you have a cleaner table)
C_Num <- na.omit(C)
#Remove duplicates (so that the rows are unique, 1-15 with no repeated values; otherwise the graph gets distorted)
C_Num_2 <- distinct(C_Num)
#Convert columns to numeric for RQ Min and RQ Max
sapply(C_Num_2, class)
C_Num_2$`RQ Min` <- as.numeric(as.character(C_Num_2$`RQ Min`))
sapply(C_Num_2, class)
C_Num_2$`RQ Max` <- as.numeric(as.character(C_Num_2$`RQ Max`))
sapply(C_Num_2, class)
#Name Merged plot
qPCR_DCAA <- C_Num_2
colnames(qPCR_DCAA)
#Follow the Below to Make a Basic Plot with Sample name versus RQ where the fill is based on Sample Name
#Generate Basic Plot
Plot <- ggplot() + geom_col(data = qPCR_DCAA, aes(x = qPCR_DCAA$Target.Name.x, y = qPCR_DCAA$avgRQ, fill = qPCR_DCAA$Sample.Name)) + geom_bar(stat = "identity", width=.5, position = "dodge")
print(Plot)
#Add titles to axis (and format them to be readable) as well as add title to graph
Plot <- Plot + ggtitle("GLI1 Expression") +
xlab("Sample Name") + ylab("Fold Change") + theme_classic() + labs(fill = "Sample Name") + theme(axis.text.x = element_text(size=10, angle=90),axis.title=element_text(size=12,face="bold") + geom_bar(stat = "identity", width=.5, position = "dodge"))
print(Plot)
#Add the RQ Max and Min to the graph
Plot_SD <- Plot + geom_errorbar(data = qPCR_DCAA, mapping=aes(x=qPCR_DCAA$Sample.Name, ymin=qPCR_DCAA$`RQ Min`, ymax=qPCR_DCAA$`RQ Max`), width=0.2, size=0.5, color="black")
print(Plot_SD)
#Rearrange sample names if necessary
colnames(qPCR_DCAA)
qPCR_DCAA_2 <- select(qPCR_DCAA, "Sample.Name", "avgRQ")
qPCR_DCAA_2$Sample.Name <- factor(qPCR_DCAA$Sample.Name, levels = c("3T3 SS 0 mM DCAA", "3T3 SS 80 mM DCAA", "3T3 SS 100 mM DCAA", "3T3 SS 126 mM DCAA", "3T3 SS 160 mM DCAA", "3T3 SSH 0 mM DCAA", "3T3 SSH 80 mM DCAA", "3T3 SSH 100 mM DCAA", "3T3 SSH 126 mM DCAA", "3T3 SSH 160 mM DCAA", "BCC SS 0 mM DCAA", "BCC SS 80 mM DCAA", "BCC SS 100 mM DCAA", "BCC SS 126 mM DCAA", "BCC SS 160 mM DCAA"))
print(qPCR_DCAA_2)
#Follow this if you needed to rearrange the sample names as above
#Generate Basic Plot with rearrange
Plot <- ggplot() + geom_col(data = qPCR_DCAA_2, aes(x = qPCR_DCAA_2$Sample.Name, y = qPCR_DCAA_2$avgRQ, fill = qPCR_DCAA_2$Sample.Name))
print(Plot)
#Add titles to axis as well as graph (for rearrange)
Plot <- Plot + ggtitle("GLI1 Expression") +
xlab("Sample Name") + ylab("Fold Change") + theme_classic() + labs(fill = "Sample Name") + theme(axis.text.x = element_text(size=10, angle=90),axis.title=element_text(size=12,face="bold"))
print(Plot)
#Add the RQ Max and Min to the graph (for rearrange)
Plot_SD <- Plot + geom_errorbar(data = qPCR_DCAA, mapping=aes(x=qPCR_DCAA$Sample.Name, ymin=qPCR_DCAA$`RQ Min`, ymax=qPCR_DCAA$`RQ Max`), width=0.2, size=0.5, color="black")
print(Plot_SD)
#Follow this if you need to filter by Cell Type and want to color your plot by that
#Generate new column in table based on cell type and color by it
qPCR_DCAA_2$CellType <- NA
print(qPCR_DCAA_2)
sapply(qPCR_DCAA_2, class)
qPCR_DCAA_2$CellType <- as.numeric(as.character(qPCR_DCAA_2$CellType))
sapply(qPCR_DCAA_2, class)
qPCR_DCAA_2$CellType <- as.numeric(grepl("3T3", qPCR_DCAA_2$Sample.Name))
print(qPCR_DCAA_2)
#Generate Basic Plot with rearrange and color
Plot <- ggplot() + geom_col(data = qPCR_DCAA_2, aes(x = qPCR_DCAA_2$Sample.Name, y = qPCR_DCAA_2$avgRQ, fill = as.factor(qPCR_DCAA_2$CellType)))
print(Plot)
#Add titles to axis, remove legend, as well as title graph (for rearrange and color)
Plot <- Plot + ggtitle("GLI1 Expression") +
xlab("Sample Name") + ylab("Fold Change") + theme_classic() + labs(fill = "Sample Name") + theme(axis.text.x = element_text(size=10, angle=90),axis.title=element_text(size=12,face="bold"), legend.position = "none")
print(Plot)
#Add the RQ Max and Min to the graph (for rearrange and color)
Plot_SD <- Plot + geom_errorbar(data = qPCR_DCAA, mapping=aes(x=qPCR_DCAA$Sample.Name, ymin=qPCR_DCAA$`RQ Min`, ymax=qPCR_DCAA$`RQ Max`), width=0.2, size=0.5, color="black")
print(Plot_SD)
|
/Anna qPCR Script.R
|
no_license
|
phalas25/ph-future-phd
|
R
| false | false | 6,864 |
r
|
#clear current environment
rm(list = ls(all.names = TRUE))
#install packages as needed--unmarkdown if you do not have these on the console
#install.packages("RColorBrewer")
#install.packages("ggplot2")
#install.packages("dplyr")
#load packages that have been installed (always run these commands)
library(RColorBrewer)
library(ggplot2)
library(dplyr)
library(readxl)
library(reshape2)
library(tidyr)
#Import Excel data using import dataset on the right side of the screen, copy the way to read it from the console and then rename the file to something simpler
X3T3_Blimp1_WT_c_myc <- read_excel("~/Desktop/3T3 Blimp1 WT +c-myc.xls")
View(X3T3_Blimp1_WT_c_myc)
qPCR_raw <- X3T3_Blimp1_WT_c_myc
#Check that the file is correct
print(qPCR_raw)
#Look at current column names
colnames(qPCR_raw)
#rename column names
colnames(qPCR_raw) <- c("Well", "Sample.Name", "Target.Name", "Task", "Reporter", "Quencher", "RQ", "RQ Min", "RQ Max", "Cт")
colnames(qPCR_raw)
#cut out the first 6 rows and the last 5 rows that have no data (random things given by the instrument output file)
qPCR_c = qPCR_raw [7: nrow(qPCR_raw),]
head(qPCR_c)
qPCR_c1 = qPCR_c [1: (nrow(qPCR_c)-5),]
#make a new table with Sample Name, RQ, RQ Min and RQ Max
colnames(qPCR_c1)
qPCR_c2 = qPCR_c1 [, 1:10]
qPCR_cut <- select(qPCR_c2, "Sample.Name", "Target.Name", 'RQ', "RQ Min", "RQ Max")
print(qPCR_cut)
#look at the class of each of the variables
sapply(qPCR_cut, class)
#Convert RQ to a numeric and Sample Name to a factor so that it can be later sorted if necessary
qPCR_cut$RQ <- as.numeric(as.character(qPCR_cut$RQ))
qPCR_cut$Sample.Name <- as.factor(qPCR_cut$Sample.Name)
#look to see that the variable types were changed
sapply(qPCR_cut, class)
#filter the file based on Target Gene
qPCR_cut %>% filter(grepl('mAp', Target.Name)) -> qPCR_map
qPCR_cut %>% filter(grepl('GAPDH', Target.Name)) -> qPCR_GAPDH
qPCR_cut %>% filter(grepl('Blimp1', Target.Name)) -> qPCR_Blimp
qPCR_cut %>% filter(grepl('c-myc', Target.Name)) -> qPCR_c_myc
#Group by the Sample Name and Target Gene and then take avg of RQ for each group (accounting for the drop of anything with NA--Reference Gene RQs)
qPCR_map %>%
group_by(Sample.Name, Target.Name) %>%
summarise(avgRQ = mean(RQ, na.rm = TRUE)) -> Averagemap
#qPCR_GAPDH %>%
#group_by(Sample.Name, Target.Name) %>%
#summarise(avgRQ = mean(RQ, na.rm = TRUE)) -> AverageGAPDH
qPCR_Blimp %>%
group_by(Sample.Name, Target.Name) %>%
summarise(avgRQ = mean(RQ, na.rm = TRUE)) -> AverageBlimp
qPCR_c_myc %>%
group_by(Sample.Name, Target.Name) %>%
summarise(avgRQ = mean(RQ, na.rm = TRUE)) -> Averagec_myc
#Merge back together
Merged = rbind(Averagemap,AverageBlimp, Averagec_myc)
#Put Averages and RQ Min and Max from original plot on one graph
C <- merge(Merged, qPCR_cut, by = "Sample.Name")
#Remove NAs (so that you have a cleaner table)
C_Num <- na.omit(C)
#Remove duplicates (because when you graph things get weird so you need it to be 1-15 where theres no repeat values)
C_Num_2 <- distinct(C_Num)
#Convert columns to numeric for RQ Min and RQ Max
sapply(C_Num_2, class)
C_Num_2$`RQ Min` <- as.numeric(as.character(C_Num_2$`RQ Min`))
sapply(C_Num_2, class)
C_Num_2$`RQ Max` <- as.numeric(as.character(C_Num_2$`RQ Max`))
sapply(C_Num_2, class)
#Name Merged plot
qPCR_DCAA <- C_Num_2
colnames(qPCR_DCAA)
#Follow the Below to Make a Basic Plot with Sample name versus RQ where the fill is based on Sample Name
#Generate Basic Plot
Plot <- ggplot() + geom_col(data = qPCR_DCAA, aes(x = qPCR_DCAA$Target.Name.x, y = qPCR_DCAA$avgRQ, fill = qPCR_DCAA$Sample.Name)) + geom_bar(stat = "identity", width=.5, position = "dodge")
print(Plot)
#Add titles to axis (and format them to be readable) as well as add title to graph
Plot <- Plot + ggtitle("GLI1 Expression") +
xlab("Sample Name") + ylab("Fold Change") + theme_classic() + labs(fill = "Sample Name") + theme(axis.text.x = element_text(size=10, angle=90),axis.title=element_text(size=12,face="bold") + geom_bar(stat = "identity", width=.5, position = "dodge"))
print(Plot)
#Add the RQ Max and Min to the graph
Plot_SD <- Plot + geom_errorbar(data = qPCR_DCAA, mapping=aes(x=qPCR_DCAA$Sample.Name, ymin=qPCR_DCAA$`RQ Min`, ymax=qPCR_DCAA$`RQ Max`), width=0.2, size=0.5, color="black")
print(Plot_SD)
#Rearrange sample names if necessary
colnames(qPCR_DCAA)
qPCR_DCAA_2 <- select(qPCR_DCAA, "Sample.Name", "avgRQ")
qPCR_DCAA_2$Sample.Name <- factor(qPCR_DCAA$Sample.Name, levels = c("3T3 SS 0 mM DCAA", "3T3 SS 80 mM DCAA", "3T3 SS 100 mM DCAA", "3T3 SS 126 mM DCAA", "3T3 SS 160 mM DCAA", "3T3 SSH 0 mM DCAA", "3T3 SSH 80 mM DCAA", "3T3 SSH 100 mM DCAA", "3T3 SSH 126 mM DCAA", "3T3 SSH 160 mM DCAA", "BCC SS 0 mM DCAA", "BCC SS 80 mM DCAA", "BCC SS 100 mM DCAA", "BCC SS 126 mM DCAA", "BCC SS 160 mM DCAA"))
print(qPCR_DCAA_2)
#Follow this if you needed to rearrange the sample names as above
#Generate Basic Plot with rearrange
Plot <- ggplot() + geom_col(data = qPCR_DCAA_2, aes(x = Sample.Name, y = avgRQ, fill = Sample.Name))
print(Plot)
#Add titles to axis as well as graph (for rearrange)
Plot <- Plot + ggtitle("GLI1 Expression") +
xlab("Sample Name") + ylab("Fold Change") + theme_classic() + labs(fill = "Sample Name") + theme(axis.text.x = element_text(size=10, angle=90),axis.title=element_text(size=12,face="bold"))
print(Plot)
#Add the RQ Max and Min to the graph (for rearrange)
Plot_SD <- Plot + geom_errorbar(data = qPCR_DCAA, mapping=aes(x=qPCR_DCAA$Sample.Name, ymin=qPCR_DCAA$`RQ Min`, ymax=qPCR_DCAA$`RQ Max`), width=0.2, size=0.5, color="black")
print(Plot_SD)
#Follow this if you need to filter by Cell Type and want to color your plot by that
#Generate new column in table based on cell type and color by it
qPCR_DCAA_2$CellType <- NA
print(qPCR_DCAA_2)
sapply(qPCR_DCAA_2, class)
qPCR_DCAA_2$CellType <- as.numeric(as.character(qPCR_DCAA_2$CellType))
sapply(qPCR_DCAA_2, class)
qPCR_DCAA_2$CellType <- as.numeric(grepl("3T3", qPCR_DCAA_2$Sample.Name))
print(qPCR_DCAA_2)
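#(Illustrative alternative, not used here: label the cell type as text in one step)
#qPCR_DCAA_2$CellType <- ifelse(grepl("3T3", qPCR_DCAA_2$Sample.Name), "3T3", "BCC")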
#Generate Basic Plot with rearrange and color
Plot <- ggplot() + geom_col(data = qPCR_DCAA_2, aes(x = Sample.Name, y = avgRQ, fill = as.factor(CellType)))
print(Plot)
#Add titles to axis, remove legend, as well as title graph (for rearrange and color)
Plot <- Plot + ggtitle("GLI1 Expression") +
xlab("Sample Name") + ylab("Fold Change") + theme_classic() + labs(fill = "Sample Name") + theme(axis.text.x = element_text(size=10, angle=90),axis.title=element_text(size=12,face="bold"), legend.position = "none")
print(Plot)
#Add the RQ Max and Min to the graph (for rearrange and color)
Plot_SD <- Plot + geom_errorbar(data = qPCR_DCAA, mapping=aes(x=qPCR_DCAA$Sample.Name, ymin=qPCR_DCAA$`RQ Min`, ymax=qPCR_DCAA$`RQ Max`), width=0.2, size=0.5, color="black")
print(Plot_SD)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LinearTSVM.R
\name{LinearTSVMSSLR}
\alias{LinearTSVMSSLR}
\title{General Interface for LinearTSVM model}
\usage{
LinearTSVMSSLR(
C = 1,
Cstar = 0.1,
s = 0,
x_center = FALSE,
scale = FALSE,
eps = 1e-06,
verbose = FALSE,
init = NULL
)
}
\arguments{
\item{C}{Cost variable}
\item{Cstar}{numeric; Cost parameter of the unlabeled objects}
\item{s}{numeric; parameter controlling the loss function of the unlabeled objects}
\item{x_center}{logical; Should the features be centered?}
\item{scale}{Whether a z-transform should be applied (default: FALSE)}
\item{eps}{Small value to ensure positive definiteness of the matrix in QP formulation}
\item{verbose}{logical; Controls the verbosity of the output}
\item{init}{numeric; Initial classifier parameters to start the convex concave procedure}
}
\description{
Model from the RSSL package.
Implementation of the Linear Support Vector Classifier. Can be solved in the Dual formulation, which is equivalent to \code{\link{SVM}} or the Primal formulation.
}
\examples{
library(tidyverse)
library(caret)
library(tidymodels)
library(SSLR)
data(breast)
set.seed(1)
train.index <- createDataPartition(breast$Class, p = .7, list = FALSE)
train <- breast[ train.index,]
test <- breast[-train.index,]
cls <- which(colnames(breast) == "Class")
#\% LABELED
labeled.index <- createDataPartition(breast$Class, p = .2, list = FALSE)
train[-labeled.index,cls] <- NA
m <- LinearTSVMSSLR() \%>\% fit(Class ~ ., data = train)
#Accessing model from RSSL
model <- m$model
}
|
/man/LinearTSVMSSLR.Rd
|
no_license
|
cran/SSLR
|
R
| false | true | 1,664 |
rd
|
################
url <- "https://archive.ics.uci.edu/ml/machine-learning-databases/credit-screening/crx.data"
crx <- read.table( file=url, header=FALSE, sep="," )
##write.table( crx, "crx.dat", quote=FALSE, sep="," )
head( crx, 6 )
crx <- crx[ sample( nrow( crx ) ), ]
X <- crx[,1:15]
y <- crx[,16]
trainX <- X[1:600,]
trainy <- y[1:600]
testX <- X[601:690,]
testy <- y[601:690]
##install.packages("C50")
library(C50)
model <- C50::C5.0( trainX, trainy )
summary(model)
plot(model)
model <- C50::C5.0( trainX, trainy, trials=50 )
p <- predict( model, testX, type="class" )
sum( p == testy ) / length( p )
p <- predict( model, testX, type="prob" )
#####
##install.packages("rpart")
library(rpart)
#######
df=kyphosis
tree=rpart(Kyphosis~.,method = 'class',data=kyphosis)
printcp(tree)
plot(tree,uniform = T,main="my tree")
text(tree,use.n = T,all = T)
#########
##install.packages("rpart.plot")
library(rpart.plot)
prp(tree)
######
##install.packages("randomForest")
library(randomForest)
######
rf.model=randomForest(Kyphosis~.,data=kyphosis,ntree=500,importance = T)
print(rf.model)
rf.model$confusion
rf.model$importance
#install.packages("ISLR")
#library(ISLR)
#df=College
#####
library(caTools)
sample=sample.split(df$Kyphosis,SplitRatio = 0.70)
train=subset(df,sample==T)
test=subset(df,sample==F)
tree=rpart(Kyphosis~.,method="class",data=train)
tree.predict=predict(tree,test)
head(tree.predict)
tree.predict=as.data.frame(tree.predict)
#########
ranTree=randomForest(Kyphosis~.,data=train,ntree=500)
ranTree.predict=predict(ranTree,test,type="prob")
head(ranTree.predict)
ranTree.predict=as.data.frame((ranTree.predict))
ranTree.predict
#######
joiner <- function(x){
if (x>=0.5){
return('Present')
}else{
return("Absent")
}
}
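# A vectorized equivalent of the joiner above (sketch, assuming a 'present' probability column):
# ranTree.predict$Kyphosis_State <- ifelse(ranTree.predict$present >= 0.5, 'Present', 'Absent')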
ranTree.predict$Kyphosis_State <- sapply(ranTree.predict$present,joiner)
ranTree.predict
table(ranTree.predict$Kyphosis_State,test$Kyphosis)
|
/AEGIS/Machine_Learning/Decision Trees and Random Forests.R
|
no_license
|
vipin752/datascience-machineLearning
|
R
| false | false | 1,895 |
r
|
##' @include AllClasses.R
##' @include AllGenerics.R
setMethod('initialize', 'GLMlike', function(.Object, ...){
.Object <- callNextMethod()
model.matrix(.Object) <- model.matrix(.Object@formula, .Object@design)
.Object
})
## This is a pinch point (up to 10% of computation time can be spent here)
#' @describeIn GLMlike return the variance/covariance of component \code{which}
#' @param object \code{GLMlike}
#' @param which \code{character}, one of 'C', 'D'.
#' @param ... ignored
#' @return covariance matrix
#' @export
setMethod('vcov', signature=c(object='GLMlike'), function(object, which, ...){
stopifnot(which %in% c('C', 'D'))
vc <- object@defaultVcov
if(which=='C' & object@fitted['C']){
vc2 <- stats::summary.glm(object@fitC, dispersion=object@fitC$dispersion)$cov.scaled
} else if(which=='D' & object@fitted['D']){
vc2 <- stats::summary.glm(object@fitD)$cov.scaled
} else{
vc2 <- numeric()
}
ok <- colnames(vc2)
vc[ok,ok] <- vc2
vc
})
## Degree of freedom calculations
.glmDOF <- function(object, pos){
npos <- sum(pos)
## bayesglm doesn't correctly set the residual DOF, and this won't hurt for regular glm
object@fitC$df.residual <- max(npos - object@fitC$rank, 0)
## conservative estimate of residual df
object@fitD$df.residual <- min(npos, length(pos)-npos) - object@fitD$rank
object@fitted <- c(C=object@fitC$converged &
object@fitC$df.residual>0, #kill unconverged or empty
D=object@fitD$converged)
object
}
## dispersion calculations for glm-like fitters
.dispersion <- function(object){
object@fitC$dispersionMLE <- object@fitC$dispersion <- NA
if(object@fitted['C']){
df.total <- object@fitC$df.null+1
df.residual <- object@fitC$df.residual
## Save unshrunken
dMLEns <- object@fitC$deviance/df.total
dns <- object@fitC$deviance/df.residual
object@fitC$dispersionMLENoShrink <- dMLEns
object@fitC$dispersionNoShrink <- dns
## Now shrink default component
object@fitC$dispersionMLE <- (dMLEns*df.total + object@priorVar*object@priorDOF)/(df.total+object@priorDOF)
object@fitC$dispersion <- (dns*df.residual+object@priorVar*object@priorDOF)/(df.residual+object@priorDOF)
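    ## i.e. a degrees-of-freedom-weighted average of the empirical estimate and priorVar (empirical-Bayes-style shrinkage)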
}
object
}
.residualsD <- function(object){
if(object@fitted['D']){
object@fitD$residuals <- (object@response>0)*1 - object@fitD$fitted
} else{
object@fitD$residuals <- NA
}
object
}
if(getRversion() >= "2.15.1") globalVariables(c('pos'))
setMethod('fit', signature=c(object='GLMlike', response='missing'), function(object, response, silent=TRUE, ...){
prefit <- .fit(object)
if(!prefit){
if(!silent) warning('No positive observations')
return(object)
}
fitArgsC <- object@fitArgsC
fitArgsD <- object@fitArgsD
object@fitC <- do.call(glm.fit, c(list(x=object@modelMatrix[pos,,drop=FALSE], y=object@response[pos], weights=object@weightFun(object@response[pos])), fitArgsC))
object@fitD <- do.call(glm.fit, c(list(x=object@modelMatrix, y=object@weightFun(object@response), family=binomial()), fitArgsD))
## needed so that residuals dispatches more correctly
class(object@fitD) <- c('glm', class(object@fitD))
## first test for positive continuous DOF
## cheap additional test for convergence
## object@fitted['D'] <- object@fitted['D'] & (object@fitD$null.deviance >= object@fitD$deviance)
object <- .glmDOF(object, pos)
## don't return estimates that would be at the boundary
## object@fitted <- object@fitted & c(C=TRUE, D=object@fitD$df.residual>0)
## update dispersion, possibly shrinking by prior
object <- .dispersion(object)
if(!silent & !all(object@fitted)) warning('At least one component failed to converge')
object
})
##' @export
##' @importMethodsFrom stats4 logLik
##' @describeIn LMlike return the log-likelihood of a fitted model
setMethod('logLik', signature=c(object='GLMlike'), function(object){
L <- c(C=0, D=0)
if(object@fitted['C']){
s2 <- object@fitC$dispersionMLE
dev <- object@fitC$deviance
N <- (object@fitC$df.null+1)
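        ## Gaussian log-likelihood evaluated at the MLE of the variance: -N/2 * (log(2*pi*sigma^2) + 1)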
L['C'] <- -.5*N*(log(s2*2*pi) +1)
}
if(object@fitted['D']){
dev <- object@fitD$deviance
L['D'] <- -dev/2
}
return(L)
})
setMethod('dof', signature=c(object='GLMlike'), function(object){
c(C=length(coef(object, 'C', singular=FALSE)), D=length(coef(object, 'D', singular=FALSE)))
})
setMethod('residuals', signature=c(object='GLMlike'), function(object, type='response', which, ...){
which <- match.arg(which, c('Discrete', 'Continuous', 'Marginal'))
if(type != 'response') stop("Only type='response' residuals implemented for GLMlike")
PD <- object@fitD$fitted
RD <- object@fitD$residuals <- (object@response>0)*1 -PD
    ## May contain NAs for non-estimable coefficients, set to zero
coefC <- coef(object, which='C', singular=TRUE)
coefC[is.na(coefC)] <- 0
PC <- object@modelMatrix %*% coefC
RC <- (object@response - PC)[object@response>0]
if(which=='Discrete') return(RD)
if(which=='Continuous') return(RC)
if(which=='Marginal'){
if(type != 'response') warning("Marginal residuals probably don't make sense unless predicting on the response scale")
return(object@response-PC*PD)
}
})
## make a row matrix
rowm <- function(C, D){
x <- c(C=NA, D=NA)
try({if(is.null(C) | missing(C))
C <- NA
if(is.null(D) | missing(D))
D <- NA
x <- c(C=C, D=D)
}, silent=TRUE)
## dim(x) <- c(1, length(x))
## colnames(x) <- c('C', 'D')
x
}
torowm <- function(x){
## dim(x) <- c(1, length(x))
## colnames(x) <- c('C', 'D')
x
}
setMethod('summarize', signature=c(object='GLMlike'), function(object, ...){
coefC <- coef(object, which='C')
coefD <- coef(object, which='D')
## make sure covariance matrices are constant size
## if it's not fitted, then we'll throw an error here
vcC <- vcov(object, 'C')
vcD <- vcov(object, 'D')
list(coefC=coefC, vcovC=vcC,
deviance=rowm(C=object@fitC$deviance, D=object@fitD$deviance),
df.null=rowm(C=object@fitC$df.null, D=object@fitD$df.null),
df.resid=rowm(C=object@fitC$df.residual, D=object@fitD$df.residual),
dispersion=rowm(C=object@fitC$dispersionMLE, D=object@fitD$dispersion),
dispersionNoshrink=rowm(C=object@fitC$dispersionMLENoShrink, D=object@fitD$dispersion),
loglik=torowm(logLik(object)),
coefD=coefD, vcovD=vcD, converged=torowm(object@fitted))
})
|
/R/lmWrapper-glm.R
|
no_license
|
Elena-TT/MAST
|
R
| false | false | 6,750 |
r
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 1.05573440322867e-218, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_beta/AFL_communities_individual_based_sampling_beta/communities_individual_based_sampling_beta_valgrind_files/1615827151-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false | false | 361 |
r
|
## runs data manipulation to create a tidy dataset from raw data
library(plyr); library(dplyr); library(reshape2)  # melt() and dcast() used in step 5 come from reshape2
##Step 1: Merge the training and the test sets to create one data set.
## read in training measurement data
x_train_data <- read.table("./train/X_train.txt")
## read in labels for the measurement data
features <- read.table("features.txt", col.names = c("featureID","featureDesc"))
## Assign labels to data columns
names(x_train_data) <- features$featureDesc
## read in training activity id
## (values are 1-6. ex. 1 = WALKING, 2 = WALKING_UPSTAIRS, etc.)
y_train_activity_id <- read.table("./train/y_train.txt", col.names = "activityID")
## read in training people (aka subjects)
subject_train <- read.table("./train/subject_train.txt", col.names = "subjectID")
## Combine train subject, activity and data
train1 <- cbind(subject_train,y_train_activity_id,x_train_data)
## read in test measurement data
x_test_data <- read.table("./test/X_test.txt")
## Assign labels to data columns
names(x_test_data) <- features$featureDesc
## read in training activity id
## (values are 1-6. ex. 1 = WALKING, 2 = WALKING_UPSTAIRS, etc.)
y_test_activity_id <- read.table("./test/y_test.txt", col.names = "activityID")
## read in test people (aka subjects)
subject_test <- read.table("./test/subject_test.txt", col.names = "subjectID")
## Combine test subject, activity and data
test1 <- cbind(subject_test,y_test_activity_id,x_test_data)
## Combine test and training data
combined_test_train <- rbind(train1,test1)
## Step 2: Extract only the measurements on the mean and standard deviation for each measurement.
## get only columns for mean and standard deviation
combined_smaller <- combined_test_train[,c(1,2,grep("mean|std",names(combined_test_train)))]
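## note: the pattern "mean|std" also keeps the meanFreq() variables; a pattern like "mean\\(\\)|std\\(\\)" could be used if those should be excluded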
## Step 3: Use descriptive activity names to name the activities in the data set.
activities <- read.table(file = "activity_labels.txt", sep = " ", col.names = c("activityID", "activitydesc"))
combined_smaller$activityID <- factor(x = combined_smaller$activityID, levels = c(1,2,3,4,5,6), labels = activities$activitydesc)
colnames(combined_smaller)[2] <- "activitydesc"
## Step 4: Clean up the variable names for duplicate "body" text
names(combined_smaller) <- gsub("BodyBody","Body",names(combined_smaller))
## Step 5: From the data set in step 4, creates a second, independent tidy data set with
## the average of each variable for each activity and each subject.
## Created melted data frame
step5DF <- melt(combined_smaller, id=c("subjectID","activitydesc"), measure.vars = colnames(combined_smaller)[3:81])
## summarize using the mean
step5DF_Final <- dcast(step5DF, subjectID + activitydesc ~ variable,mean)
write.table(step5DF_Final, file = "step5DS.txt", row.names = FALSE)
step5DF_Final
|
/run_analysis.R
|
no_license
|
jg-jhu-ds/GettingAndCleaningDataProject
|
R
| false | false | 2,753 |
r
|
edgelist <- as.matrix(read.table("./cit-HepPh.txt", skip=4))
nodetime <- as.matrix(read.table("./cit-HepPh-dates.txt",skip=1))
nodetime[,2] = as.Date(nodetime[,2])
nodetime = matrix(as.numeric(nodetime),nc=2)
nodetime[,2] = nodetime[,2]-nodetime[1,2]
nodetime = nodetime[order(nodetime[,2]),]
nodetime[,2] = nodetime[,2]+13
nodetime[1:20,2]
tmp = nodetime[,2]
ntmp = length(tmp[tmp<(as.Date("1992-05-11")-as.Date("1992-02-11"))])
nodetmp = nodetime[,1][tmp<(as.Date("1992-05-11")-as.Date("1992-02-11"))]
ntmp
dim(nodetime)
findix<- function(vec1,vec2){
tmp = rep(FALSE,length(vec2))
for (x in vec1){
tmp = tmp|(vec2==x)
}
return(tmp)
}
Daycite = matrix(rep(0,ntmp*( max(nodetime[,2])+1) ),nc = max(nodetime[,2])+1)
i=1
nodetmp[i]
for(i in 1:ntmp){
citer_nodeid = as.matrix(edgelist[edgelist[,2]==nodetmp[i],1])
if(is.null(dim(citer_nodeid))){
next
}
cite_time = nodetime[,2][findix(citer_nodeid,nodetime[,1])]
tmp = table(cite_time)
for (j in unique(cite_time)){
Daycite[i,j+1] = tmp[as.character(j)]
}
}
max(Daycite)
plot(Daycite[20,])
################## Year ###################
tmp = nodetime[,2]
ntmp = length(tmp[tmp<(as.Date("1993-02-11")-as.Date("1992-02-11"))])
nodetmp = nodetime[,1][tmp<(as.Date("1993-02-11")-as.Date("1992-02-11"))]
Daycite = matrix(rep(0,ntmp*( max(nodetime[,2])+1) ),nc = max(nodetime[,2])+1)
i=1
nodetmp[i]
for(i in 1:ntmp){
citer_nodeid = as.matrix(edgelist[edgelist[,2]==nodetmp[i],1])
if(is.null(dim(citer_nodeid))){
next
}
cite_time = nodetime[,2][apply(citer_nodeid,1,match,nodetime[,1])]
tmp = table(cite_time)
for (j in unique(cite_time)){
Daycite[i,j+1] = tmp[as.character(j)]
}
}
Yearcite = matrix(rep(0,ntmp*10),nc=10)
timepoint = c()
for (i in 1992:2002){
timepoint=append( timepoint,1+as.numeric( as.Date( paste(as.character(i),"-02-11",sep='') )-as.Date("1992-02-11") ) )
# if (i <2002){timepoint=append(timepoint,as.numeric(as.Date( paste(as.character(i),"-08-24",sep='') )-as.Date("1992-02-24")) ) }
}
for (i in 1:ntmp){
for (j in 1:10){
Yearcite[i,j] = sum(Daycite[i,timepoint[j]:timepoint[j+1]])
}
}
plot( (Yearcite[1,]),type='o',ylim=c(0,max((Yearcite))) )
for (i in 1:ntmp){
if(max(Yearcite[i,])>30){
points( (Yearcite[i,]),type='o',col =colors()[sample(1:length(colors()),1)] )
}
}
title(main = "Year. threshold: max>30")
################## highly cited paper ###################
citenum = nodetime
for (i in 1:(dim(citenum)[1])){
citenum[i,2] = sum(edgelist[,2]==citenum[i,1])
}
citenum = citenum[order(citenum[,2],decreasing = TRUE),]
nodehigh = citenum[1:round(0.01*dim(citenum)[1]),]
indhigh = order(citenum[,2])[round(0.01*dim(citenum)[1])]
length(edgelist[,2][edgelist[,2]==9804398])
timepoint = c()
for (i in 1992:2002){
timepoint=append( timepoint,1+as.numeric( as.Date( paste(as.character(i),"-02-11",sep='') )-as.Date("1992-02-11") ) )
# if (i <2002){timepoint=append(timepoint,as.numeric(as.Date( paste(as.character(i),"-08-24",sep='') )-as.Date("1992-02-24")) ) }
}
Yearcite_high = matrix(rep(0,dim(nodehigh)[1]*10),nc=10)
# citorlist[77]  # debugging leftover; citorlist is not defined until the loop below
for (i in 1:(dim(nodehigh)[1])){
citorlist = as.matrix(edgelist[,1][edgelist[,2]==nodehigh[i,1]])
cite_time = na.omit(apply(citorlist,1,match,nodetime[,1]))
for (j in 1:10){
Yearcite_high[i,j] = sum((nodetime[cite_time,2]<timepoint[j+1])&(nodetime[cite_time,2]>timepoint[j]))
}
}
(nodetime[cite_time,2])
match(212091,nodetime)
nodehigh_time = nodetime[,2][apply(matrix(nodehigh[,1]),1,match,nodetime[,1])]
nodehigh_time
year = max((1:11)[(timepoint<=nodehigh_time[1])])
plot( (Yearcite_high[1,][year:10]),type='o',xlim = c(1,10),ylim=c(0,max((Yearcite_high)) ))
# for (i in 1:(dim(Yearcite_high)[1])){
for (i in 2:30){
year = max((1:11)[(timepoint<=nodehigh_time[i])])
points(Yearcite_high[i,][Yearcite_high[i,]>0],type='o',col =colors()[sample(1:length(colors()),1)])
}
###################### Eq 3 ########################
library("car")
library("numDeriv")
library("gsl")
cumu_cite <- function(t,m,lambda,mu,sigma){
x = (log(t)-mu)/sigma
Phi = pnorm(x)
return( m*(exp(lambda*Phi)-1) )
}
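# Quick sanity check of the model with illustrative (not fitted) parameter values:
# cumu_cite(t = 365*(1:10), m = 30, lambda = 2, mu = 7, sigma = 1)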
m=30
num=1
result = nls( citeyear~m*(exp(lambda*pnorm( (log(timeyear)-mu)/sigma ))-1),
start=list(lambda = 2, mu = 7, sigma=1),
data = data.frame(timeyear = timepoint[2:11],citeyear = cumsum(Yearcite[num,]) ) )
result
data.frame(timeyear = timepoint[2:11],citeyear = cumsum(Yearcite[num,]) )
# time = (1:75000)*0.1
# lambda = 2.2431
# mu = 7.5160
# sigma = 0.7254
# par(mfrow = c(1,1))
# plot(time,cumu_cite(time,m,lambda,mu,sigma),type = "l",lwd=1)
num=3
result = nls( citeyear~m*(exp(lambda*pnorm( (log(timeyear)-mu)/sigma ))-1),
start=list(lambda = 1, mu =7, sigma=2),
data = data.frame(timeyear = timepoint[2:11],citeyear = cumsum(Yearcite[num,]) ) )
result
plot(seq(1, 7500, by=0.1),predict(result, data.frame(timeyear=seq(1,7500, by=0.1))),type = 'l',lwd=1)
points(citeyear~timeyear,data = data.frame(timeyear = timepoint[2:11],citeyear = cumsum(Yearcite[num,]) ) )
title(main = "first_year: No.6")
num=3
citeyear = cumsum(Yearcite[num,])
# The loss function
sum_square <- function(para){
lambda=para[1]
mu=para[2]
sigma=para[3]
t = timepoint[2:11]
x = (log(t)-mu)/sigma
Phi = pnorm(x)
return( sum( (30*(exp(lambda*Phi)-1)-citeyear)^2 ) )
}
# The numerical derivatives of the loss function
deriv_sq<- function( para ){
grad(sum_square, para, method="Richardson", side=NULL, method.args=list())
}
sum_square(c(2.4424, 7.1679, 0.5435 ))
start=list(lambda = 1, mu =4, sigma=2)
state = multimin(as.numeric(start), sum_square, df=deriv_sq, method='conjugate-fr')
state
#ny: number of papers/curves
#using 3 starting values is a naive method of selecting appropriate starting values
#after 10 iterations, the starting values with the smallest loss will be selected
ny = 500
paralist_first = matrix(rep(0,ny*3),nc=3)
abnorm = c()
for (i in 1:ny){
num = i
citeyear = cumsum(Yearcite[num,])
# mut = log(365*9)-0.4*qnorm(1/2*log( 1/10*sum(1+citeyear/30) ))
start1=list(lambda = 2, mu =5, sigma=1)
start2=list(lambda = 2, mu =7, sigma=1)
start3=list(lambda = 2, mu =10, sigma=1)
r = 10
state1 = multimin.init(as.numeric(start1), sum_square, df=deriv_sq, method='bfgs')
for (j in 1:r){
state1 = multimin.iterate(state1)
}
state2 = multimin.init(as.numeric(start2), sum_square, df=deriv_sq, method='bfgs')
for (j in 1:r){
state2 = multimin.iterate(state2)
}
state3 = multimin.init(as.numeric(start3), sum_square, df=deriv_sq, method='bfgs')
for (j in 1:r){
state3 = multimin.iterate(state3)
}
j = order(c(state1$f,state2$f,state3$f))[1]
# j = order(c(norm( as.matrix(state1$df) ), norm( as.matrix(state2$df) ),norm( as.matrix(state3$df) ) ))[1]
if (j==1){
start = start1
}
if (j==2){
start = start2
}
if (j==3){
start = start3
}
stol = 5*10^(-3)
df = 1
k = 0
state = multimin.init(as.numeric(start), sum_square, df=deriv_sq, method='bfgs')
while(df>=stol){
state = multimin.iterate(state)
df = norm( as.matrix(state$df) )
k = k+1
if(k>10000 & df >0.1){
abnorm = append(abnorm,i)
break
}
}
paralist_first[i,] = state$x
}
a = (1:500)[-abnorm]
# lambda,mu,sigma
plot(1:length(a),paralist_first[-abnorm,1],ylim = c(0,5))
plot(1:length(a),paralist_first[-abnorm,3])
plot(paralist_first[-abnorm,1],paralist_first[-abnorm,3],xlim=c(0,4),ylim=c(0,3))
i=4
# example fitted values from one run: lambda = 2.4424, mu = 7.1679, sigma = 0.5435
lambda=state$x[1]
mu=state$x[2]
sigma=state$x[3]
m=30
time = as.numeric(timepoint[2:11])
plot(time,cumu_cite(time,m,lambda,mu,sigma),type = "l",lwd=1)
points(citeyear~timeyear,data = data.frame(timeyear = timepoint[2:11],citeyear = cumsum(Yearcite[num,]) ) )
num=22
plot(citeyear~timeyear,data = data.frame(timeyear = timepoint[2:11],citeyear = cumsum(Yearcite[num,]) ) )
abnorm
for (i in 1:20){
}
# xcumucite = cumu_cite(time,m,lambda,mu,sigma)
# diff = cumucite[-11]-cumucite[-length(time)]
# plot(time[-1]/365,diff,type = "l",lwd=1)
|
/code/fit_curve_arXiv_Ph.R
|
no_license
|
pengminshi/citation_analysis
|
R
| false | false | 8,315 |
r
|
library(ggplot2)
cars <- mtcars
cars$cyl <- factor(cars$cyl, labels =
c('Four cylinder', 'Six cylinder', 'Eight cylinder'))
features <- c('wt', 'qsec')
n_clusters <- 3
car_clusters <- kmeans(cars[, features], n_clusters, nstart = 30)
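# nstart = 30 runs k-means from 30 random starts and keeps the solution with the lowest within-cluster sum of squares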
cars$cluster <- factor(car_clusters$cluster)
centroids <- data.frame(cluster = factor(seq_len(n_clusters)),
wt = car_clusters$centers[,'wt'],
qsec = car_clusters$centers[,'qsec'])
# cross tab of cylinder by cluster
print(table(cars$cluster, cars$cyl))
g <- ggplot() +
geom_point(data = cars,
aes(x = wt,
y = qsec,
color = cluster),
size = 3) +
geom_text(data = cars,
aes(x = wt,
y = qsec,
label = row.names(cars),
color = cluster),
nudge_y = .2,
check_overlap = TRUE) +
geom_point(data = centroids,
mapping = aes(x = wt,
y = qsec,
color = cluster),
size = 20,
pch = 13)
print(g)
|
/09_PCA_LDA/Untitled2.R
|
no_license
|
Yousuf28/Machine_Learning_Materials_to_Follow
|
R
| false | false | 1,172 |
r
|
library(glmnet)
mydata = read.table("./TrainingSet/Correlation/autonomic_ganglia.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
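# alpha = 0.2 is the elastic-net mixing parameter (alpha = 1 is the lasso, alpha = 0 is ridge)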
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.2,family="gaussian",standardize=TRUE)
sink('./Model/EN/Correlation/autonomic_ganglia/autonomic_ganglia_033.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
/Model/EN/Correlation/autonomic_ganglia/autonomic_ganglia_033.R
|
no_license
|
leon1003/QSMART
|
R
| false | false | 392 |
r
|
library(RSQLite)
library(plyr)
# don't forget to setwd()
setwd("~/Documents/NCAA/")
sqlite<-dbDriver("SQLite")
ncaaDB1<-dbConnect(sqlite, "data/ncaa.db")
dbListTables(ncaaDB1)
gameData2012<-dbGetQuery(ncaaDB1, "select * from game_data_2012")
names(gameData2012)
head(gameData2012)
## Olivia's code
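## get_train_data() and soft.impute() used below are assumed to be defined elsewhere in this project (e.g. sourced from a helper script)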
gameDataTrain2012 <- get_train_data(ncaaDB1, 2012)
dat <- subset(gameData2012, !home_team_id==0 | !away_team_id==0)
team_id <- sort(unique(c(dat$home_team_id, dat$away_team_id)))
N <- length(team_id)
Sc <- matrix(NA, nrow=N, ncol=N)
for(i in 1:N){
# data for home team id
dati <- subset(dat, home_team_id==team_id[i])
# order by away_team_id
dati <- dati[with(dati, order(dati$away_team_id)), ]
# home_pts/away_pts
fn <- function(fn){log(mean(fn[["home_team_pts"]]/fn[["away_team_pts"]]))}
odds <- do.call(c,dlply(dati, .(away_team_id), fn))
js <- which(team_id%in%names(odds))
Sc[i, js] <- odds
}
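# Sc[i, j] holds log(mean(home_pts/away_pts)) for home team i against away team j; NA where the pair never met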
rowIds <- apply(Sc, 1, function(d){ sum(!is.na(d)) > 0 })
colIds <- apply(Sc, 2, function(d){ sum(!is.na(d)) > 0 })
#sum( rowIds & colIds )
sps <- which(rowIds & colIds )
#test_dat<-get_train_data(ncaaDB1, 2012)
X<-Sc[sps, sps]
# X<-Sc
train.ind<-(!is.na(X))
Z0<-matrix(0, nrow=dim(X)[1], ncol=dim(X)[2])
Zhat<-soft.impute(1.5, Z0, X, train.ind, 0.0001)
year <- 2012
year_str<-paste("game_data_", year, sep="")
select_str<-paste("select * from ", year_str, sep="")
gameData<-dbGetQuery(ncaaDB1, select_str)
dates<-as.Date(gameData$game_date, "%m/%d/%Y")
index<-(start_date$year == year)
tournament_date<-start_date[index, 2]
# get the games (with only the ids)
games<-gameData[dates >= tournament_date, c("away_team_id", "home_team_id")]
winlose<-gameData[dates >= tournament_date, c("away_team_pts", "home_team_pts")]
testY <- (winlose[,2] - winlose[,1]) > 0
res <- rep(NA, nrow(games))
for(i in 1:nrow(games)){
res[i] <- Zhat$Z[match(games[i, 2], team_id[sps]), match( games[i, 1], team_id[sps] )]
}
predY = (res > 0)
predY
testY
mean(predY==testY)
|
/scripts/impute.R
|
no_license
|
junseonghwan/madness
|
R
| false | false | 1,968 |
r
|
\name{lrr}
\alias{lrr}
\alias{lrr.default}
\alias{lrr.ssa}
\alias{lrr.1d-ssa}
\title{Calculate the Linear Recurrence Relation}
\description{
Calculates the Linear Recurrence Relation given the 'ssa' object.
}
\usage{
\method{lrr}{ssa}(x, group)
}
\arguments{
\item{x}{SSA object holding the decomposition}
\item{group}{indices of eigenvectors used to derive the LRR}
}
\details{
Produces the linear recurrence relation from the series. Check
\code{\link{basis2lrr}} for calculation details.
}
\value{
Object of class 'lrr'
}
\seealso{
\code{\link[Rssa:basis2lrr]{basis2lrr}}
\code{\link[Rssa:plot.lrr]{plot.lrr}}
\code{\link[Rssa:roots.lrr]{roots.lrr}}
}
\examples{
# Decompose 'co2' series with default parameters
s <- new.ssa(co2, L = 24)
# Calculate the LRR out of first 3 eigentriples
lrr(s, group = c(1:3))
}
|
/man/lrr.Rd
|
no_license
|
ebernierResearch/rssa
|
R
| false | false | 839 |
rd
|
% --- Source file: fact2dummy.Rd ---
\name{plotTab}
\alias{plotTab}
\title{Graphical comparison of the estimated distributions for the same categorical variable(s)}
\description{
Compares graphically the estimated distributions for the same categorical variable(s) using data coming from two different data sources.
}
\usage{
plotTab(data.A, data.B, xlab.A, xlab.B=NULL, w.A=NULL, w.B=NULL)
}
\arguments{
\item{data.A}{
A dataframe or matrix containing the variable of interest \code{xlab.A} and eventual associated survey weights \code{w.A}.
}
\item{data.B}{
A dataframe or matrix containing the variable of interest \code{xlab.B} and eventual associated survey weights \code{w.B}.
}
\item{xlab.A}{
Character string providing the name(s) of one or more variables in \code{data.A} whose (joint) distribution should be represented graphically and compared with that estimated from \code{data.B}.
}
\item{xlab.B}{
Character string providing the name(s) of one or more variables in \code{data.B} whose (joint) distribution should be represented graphically and compared with that estimated from \code{data.A}. If \code{xlab.B=NULL} (default) then it is assumed \code{xlab.B=xlab.A}.
}
\item{w.A}{
Character string providing the name of the optional weighting variable in \code{data.A} that, in case, should be used to estimate the distribution of \code{xlab.A}
}
\item{w.B}{
Character string providing the name of the optional weighting variable in \code{data.B} that, in case, should be used to estimate the distribution of \code{xlab.B}
}
}
\details{
This function compares graphically the (joint) distribution of the same variables but estimated from data coming from two different data sources. The graphical comparison is done using barcharts.
}
\value{
The required graphical representation is drawn using the \pkg{ggplot2} facilities.
}
\author{
Marcello D'Orazio \email{mdo.statmatch@gmail.com}
}
\seealso{
\code{\link[StatMatch]{comp.prop}}
}
\examples{
# plotTab(data.A = samp.A, data.B = samp.B, xlab.A="edu7", w.A = "ww")
# plotTab(data.A = samp.A, data.B = samp.B, xlab.A=c("urb", "sex"), w.A = "ww", w.B="ww")
}
\keyword{multivariate}
\keyword{dplot}
|
/StatMatch_1.4.0/man/plotTab.Rd
|
no_license
|
marcellodo/StatMatch
|
R
| false | false | 2,202 |
rd
|
data<- read.table("household_power_consumption.txt",header=TRUE,sep=";",dec = ".",na.strings = "?")  # "?" marks missing values in this file
data<-subset(data,Date == "1/2/2007" | Date=="2/2/2007")
datetime<- strptime(paste(data$Date, data$Time,sep=" "), "%d/%m/%Y %H:%M:%S")
submeter1<-as.numeric(data$Sub_metering_1)
submeter2<-as.numeric(data$Sub_metering_2)
submeter3<-as.numeric(data$Sub_metering_3)
Globalap <- as.numeric(data$Global_active_power)
Globalrp <- as.numeric(data$Global_reactive_power)
voltage <-as.numeric(data$Voltage)
png("plot4.png",width=480,height=480)
par(mfrow = c(2,2))
plot(datetime, Globalap,type="l",xlab="",ylab="Global Active Power")
plot(datetime, voltage, type="l",xlab="datetime",ylab="Voltage")
plot(datetime,submeter1,type="l",ylab="Energy sub metering",xlab="")
lines(datetime,submeter2,type="l",col="red")
lines(datetime,submeter3,type="l",col="blue")
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1,lwd =2.5, col=c("black","red","blue"))
plot(datetime,Globalrp,type="l",xlab = "datetime",ylab = "Global_reactive_power")
dev.off()
|
/plot4.R
|
no_license
|
badaldesai/ExData_Plotting1
|
R
| false | false | 1,046 |
r
|
data<- read.table("household_power_consumption.txt",header=TRUE,sep=";",dec = ".")
data<-subset(data,Date == "1/2/2007" | Date=="2/2/2007")
datetime<- strptime(paste(data$Date, data$Time,sep=" "), "%d/%m/%Y %H:%M:%S")
submeter1<-as.numeric(data$Sub_metering_1)
submeter2<-as.numeric(data$Sub_metering_2)
submeter3<-as.numeric(data$Sub_metering_3)
Globalap <- as.numeric(data$Global_active_power)
Globalrp <- as.numeric(data$Global_reactive_power)
voltage <-as.numeric(data$Voltage)
png("plot4.png",width=480,height=480)
par(mfrow = c(2,2))
plot(datetime, Globalap,type="l",xlab="",ylab="Global Active Power")
plot(datetime, voltage, type="l",xlab="datetime",ylab="Voltage")
plot(datetime,submeter1,type="l",ylab="Energy sub metering",xlab="")
lines(datetime,submeter2,type="l",col="red")
lines(datetime,submeter3,type="l",col="blue")
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1,lwd =2.5, col=c("black","red","blue"))
plot(datetime,Globalrp,type="l",xlab = "datetime",ylab = "Global_reactive_power")
dev.off()
|
library(DBI, quietly = T)
library(RSQLite, quietly=T)
#SQLite for omics database ---
ngs_db <- dbConnect(SQLite(), dbname="realgar_data/realgar-omics.sqlite")
#SQLite for gwas data from database ---
gwas_db <- dbConnect(SQLite(), dbname="realgar_data/realgar-gwas-hg38.sqlite")
## FUNCTIONS ------
## Get databases from gwas files and filter selected gene
get_query_db <- function(name, curr_gene){
gene <- paste0("'",curr_gene,"'")
query <- paste0("SELECT * FROM ",name," WHERE symbol = ",gene)
res <- dbSendQuery(gwas_db, query)
data <- dbFetch(res)
dbClearResult(res)
return(data)
}
## Get snp data file
get_snp <- function(name){
query <- paste0("SELECT snp FROM ",name)
res <- dbSendQuery(gwas_db, query)
data <- dbFetch(res)
dbClearResult(res)
return(as.vector(data$snp))
}
## Get match for specific entered snp id
get_matches <- function(snp, name){
rsid <- paste0("'",snp,"'")
query <- paste0("SELECT snp,end,symbol FROM ",name," WHERE snp = ",rsid)
res <- dbSendQuery(gwas_db, query)
data <- dbFetch(res)
dbClearResult(res)
return(data)
}
## Match snp id selected to gene location database and select gene nearest by distance to selected snp
join_gene_snp <- function(all_matches){
genes <- unique(as.vector(all_matches$symbol))
data_table <- data.frame()
for (i in genes){
gene <- paste0("'",i,"'")
query <- paste0("SELECT DISTINCT symbol,start FROM gene_locations WHERE symbol = ", gene)
res <- dbSendQuery(gwas_db, query)
data <- dbFetch(res)
dbClearResult(res)
data_table <- rbind(data_table,data)
}
data_table <- data_table[which(!duplicated(data_table$symbol)),]
all_matches <- merge(all_matches, data_table, by = "symbol")
all_matches$dist <- abs(all_matches$start - all_matches$end) # here, "end" is snp position, "start" is gene start
return(unique(all_matches$symbol[which(all_matches$dist==min(all_matches$dist))])) # choose the gene symbol whose start is the smallest absolute distance away
}
|
/realgar/utilities/sql_queries.R
|
permissive
|
HimesGroup/shiny-server
|
R
| false | false | 2,002 |
r
|
library(DBI, quietly = T)
library(RSQLite, quietly=T)
#SQLite for omics database ---
ngs_db <- dbConnect(SQLite(), dbname="realgar_data/realgar-omics.sqlite")
#SQLite for gwas data from database ---
gwas_db <- dbConnect(SQLite(), dbname="realgar_data/realgar-gwas-hg38.sqlite")
## FUNCTIONS ------
## Get databases from gwas files and filter selected gene
get_query_db <- function(name, curr_gene){
gene <- paste0("'",curr_gene,"'")
query <- paste0("SELECT * FROM ",name," WHERE symbol = ",gene)
res <- dbSendQuery(gwas_db, query)
data <- dbFetch(res)
dbClearResult(res)
return(data)
}
## Get snp data file
get_snp <- function(name){
query <- paste0("SELECT snp FROM ",name)
res <- dbSendQuery(gwas_db, query)
data <- dbFetch(res)
dbClearResult(res)
return(as.vector(data$snp))
}
## Get match for specific entered snp id
get_matches <- function(snp, name){
rsid <- paste0("'",snp,"'")
query <- paste0("SELECT snp,end,symbol FROM ",name," WHERE snp = ",rsid)
res <- dbSendQuery(gwas_db, query)
data <- dbFetch(res)
dbClearResult(res)
return(data)
}
## Match snp id selected to gene location database and select gene nearest by distance to selected snp
join_gene_snp <- function(all_matches){
genes <- unique(as.vector(all_matches$symbol))
data_table <- data.frame()
for (i in genes){
gene <- paste0("'",i,"'")
query <- paste0("SELECT DISTINCT symbol,start FROM gene_locations WHERE symbol = ", gene)
res <- dbSendQuery(gwas_db, query)
data <- dbFetch(res)
dbClearResult(res)
data_table <- rbind(data_table,data)
}
data_table <- data_table[which(!duplicated(data_table$symbol)),]
all_matches <- merge(all_matches, data_table, by = "symbol")
all_matches$dist <- abs(all_matches$start - all_matches$end) # here, "end" is snp position, "start" is gene start
return(unique(all_matches$symbol[which(all_matches$dist==min(all_matches$dist))])) # choose the gene symbol whose start is the smallest absolute distance away
}
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(tidyverse)
# Define UI for the application that draws the test-outcome plot
ui <- fluidPage(
titlePanel("Covid-19 Test"),
numericInput("InfectionRate", "Infection Rate(%)", 5),
actionButton("draw", "Draw the Plot"),
h2("Plot of Cash Flow"),
plotOutput("plot1"),
)
# Define server logic required to draw the test-outcome plot
server <- function(input, output) {
gettable <- function(input){
FN <-as.integer(0.05 * input$InfectionRate/100 * 500)
AP <-as.integer(0.95 * input$InfectionRate/100 *500)
FP <- as.integer(0.05 * (1- input$InfectionRate/100) * 500)
df_sensi <- full_join(
tibble(x = 1:25, color = 'Actual Neg'),
tibble(y = 1:20, color = 'Actual Neg'),
by = 'color')
    # At a 5% infection rate, with the integer truncation above, the code gives:
    # infected: 500 * 5% = 25 -> Actual Pos (23 at 95%) + False Neg (1 at 5%)
    # not infected: 500 * 95% = 475 -> False Pos (23 at 5%) + Actual Neg (453, the remainder)
df_sensi['color'] <- c(rep('False Neg', FN),
rep('Actual Pos', AP),
rep('False Pos', FP),
rep('Actual Neg', 500 - FN - AP - FP))
df_sensi
}
observeEvent(input$draw, {
output$plot1 <- renderPlot({ggplot(gettable(input)) +
geom_point(aes(x, y,colour = color), size = 4, shape="circle") +
theme_bw() +
theme(axis.title.x=element_blank(), axis.title.y=element_blank(),
axis.line=element_blank(),axis.text.x=element_blank(),
axis.text.y=element_blank(),axis.ticks=element_blank())})
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
/2020/Assignment-2020/Individual/FE8828-ZHU Yiqing/Assignment4/Assignment4_Q2.R
|
no_license
|
leafyoung/fe8828
|
R
| false | false | 1,908 |
r
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(tidyverse)
# Define UI for the application that draws the test-outcome plot
ui <- fluidPage(
titlePanel("Covid-19 Test"),
numericInput("InfectionRate", "Infection Rate(%)", 5),
actionButton("draw", "Draw the Plot"),
h2("Plot of Cash Flow"),
plotOutput("plot1"),
)
# Define server logic required to draw the test-outcome plot
server <- function(input, output) {
gettable <- function(input){
FN <-as.integer(0.05 * input$InfectionRate/100 * 500)
AP <-as.integer(0.95 * input$InfectionRate/100 *500)
FP <- as.integer(0.05 * (1- input$InfectionRate/100) * 500)
df_sensi <- full_join(
tibble(x = 1:25, color = 'Actual Neg'),
tibble(y = 1:20, color = 'Actual Neg'),
by = 'color')
    # At a 5% infection rate, with the integer truncation above, the code gives:
    # infected: 500 * 5% = 25 -> Actual Pos (23 at 95%) + False Neg (1 at 5%)
    # not infected: 500 * 95% = 475 -> False Pos (23 at 5%) + Actual Neg (453, the remainder)
df_sensi['color'] <- c(rep('False Neg', FN),
rep('Actual Pos', AP),
rep('False Pos', FP),
rep('Actual Neg', 500 - FN - AP - FP))
df_sensi
}
observeEvent(input$draw, {
output$plot1 <- renderPlot({ggplot(gettable(input)) +
geom_point(aes(x, y,colour = color), size = 4, shape="circle") +
theme_bw() +
theme(axis.title.x=element_blank(), axis.title.y=element_blank(),
axis.line=element_blank(),axis.text.x=element_blank(),
axis.text.y=element_blank(),axis.ticks=element_blank())})
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vs2dhReadConfig.R
\name{importMatrices}
\alias{importMatrices}
\title{Helper function: imports matrices (jtex, hvalues, tvalues)}
\usage{
importMatrices(prepData, dbg = TRUE)
}
\arguments{
\item{prepData}{object as retrieved by prepareImport()}
\item{dbg}{prints debug information on the screen}
}
\value{
Matrix values (always: soil, if available: initial pressure head &
temperature distribution)
}
\examples{
model.path <- system.file("extdata", "vs2dh_example/tutorial2", package = "kwb.vs2dh")
inp <- prepareImport(model.path)
grid <- importMatrices(inp)
#### Soil properties matrix
vs2dh.plotMatrix(data = grid$jtex)
#### Initial temperature distribution matrix
vs2dh.plotMatrix(data = grid$tvalues)
}
\description{
Helper function: imports matrices (jtex, hvalues, tvalues)
}
|
/man/importMatrices.Rd
|
permissive
|
KWB-R/kwb.vs2dh
|
R
| false | true | 877 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vs2dhReadConfig.R
\name{importMatrices}
\alias{importMatrices}
\title{Helper function: imports matrices (jtex, hvalues, tvalues)}
\usage{
importMatrices(prepData, dbg = TRUE)
}
\arguments{
\item{prepData}{object as retrieved by prepareImport()}
\item{dbg}{prints debug information on the screen}
}
\value{
Matrix values (always: soil, if available: initial pressure head &
temperature distribution)
}
\examples{
model.path <- system.file("extdata", "vs2dh_example/tutorial2", package = "kwb.vs2dh")
inp <- prepareImport(model.path)
grid <- importMatrices(inp)
#### Soil properties matrix
vs2dh.plotMatrix(data = grid$jtex)
#### Initial temperature distribution matrix
vs2dh.plotMatrix(data = grid$tvalues)
}
\description{
Helper function: imports matrices (jtex, hvalues, tvalues)
}
|
library(Bergm)
### Name: bergm
### Title: Parameter estimation for Bayesian ERGMs
### Aliases: bergm
### ** Examples
# Load the florentine marriage network
data(florentine)
# Posterior parameter estimation:
p.flo <- bergm(flomarriage ~ edges + kstar(2),
burn.in = 50,
aux.iters = 500,
main.iters = 500,
gamma = 1)
# Posterior summaries:
bergm.output(p.flo)
# Bayesian goodness-of-fit test:
bgof(p.flo,
aux.iters = 500,
sample.size = 50,
n.deg = 10,
n.dist = 9,
n.esp = 6)
|
/data/genthat_extracted_code/Bergm/examples/bergm.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 571 |
r
|
library(Bergm)
### Name: bergm
### Title: Parameter estimation for Bayesian ERGMs
### Aliases: bergm
### ** Examples
# Load the florentine marriage network
data(florentine)
# Posterior parameter estimation:
p.flo <- bergm(flomarriage ~ edges + kstar(2),
burn.in = 50,
aux.iters = 500,
main.iters = 500,
gamma = 1)
# Posterior summaries:
bergm.output(p.flo)
# Bayesian goodness-of-fit test:
bgof(p.flo,
aux.iters = 500,
sample.size = 50,
n.deg = 10,
n.dist = 9,
n.esp = 6)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ceden_query_csv.R
\name{ceden_query_csv}
\alias{ceden_query_csv}
\title{Download CEDEN data via an API}
\usage{
ceden_query_csv(service, query_parameters,
base_URI = "https://cedenwebservices.waterboards.ca.gov:9267",
userName = "", password = "", errorMessages_out = TRUE)
}
\arguments{
\item{service}{A text string representing one of the 15 CEDEN advanced query tool services.
For each of the 5 categories of monitoring data (Benthic, Habitat, Tissue, Toxicity, WaterQuality),
there are 3 types of data available (MonitoringStationsList, ParameterCountsList, ResultsList). For example:
CEDENBenthicMonitoringStationsList}
\item{query_parameters}{The query string (in plain text). This includes everything after the
\code{?queryParams=\{} statement, except the closing \code{\}} of the query string. For information on how
to construct a query string, see the documentation for the CEDEN web services.}
\item{base_URI}{The base part of the URL for all CEDEN web services
(e.g.,https://cedenwebservices.waterboards.ca.gov), including a port number if required
(use ":9267" if on the State Water Board network). Defaults to:
https://cedenwebservices.waterboards.ca.gov:9267}
\item{userName}{The user name for your CEDEN web services account. You can enter this through
the function, or if you leave this argument blank the function will look for this information
in a variable called `ceden_userName` within the environment variables defined for your account.}
\item{password}{The password for your CEDEN web services account. You can enter this through
the function, or if you leave this argument blank the function will look for this information
in a variable called `ceden_password` within the environment variables defined for your account.}
\item{errorMessages_out}{When set to \code{TRUE}, if there is an error with the
authentication or the query request (including when there is simply no data returned that meets
the query parameters), the function will attempt to return a data frame with information about
the error (including where the error occurred, the HTTP code returned, and any messages about the API
response). When set to \code{FALSE}, the function will simply return \code{NA} on an error.}
}
\value{
This function returns a data frame with the data specified in the \code{service}
and \code{query_parameters} arguments. On an error, the output will depend on the value
of the \code{errorMessages_out} parameter.
}
\description{
This function provides an interface with CEDEN web services to perform queries and programmatically download data.
This function may be able to handle larger requests than the \code{ceden_query} function. It is identical to the
\code{ceden_query} function, except that it requests data from the API in csv format instead of JSON. As a result,
there could possibly be some slight differences in the format of the data returned by the two functions.
}
\examples{
# All of these examples return the data to a data frame called: data.download
# This is the example provided in the CEDEN web services documentation
data.download <- ceden_query_csv(service = 'cedenwaterqualitymonitoringstationslist', query_parameters = '"filter":[{"sampleDateMin":"1/1/2015","sampleDateMax":"4/1/2015"}],"top":1000')
# Get all of the records of E. coli monitoring in Sacramento and San Joaquin counties from 6/1/2014 through 7/1/2014
data.download <- ceden_query_csv(service = 'cedenwaterqualityresultslist', query_parameters = '"filter":[{"county":"Sacramento","parameter":"E. coli","sampleDateMin":"6/1/2014","sampleDateMax":"7/1/2014"},{"county":"San Joaquin","parameter":"E. coli","sampleDateMin":"6/1/2014","sampleDateMax":"7/1/2014"}]')
# Get all water quality results in Sacramento from the year 2014 where the parameter name contains the name Nitrogen (note use of the wildcard /\%)
data.download <- ceden_query_csv(service = 'cedenwaterqualityresultslist', query_parameters = '"filter":[{"county":"Sacramento","parameter":"/\%Nitrogen/\%","sampleDateMin":"1/1/2014","sampleDateMax":"12/31/2014"}]', userName = 'user', password = 'password', base_URI = 'https://cedenwebservices.waterboards.ca.gov')
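# A hedged sketch beyond the documented examples: with errorMessages_out = FALSE the function
# returns NA on failure, so a call could be guarded roughly as below (the service and query
# strings are illustrative assumptions, not tested values).
# res <- ceden_query_csv(service = 'cedenwaterqualityresultslist',
#                        query_parameters = '"filter":[{"county":"Sacramento"}]',
#                        errorMessages_out = FALSE)
# if (length(res) == 1 && all(is.na(res))) message("request failed or returned no data")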
}
\keyword{API}
\keyword{CEDEN}
\keyword{California}
\keyword{Data}
\keyword{Environmental}
\keyword{Exchange}
\keyword{Network}
|
/man/ceden_query_csv.Rd
|
no_license
|
daltare/cedenTools
|
R
| false | true | 4,353 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ceden_query_csv.R
\name{ceden_query_csv}
\alias{ceden_query_csv}
\title{Download CEDEN data via an API}
\usage{
ceden_query_csv(service, query_parameters,
base_URI = "https://cedenwebservices.waterboards.ca.gov:9267",
userName = "", password = "", errorMessages_out = TRUE)
}
\arguments{
\item{service}{A text string representing one of the 15 CEDEN advanced query tool services.
For each of the 5 categories of monitoring data (Benthic, Habitat, Tissue, Toxicity, WaterQuality),
there are 3 types of data available (MonitoringStationsList, ParameterCountsList, ResultsList). For example:
CEDENBenthicMonitoringStationsList}
\item{query_parameters}{The query string (in plain text). This includes everything after the
\code{?queryParams=\{} statement, except the closing \code{\}} of the query string. For information on how
to construct a query string, see the documentation for the CEDEN web services.}
\item{base_URI}{The base part of the URL for all CEDEN web services
(e.g.,https://cedenwebservices.waterboards.ca.gov), including a port number if required
(use ":9267" if on the State Water Board network). Defaults to:
https://cedenwebservices.waterboards.ca.gov:9267}
\item{userName}{The user name for your CEDEN web services account. You can enter this through
the function, or if you leave this argument blank the function will look for this information
in a variable called `ceden_userName` within the environment variables defined for your account.}
\item{password}{The password for your CEDEN web services account. You can enter this through
the function, or if you leave this argument blank the function will look for this information
in a variable called `ceden_password` within the environment variables defined for your account.}
\item{errorMessages_out}{When set to \code{TRUE}, if there is an error with the
authentication or the query request (including when there is simply no data returned that meets
the query parameters), the function will attempt to return a data frame with information about
the error (including where the error occurred, the HTTP code returned, and any messages about the API
response). When set to \code{FALSE}, the function will simply return \code{NA} on an error.}
}
\value{
This function returns a data frame with the data specified in the \code{service}
and \code{query_parameters} arguments. On an error, the output will depend on the value
of the \code{errorMessages_out} parameter.
}
\description{
This function provides an interface with CEDEN web services to perform queries and programmatically download data.
This function may be able to handle larger requests than the \code{ceden_query} function. It is identical to the
\code{ceden_query} function, except that it requests data from the API in csv format instead of JSON. As a result,
there could possibly be some slight differences in the format of the data returned by the two functions.
}
\examples{
# All of these examples return the data to a data frame called: data.download
# This is the example provided in the CEDEN web services documentation
data.download <- ceden_query_csv(service = 'cedenwaterqualitymonitoringstationslist', query_parameters = '"filter":[{"sampleDateMin":"1/1/2015","sampleDateMax":"4/1/2015"}],"top":1000')
# Get all of the records of E. coli monitoring in Sacramento and San Joaquin counties from 6/1/2014 through 7/1/2014
data.download <- ceden_query_csv(service = 'cedenwaterqualityresultslist', query_parameters = '"filter":[{"county":"Sacramento","parameter":"E. coli","sampleDateMin":"6/1/2014","sampleDateMax":"7/1/2014"},{"county":"San Joaquin","parameter":"E. coli","sampleDateMin":"6/1/2014","sampleDateMax":"7/1/2014"}]')
# Get all water quality results in Sacramento from the year 2014 where the parameter name contains the name Nitrogen (note use of the wildcard /\%)
data.download <- ceden_query_csv(service = 'cedenwaterqualityresultslist', query_parameters = '"filter":[{"county":"Sacramento","parameter":"/\%Nitrogen/\%","sampleDateMin":"1/1/2014","sampleDateMax":"12/31/2014"}]', userName = 'user', password = 'password', base_URI = 'https://cedenwebservices.waterboards.ca.gov')
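# A hedged sketch beyond the documented examples: with errorMessages_out = FALSE the function
# returns NA on failure, so a call could be guarded roughly as below (the service and query
# strings are illustrative assumptions, not tested values).
# res <- ceden_query_csv(service = 'cedenwaterqualityresultslist',
#                        query_parameters = '"filter":[{"county":"Sacramento"}]',
#                        errorMessages_out = FALSE)
# if (length(res) == 1 && all(is.na(res))) message("request failed or returned no data")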
}
\keyword{API}
\keyword{CEDEN}
\keyword{California}
\keyword{Data}
\keyword{Environmental}
\keyword{Exchange}
\keyword{Network}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/result_lm.R
\name{result_lm}
\alias{result_lm}
\title{Compute the adjusted R-squared of the abundance/metric value models at each scale with the lm method.}
\usage{
result_lm(temp, dist, metrics, Sp_Name, tab = T, plots = T,
individual_plots = F)
}
\arguments{
\item{temp}{The tab generated by the formodel() function, merged by the sampling-site id
with the tab of your abundance sampling.}
\item{dist}{Vector of scales you chose in Chloe when you created the ascii files with Chloe}
\item{metrics}{Vector of one or more metrics you chose in Chloe when you created the ascii files with Chloe}
\item{tab}{Argument "T" or "F" defining whether you want to return plots (F) or a tab (T).}
\item{Sp_Name}{Optional argument containing the name of the species you want to analyse; the name
must match its column name in the "carab" tab}
}
\value{
Return a list of plot(s) of the length of "metrics", or a tab of the adjusted R-squared values of the lm fits
and their significance.
}
\description{
`result_lm()` returns plots or a tab, depending on the "tab" argument, of the adjusted R-squared of the abundance/metric value models
at each scale for lm, gam and MARS, with the corresponding significance when it can be tested.
}
\details{
This function is meant to be used with the tab generated by formodel() merged with
the tab of your abundance sampling.
}
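\examples{
# Hedged sketch, not taken from the package: `temp` is assumed to be the merged tab described
# in the arguments above, and the scales/metrics below are illustrative values only.
# result_lm(temp, dist = c(100, 250, 500), metrics = "shannon",
#           Sp_Name = "sp1", tab = TRUE)
}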
\author{
Pierre-Gilles Lemasle <pg.lemasle@gmail.com>
}
|
/man/result_lm.Rd
|
no_license
|
Pintademijote/multipack
|
R
| false | true | 1,451 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/result_lm.R
\name{result_lm}
\alias{result_lm}
\title{Compute the adjusted R-squared of the abundance/metric value models at each scale with the lm method.}
\usage{
result_lm(temp, dist, metrics, Sp_Name, tab = T, plots = T,
individual_plots = F)
}
\arguments{
\item{temp}{The tab generated by the formodel() function, merged by the sampling-site id
with the tab of your abundance sampling.}
\item{dist}{Vector of scales you chose in Chloe when you created the ascii files with Chloe}
\item{metrics}{Vector of one or more metrics you chose in Chloe when you created the ascii files with Chloe}
\item{tab}{Argument "T" or "F" defining whether you want to return plots (F) or a tab (T).}
\item{Sp_Name}{Optional argument containing the name of the species you want to analyse; the name
must match its column name in the "carab" tab}
}
\value{
Return a list of plot(s) of the length of "metrics", or a tab of the adjusted R-squared values of the lm fits
and their significance.
}
\description{
`result_lm()` returns plots or a tab, depending on the "tab" argument, of the adjusted R-squared of the abundance/metric value models
at each scale for lm, gam and MARS, with the corresponding significance when it can be tested.
}
\details{
This function is meant to be used with the tab generated by formodel() merged with
the tab of your abundance sampling.
}
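\examples{
# Hedged sketch, not taken from the package: `temp` is assumed to be the merged tab described
# in the arguments above, and the scales/metrics below are illustrative values only.
# result_lm(temp, dist = c(100, 250, 500), metrics = "shannon",
#           Sp_Name = "sp1", tab = TRUE)
}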
\author{
Pierre-Gilles Lemasle <pg.lemasle@gmail.com>
}
|
PeaksToGenesCRIStyle <- function(ExtGenes,Rit){
require(GenomicRanges)
MatchedGenes <- ExtGenes[findOverlaps(ExtGenes,Rit)@matchMatrix[,1]]
MatchedPeaks <- Rit[findOverlaps(ExtGenes,Rit)@matchMatrix[,2]]
NotMatchePeaks <- Rit[-findOverlaps(ExtGenes,Rit)@matchMatrix[,2]]
TempData <- as.data.frame(MatchedPeaks)[-4] ## Removes width part of object which is automatically generated
colnames(TempData) <- paste("Variant_",colnames(TempData),sep="")
elementMetadata(MatchedGenes) <- cbind(as.data.frame(elementMetadata(MatchedGenes)),TempData)
MetaList <- MatchedGenes
cat("Overlapping to find nearest feature for non-overlapping Peaks")
#seqnames()
TempNearestRanges <- GRanges()
TempNonMatchedByChr <- GRanges()
for(i in 1:length(unique(seqnames(NotMatchePeaks)))){
if(any(seqnames(ExtGenes) %in% unique(seqnames(NotMatchePeaks))[i])){
Index <- nearest(ranges(NotMatchePeaks[seqnames(NotMatchePeaks) %in% unique(seqnames(NotMatchePeaks))[i]]),ranges(ExtGenes[seqnames(ExtGenes) %in% unique(seqnames(NotMatchePeaks))[i]]))
TempNearGenes <- ExtGenes[seqnames(ExtGenes) %in% unique(seqnames(NotMatchePeaks))[i]][Index]
TempPeaks <- NotMatchePeaks[seqnames(NotMatchePeaks) %in% unique(seqnames(NotMatchePeaks))[i]]
TempData2 <- as.data.frame(TempPeaks)[-4] ## Removes width part of object which is automatically generated
colnames(TempData2) <- paste("Variant_",colnames(TempData2),sep="")
elementMetadata(TempNearGenes) <- cbind(as.data.frame(elementMetadata(TempNearGenes)),TempData2)
TempNearestRanges <- c(TempNearestRanges,TempNearGenes)
}else{
Temp <- NotMatchePeaks[seqnames(NotMatchePeaks) %in% unique(seqnames(NotMatchePeaks))[i]]
TempNonMatchedByChr <- c(TempNonMatchedByChr,Temp)
}
}
elementMetadata(TempNearestRanges)$Feature <- ("Off_Amplicon")
levels(elementMetadata(MetaList)$Feature) <- c(levels(elementMetadata(MetaList)$Feature),"Off_Amplicon")
MetaList <- c(MetaList,TempNearestRanges)
Distances <- DistanceTo(MetaList)
elementMetadata(MetaList) <- cbind(as.data.frame(elementMetadata(MetaList)),Distances)
# return(MetaList)
ForPrinting <- as.data.frame(MetaList)
VarBegging <- grep("Variant",colnames(ForPrinting))[1]
Rearranged <- cbind(ForPrinting[,(VarBegging):ncol(ForPrinting)],ForPrinting[,1:(VarBegging-1)])
# colnames(Rearranged)[grep("Unique_ID",colnames(Rearranged))] <- "Unique_ID"
ToMergeIn <- as.data.frame(TempNonMatchedByChr)[,-4]
colnames(ToMergeIn) <- paste("Variant_",colnames(ToMergeIn),sep="")
TotalVariants <- merge(Rearranged,ToMergeIn,all=T,sort=F)
}
BuildPeakRanges <- function(CRIStyle){
GRanges(seqnames=seqnames(CRIStyle),ranges=IRanges(start=as.numeric(as.vector(elementMetadata(CRIStyle)$Variant_start)),end=as.numeric(as.vector(elementMetadata(CRIStyle)$Variant_end))))
}
DistanceTo <- function(StrandedFeatures){
Centredpeaks <- Centred(BuildPeakRanges(StrandedFeatures))
DistanceTo3PrimeofFeature = end(StrandedFeatures)-start(Centredpeaks)
DistanceTo5PrimeofFeature = start(StrandedFeatures)-start(Centredpeaks)
DistanceToCentreOfFeature = start(Centred(StrandedFeatures))-start(Centredpeaks)
DistancesToFeatures <- cbind(DistanceTo3PrimeofFeature,DistanceTo5PrimeofFeature,DistanceToCentreOfFeature)
colnames(DistancesToFeatures) <- c("Distance to 3'-end of Feature","Distance to 5'-end of Feature","Distance to Centre of Feature")
DistancesToFeatures
}
Centred <- function(GRanges,distance=0){
require(GenomicRanges)
PeakStart <- start(GRanges)
PeakWidth <- width(GRanges)
PeakCentre <- PeakStart+round(PeakWidth/2)
start(ranges(GRanges)) <- PeakCentre-distance
end(ranges(GRanges)) <- PeakCentre+distance
GRanges
}
args <- c("GATK.snps.bed","SLX-4969_amplicons.bed")
SNPS <- read.delim(args[1],sep="\t",header=F)
Amps <- read.delim(args[2],sep="\t",header=F)
AmpRanges <- GRanges(seqnames=as.vector(Amps[!Amps[,1]=="",1]),IRanges(Amps[!Amps[,1]=="",2],Amps[!Amps[,1]=="",3]),strand="*")
elementMetadata(AmpRanges) <- "ON_Amplicon"
colnames(elementMetadata(AmpRanges)) <- "Feature"
SNPRanges<- GRanges(seqnames=as.vector(SNPS[!SNPS[,1]=="",1]),IRanges(SNPS[!SNPS[,1]=="",2],SNPS[!SNPS[,1]=="",3]),strand="*")
if(ncol(SNPS)>3){
metaData <- cbind(SNPS[!SNPS[,1]=="",-c(1:3)],paste("Variant_ID",seq(1,nrow(SNPS[!SNPS[,1]=="",])),sep="_"))
colnames(metaData)[ncol(metaData)] <- "Unique_ID"
elementMetadata(SNPRanges) <- metaData
}
Answer <- PeaksToGenesCRIStyle(AmpRanges,SNPRanges)
ExtGenes <- AmpRanges
Rit <- SNPRanges
|
/Process10/RScripts/ExomeAnno.r
|
permissive
|
Yixf-Self/chipseq-pipeline
|
R
| false | false | 4,723 |
r
|
PeaksToGenesCRIStyle <- function(ExtGenes,Rit){
require(GenomicRanges)
MatchedGenes <- ExtGenes[findOverlaps(ExtGenes,Rit)@matchMatrix[,1]]
MatchedPeaks <- Rit[findOverlaps(ExtGenes,Rit)@matchMatrix[,2]]
NotMatchePeaks <- Rit[-findOverlaps(ExtGenes,Rit)@matchMatrix[,2]]
TempData <- as.data.frame(MatchedPeaks)[-4] ## Removes width part of object which is automatically generated
colnames(TempData) <- paste("Variant_",colnames(TempData),sep="")
elementMetadata(MatchedGenes) <- cbind(as.data.frame(elementMetadata(MatchedGenes)),TempData)
MetaList <- MatchedGenes
cat("Overlapping to find nearest feature for non-overlapping Peaks")
#seqnames()
TempNearestRanges <- GRanges()
TempNonMatchedByChr <- GRanges()
for(i in 1:length(unique(seqnames(NotMatchePeaks)))){
if(any(seqnames(ExtGenes) %in% unique(seqnames(NotMatchePeaks))[i])){
Index <- nearest(ranges(NotMatchePeaks[seqnames(NotMatchePeaks) %in% unique(seqnames(NotMatchePeaks))[i]]),ranges(ExtGenes[seqnames(ExtGenes) %in% unique(seqnames(NotMatchePeaks))[i]]))
TempNearGenes <- ExtGenes[seqnames(ExtGenes) %in% unique(seqnames(NotMatchePeaks))[i]][Index]
TempPeaks <- NotMatchePeaks[seqnames(NotMatchePeaks) %in% unique(seqnames(NotMatchePeaks))[i]]
TempData2 <- as.data.frame(TempPeaks)[-4] ## Removes width part of object which is automatically generated
colnames(TempData2) <- paste("Variant_",colnames(TempData2),sep="")
elementMetadata(TempNearGenes) <- cbind(as.data.frame(elementMetadata(TempNearGenes)),TempData2)
TempNearestRanges <- c(TempNearestRanges,TempNearGenes)
}else{
Temp <- NotMatchePeaks[seqnames(NotMatchePeaks) %in% unique(seqnames(NotMatchePeaks))[i]]
TempNonMatchedByChr <- c(TempNonMatchedByChr,Temp)
}
}
elementMetadata(TempNearestRanges)$Feature <- ("Off_Amplicon")
levels(elementMetadata(MetaList)$Feature) <- c(levels(elementMetadata(MetaList)$Feature),"Off_Amplicon")
MetaList <- c(MetaList,TempNearestRanges)
Distances <- DistanceTo(MetaList)
elementMetadata(MetaList) <- cbind(as.data.frame(elementMetadata(MetaList)),Distances)
# return(MetaList)
ForPrinting <- as.data.frame(MetaList)
VarBegging <- grep("Variant",colnames(ForPrinting))[1]
Rearranged <- cbind(ForPrinting[,(VarBegging):ncol(ForPrinting)],ForPrinting[,1:(VarBegging-1)])
# colnames(Rearranged)[grep("Unique_ID",colnames(Rearranged))] <- "Unique_ID"
ToMergeIn <- as.data.frame(TempNonMatchedByChr)[,-4]
colnames(ToMergeIn) <- paste("Variant_",colnames(ToMergeIn),sep="")
TotalVariants <- merge(Rearranged,ToMergeIn,all=T,sort=F)
}
BuildPeakRanges <- function(CRIStyle){
GRanges(seqnames=seqnames(CRIStyle),ranges=IRanges(start=as.numeric(as.vector(elementMetadata(CRIStyle)$Variant_start)),end=as.numeric(as.vector(elementMetadata(CRIStyle)$Variant_end))))
}
DistanceTo <- function(StrandedFeatures){
Centredpeaks <- Centred(BuildPeakRanges(StrandedFeatures))
DistanceTo3PrimeofFeature = end(StrandedFeatures)-start(Centredpeaks)
DistanceTo5PrimeofFeature = start(StrandedFeatures)-start(Centredpeaks)
DistanceToCentreOfFeature = start(Centred(StrandedFeatures))-start(Centredpeaks)
DistancesToFeatures <- cbind(DistanceTo3PrimeofFeature,DistanceTo5PrimeofFeature,DistanceToCentreOfFeature)
colnames(DistancesToFeatures) <- c("Distance to 3'-end of Feature","Distance to 5'-end of Feature","Distance to Centre of Feature")
DistancesToFeatures
}
Centred <- function(GRanges,distance=0){
require(GenomicRanges)
PeakStart <- start(GRanges)
PeakWidth <- width(GRanges)
PeakCentre <- PeakStart+round(PeakWidth/2)
start(ranges(GRanges)) <- PeakCentre-distance
end(ranges(GRanges)) <- PeakCentre+distance
GRanges
}
args <- c("GATK.snps.bed","SLX-4969_amplicons.bed")
SNPS <- read.delim(args[1],sep="\t",header=F)
Amps <- read.delim(args[2],sep="\t",header=F)
AmpRanges <- GRanges(seqnames=as.vector(Amps[!Amps[,1]=="",1]),IRanges(Amps[!Amps[,1]=="",2],Amps[!Amps[,1]=="",3]),strand="*")
elementMetadata(AmpRanges) <- "ON_Amplicon"
colnames(elementMetadata(AmpRanges)) <- "Feature"
SNPRanges<- GRanges(seqnames=as.vector(SNPS[!SNPS[,1]=="",1]),IRanges(SNPS[!SNPS[,1]=="",2],SNPS[!SNPS[,1]=="",3]),strand="*")
if(ncol(SNPS)>3){
metaData <- cbind(SNPS[!SNPS[,1]=="",-c(1:3)],paste("Variant_ID",seq(1,nrow(SNPS[!SNPS[,1]=="",])),sep="_"))
colnames(metaData)[ncol(metaData)] <- "Unique_ID"
elementMetadata(SNPRanges) <- metaData
}
Answer <- PeaksToGenesCRIStyle(AmpRanges,SNPRanges)
ExtGenes <- AmpRanges
Rit <- SNPRanges
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/beeps.R
\name{beepyeah}
\alias{beepyeah}
\title{beepyeah}
\usage{
beepyeah()
}
\value{
NULL #Plays lil Jon saying "Yeah"
}
\description{
beepyeah
}
\examples{
beepyeah()
}
|
/CSPay/man/beepyeah.Rd
|
permissive
|
gafar123/finalproject-blank
|
R
| false | true | 250 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/beeps.R
\name{beepyeah}
\alias{beepyeah}
\title{beepyeah}
\usage{
beepyeah()
}
\value{
NULL #Plays lil Jon saying "Yeah"
}
\description{
beepyeah
}
\examples{
beepyeah()
}
|
testlist <- list(a = -1L, b = -256L, x = integer(0))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610054544-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 113 |
r
|
testlist <- list(a = -1L, b = -256L, x = integer(0))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
# Function for creating the cancer map.
# Input parameters:
# data: data matrix where rows represent samples and columns represent features.
# name: Parameter used in the naming of the output files.
# VAR: percentage of most variable features to retain in the data matrix for t-SNE.
# BW: Bandwidth parameter for mean-shift clustering. The lower the bandwidth, the denser the clusters found.
# PATH_OUTPUT: Path where to save the output files.
CancerMap=function(data, name, VAR=NULL, BW=0.9, PATH_OUTPUT){
if(!is.null(VAR)){
    # Get the j% most variable genes (feature selection).
v = apply(data,2,var)
v = sort(v,decreasing=T)
v = names(v)
j = VAR
V = round(dim(data)[2] * j / 100)
v = v[1:V]
data = data[,colnames(data)%in%v]
outname=paste0(PATH_OUTPUT, "cancermap_", name, "_", VAR, "pct_genes_", "BH-SNE_mean-shift_BW", BW, ".txt")
outnamecent=paste0(PATH_OUTPUT, "cancermap_", name, "_", VAR, "pct_genes_", "BH-SNE_mean-shift_BW", BW, "_cluster_centroids.txt")
}else{
outname=paste0(PATH_OUTPUT, "cancermap_", name, "_", "BH-SNE_mean-shift_BW", BW, ".txt")
outnamecent=paste0(PATH_OUTPUT, "cancermap_", name, "_", "BH-SNE_mean-shift_BW", BW, "_cluster_centroids.txt")
}
# Run BH-SNE + mean-shift clustering.
set.seed(1) # Random number generator seed for reproducible results.
x = Rtsne(data,perplexity = 30, check_duplicates = F, pca = F, is_distance = F) # BH-SNE.
x = x$Y
h = BW # Bandwidth parameter for clustering (for subsets of Hemap data we used h = 1.5).
m1 = ms(x, h=h, scaled=F, thr=0.01, iter=500, plotms=0) # Mean-shift clustering.
X = data.frame(rownames(data), x, m1$cluster.label) # Summarize results to a data.frame.
colnames(X) = c("ID","x","y",paste0("cluster"))
# Write embedding coordinates and the clustering result to a file.
write.table(X, outname, quote = F, sep = "\t", row.names = F)
# Write the cluster centroids(modes) to a file.
write.table(data.frame(m1$cluster.center), outnamecent, row.names = F, sep = "\t", quote = F)
return(X)
}
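# Illustrative call (hedged sketch, not from the original project): `expr` is an assumed
# samples-by-genes expression matrix and "results/" an assumed output directory.
# coords <- CancerMap(expr, name = "demo", VAR = 15, BW = 0.9, PATH_OUTPUT = "results/")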
|
/Example_useCases/useCase3/Cancermap.R
|
no_license
|
systemsgenomics/t-sne.cancermaps
|
R
| false | false | 2,063 |
r
|
# Function for creating the cancer map.
# Input parameters:
# data: data matrix where rows represent samples and columns represent features.
# name: Parameter used in the naming of the output files.
# VAR: percentage of most variable features to retain in the data matrix for t-SNE.
# BW: Bandwidth parameter for mean-shift clustering. The lower the bandwidth, the denser the clusters found.
# PATH_OUTPUT: Path where to save the output files.
CancerMap=function(data, name, VAR=NULL, BW=0.9, PATH_OUTPUT){
if(!is.null(VAR)){
    # Get the j% most variable genes (feature selection).
v = apply(data,2,var)
v = sort(v,decreasing=T)
v = names(v)
j = VAR
V = round(dim(data)[2] * j / 100)
v = v[1:V]
data = data[,colnames(data)%in%v]
outname=paste0(PATH_OUTPUT, "cancermap_", name, "_", VAR, "pct_genes_", "BH-SNE_mean-shift_BW", BW, ".txt")
outnamecent=paste0(PATH_OUTPUT, "cancermap_", name, "_", VAR, "pct_genes_", "BH-SNE_mean-shift_BW", BW, "_cluster_centroids.txt")
}else{
outname=paste0(PATH_OUTPUT, "cancermap_", name, "_", "BH-SNE_mean-shift_BW", BW, ".txt")
outnamecent=paste0(PATH_OUTPUT, "cancermap_", name, "_", "BH-SNE_mean-shift_BW", BW, "_cluster_centroids.txt")
}
# Run BH-SNE + mean-shift clustering.
set.seed(1) # Random number generator seed for reproducible results.
x = Rtsne(data,perplexity = 30, check_duplicates = F, pca = F, is_distance = F) # BH-SNE.
x = x$Y
h = BW # Bandwidth parameter for clustering (for subsets of Hemap data we used h = 1.5).
m1 = ms(x, h=h, scaled=F, thr=0.01, iter=500, plotms=0) # Mean-shift clustering.
X = data.frame(rownames(data), x, m1$cluster.label) # Summarize results to a data.frame.
colnames(X) = c("ID","x","y",paste0("cluster"))
# Write embedding coordinates and the clustering result to a file.
write.table(X, outname, quote = F, sep = "\t", row.names = F)
# Write the cluster centroids(modes) to a file.
write.table(data.frame(m1$cluster.center), outnamecent, row.names = F, sep = "\t", quote = F)
return(X)
}
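# Illustrative call (hedged sketch, not from the original project): `expr` is an assumed
# samples-by-genes expression matrix and "results/" an assumed output directory.
# coords <- CancerMap(expr, name = "demo", VAR = 15, BW = 0.9, PATH_OUTPUT = "results/")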
|
# Author: Robert J. Hijmans
# Date : September 2011
# Version 1.0
# Licence GPL v3
.adjacentUD <- function(x, cells, ngb, include) {
# ngb should be a matrix with
# one and only one cell with value 0 (the focal cell),
# at least one cell with value 1 (the adjacent cells)
# cells with other values are ignored (not considered adjacent)
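	# e.g. an illustrative (not from the original source) rook/4-neighbour template:
	#   ngb <- matrix(c(NA, 1, NA,
	#                    1, 0,  1,
	#                   NA, 1, NA), nrow = 3, byrow = TRUE)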
rs <- res(x)
rn <- raster(ngb)
center <- which(values(rn)==0)
if (include) {
ngb[center] <- 1
}
rc <- rowFromCell(rn, center)
cc <- colFromCell(rn, center)
xngb <- yngb <- ngb
xngb[] <- rep(1:ncol(ngb), each=nrow(ngb)) - cc
yngb[] <- rep(nrow(ngb):1, ncol(ngb)) - (nrow(ngb)-rc+1)
ngb[ngb != 1] <- NA
xngb <- na.omit(as.vector( xngb * rs[1] * ngb))
yngb <- na.omit(as.vector( yngb * rs[2] * ngb))
xy <- xyFromCell(x, cells)
X <- apply(xy[,1,drop=FALSE], 1, function(z) z + xngb )
Y <- apply(xy[,2,drop=FALSE], 1, function(z) z + yngb )
c(as.vector(X), as.vector(Y))
}
adjacent <- function(x, cells, directions=4, pairs=TRUE, target=NULL, sorted=FALSE, include=FALSE, id=FALSE) {
if (is.character(directions)) {
directions <- tolower(directions)
}
x <- raster(x)
r <- res(x)
xy <- xyFromCell(x, cells)
mat <- FALSE
if (is.matrix(directions)) {
stopifnot(length(which(directions==0)) == 1)
stopifnot(length(which(directions==1)) > 0)
d <- .adjacentUD(x, cells, directions, include)
directions <- sum(directions==1, na.rm=TRUE)
mat <- TRUE
} else if (directions==4) {
if (include) {
d <- c(xy[,1], xy[,1]-r[1], xy[,1]+r[1], xy[,1], xy[,1], xy[,2], xy[,2], xy[,2], xy[,2]+r[2], xy[,2]-r[2])
} else {
d <- c(xy[,1]-r[1], xy[,1]+r[1], xy[,1], xy[,1], xy[,2], xy[,2], xy[,2]+r[2], xy[,2]-r[2])
}
} else if (directions==8) {
if (include) {
d <- c(xy[,1], rep(xy[,1]-r[1], 3), rep(xy[,1]+r[1],3), xy[,1], xy[,1],
xy[,2], rep(c(xy[,2]+r[2], xy[,2], xy[,2]-r[2]), 2), xy[,2]+r[2], xy[,2]-r[2])
} else {
d <- c(rep(xy[,1]-r[1], 3), rep(xy[,1]+r[1],3), xy[,1], xy[,1],
rep(c(xy[,2]+r[2], xy[,2], xy[,2]-r[2]), 2), xy[,2]+r[2], xy[,2]-r[2])
}
} else if (directions==16) {
r2 <- r * 2
if (include) {
d <- c(xy[,1], rep(xy[,1]-r2[1], 2), rep(xy[,1]+r2[1], 2),
rep(xy[,1]-r[1], 5), rep(xy[,1]+r[1], 5),
xy[,1], xy[,1],
xy[,2], rep(c(xy[,2]+r[2], xy[,2]-r[2]), 2),
rep(c(xy[,2]+r2[2], xy[,2]+r[2], xy[,2], xy[,2]-r[2], xy[,2]-r2[2]), 2),
xy[,2]+r[2], xy[,2]-r[2])
} else {
d <- c(rep(xy[,1]-r2[1], 2), rep(xy[,1]+r2[1], 2),
rep(xy[,1]-r[1], 5), rep(xy[,1]+r[1], 5),
xy[,1], xy[,1],
rep(c(xy[,2]+r[2], xy[,2]-r[2]), 2),
rep(c(xy[,2]+r2[2], xy[,2]+r[2], xy[,2], xy[,2]-r[2], xy[,2]-r2[2]), 2),
xy[,2]+r[2], xy[,2]-r[2])
}
} else if (directions=='bishop') {
if (include) {
d <- c(xy[,1], rep(xy[,1]-r[1], 2), rep(xy[,1]+r[1],2), xy[,2], rep(c(xy[,2]+r[2], xy[,2]-r[2]), 2))
} else {
d <- c(rep(xy[,1]-r[1], 2), rep(xy[,1]+r[1],2), rep(c(xy[,2]+r[2], xy[,2]-r[2]), 2))
}
directions <- 4 # to make pairs
} else {
stop('directions should be one of: 4, 8, 16, "bishop", or a matrix')
}
if (include) directions <- directions + 1
d <- matrix(d, ncol=2)
if (.isGlobalLonLat(x)) {
# normalize longitude to -180..180
d[,1] <- (d[,1] + 180) %% 360 - 180
}
if (pairs) {
if (mat) {
cell <- rep(cells, each=directions)
} else {
cell <- rep(cells, directions)
}
if (id) {
if (mat) {
ID <- rep(1:length(cells), each=directions)
} else {
ID <- rep(1:length(cells), directions)
}
d <- na.omit(cbind(ID, cell, cellFromXY(x, d)))
attr(d, 'na.action') <- NULL
colnames(d) <- c('id', 'from', 'to')
if (! is.null(target)) {
d <- d[d[,3] %in% target, ]
}
} else {
d <- na.omit(cbind(cell, cellFromXY(x, d)))
attr(d, 'na.action') <- NULL
colnames(d) <- c('from', 'to')
if (! is.null(target)) {
d <- d[d[,2] %in% target, ]
}
}
if (sorted) {
d <- d[order(d[,1], d[,2]),]
}
} else {
d <- as.vector(unique(na.omit(cellFromXY(x, d))))
if (! is.null(target)) {
d <- intersect(d, target)
}
if (sorted) {
d <- sort(d)
}
}
d
}
|
/raster/R/adjacent.R
|
no_license
|
radfordneal/R-package-mods
|
R
| false | false | 4,311 |
r
|
# Author: Robert J. Hijmans
# Date : September 2011
# Version 1.0
# Licence GPL v3
.adjacentUD <- function(x, cells, ngb, include) {
# ngb should be a matrix with
# one and only one cell with value 0 (the focal cell),
# at least one cell with value 1 (the adjacent cells)
# cells with other values are ignored (not considered adjacent)
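	# e.g. an illustrative (not from the original source) rook/4-neighbour template:
	#   ngb <- matrix(c(NA, 1, NA,
	#                    1, 0,  1,
	#                   NA, 1, NA), nrow = 3, byrow = TRUE)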
rs <- res(x)
rn <- raster(ngb)
center <- which(values(rn)==0)
if (include) {
ngb[center] <- 1
}
rc <- rowFromCell(rn, center)
cc <- colFromCell(rn, center)
xngb <- yngb <- ngb
xngb[] <- rep(1:ncol(ngb), each=nrow(ngb)) - cc
yngb[] <- rep(nrow(ngb):1, ncol(ngb)) - (nrow(ngb)-rc+1)
ngb[ngb != 1] <- NA
xngb <- na.omit(as.vector( xngb * rs[1] * ngb))
yngb <- na.omit(as.vector( yngb * rs[2] * ngb))
xy <- xyFromCell(x, cells)
X <- apply(xy[,1,drop=FALSE], 1, function(z) z + xngb )
Y <- apply(xy[,2,drop=FALSE], 1, function(z) z + yngb )
c(as.vector(X), as.vector(Y))
}
adjacent <- function(x, cells, directions=4, pairs=TRUE, target=NULL, sorted=FALSE, include=FALSE, id=FALSE) {
if (is.character(directions)) {
directions <- tolower(directions)
}
x <- raster(x)
r <- res(x)
xy <- xyFromCell(x, cells)
mat <- FALSE
if (is.matrix(directions)) {
stopifnot(length(which(directions==0)) == 1)
stopifnot(length(which(directions==1)) > 0)
d <- .adjacentUD(x, cells, directions, include)
directions <- sum(directions==1, na.rm=TRUE)
mat <- TRUE
} else if (directions==4) {
if (include) {
d <- c(xy[,1], xy[,1]-r[1], xy[,1]+r[1], xy[,1], xy[,1], xy[,2], xy[,2], xy[,2], xy[,2]+r[2], xy[,2]-r[2])
} else {
d <- c(xy[,1]-r[1], xy[,1]+r[1], xy[,1], xy[,1], xy[,2], xy[,2], xy[,2]+r[2], xy[,2]-r[2])
}
} else if (directions==8) {
if (include) {
d <- c(xy[,1], rep(xy[,1]-r[1], 3), rep(xy[,1]+r[1],3), xy[,1], xy[,1],
xy[,2], rep(c(xy[,2]+r[2], xy[,2], xy[,2]-r[2]), 2), xy[,2]+r[2], xy[,2]-r[2])
} else {
d <- c(rep(xy[,1]-r[1], 3), rep(xy[,1]+r[1],3), xy[,1], xy[,1],
rep(c(xy[,2]+r[2], xy[,2], xy[,2]-r[2]), 2), xy[,2]+r[2], xy[,2]-r[2])
}
} else if (directions==16) {
r2 <- r * 2
if (include) {
d <- c(xy[,1], rep(xy[,1]-r2[1], 2), rep(xy[,1]+r2[1], 2),
rep(xy[,1]-r[1], 5), rep(xy[,1]+r[1], 5),
xy[,1], xy[,1],
xy[,2], rep(c(xy[,2]+r[2], xy[,2]-r[2]), 2),
rep(c(xy[,2]+r2[2], xy[,2]+r[2], xy[,2], xy[,2]-r[2], xy[,2]-r2[2]), 2),
xy[,2]+r[2], xy[,2]-r[2])
} else {
d <- c(rep(xy[,1]-r2[1], 2), rep(xy[,1]+r2[1], 2),
rep(xy[,1]-r[1], 5), rep(xy[,1]+r[1], 5),
xy[,1], xy[,1],
rep(c(xy[,2]+r[2], xy[,2]-r[2]), 2),
rep(c(xy[,2]+r2[2], xy[,2]+r[2], xy[,2], xy[,2]-r[2], xy[,2]-r2[2]), 2),
xy[,2]+r[2], xy[,2]-r[2])
}
} else if (directions=='bishop') {
if (include) {
d <- c(xy[,1], rep(xy[,1]-r[1], 2), rep(xy[,1]+r[1],2), xy[,2], rep(c(xy[,2]+r[2], xy[,2]-r[2]), 2))
} else {
d <- c(rep(xy[,1]-r[1], 2), rep(xy[,1]+r[1],2), rep(c(xy[,2]+r[2], xy[,2]-r[2]), 2))
}
directions <- 4 # to make pairs
} else {
stop('directions should be one of: 4, 8, 16, "bishop", or a matrix')
}
if (include) directions <- directions + 1
d <- matrix(d, ncol=2)
if (.isGlobalLonLat(x)) {
# normalize longitude to -180..180
d[,1] <- (d[,1] + 180) %% 360 - 180
}
if (pairs) {
if (mat) {
cell <- rep(cells, each=directions)
} else {
cell <- rep(cells, directions)
}
if (id) {
if (mat) {
ID <- rep(1:length(cells), each=directions)
} else {
ID <- rep(1:length(cells), directions)
}
d <- na.omit(cbind(ID, cell, cellFromXY(x, d)))
attr(d, 'na.action') <- NULL
colnames(d) <- c('id', 'from', 'to')
if (! is.null(target)) {
d <- d[d[,3] %in% target, ]
}
} else {
d <- na.omit(cbind(cell, cellFromXY(x, d)))
attr(d, 'na.action') <- NULL
colnames(d) <- c('from', 'to')
if (! is.null(target)) {
d <- d[d[,2] %in% target, ]
}
}
if (sorted) {
d <- d[order(d[,1], d[,2]),]
}
} else {
d <- as.vector(unique(na.omit(cellFromXY(x, d))))
if (! is.null(target)) {
d <- intersect(d, target)
}
if (sorted) {
d <- sort(d)
}
}
d
}
|
#################################### 3. Specificity:
#Extract named entities using Stanford NER tool.
#Specificity is the no. of specific entity names, quantitative values, times/dates
#all scaled by the total number of words in document.
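# e.g. (illustrative numbers): 100 extracted entities in a 2,000-word report -> specificity = 100/2000 = 0.05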
################################## load packages
library(stringr)
#library(reticulate)
#use_python("C:/ProgramData/Microsoft/Windows/Start Menu/Programs/Python 3.8")
#install.packages("spacyr")
library(spacyr)
#spacy_install(conda = "auto", version = "latest", lang_models = "en",
#python_version = "3.6", envname = "spacy_condaenv", pip = FALSE,
#python_path = NULL, prompt = TRUE)
spacy_initialize(model = 'en_core_web_sm')
##### load files
load("workspaces/CSR_documents_30samples.RData")
#### parse the first document
document <- spacy_parse(text_stack_sample[1,1])
names(document)
document_entity<- entity_extract(document, type = "all")
dim(document_entity)
head(document_entity,4)
############################################## parse all the documents
##the lapply will take about 2 mins 10 sec
#document<- lapply(text_stack_sample[,1],spacy_parse)
## the loop takes about 2 mins 10 sec for 30 files
document<-NULL
for (i in 1:nrow(text_stack_sample)){
print(i)
  if(text_stack_sample[i,1] != ""){
document[[i]]<-spacy_parse(text_stack_sample[i,1])
}
}
#warnings()
#In spacy_parse.character(text_stack_sample[i, 1]) :
#lemmatization may not work properly in model 'en_core_web_sm'
########################################### extract all the entities
document_entity<- NULL
for (i in 1:length(document)){
print(i)
document_entity[[i]]<- entity_extract(document[[i]], type = "all")
}
################################### Add length
text_stack_sample$Length<-str_count(text_stack_sample[,1], '\\w+')
################################## Add Entity Counts
text_stack_sample$EntityCount <- lapply(document_entity, nrow)
################################# calculate the specificity
text_stack_sample$Specificity <- unlist(text_stack_sample$EntityCount) / text_stack_sample$Length
#text_stack_sample$Specificity
#save(text_stack_sample, file = "workspaces/Specificity.RData")
|
/scripts/Specificity.R
|
no_license
|
chengze123/CSRtm
|
R
| false | false | 2,179 |
r
|
#################################### 3. Specificity:
#Extract named entities using Stanford NER tool.
#Specificity is the no. of specific entity names, quantitative values, times/dates
#all scaled by the total number of words in document.
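# e.g. (illustrative numbers): 100 extracted entities in a 2,000-word report -> specificity = 100/2000 = 0.05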
################################## load packages
library(stringr)
#library(reticulate)
#use_python("C:/ProgramData/Microsoft/Windows/Start Menu/Programs/Python 3.8")
#install.packages("spacyr")
library(spacyr)
#spacy_install(conda = "auto", version = "latest", lang_models = "en",
#python_version = "3.6", envname = "spacy_condaenv", pip = FALSE,
#python_path = NULL, prompt = TRUE)
spacy_initialize(model = 'en_core_web_sm')
##### load files
load("workspaces/CSR_documents_30samples.RData")
#### parse the first document
document <- spacy_parse(text_stack_sample[1,1])
names(document)
document_entity<- entity_extract(document, type = "all")
dim(document_entity)
head(document_entity,4)
############################################## parse all the documents
##the lapply will take about 2 mins 10 sec
#document<- lapply(text_stack_sample[,1],spacy_parse)
## the loop takes about 2 mins 10 sec for 30 files
document<-NULL
for (i in 1:nrow(text_stack_sample)){
print(i)
  if(text_stack_sample[i,1] != ""){
document[[i]]<-spacy_parse(text_stack_sample[i,1])
}
}
#warnings()
#In spacy_parse.character(text_stack_sample[i, 1]) :
#lemmatization may not work properly in model 'en_core_web_sm'
########################################### extract all the entities
document_entity<- NULL
for (i in 1:length(document)){
print(i)
document_entity[[i]]<- entity_extract(document[[i]], type = "all")
}
################################### Add length
text_stack_sample$Length<-str_count(text_stack_sample[,1], '\\w+')
################################## Add Entity Counts
text_stack_sample$EntityCount <- lapply(document_entity, nrow)
################################# calculate the specificity
text_stack_sample$Specificity <- unlist(text_stack_sample$EntityCount) / text_stack_sample$Length
#text_stack_sample$Specificity
#save(text_stack_sample, file = "workspaces/Specificity.RData")
|
library(RMySQL)
con <- dbConnect(MySQL(), user="bf98019d0486fa", password="58973b37", dbname="ad_2de5416a43df6e8", host="us-cdbr-iron-east-01.cleardb.net" )
dbListTables(con)
con <- dbConnect(MySQL(), user="bf98019d0486fa", password="58973b37", dbname="ad_2de5416a43df6e8", host="us-cdbr-iron-east-01.cleardb.net" )
q = 'DROP TABLE digitaalschouwen'
dbSendQuery(con , q)
q = 'CREATE TABLE digitaalschouwen (id VARCHAR(150), time VARCHAR(30), prediction INT , location_x VARCHAR(30), location_y VARCHAR(30), location_old_x VARCHAR(30), location_old_y VARCHAR(30), photo LONGTEXT )'
z = dbSendQuery(con , q)
#place photo in db
con <- dbConnect(MySQL(), user="bf98019d0486fa", password="58973b37", dbname="ad_2de5416a43df6e8", host="us-cdbr-iron-east-01.cleardb.net" )
txt <- base64enc::base64encode("z.jpg")
dbSendQuery(con, paste0("INSERT INTO digitaalschouwen (id, time, prediction, location_x, location_y, location_old_x, location_old_y, photo) VALUES ('1', '1', 1, '1', '1', '1', '1' ,'", txt , "')"))
dbDisconnect(con)
con <- dbConnect(MySQL(), user="bf98019d0486fa", password="58973b37", dbname="ad_2de5416a43df6e8", host="us-cdbr-iron-east-01.cleardb.net" )
q = 'SELECT location_x, location_y, id FROM digitaalschouwen'
z = dbSendQuery(con , q)
data = fetch(z, n=-1)
data
q = paste0("SELECT photo FROM digitaalschouwen WHERE id = '", p, "'" )
z = dbSendQuery(con , q)
data = fetch(z, n=-1)
dbDisconnect(con)
con <- dbConnect(MySQL(), user="bf98019d0486fa", password="58973b37", dbname="ad_2de5416a43df6e8", host="us-cdbr-iron-east-01.cleardb.net" )
q = paste0("SELECT * FROM digitaalschouwen" )
z = dbSendQuery(con , q)
data = fetch(z, n=-1)
dbDisconnect(con)
con <- dbConnect(MySQL(), user="bf98019d0486fa", password="58973b37", dbname="ad_2de5416a43df6e8", host="us-cdbr-iron-east-01.cleardb.net" )
q = 'select id FROM digitaalschouwen LIMIT 9 '
z = dbSendQuery(con , q)
ids = fetch(z, n=-1)
ids = ids[,1]
ids_last = ids[length(ids)]
ids = ids[-length(ids)]
ids = paste0('\'', ids, '\', ')
ids = paste(ids, collapse = ' ' )
ids = paste0(ids, ' \'', ids_last, '\'')
q = paste0('DELETE FROM digitaalschouwen WHERE id NOT IN (', ids ,')' )
z = dbSendQuery(con , q)
dbDisconnect(con)
|
/beheer_database.r
|
no_license
|
daanvandermaas/raspberry
|
R
| false | false | 2,225 |
r
|
library(RMySQL)
con <- dbConnect(MySQL(), user="bf98019d0486fa", password="58973b37", dbname="ad_2de5416a43df6e8", host="us-cdbr-iron-east-01.cleardb.net" )
dbListTables(con)
con <- dbConnect(MySQL(), user="bf98019d0486fa", password="58973b37", dbname="ad_2de5416a43df6e8", host="us-cdbr-iron-east-01.cleardb.net" )
q = 'DROP TABLE digitaalschouwen'
dbSendQuery(con , q)
q = 'CREATE TABLE digitaalschouwen (id VARCHAR(150), time VARCHAR(30), prediction INT , location_x VARCHAR(30), location_y VARCHAR(30), location_old_x VARCHAR(30), location_old_y VARCHAR(30), photo LONGTEXT )'
z = dbSendQuery(con , q)
#place photo in db
con <- dbConnect(MySQL(), user="bf98019d0486fa", password="58973b37", dbname="ad_2de5416a43df6e8", host="us-cdbr-iron-east-01.cleardb.net" )
txt <- base64enc::base64encode("z.jpg")
dbSendQuery(con, paste0("INSERT INTO digitaalschouwen (id, time, prediction, location_x, location_y, location_old_x, location_old_y, photo) VALUES ('1', '1', 1, '1', '1', '1', '1' ,'", txt , "')"))
dbDisconnect(con)
con <- dbConnect(MySQL(), user="bf98019d0486fa", password="58973b37", dbname="ad_2de5416a43df6e8", host="us-cdbr-iron-east-01.cleardb.net" )
q = 'SELECT location_x, location_y, id FROM digitaalschouwen'
z = dbSendQuery(con , q)
data = fetch(z, n=-1)
data
q = paste0("SELECT photo FROM digitaalschouwen WHERE id = '", p, "'" )
z = dbSendQuery(con , q)
data = fetch(z, n=-1)
dbDisconnect(con)
con <- dbConnect(MySQL(), user="bf98019d0486fa", password="58973b37", dbname="ad_2de5416a43df6e8", host="us-cdbr-iron-east-01.cleardb.net" )
q = paste0("SELECT * FROM digitaalschouwen" )
z = dbSendQuery(con , q)
data = fetch(z, n=-1)
dbDisconnect(con)
con <- dbConnect(MySQL(), user="bf98019d0486fa", password="58973b37", dbname="ad_2de5416a43df6e8", host="us-cdbr-iron-east-01.cleardb.net" )
q = 'select id FROM digitaalschouwen LIMIT 9 '
z = dbSendQuery(con , q)
ids = fetch(z, n=-1)
ids = ids[,1]
ids_last = ids[length(ids)]
ids = ids[-length(ids)]
ids = paste0('\'', ids, '\', ')
ids = paste(ids, collapse = ' ' )
ids = paste0(ids, ' \'', ids_last, '\'')
q = paste0('DELETE FROM digitaalschouwen WHERE id NOT IN (', ids ,')' )
z = dbSendQuery(con , q)
dbDisconnect(con)
|
# clears workspace:
rm(list=ls())
# sets working directories:
projdir <- getwd()
setwd("Models/Bayesian_Cognitive_Modeling/ParameterEstimation/Gaussian/SevenScientists/")
library(R2jags)
x <- c(-27.020,3.570,8.191,9.898,9.603,9.945,10.056)
n <- length(x)
data <- list("x", "n") # to be passed on to JAGS
myinits <- list(
list(mu = 0, lambda = rep(1,n)))
# parameters to be monitored:
parameters <- c("mu", "sigma")
# The following command calls JAGS with specific options.
# For a detailed description see the R2jags documentation.
samples <- jags(data, inits=myinits, parameters,
model.file ="SevenScientists.txt", n.chains=1, n.iter=1000,
n.burnin=1, n.thin=1, DIC=T)
# Now the values for the monitored parameters are in the "samples" object,
# ready for inspection.
setwd(projdir)
samples
|
/Models/Bayesian_Cognitive_Modeling/ParameterEstimation/Gaussian/SevenScientists/SevenScientists_jags.R
|
no_license
|
wmmurrah/cognitivemodeling
|
R
| false | false | 823 |
r
|
# clears workspace:
rm(list=ls())
# sets working directories:
projdir <- getwd()
setwd("Models/Bayesian_Cognitive_Modeling/ParameterEstimation/Gaussian/SevenScientists/")
library(R2jags)
x <- c(-27.020,3.570,8.191,9.898,9.603,9.945,10.056)
n <- length(x)
data <- list("x", "n") # to be passed on to JAGS
myinits <- list(
list(mu = 0, lambda = rep(1,n)))
# parameters to be monitored:
parameters <- c("mu", "sigma")
# The following command calls JAGS with specific options.
# For a detailed description see the R2jags documentation.
samples <- jags(data, inits=myinits, parameters,
model.file ="SevenScientists.txt", n.chains=1, n.iter=1000,
n.burnin=1, n.thin=1, DIC=T)
# Now the values for the monitored parameters are in the "samples" object,
# ready for inspection.
setwd(projdir)
samples
|
#' prep_data is a function that takes a data set of daily case / death / hospitalisation counts
#' and returns 7 additional variables:
#' avg_XXX_last7
#' total
#' worst7
#' last7
#' ratio
#' winning.
#' group <-- i.e. the grouping variable (e.g. 0-9 years) reordered by decreasing total.. do we really need this??
#'
#' @param data data frame
#' @param group name of the grouping variable, e.g. health_region or groupe_age
#' @param variable what is counted daily, e.g. cases, deaths or hos
#'
#' @return
#' @export
#' @importFrom dplyr enquo quo_name
#' @importFrom rlang sym syms
#' @examples
#' prep_data(cases_prov, province, variable = cases)
prep_data <- function(data, group, variable) {
variable_column <- enquo(variable) ## this has to be !!
variable_name <- quo_name(variable_column) ## its a string, dont !!
mean_name <- paste0("avg_", variable_name, "_last7")
mean_column <- sym(mean_name)
  ## for each date, compute the mean over the last 7 days
gaa <- data %>%
group_by({{ group }}) %>%
arrange(date_report) %>%
mutate(!!mean_name := (!!variable_column + lag(!!variable_column, 1) + lag(!!variable_column, 2) + lag(!!variable_column, 3) + lag(!!variable_column, 4) + lag(!!variable_column, 5) + lag(!!variable_column, 6)) / 7) %>%
ungroup()
  # for each group, compute the worst 7-day average and the latest 7-day average, and check whether we are currently at the worst point to date..
gaa1 <- gaa %>%
group_by({{ group }}) %>%
summarise(
total = sum(!!variable_column),
worst7 = max(!!mean_column, na.rm = TRUE),
last7 = max(!!mean_column * (date_report == max(date_report)), na.rm = TRUE),
ratio = last7 / worst7,
winning = factor(
case_when(
ratio < 0.33 ~ "Winning",
ratio < 0.67 ~ "Nearly there",
TRUE ~ "Needs action"
),
levels = c("Winning", "Nearly there", "Needs action")
)
) %>%
ungroup()
gaa %>%
left_join(gaa1)
}
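# Minimal usage sketch, kept as comments so the package file stays side-effect
# free. The toy columns below are assumptions that mirror the expected input:
# a grouping column, a date_report column and a daily count column.
# library(dplyr)
# toy <- dplyr::tibble(
#   province    = rep(c("QC", "ON"), each = 10),
#   date_report = rep(seq(as.Date("2020-03-01"), by = "day", length.out = 10), 2),
#   cases       = rpois(20, lambda = 5)
# )
# prep_data(toy, group = province, variable = cases)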
|
/R/prep_data.R
|
permissive
|
SimonCoulombe/covidtwitterbot
|
R
| false | false | 2,019 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stanMisc.r
\name{getParamCoef}
\alias{getParamCoef}
\title{getParamCoef}
\usage{
getParamCoef(
fit,
posterior.summary = c("mean", "median", "mode"),
what = c("both", "betas", "taus")
)
}
\arguments{
\item{fit}{a Bayesian model fit from \code{rmsb}}
\item{posterior.summary}{which summary statistic (Bayesian point estimate) to fetch}
\item{what}{specifies which coefficients to include. Default is all. Specify \code{what="betas"} to include only intercepts and betas if the model is a partial proportional odds model (i.e., exclude the tau parameters). Specify \code{what="taus"} to include only the tau parameters.}
}
\value{
vector of regression coefficients
}
\description{
Get a Bayesian Parameter Vector Summary
}
\details{
Retrieves posterior mean, median, or mode (if available)
}
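% Hypothetical usage sketch added for illustration (not from the package
% author); 'f' is assumed to be a Bayesian model fit from rmsb, e.g. a blrm() fit.
\examples{
\dontrun{
getParamCoef(f, posterior.summary = "median", what = "betas")
}
}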
\author{
Frank Harrell
}
|
/fuzzedpackages/rmsb/man/getParamCoef.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false | true | 904 |
rd
|
testlist <- list(x = structure(c(-8.37116099364334e+298, -8.37116099364334e+298, -Inf, 1.04450397576714e-158, 1.39137529961112e+93, 2.99474875729459e+238 ), .Dim = 3:2))
result <- do.call(bravo:::colSumSq_matrix,testlist)
str(result)
|
/bravo/inst/testfiles/colSumSq_matrix/libFuzzer_colSumSq_matrix/colSumSq_matrix_valgrind_files/1609959738-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 234 |
r
|
# CIBERSORT R script v1.04 (last updated 10-24-2016)
# Note: Signature matrix construction is not currently available; use java version for full functionality.
# Author: Aaron M. Newman, Stanford University (amnewman@stanford.edu)
# Requirements:
# R v3.0 or later. (dependencies below might not work properly with earlier versions)
# install.packages('e1071')
# install.packages('parallel')
# install.packages('preprocessCore')
# if preprocessCore is not available in the repositories you have selected, run the following:
# source("http://bioconductor.org/biocLite.R")
# biocLite("preprocessCore")
# Windows users using the R GUI may need to Run as Administrator to install or update packages.
# This script uses 3 parallel processes. Since Windows does not support forking, this script will run
# single-threaded in Windows.
#
# Usage:
# Navigate to directory containing R script
#
# In R:
# source('CIBERSORT.R')
# results <- CIBERSORT('sig_matrix_file.txt','mixture_file.txt', perm, QN, absolute, abs_method)
#
# Options:
# i) perm = No. permutations; set to >=100 to calculate p-values (default = 0)
# ii) QN = Quantile normalization of input mixture (default = TRUE)
# iii) absolute = Run CIBERSORT in absolute mode (default = FALSE)
# - note that cell subsets will be scaled by their absolute levels and will not be
# represented as fractions (to derive the default output, normalize absolute
# levels such that they sum to 1 for each mixture sample)
#                  - the sum of all cell subsets in each mixture sample will be added to the output
# ('Absolute score'). If LM22 is used, this score will capture total immune content.
# iv) abs_method = if absolute is set to TRUE, choose method: 'no.sumto1' or 'sig.score'
# - sig.score = for each mixture sample, define S as the median expression
# level of all genes in the signature matrix divided by the median expression
#                  level of all genes in the mixture. Multiply cell subset fractions by S.
# - no.sumto1 = remove sum to 1 constraint
#
# Input: signature matrix and mixture file, formatted as specified at http://cibersort.stanford.edu/tutorial.php
# Output: matrix object containing all results and tabular data written to disk 'CIBERSORT-Results.txt'
# License: http://cibersort.stanford.edu/CIBERSORT_License.txt
# source('CIBERSORT.R')
# results <- CIBERSORT('sig_matrix_file.txt','mixture_file.txt', perm, QN, absolute, abs_method)
#dependencies
#Core algorithm
CoreAlg <- function(X, y, absolute, abs_method){
#try different values of nu
svn_itor <- 3
res <- function(i){
if(i==1){nus <- 0.25}
if(i==2){nus <- 0.5}
if(i==3){nus <- 0.75}
model<-svm(X,y,type="nu-regression",kernel="linear",nu=nus,scale=F)
model
}
if(Sys.info()['sysname'] == 'Windows') out <- mclapply(1:svn_itor, res, mc.cores=1) else
out <- mclapply(1:svn_itor, res, mc.cores=svn_itor)
nusvm <- rep(0,svn_itor)
corrv <- rep(0,svn_itor)
#do cibersort
t <- 1
while(t <= svn_itor) {
weights = t(out[[t]]$coefs) %*% out[[t]]$SV
weights[which(weights<0)]<-0
w<-weights/sum(weights)
u <- sweep(X,MARGIN=2,w,'*')
k <- apply(u, 1, sum)
nusvm[t] <- sqrt((mean((k - y)^2)))
corrv[t] <- cor(k, y)
t <- t + 1
}
#pick best model
rmses <- nusvm
mn <- which.min(rmses)
model <- out[[mn]]
#get and normalize coefficients
q <- t(model$coefs) %*% model$SV
q[which(q<0)]<-0
if(!absolute || abs_method == 'sig.score') w <- (q/sum(q)) #relative space (returns fractions)
if(absolute && abs_method == 'no.sumto1') w <- q #absolute space (returns scores)
mix_rmse <- rmses[mn]
mix_r <- corrv[mn]
newList <- list("w" = w, "mix_rmse" = mix_rmse, "mix_r" = mix_r)
}
#do permutations
doPerm <- function(perm, X, Y, absolute, abs_method){
itor <- 1
Ylist <- as.list(data.matrix(Y))
dist <- matrix()
while(itor <= perm){
#print(itor)
#random mixture
yr <- as.numeric(Ylist[sample(length(Ylist),dim(X)[1])])
#standardize mixture
yr <- (yr - mean(yr)) / sd(yr)
#run CIBERSORT core algorithm
result <- CoreAlg(X, yr, absolute, abs_method)
mix_r <- result$mix_r
#store correlation
if(itor == 1) {dist <- mix_r}
else {dist <- rbind(dist, mix_r)}
itor <- itor + 1
}
newList <- list("dist" = dist)
}
#main function
CIBERSORT <- function(sig_matrix, mixture_file, perm=0, QN=TRUE, absolute=FALSE, abs_method='sig.score'){
library(e1071)
library(parallel)
library(preprocessCore)
if(absolute && abs_method != 'no.sumto1' && abs_method != 'sig.score') stop("abs_method must be set to either 'sig.score' or 'no.sumto1'")
#read in data
#X <- read.table(sig_matrix,header=T,sep="\t",row.names=1,check.names=F)
X <- sig_matrix
Y <- mixture_file
#Y <- mixture_file
#to prevent crashing on duplicated gene symbols, add unique numbers to identical names
# dups <- dim(Y)[1] - length(unique(Y[,1]))
# if(dups > 0) {
# warning(paste(dups," duplicated gene symbol(s) found in mixture file!",sep=""))
# rownames(Y) <- make.names(Y[,1], unique=TRUE)
# }else {rownames(Y) <- Y[,1]}
# Y <- Y[,-1]
X <- data.matrix(X)
Y <- data.matrix(Y)
#order
X <- X[order(rownames(X)),]
Y <- Y[order(rownames(Y)),]
P <- perm #number of permutations
#anti-log if max < 50 in mixture file
if(max(Y) < 50) {Y <- 2^Y}
#quantile normalization of mixture file
if(QN == TRUE){
tmpc <- colnames(Y)
tmpr <- rownames(Y)
Y <- normalize.quantiles(Y)
colnames(Y) <- tmpc
rownames(Y) <- tmpr
}
#store original mixtures
Yorig <- Y
Ymedian <- max(median(Yorig),1)
#intersect genes
Xgns <- row.names(X)
Ygns <- row.names(Y)
YintX <- Ygns %in% Xgns
Y <- Y[YintX,]
XintY <- Xgns %in% row.names(Y)
X <- X[XintY,]
#standardize sig matrix
X <- (X - mean(X)) / sd(as.vector(X))
#empirical null distribution of correlation coefficients
if(P > 0) {nulldist <- sort(doPerm(P, X, Y, absolute, abs_method)$dist)}
header <- c('Mixture',colnames(X),"P-value","Correlation","RMSE")
if(absolute) header <- c(header, paste('Absolute score (',abs_method,')',sep=""))
output <- matrix()
itor <- 1
mixtures <- dim(Y)[2]
pval <- 9999
#iterate through mixtures
while(itor <= mixtures){
y <- Y[,itor]
#standardize mixture
y <- (y - mean(y)) / sd(y)
#run SVR core algorithm
result <- CoreAlg(X, y, absolute, abs_method)
#get results
w <- result$w
mix_r <- result$mix_r
mix_rmse <- result$mix_rmse
if(absolute && abs_method == 'sig.score') {
w <- w * median(Y[,itor]) / Ymedian
}
#calculate p-value
if(P > 0) {pval <- 1 - (which.min(abs(nulldist - mix_r)) / length(nulldist))}
#print output
out <- c(colnames(Y)[itor],w,pval,mix_r,mix_rmse)
if(absolute) out <- c(out, sum(w))
if(itor == 1) {output <- out}
else {output <- rbind(output, out)}
itor <- itor + 1
}
#save results
write.table(rbind(header,output), file="CIBERSORT-Results.txt", sep="\t", row.names=F, col.names=F, quote=F)
#return matrix object containing all results
obj <- rbind(header,output)
obj <- obj[,-1]
obj <- obj[-1,]
obj <- matrix(as.numeric(unlist(obj)),nrow=nrow(obj))
rownames(obj) <- colnames(Y)
if(!absolute){colnames(obj) <- c(colnames(X),"P-value","Correlation","RMSE")}
else{colnames(obj) <- c(colnames(X),"P-value","Correlation","RMSE",paste('Absolute score (',abs_method,')',sep=""))}
obj
}
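# Minimal usage sketch for this modified version, which takes matrices rather
# than file paths. 'sig' and 'mix' are placeholder objects (not shipped with
# the repo): a genes-x-cell-types signature matrix and a genes-x-samples
# mixture matrix, both with gene symbols as rownames.
# res <- CIBERSORT(sig_matrix = sig, mixture_file = mix, perm = 100, QN = TRUE)
# head(res[, c("P-value", "Correlation", "RMSE")])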
|
/R/CIBERSORT_modified.R
|
no_license
|
zcslab/SSMD
|
R
| false | false | 8,206 |
r
|
#' A wrapper for a bunch of functions that calculate the contribution of an
#' individual walker to the sampling area. For arguments, see the individual
#' functions.
#' @author Roman Luštrik
walkerContribution <- function(num.walkers, sw, area, home.range, sap.poly, seed,
prob, sessions, weight.switch, .object, .num.boots, custom.walkers, SD,
sim.dist, work.dir, ...) {
# We first need to populate our world from which we will sample.
walk.walkers <- populateWorld(num.walkers = num.walkers, sap = sap.poly,
area = area, home.range = home.range, custom.walkers = custom.walkers)
# Sample walkers that come in contact with the sampling area with
# a certain probability in a number of sessions.
walk.sample <- sampleWorld(walk = walk.walkers, sap.poly = sap.poly,
sessions = sessions, prob = prob, SD = SD)
walk.contrib <- calculateContribution(walk.pair = walk.sample$walk.pair,
sap.poly = sap.poly, walks = walk.sample$sample, weight.switch = weight.switch,
..object = .object, ..num.boots = .num.boots, sim.dist = sim.dist, SD = SD,
work.dir = work.dir, seed = seed, ...)
# Construct output object
out <- list(
walkers = walk.walkers,
sample = walk.sample,
contribs = walk.contrib
)
return(out)
}
|
/walkerContribution.R
|
no_license
|
romunov/duhturat
|
R
| false | false | 1,484 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.file.R
\name{fdata}
\alias{fdata}
\title{format dataset}
\usage{
fdata(data, faS = NULL)
}
\arguments{
\item{data}{a data frame to be formatted.}
\item{faS}{an integer vector giving the column indices to be converted to factors, NULL(default).}
}
\value{
this returns the data frame with the chosen columns converted to factors.
}
\description{
\code{fdata} This function will format a dataset by converting variables to factors.
}
\details{
This function converts the columns given in \code{faS} to factors and returns the formatted data frame.
}
\examples{
library(datasets)
names(mtcars)
mtcars1<-fdata(mtcars,faS=c(2,8,9))
}
\references{
Yuanzhen Lin. R & ASReml-R Statistics. China Forestry Publishing House. 2016
AAFMM website:https://github.com/yzhlinscau/AAFMM
}
\seealso{
See Also as \code{\link{read.file}}, \code{\link{read.example}}
}
\author{
Yuanzhen Lin <yzhlinscau@163.com>
}
|
/man/fdata.Rd
|
no_license
|
yzhlinscau/AAFMM
|
R
| false | true | 917 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/R_dlfoo2_Methylation.R
\name{meFilterVMF}
\alias{meFilterVMF}
\title{VMF - Varying Methylated Features}
\usage{
meFilterVMF(
x,
projectLabel = NULL,
vmfQuantile = c(0.05, 0.95),
vmfBetaCut = 0.1,
naFilter = 0
)
}
\arguments{
\item{x}{Genoset with methylation beta values as assay data slot}
\item{projectLabel}{character vector; name of project}
\item{vmfBetaCut}{What beta cutoff to use for the quantile VMF variation cutoff}
\item{naFilter}{MAX ratio of NA values that are allowed per feature}
\item{vmfQuantile}{What quantiles to use for the VMF variance filter.}
}
\value{
genoSet object with filtered features (VMF)
}
\description{
Filter GenoSet for Varying Methylated Features
}
\examples{
\dontrun{
gs_rcc <- readRDS(file="/Volumes/MacPro2TB/RESOURCES/Methylation_Infinium/ProcessedData/gdac_rcc/gdac_rcc_867set_norm.hg38.rds")
x <- gs_rcc[,sample(1:ncol(gs_rcc), size=100, replace=F)]
}
}
\seealso{
Other genoset:
\code{\link{geoSeriesMatrixPdataPolish}()},
\code{\link{gsSinkGenosetDescription}()},
\code{\link{liftoverGRanges}()},
\code{\link{me2gexHeatmapRCC}()},
\code{\link{meAnalysisCorTableStats}()},
\code{\link{meAnalysisCorrGexRandom}()},
\code{\link{meAnalysisCorrGex}()},
\code{\link{meFilterBetaNa}()},
\code{\link{meGetGEOsoft_me450}()},
\code{\link{meHeatmapRCC}()},
\code{\link{meMergeCpGs}()},
\code{\link{meMergeGenoSetsBeta}()},
\code{\link{meNormalizeBeta}()},
\code{\link{meParseBeadStudio}()},
\code{\link{meParseGDACv2}()},
\code{\link{meParseIDAT}()},
\code{\link{meParseInhouseBeadStudio}()},
\code{\link{parseGeoSeriesMatrixSampleFile}()}
Other genomics:
\code{\link{chip_genePeaks2occupancyTab}()},
\code{\link{chip_peak2gene_maxValue}()},
\code{\link{chip_peak2gene}()},
\code{\link{expandRanges}()},
\code{\link{fromChr}()},
\code{\link{genomicPosition}()},
\code{\link{liftoverGRanges}()},
\code{\link{loadRefgene}()},
\code{\link{meAnalysisCorTableStats}()},
\code{\link{meAnalysisCorrGexRandom}()},
\code{\link{meAnalysisCorrGex}()},
\code{\link{meFilterBetaNa}()},
\code{\link{meMergeCpGs}()},
\code{\link{meMergeGenoSetsBeta}()},
\code{\link{meNormalizeBeta}()},
\code{\link{meParseBeadStudio}()},
\code{\link{meParseGDACv2}()},
\code{\link{meParseInhouseBeadStudio}()},
\code{\link{segmentPlotter}()},
\code{\link{toChr}()}
Other filter:
\code{\link{esetVarianceFilter}()},
\code{\link{meAnalysisCorTableStats}()},
\code{\link{meAnalysisCorrGexRandom}()},
\code{\link{meAnalysisCorrGex}()},
\code{\link{meFilterBetaNa}()},
\code{\link{meMergeCpGs}()}
Other methylation:
\code{\link{liftoverGRanges}()},
\code{\link{me2gexHeatmapRCC}()},
\code{\link{meAnalysisCorTableStats}()},
\code{\link{meAnalysisCorrGexRandom}()},
\code{\link{meAnalysisCorrGex}()},
\code{\link{meFilterBetaNa}()},
\code{\link{meGetGEOsoft_me450}()},
\code{\link{meHeatmapRCC}()},
\code{\link{meMergeCpGs}()},
\code{\link{meMergeGenoSetsBeta}()},
\code{\link{meNormalizeBeta}()},
\code{\link{meParseBeadStudio}()},
\code{\link{meParseGDACv2}()},
\code{\link{meParseIDAT}()},
\code{\link{meParseInhouseBeadStudio}()}
}
\concept{filter}
\concept{genomics}
\concept{genoset}
\concept{methylation}
|
/man/meFilterVMF.Rd
|
no_license
|
cirrina/dlfoo2
|
R
| false | true | 3,194 |
rd
|
variance.test.twosample.independent <- function(
g1
,g2
,alternative = c("two.sided","less","greater")
,conf.level = 0.95
,assume.normality = c("yes", "no")
) {
validate.htest.alternative(alternative = alternative)
if (assume.normality[1] == "yes") {
#F Test
sample.size.g1 <- length(g1)
sample.size.g2 <- length(g2)
sample.variance.g1<- var(g1)
sample.variance.g2<- var(g2)
variance.test.twosample.independent.simple(
sample.variance.g1
,sample.size.g1
,sample.variance.g2
,sample.size.g2
,alternative
,conf.level
)
} else {
#T Test on ADMn-1
g1.ADMn1 <- na.omit(dispersion.ADMn1(g1))
g2.ADMn1 <- na.omit(dispersion.ADMn1(g2))
mean.g1<-mean(g1.ADMn1)
mean.g2<-mean(g2.ADMn1)
var.g1<-var(g1.ADMn1)
var.g2<-var(g2.ADMn1)
sample.size.g1 <- length(g1.ADMn1)
sample.size.g2 <- length(g2.ADMn1)
retval <- t.test.twosample.independent.simple(sample.mean.g1 = mean.g1
,sample.variance.g1 = var.g1
,sample.size.g1 = sample.size.g1
,sample.mean.g2 = mean.g2
,sample.variance.g2 = var.g2
,sample.size.g2 = sample.size.g2
,null.hypothesis.difference = 0
,alternative = alternative
,assume.equal.variances = "yes"
,conf.level = conf.level
,var.test.details = F
)
retval$data.name <- "ADMn-1 values for samples"
retval$method <- "Two-Sample t Test for Equality of ADMn-1"
retval
}
}
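# Usage sketch (comments only): compare the spread of two independent samples;
# with assume.normality = "no" the comparison becomes a two-sample t test on
# the ADMn-1 dispersion values.
# set.seed(1)
# g1 <- rnorm(30, sd = 2)
# g2 <- rnorm(30, sd = 1)
# variance.test.twosample.independent(g1, g2, alternative = "two.sided",
#                                     assume.normality = "no")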
|
/R/variance.test.twosample.independent.R
|
permissive
|
burrm/lolcat
|
R
| false | false | 2,289 |
r
|
#####################################
## Willany Thayse 04/23/19 ##
#####################################
## https://bit.ly/2uAr4LU ##
#####################################
# -----------------------------------------------------------------
# Cuckoo Search (CS) algorithm by Xin-She Yang and Suash Deb #
# Programmed by Xin-She Yang at Cambridge University #
# Programming dates: Nov 2008 to June 2009 #
# Last revised: Dec 2009 (simplified version for demo only) #
# -----------------------------------------------------------------
# Papers -- Citation Details:
# 1) X.-S. Yang, S. Deb, Cuckoo search via Levy flights,
# in: Proc. of World Congress on Nature & Biologically Inspired
# Computing (NaBIC 2009), December 2009, India,
# IEEE Publications, USA, pp. 210-214 (2009).
# http://arxiv.org/PS_cache/arxiv/pdf/1003/1003.1594v1.pdf
# 2) X.-S. Yang, S. Deb, Engineering optimization by cuckoo search,
# Int. J. Mathematical Modelling and Numerical Optimisation,
# Vol. 1, No. 4, 330-343 (2010).
# http://arxiv.org/PS_cache/arxiv/pdf/1005/1005.2908v2.pdf
# ----------------------------------------------------------------#
# This demo program only implements a standard version of #
# Cuckoo Search (CS), as the Levy flights and generation of #
# new solutions may use slightly different methods. #
# The pseudo code was given sequentially (select a cuckoo etc), #
# but the implementation here uses Matlab's vector capability, #
# which results in neater/better codes and shorter running time. #
# This implementation is different and more efficient than the #
# the demo code provided in the book by
# "Yang X. S., Nature-Inspired Metaheuristic Algorithms,               #
# 2nd Edition, Luniver Press, (2010). " #
# --------------------------------------------------------------- #
# =============================================================== #
# Notes: #
# Different implementations may lead to slightly different #
# behavour and/or results, but there is nothing wrong with it, #
# behaviour and/or results, but there is nothing wrong with it,          #
# -----------------------------------------------------------------
cuckoo_search <- function (n = 25, maxIter = 10^5, pa = 0.25, Tol = 1.0e-5, nd = 2, lb = 0, ub = pi) {
# Max Iteration
# maxIter = 10^5
# # Number of nests (or different solutions)
# n <- 25
# # Discovery rate of alien eggs/solutions
# pa <- 0.25
# ## Change this if you want to get better results
# # Tolerance
# Tol <- 1.0e-5
# ## Simple bounds of the search domain
# # Lower bounds
# nd <- 2 #Dimension
Lb <- matrix(lb, 1, nd)
# Upper bounds
Ub <- matrix(ub, 1, nd)
# Random initial solutions
nest <- matrix(0, n, nd)
for (i in 1:n) {
nest[i,] <- Lb+(Ub-Lb)* runif(nd)
}
# Get the current best
fitness <- 10^10 * matrix(1, n, 1)
current <- get_best_nest(nest, nest, fitness)
fmin <- current$fmin
bestnest <- current$best
nest <- current$nest
fitness <- current$fitness
N_iter <- 0
N_iter_rand <- n
while (N_iter < maxIter) {
# Generate new solutions (but keep the current best)
new_nest <- get_cuckoos(nest,bestnest,Lb,Ub)
new_best <- get_best_nest(nest,new_nest,fitness)
fnew <- new_best$fmin
best <- new_best$best
nest <- new_best$nest
fitness <- new_best$fitness
# Update the counter
N_iter <- N_iter + n
# Discovery and randomization
new_nest <- empty_nests(nest,Lb,Ub,pa)
# Evaluate this set of solutions
new_best <- get_best_nest(nest,new_nest,fitness)
fnew <- new_best$fmin
best <- new_best$best
nest <- new_best$nest
fitness <- new_best$fitness
# Update the counter again
N_iter_rand <- N_iter * 2
# Find the best objective so far
if (fnew < fmin) {
fmin <- fnew
bestnest <- best
}
    cat('Iteration number:', N_iter, 'fitness:', fmin, 'Total number of iteration:', N_iter_rand, '\n')
} ## End of iterations
## Post-optimization processing
## Display all the nests
  cat('Number loop iterations=', N_iter, '\n')
  cat('Total number of iteration= ', N_iter_rand, '\n')
return(list('fmin' = fmin, 'bestnest' = bestnest))
}
# --------------- All subfunctions are list below ------------------
# Get cuckoos by random walk
get_cuckoos <- function (nest,best,Lb,Ub) {
## Levy flights
n <- dim(nest)[1]
# Levy exponent and coefficient
# For details, see equation (2.21), Page 16 (chapter 2) of the book
# X. S. Yang, Nature-Inspired Metaheuristic Algorithms, 2nd Edition, Luniver Press, (2010).
beta <- 3/2
sigma <- (gamma(1+beta)*sin(pi*beta/2)/(gamma((1+beta)/2)*beta*2^((beta-1)/2)))^(1/beta)
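  # For beta = 3/2 the constant above works out to sigma of about 0.6966.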
for (j in 1:n) {
s <- nest[j,]
size <- dim(nest)[2]
# This is a simple way of implementing Levy flights
# For standard random walks, use step=1;
## Levy flights by Mantegna's algorithm
    # Mantegna's algorithm uses Gaussian draws and step = u/|v|^(1/beta)
    u <- rnorm(size)*sigma
    v <- rnorm(size)
    step <- u/abs(v)^(1/beta)
    # In the next equation, the difference factor (s-best) means that
# when the solution is the best solution, it remains unchanged.
stepsize <- 0.01*step*(s-best)
    # Here the factor 0.01 comes from the fact that L/100 should be the typical
    # step size of walks/flights where L is the typical lengthscale;
    # otherwise, Levy flights may become too aggressive/efficient,
    # which makes new solutions (even) jump outside of the design domain
# (and thus wasting evaluations).
# Now the actual random walks or flights
s <- s + (stepsize*rnorm(size))
# Apply simple bounds/limits
nest[j,]=simplebounds(s,Lb,Ub)
}
return(nest)
}
## Find the current best nest
get_best_nest <- function (nest,newnest,fitness){
# Evaluating all new solutions
for (j in 1:dim(nest)[1]) {
fnew <- fobj(newnest[j,])
if (fnew <= fitness[j]) {
fitness[j] <- fnew
nest[j,] <- newnest[j,]
}
}
# Find the current best
fmin <- min(fitness)
  best <- nest[which.min(fitness), ]   # keep the full row (all nd coordinates) of the best nest
return(list('fmin' = fmin, 'best' = best, 'nest' = nest, 'fitness' = fitness))
}
## Replace some nests by constructing new solutions/nests
empty_nests <- function (nest,Lb,Ub,pa){
# A fraction of worse nests are discovered with a probability pa
# n <- dim(nest)[1,2]
n <- dim(nest)[1]
# Discovered or not -- a status vector
K <- matrix(runif(n*dim(nest)[2]), n, dim(nest)[2] ) > pa
# In the real world, if a cuckoo's egg is very similar to a host's eggs, then
# this cuckoo's egg is less likely to be discovered, thus the fitness should
# be related to the difference in solutions. Therefore, it is a good idea
# to do a random walk in a biased way with some random step sizes.
## New solution by biased/selective random walks
stepsize <- runif(1)*(nest[sample(n),]-nest[sample(n),])
new_nest <- nest+stepsize*K
#for (j in 1:dim(new_nest)[1]) {
# s <- new_nest
# new_nest = simplebounds(s,Lb,Ub)
#}
#
return(new_nest)
}
#### Application of simple constraints ####
simplebounds <- function (s,Lb,Ub) {
ns_tmp <- s
## Apply the lower bound
i <- ns_tmp < Lb
ns_tmp[i] <- Lb[i]
## Apply the upper bounds
j <- ns_tmp > Ub
ns_tmp[j] <- Ub[j]
# Update this new move
s <- ns_tmp
return(s)
}
## You can replace the following by your own functions
# A d-dimensional objective function
fobj <- function (u) {
  ## Objective: the d-dimensional Michalewicz function (see michal() below);
  ## swap in your own objective function here if needed.
return(michal(u))
}
michal <- function(xx, m=10)
{
##########################################################################
#
# MICHALEWICZ FUNCTION
#
# Authors: Sonja Surjanovic, Simon Fraser University
# Derek Bingham, Simon Fraser University
# Questions/Comments: Please email Derek Bingham at dbingham@stat.sfu.ca.
#
# Copyright 2013. Derek Bingham, Simon Fraser University.
#
# THERE IS NO WARRANTY, EXPRESS OR IMPLIED. WE DO NOT ASSUME ANY LIABILITY
# FOR THE USE OF THIS SOFTWARE. If software is modified to produce
# derivative works, such modified software should be clearly marked.
# Additionally, this program is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; version 2.0 of the License.
# Accordingly, this program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# For function details and reference information, see:
# http://www.sfu.ca/~ssurjano/
#
##########################################################################
#
# INPUTS:
#
# xx = c(x1, x2)
# m = constant (optional), with default value 10
#
##########################################################################
ii <- c(1:length(xx))
sum <- sum(sin(xx) * (sin(ii*xx^2/pi))^(2*m))
y <- -sum
return(y)
}
#cuckoo_search(n = 25, maxIter = 10^5, pa = 0.25, Tol = 1.0e-5, nd = 2, lb, ub)
#cuckoo_search(25, 10^5, 0.25, 1.0e-5, 2, lb, ub)
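# Quick usage sketch (small run for illustration; for nd = 2 the Michalewicz
# minimum is about -1.8013 near x = (2.20, 1.57)):
# out <- cuckoo_search(n = 15, maxIter = 5000, pa = 0.25, nd = 2, lb = 0, ub = pi)
# out$fmin
# out$bestnest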
|
/R/cuckoosearch.R
|
no_license
|
willanythayse/CuckooSearch
|
R
| false | false | 9,301 |
r
|
#' @name .ffdsingleVoxelFFBS
#' @title .ffdsingleVoxelFFBS
#' @description
#' this is an internal function
#' @references
#' \insertRef{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}
#'
#' \insertRef{cardona2021bayesdlmfmri}{BayesDLMfMRI}
#' @details
#' this is an internal function
#' @param posi.ffd the position of the voxel in the brain image.
#' @param covariates a data frame or matrix whose columns contain the covariates related to the expected BOLD response obtained from the experimental setup.
#' @param ffdc a 4D array (ffdc[i,j,k,t]) that contains the sequence of MRI images that are meant to be analyzed. (i,j,k) define the position of the observed voxel at time t.
#' @param m0 the constant prior mean value for the covariates parameters and common to all voxels within every neighborhood at t=0 (m=0 is the default value when no prior information is available). For the case of available prior information, m0 can be defined as a pXr matrix, where p is the number of columns in the covariates object and r is the cluster size.
#' @param Cova a positive constant that defines the prior variances for the covariates parameters at t=0 (Cova=100 is the default value when no prior information is available). For the case of available prior information, Cova0 can be defined as a pXp matrix, where p is the number of columns in the covariates object.
#' @param delta a discount factor related to the evolution variances. Recommended values between 0.85<delta<1. delta=1 will yield results similar to the classical general linear model.
#' @param S0 prior covariance structure among voxels within every cluster at t=0. S0=1 is the default value when no prior information is available and defines an rXr identity matrix. For the case of available prior information, S0 can be defined as an rXr matrix, where r is the common number of voxels in every cluster.
#' @param n0 a positive hyperparameter of the prior distribution for the covariance matrix S0 at t=0 (n=1 is the default value when no prior information is available). For the case of available prior information, n0 can be set as n0=np, where np is the number of MRI images in the pilot sample.
#' @param N1 is the number of images (2<N1<T) from the ffdc array employed in the model fitting.N1=NULL (or equivalently N1=T) is its default value, taking all the images in the ffdc array for the fitting process.
#' @param Nsimu1 is the number of simulated on-line trajectories related to the state parameters. These simulated curves are later employed to compute the posterior probability of voxel activation.
#' @param Cutpos1 a cutpoint time from where the on-line trajectories begin. This parameter value is related to an approximation from a t-student distribution to a normal distribution. Values equal to or greater than 30 are recommended (30<Cutpos1<T).
#' @param Min.vol helps to define a threshold for the voxels considered in
#' the analysis. For example, Min.vol = 0.10 means that all the voxels with values
#' below to max(ffdc)*Min.vol can be considered irrelevant and discarded from the analysis.
#' @param r1 a positive integer number that defines the distance from every voxel with its most distant neighbor. This value determines the size of the cluster. The users can set a range of different r values: r = 0, 1, 2, 3, 4, which leads to q = 1, 7, 19, 27, 33, where q is the size of the cluster.
#' @keywords internal
.ffdsingleVoxelFFBS <- function(posi.ffd, covariates, ffdc, m0, Cova, delta, S0, n0, N1, Nsimu1, Cutpos1, Min.vol, r1){
if(r1 == 0){
posi <- .distanceNeighbors (posi.refer = as.vector(posi.ffd), r1)
#BOLD RESPONSE SERIES IN THE CLUSTER RELATED TO posi
series.def <- sapply(1:(dim(posi)[1]), function(k){ffdc[posi[k,1], posi[k,2], posi[k,3], ]})
    #CHECKING THE THRESHOLD Min.vol FOR THE MAIN TS: JUST TEMPORAL SERIES ABOVE THE THRESHOLD, DISCARD TEMPORAL SERIES WITH NON-SIGNIFICANT SIGNAL
if(min(series.def[,1]) < Min.vol){
return(list(EvidenceJoint = rep(NA, dim(covariates)[2]), EvidenceMargin = rep(NA, dim(covariates)[2]), EvidenLTT = rep(NA, dim(covariates)[2])))}else{
series.def <- matrix((series.def - mean(series.def))/sd(series.def), ncol=1)
#PRIOR HYPERPARAMETERS FOR q1=1
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=1)
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
#DISCOUNT FACTORS MATRIX
delta1 <- sqrt(delta)
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))
res <- .Individual_Backwards_Sampling(ffd1 = as.matrix(series.def), Cova = as.matrix(covariates), m0In = m01, c0In = Cova1,
S0In = S01, beta0In = Beta1, nt0In = n0, NIn = N1, Nsimu = Nsimu1, CUTpos = Cutpos1)
return(list(EvidenceJoint = as.vector(res$Eviden_joint), EvidenceMargin = as.vector(res$Eviden_margin), EvidenLTT=as.vector(res$eviden_lt)))
}
}else{
    #THIS LINE RETURNS THE POSITIONS OF EACH VOXEL INSIDE THE CLUSTER GIVEN THE DISTANCE r1
posi1 <- .distanceNeighbors (posi.refer = as.vector(posi.ffd), r1)
aux.pos <- dim(ffdc)[1:3]
#GOING THROUGH EACH ROW AND CHECKING IF ANY POSITION IS OUTSIDE THE BOUNDS
row_sub1 <- apply(posi1, 1, function(row, x1){0 < row & row<=x1}, x1=aux.pos)
posi <- posi1[apply(t(row_sub1), 1, sum)==3, ]
#BOLD RESPONSE SERIES FOR THE CLUSTER RELATED TO posi
series <- sapply(1:(dim(posi)[1]), function(k){ffdc[posi[k,1], posi[k,2], posi[k,3], ]})
    #CHECKING THE THRESHOLD Min.vol FOR THE MAIN TS: JUST TEMPORAL SERIES ABOVE THE THRESHOLD, DISCARD TEMPORAL SERIES WITH NON-SIGNIFICANT SIGNAL
if(min(series[,1]) < Min.vol){return(list(EvidenceJoint = rep(NA, dim(covariates)[2]), EvidenceMargin = rep(NA, dim(covariates)[2]), EvidenLTT = rep(NA, dim(covariates)[2])))}else{
# IDENTIFYING AND REMOVING TEMPORAL SERIES INSIDE THE CLUSTER WITH ZERO VALUES
zero.series <- unique(which(series==0, arr.ind = TRUE)[,2])
if(length(zero.series)==0){series.def <- series}else{series.def <- series[,-(zero.series)]}
#CHECKING THE SIZE OF THE CLUSTER: q=1 or q>1
#is.vector(series.def)==TRUE THEN q=1 OTHERWISE q>1
if(is.vector(series.def)){
series.def <- matrix((series.def - mean(series.def))/sd(series.def), ncol=1)
#PRIOR HYPERPARAMETERS FOR q1=1
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=1)
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
#DISCOUNT FACTORS MATRIX
delta1 <- sqrt(delta)
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))}else{
series.def <- apply(series.def, 2, function(x){(x-mean(x))/sd(x)})
#PRIOR HYPERPARAMETERS FOR q1>1
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=dim(series.def)[2])
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
delta1 <- sqrt(delta)
#DISCOUNT FACTORS MATRIX
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))
}
res <- .Individual_Backwards_Sampling(ffd1 = as.matrix(series.def), Cova = as.matrix(covariates), m0In = m01, c0In = Cova1,
S0In = S01, beta0In = Beta1, nt0In = n0, NIn = N1, Nsimu = Nsimu1, CUTpos = Cutpos1)
#EVIDENCE OF ACTIVATION FOR A SINGLE VOXEL TAKING INTO ACCOUNT THE INFORMATION OF THE ENTIRE CLUSTER OF SIZE q
return(list(EvidenceJoint = as.vector(res$Eviden_joint), EvidenceMargin = as.vector(res$Eviden_margin), EvidenLTT=as.vector(res$eviden_lt)))
}
}
}
#END FUNCTION
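# Hypothetical call sketch (internal API; every object below is an
# illustrative placeholder, not data shipped with the package):
# .ffdsingleVoxelFFBS(posi.ffd = c(30, 30, 20), covariates = X_design,
#                     ffdc = fmri_array, m0 = 0, Cova = 100, delta = 0.95,
#                     S0 = 1, n0 = 1, N1 = dim(fmri_array)[4], Nsimu1 = 100,
#                     Cutpos1 = 30, Min.vol = 0.10, r1 = 1)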
|
/R/ffdSingleVoxelFFBS.R
|
no_license
|
JohnatanLAB/BayesDLMfMRI
|
R
| false | false | 7,799 |
r
|
#' @name .ffdsingleVoxelFFBS
#' @title .ffdsingleVoxelFFBS
#' @description
#' this is an internal function
#' @references
#' \insertRef{CARDONAJIMENEZ2021107297}{BayesDLMfMRI}
#'
#' \insertRef{cardona2021bayesdlmfmri}{BayesDLMfMRI}
#' @details
#' This is an internal function.
#' @param posi.ffd the position of the voxel in the brain image.
#' @param covariates a data frame or matrix whose columns contain the covariates related to the expected BOLD response obtained from the experimental setup.
#' @param ffdc a 4D array (ffdc[i,j,k,t]) that contains the sequence of MRI images that are meant to be analyzed. (i,j,k) define the position of the observed voxel at time t.
#' @param m0 the constant prior mean value for the covariates parameters and common to all voxels within every neighborhood at t=0 (m=0 is the default value when no prior information is available). For the case of available prior information, m0 can be defined as a pXr matrix, where p is the number of columns in the covariates object and r is the cluster size.
#' @param Cova a positive constant that defines the prior variances for the covariates parameters at t=0 (Cova=100 is the default value when no prior information is available). For the case of available prior information, Cova0 can be defined as a pXp matrix, where p is the number of columns in the covariates object.
#' @param delta a discount factor related to the evolution variances. Recommended values between 0.85<delta<1. delta=1 will yield results similar to the classical general linear model.
#' @param S0 prior covariance structure among voxels within every cluster at t=0. S0=1 is the default value when no prior information is available and defines an rXr identity matrix. For the case of available prior information, S0 can be defined as an rXr matrix, where r is the common number of voxels in every cluster.
#' @param n0 a positive hyperparameter of the prior distribution for the covariance matrix S0 at t=0 (n=1 is the default value when no prior information is available). For the case of available prior information, n0 can be set as n0=np, where np is the number of MRI images in the pilot sample.
#' @param N1 is the number of images (2<N1<T) from the ffdc array employed in the model fitting. N1=NULL (or equivalently N1=T) is its default value, taking all the images in the ffdc array for the fitting process.
#' @param Nsimu1 is the number of simulated on-line trajectories related to the state parameters. These simulated curves are later employed to compute the posterior probability of voxel activation.
#' @param Cutpos1 a cutpoint time from where the on-line trajectories begin. This parameter value is related to an approximation from a t-student distribution to a normal distribution. Values equal to or greater than 30 are recommended (30<Cutpos1<T).
#' @param Min.vol helps to define a threshold for the voxels considered in
#' the analysis. For example, Min.vol = 0.10 means that all the voxels with values
#' below max(ffdc)*Min.vol can be considered irrelevant and discarded from the analysis.
#' @param r1 a positive integer number that defines the distance from every voxel with its most distant neighbor. This value determines the size of the cluster. The users can set a range of different r values: r = 0, 1, 2, 3, 4, which leads to q = 1, 7, 19, 27, 33, where q is the size of the cluster.
#' @keywords internal
.ffdsingleVoxelFFBS <- function(posi.ffd, covariates, ffdc, m0, Cova, delta, S0, n0, N1, Nsimu1, Cutpos1, Min.vol, r1){
if(r1 == 0){
posi <- .distanceNeighbors (posi.refer = as.vector(posi.ffd), r1)
#BOLD RESPONSE SERIES IN THE CLUSTER RELATED TO posi
series.def <- sapply(1:(dim(posi)[1]), function(k){ffdc[posi[k,1], posi[k,2], posi[k,3], ]})
  #CHECKING THE THRESHOLD Min.vol FOR THE MAIN TS: KEEP ONLY TIME SERIES ABOVE THE THRESHOLD, DISCARD TIME SERIES WITH A NON-SIGNIFICANT SIGNAL
if(min(series.def[,1]) < Min.vol){
return(list(EvidenceJoint = rep(NA, dim(covariates)[2]), EvidenceMargin = rep(NA, dim(covariates)[2]), EvidenLTT = rep(NA, dim(covariates)[2])))}else{
series.def <- matrix((series.def - mean(series.def))/sd(series.def), ncol=1)
#PRIOR HYPERPARAMETERS FOR q1=1
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=1)
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
#DISCOUNT FACTORS MATRIX
delta1 <- sqrt(delta)
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))
res <- .Individual_Backwards_Sampling(ffd1 = as.matrix(series.def), Cova = as.matrix(covariates), m0In = m01, c0In = Cova1,
S0In = S01, beta0In = Beta1, nt0In = n0, NIn = N1, Nsimu = Nsimu1, CUTpos = Cutpos1)
return(list(EvidenceJoint = as.vector(res$Eviden_joint), EvidenceMargin = as.vector(res$Eviden_margin), EvidenLTT=as.vector(res$eviden_lt)))
}
}else{
  #THIS LINE RETURNS THE POSITIONS OF EACH VOXEL INSIDE THE CLUSTER GIVEN THE DISTANCE r1
posi1 <- .distanceNeighbors (posi.refer = as.vector(posi.ffd), r1)
aux.pos <- dim(ffdc)[1:3]
#GOING THROUGH EACH ROW AND CHECKING IF ANY POSITION IS OUTSIDE THE BOUNDS
row_sub1 <- apply(posi1, 1, function(row, x1){0 < row & row<=x1}, x1=aux.pos)
posi <- posi1[apply(t(row_sub1), 1, sum)==3, ]
#BOLD RESPONSE SERIES FOR THE CLUSTER RELATED TO posi
series <- sapply(1:(dim(posi)[1]), function(k){ffdc[posi[k,1], posi[k,2], posi[k,3], ]})
  #CHECKING THE THRESHOLD Min.vol FOR THE MAIN TS: KEEP ONLY TIME SERIES ABOVE THE THRESHOLD, DISCARD TIME SERIES WITH A NON-SIGNIFICANT SIGNAL
if(min(series[,1]) < Min.vol){return(list(EvidenceJoint = rep(NA, dim(covariates)[2]), EvidenceMargin = rep(NA, dim(covariates)[2]), EvidenLTT = rep(NA, dim(covariates)[2])))}else{
  # IDENTIFYING AND REMOVING TIME SERIES INSIDE THE CLUSTER WITH ZERO VALUES
zero.series <- unique(which(series==0, arr.ind = TRUE)[,2])
if(length(zero.series)==0){series.def <- series}else{series.def <- series[,-(zero.series)]}
#CHECKING THE SIZE OF THE CLUSTER: q=1 or q>1
#is.vector(series.def)==TRUE THEN q=1 OTHERWISE q>1
if(is.vector(series.def)){
series.def <- matrix((series.def - mean(series.def))/sd(series.def), ncol=1)
#PRIOR HYPERPARAMETERS FOR q1=1
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=1)
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
#DISCOUNT FACTORS MATRIX
delta1 <- sqrt(delta)
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))}else{
series.def <- apply(series.def, 2, function(x){(x-mean(x))/sd(x)})
#PRIOR HYPERPARAMETERS FOR q1>1
m01 <- matrix(rep(m0, dim(covariates)[2]*dim(series.def)[2]), ncol=dim(series.def)[2])
Cova1 <- diag(rep(Cova, dim(covariates)[2]))
S01 <- diag(rep(S0,dim(series.def)[2]))
delta1 <- sqrt(delta)
#DISCOUNT FACTORS MATRIX
Beta1 <- diag(1/c(rep(delta1, dim(covariates)[2])))
}
res <- .Individual_Backwards_Sampling(ffd1 = as.matrix(series.def), Cova = as.matrix(covariates), m0In = m01, c0In = Cova1,
S0In = S01, beta0In = Beta1, nt0In = n0, NIn = N1, Nsimu = Nsimu1, CUTpos = Cutpos1)
#EVIDENCE OF ACTIVATION FOR A SINGLE VOXEL TAKING INTO ACCOUNT THE INFORMATION OF THE ENTIRE CLUSTER OF SIZE q
return(list(EvidenceJoint = as.vector(res$Eviden_joint), EvidenceMargin = as.vector(res$Eviden_margin), EvidenLTT=as.vector(res$eviden_lt)))
}
}
}
#END FUNCTION
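# --------------------------------------------------------------------------
# Illustrative sketch only (not part of the package): a hypothetical call to
# this internal helper on a tiny synthetic 4D array. The array dimensions and
# parameter values below are invented for illustration; the heavy lifting is
# done by the compiled internal .Individual_Backwards_Sampling, so this is a
# shape/usage sketch rather than a tested example, and it is guarded so it
# never runs when the file is sourced.
if (FALSE) {
  set.seed(1)
  Tlen <- 60                                                # number of scans
  ffdc <- array(abs(rnorm(4 * 4 * 4 * Tlen)) + 1, dim = c(4, 4, 4, Tlen))
  covs <- cbind(stim = rbinom(Tlen, 1, 0.5),
                drift = as.numeric(scale(seq_len(Tlen))))   # expected BOLD covariates
  .ffdsingleVoxelFFBS(posi.ffd = c(2, 2, 2), covariates = covs, ffdc = ffdc,
                      m0 = 0, Cova = 100, delta = 0.95, S0 = 1, n0 = 1,
                      N1 = Tlen, Nsimu1 = 100, Cutpos1 = 30,
                      Min.vol = 0.1, r1 = 1)
}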
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exploits.r
\name{shodan_exploit_search_count}
\alias{shodan_exploit_search_count}
\title{Search for Exploits without Results}
\usage{
shodan_exploit_search_count(query = NULL, facets = NULL, page = 1)
}
\arguments{
\item{query}{Search query used to search the database of known exploits. See
\url{https://developer.shodan.io/api/exploits/rest} for all supported
search filters.}
\item{facets}{A comma-separated list of properties to get summary information on.
The following facets are currently supported: "\code{author}",
"\code{platform}", "\code{port}", "\code{source}" and "\code{type}.
If \code{length(facets) > 1)} this function will
concatenate the vector with commas to send to Shodan.}
\item{page}{The page number to page through results \code{100} at a time
(default: \code{1})}
}
\description{
This method behaves identically to \link{shodan_exploit_search} with the
difference that it doesn't return any results.
}
\references{
\url{https://developer.shodan.io/api/exploits/rest}
}
|
/man/shodan_exploit_search_count.Rd
|
no_license
|
hrbrmstr/shodan
|
R
| false | true | 1,074 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exploits.r
\name{shodan_exploit_search_count}
\alias{shodan_exploit_search_count}
\title{Search for Exploits without Results}
\usage{
shodan_exploit_search_count(query = NULL, facets = NULL, page = 1)
}
\arguments{
\item{query}{Search query used to search the database of known exploits. See
\url{https://developer.shodan.io/api/exploits/rest} for all supported
search filters.}
\item{facets}{A comma-separated list of properties to get summary information on.
The following facets are currently supported: "\code{author}",
"\code{platform}", "\code{port}", "\code{source}" and "\code{type}.
If \code{length(facets) > 1)} this function will
concatenate the vector with commas to send to Shodan.}
\item{page}{The page number to page through results \code{100} at a time
(default: \code{1})}
}
\description{
This method behaves identically to \link{shodan_exploit_search} with the
difference that it doesn't return any results.
}
\references{
\url{https://developer.shodan.io/api/exploits/rest}
}
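% Hypothetical usage sketch (not taken from the package documentation); it
% assumes a valid Shodan API key has already been configured for the session.
\examples{
\dontrun{
# count Apache-related exploits, faceted by type and platform
shodan_exploit_search_count(query = "apache", facets = c("type", "platform"))
}
}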
|
#' Extract a character column into multiple columns using regex
#'
#' @description
#' Given a regular expression with capturing groups, `extract()` turns each group
#' into a new column. If the groups don't match, or the input is `NA`, the output
#' will be `NA`. When you pass same name in the `into` argument it will merge
#' the groups together. Whilst passing `NA` in the `into` arg will drop the group
#' from the resulting `tidytable`
#'
#' @param .df A data.table or data.frame
#' @param col Column to extract from
#' @param into New column names to split into. A character vector.
#' @param regex A regular expression to extract the desired values. There
#' should be one group (defined by `()`) for each element of `into`
#' @param remove If TRUE, remove the input column from the output data.table
#' @param convert If TRUE, runs `type.convert()` on the resulting column.
#' Useful if the resulting column should be type integer/double.
#' @param ... Additional arguments passed on to methods.
#'
#' @export
#'
#' @examples
#' df <- data.table(x = c(NA, "a-b-1", "a-d-3", "b-c-2", "d-e-7"))
#' df %>% extract.(x, "A")
#' df %>% extract.(x, c("A", "B"), "([[:alnum:]]+)-([[:alnum:]]+)")
#'
#' # If no match, NA:
#' df %>% extract.(x, c("A", "B"), "([a-d]+)-([a-d]+)")
#' # drop columns by passing NA
#' df %>% extract.(x, c("A", NA, "B"), "([a-d]+)-([a-d]+)-(\\d+)")
#' # merge groups by passing same name
#' df %>% extract.(x, c("A", "B", "A"), "([a-d]+)-([a-d]+)-(\\d+)")
extract. <- function(.df, col, into, regex = "([[:alnum:]]+)",
remove = TRUE, convert = FALSE, ...) {
UseMethod("extract.")
}
#' @export
extract..data.frame <- function(.df, col, into, regex = "([[:alnum:]]+)",
remove = TRUE, convert = FALSE, ...) {
.df <- as_tidytable(.df)
.df <- shallow(.df)
if (missing(col)) abort("col is missing and must be supplied")
if (missing(into)) abort("into is missing and must be supplied")
col <- select_vec_idx(.df, {{ col }})
groups <- str_extract_groups(.df[[col]], regex, convert = convert)
if (length(groups) != length(into)) {
abort(
glue("`regex` pattern should define {length(into)} groups; {length(groups)} found.")
)
}
keep_group <- !is.na(into)
groups <- groups[keep_group]
into <- into[keep_group]
if(anyDuplicated(into) > 0){
groups <- lapply(split(groups, into), pmap_chr., paste0)
into <- names(groups)
}
if(convert) groups <- lapply(groups, type.convert, as.is = TRUE)
.df[, (into) := ..groups]
if (remove) .df[, (col) := NULL]
.df[]
}
str_extract_groups <- function(string, pattern, convert = FALSE){
groups <- regexpr(pattern, string, perl = TRUE)
start <- attr(groups, "capture.start")
end <- start + attr(groups, "capture.length") - 1L
if(is.null(start)) {
return(list())
}
# in order to force substr to return NA when No match is found
# set the start and end to NA
none_found <- start == -1
start[none_found] <- NA
end[none_found] <- NA
lapply(
seq_len(ncol(start)),
function(.x) substr(string, start[, .x], end[, .x])
)
}
globalVariables("..groups")
|
/R/extract.R
|
permissive
|
mjkarlsen/tidytable
|
R
| false | false | 3,151 |
r
|
#' Extract a character column into multiple columns using regex
#'
#' @description
#' Given a regular expression with capturing groups, `extract()` turns each group
#' into a new column. If the groups don't match, or the input is `NA`, the output
#' will be `NA`. When you pass same name in the `into` argument it will merge
#' the groups together. Whilst passing `NA` in the `into` arg will drop the group
#' from the resulting `tidytable`
#'
#' @param .df A data.table or data.frame
#' @param col Column to extract from
#' @param into New column names to split into. A character vector.
#' @param regex A regular expression to extract the desired values. There
#' should be one group (defined by `()`) for each element of `into`
#' @param remove If TRUE, remove the input column from the output data.table
#' @param convert If TRUE, runs `type.convert()` on the resulting column.
#' Useful if the resulting column should be type integer/double.
#' @param ... Additional arguments passed on to methods.
#'
#' @export
#'
#' @examples
#' df <- data.table(x = c(NA, "a-b-1", "a-d-3", "b-c-2", "d-e-7"))
#' df %>% extract.(x, "A")
#' df %>% extract.(x, c("A", "B"), "([[:alnum:]]+)-([[:alnum:]]+)")
#'
#' # If no match, NA:
#' df %>% extract.(x, c("A", "B"), "([a-d]+)-([a-d]+)")
#' # drop columns by passing NA
#' df %>% extract.(x, c("A", NA, "B"), "([a-d]+)-([a-d]+)-(\\d+)")
#' # merge groups by passing same name
#' df %>% extract.(x, c("A", "B", "A"), "([a-d]+)-([a-d]+)-(\\d+)")
extract. <- function(.df, col, into, regex = "([[:alnum:]]+)",
remove = TRUE, convert = FALSE, ...) {
UseMethod("extract.")
}
#' @export
extract..data.frame <- function(.df, col, into, regex = "([[:alnum:]]+)",
remove = TRUE, convert = FALSE, ...) {
.df <- as_tidytable(.df)
.df <- shallow(.df)
if (missing(col)) abort("col is missing and must be supplied")
if (missing(into)) abort("into is missing and must be supplied")
col <- select_vec_idx(.df, {{ col }})
groups <- str_extract_groups(.df[[col]], regex, convert = convert)
if (length(groups) != length(into)) {
abort(
glue("`regex` pattern should define {length(into)} groups; {length(groups)} found.")
)
}
keep_group <- !is.na(into)
groups <- groups[keep_group]
into <- into[keep_group]
if(anyDuplicated(into) > 0){
groups <- lapply(split(groups, into), pmap_chr., paste0)
into <- names(groups)
}
if(convert) groups <- lapply(groups, type.convert, as.is = TRUE)
.df[, (into) := ..groups]
if (remove) .df[, (col) := NULL]
.df[]
}
str_extract_groups <- function(string, pattern, convert = FALSE){
groups <- regexpr(pattern, string, perl = TRUE)
start <- attr(groups, "capture.start")
end <- start + attr(groups, "capture.length") - 1L
if(is.null(start)) {
return(list())
}
# in order to force substr to return NA when No match is found
# set the start and end to NA
none_found <- start == -1
start[none_found] <- NA
end[none_found] <- NA
lapply(
seq_len(ncol(start)),
function(.x) substr(string, start[, .x], end[, .x])
)
}
globalVariables("..groups")
|
###### function that counts the number of data points that were flagged
###### and the percentage of flagged data points.
outliers_summary <- function(in_ind, out_file) {
daily_dat_flagged <- readRDS(sc_retrieve(in_ind, remake_file = 'getters.yml')) %>%
summarize(n_flagged_obs = sum(flag %in% 'o'),
perc_flagged_obs = round((sum(flag %in% 'o')/sum(!is.na(mean_temp_degC))) * 100, 1),
n_flagged_sites = length(unique(site_id[flag %in% 'o'])),
perc_flagged_sites = round((length(unique(site_id[flag %in% 'o']))/length(unique(site_id[!is.na(mean_temp_degC)])))*100, 1))
#saving the summary data
readr::write_csv(daily_dat_flagged, out_file)
}
################### function to get summaries about the binned daily data ####
################# number and percentage of flagged temperature observations
################# and sites in a group/bin
summary_qaqc_daily_temp_site <- function(in_ind, out_file) {
  # reading the qaqc daily temperature data in,
  # to find the number of observations and the number flagged per group/bin.
  # also provides the number of sites in each group/bin.
  qaqc_flagged_temp <- readRDS(sc_retrieve(in_ind, remake_file = 'getters.yml')) %>%
group_by(site_type, lat_bins, long_bins, doy_bins) %>%
summarize(n_per_group = n(),
n_flagged = length(which(flag %in% 'o')),
prop_flagged = round(length(which(flag %in% 'o'))/n(), 4),
number_of_sites = length(unique(site_id))) %>%
ungroup()
  # getting summaries about the bins:
  # counting the flagged bins, the median number of observations per bin,
  # and the number of bins with 3 or fewer observations.
bins_summary <- qaqc_flagged_temp %>%
summarize(count_bins_flagged = length(which(n_flagged > 0)),
percent_bins_flagged = count_bins_flagged/n() * 100,
median_obs_per_bin = median(n_per_group),
percent_bins_w_3_less_obs = (length(which(n_per_group <= 3))/n()) * 100)
#saving the summary data
readr::write_csv(bins_summary, out_file)
}
|
/5_data_munge/src/qaqc_groups_summaries.R
|
no_license
|
USGS-R/2wp-temp-observations
|
R
| false | false | 2,057 |
r
|
###### function that counts the number of data points that were flagged
###### and the percentage of flagged data points.
outliers_summary <- function(in_ind, out_file) {
daily_dat_flagged <- readRDS(sc_retrieve(in_ind, remake_file = 'getters.yml')) %>%
summarize(n_flagged_obs = sum(flag %in% 'o'),
perc_flagged_obs = round((sum(flag %in% 'o')/sum(!is.na(mean_temp_degC))) * 100, 1),
n_flagged_sites = length(unique(site_id[flag %in% 'o'])),
perc_flagged_sites = round((length(unique(site_id[flag %in% 'o']))/length(unique(site_id[!is.na(mean_temp_degC)])))*100, 1))
#saving the summary data
readr::write_csv(daily_dat_flagged, out_file)
}
################### function to get summaries about the binned daily data ####
################# number and percentage of flagged temperature observations
################# and sites in a group/bin
summary_qaqc_daily_temp_site <- function(in_ind, out_file) {
  # reading the qaqc daily temperature data in,
  # to find the number of observations and the number flagged per group/bin.
  # also provides the number of sites in each group/bin.
  qaqc_flagged_temp <- readRDS(sc_retrieve(in_ind, remake_file = 'getters.yml')) %>%
group_by(site_type, lat_bins, long_bins, doy_bins) %>%
summarize(n_per_group = n(),
n_flagged = length(which(flag %in% 'o')),
prop_flagged = round(length(which(flag %in% 'o'))/n(), 4),
number_of_sites = length(unique(site_id))) %>%
ungroup()
  # getting summaries about the bins:
  # counting the flagged bins, the median number of observations per bin,
  # and the number of bins with 3 or fewer observations.
bins_summary <- qaqc_flagged_temp %>%
summarize(count_bins_flagged = length(which(n_flagged > 0)),
percent_bins_flagged = count_bins_flagged/n() * 100,
median_obs_per_bin = median(n_per_group),
percent_bins_w_3_less_obs = (length(which(n_per_group <= 3))/n()) * 100)
#saving the summary data
readr::write_csv(bins_summary, out_file)
}
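# Illustrative call sketch only: the indicator and output paths below are
# hypothetical, and both functions assume a scipiper setup providing
# sc_retrieve() and a getters.yml remake file. Guarded so it never runs when
# this file is sourced by the pipeline.
if (FALSE) {
  outliers_summary(in_ind = "5_data_munge/out/daily_flagged.rds.ind",
                   out_file = "5_data_munge/out/outlier_summary.csv")
  summary_qaqc_daily_temp_site(in_ind = "5_data_munge/out/daily_binned.rds.ind",
                               out_file = "5_data_munge/out/bin_summary.csv")
}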
|
rm(list=ls())
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
destfile <- "household_power_consumption.zip"
datafile <- "household_power_consumption.txt"
if(!file.exists(datafile)){
download.file(url = url, dest = destfile, method ="curl")
unzip(destfile)
}
#get header names
epc.header <- names(read.table(file = datafile, header = T, nrows = 1, sep = ";"))
# Read data between 2007-02-01 and 2007-02-02
epc <- read.table(file = datafile,col.names = epc.header,
sep = ";",
na.strings="?",
skip=66637,
nrows= 2880,
colClasses = c("character", "character", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric"))
epc$timestamp = as.POSIXct(paste(epc$Date, epc$Time), format="%d/%m/%Y %H:%M:%S")
###################################################################################
# Plot1
png(filename = "plot1.png", width = 480, height = 480)
hist(epc$Global_active_power, col="red",
main = "Global Active Power",
xlab = "Global Active Power (kilowatts)")
dev.off()
|
/plot1.R
|
no_license
|
mesontau/ExData_Plotting1
|
R
| false | false | 1,224 |
r
|
rm(list=ls())
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
destfile <- "household_power_consumption.zip"
datafile <- "household_power_consumption.txt"
if(!file.exists(datafile)){
download.file(url = url, dest = destfile, method ="curl")
unzip(destfile)
}
#get header names
epc.header <- names(read.table(file = datafile, header = T, nrows = 1, sep = ";"))
# Read data between 2007-02-01 and 2007-02-02
epc <- read.table(file = datafile,col.names = epc.header,
sep = ";",
na.strings="?",
skip=66637,
nrows= 2880,
colClasses = c("character", "character", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric"))
epc$timestamp = as.POSIXct(paste(epc$Date, epc$Time), format="%d/%m/%Y %H:%M:%S")
###################################################################################
# Plot1
png(filename = "plot1.png", width = 480, height = 480)
hist(epc$Global_active_power, col="red",
main = "Global Active Power",
xlab = "Global Active Power (kilowatts)")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/turkey.R
\docType{data}
\name{turkey}
\alias{turkey}
\title{Turkish league results 1994-2017}
\format{A data frame with 7004 rows and 12 variables:
\describe{
\item{Date}{Date of match}
\item{Season}{Season of match - refers to starting year}
\item{home}{Home team}
\item{visitor}{Visiting team}
\item{FT}{Full-time result}
\item{hgoal}{Goals scored by home team}
\item{vgoal}{Goals scored by visiting team}
\item{division}{Division}
\item{tier}{Tier of football pyramid: 1}
\item{totgoal}{Total goals in game}
\item{goaldif}{Goal difference in game (home goals - visitor goals)}
\item{result}{Result: H-Home Win, A-Away Win, D-Draw}
}}
\usage{
turkey
}
\description{
All results for Turkish soccer games in the top tier
from 1994/95 season to 2016/17 season. Doesn't include
playoff games.
}
\keyword{datasets}
|
/man/turkey.Rd
|
no_license
|
LeandroZipitria/engsoccerdata
|
R
| false | true | 913 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/turkey.R
\docType{data}
\name{turkey}
\alias{turkey}
\title{Turkish league results 1994-2017}
\format{A data frame with 7004 rows and 12 variables:
\describe{
\item{Date}{Date of match}
\item{Season}{Season of match - refers to starting year}
\item{home}{Home team}
\item{visitor}{Visiting team}
\item{FT}{Full-time result}
\item{hgoal}{Goals scored by home team}
\item{vgoal}{Goals scored by visiting team}
\item{division}{Division}
\item{tier}{Tier of football pyramid: 1}
\item{totgoal}{Total goals in game}
\item{goaldif}{Goal difference in game (home goals - visitor goals)}
\item{result}{Result: H-Home Win, A-Away Win, D-Draw}
}}
\usage{
turkey
}
\description{
All results for Turkish soccer games in the top tier
from 1994/95 season to 2016/17 season. Doesn't include
playoff games.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shinyGAM.R
\name{shinyGAMAnalysis}
\alias{shinyGAMAnalysis}
\title{Constructs a data frame with gene annotations and submits it to the Shiny GAM web-server}
\usage{
shinyGAMAnalysis(fData, fvarLabels)
}
\arguments{
\item{fData}{list of annotation columns}
\item{fvarLabels}{vector of column names}
}
\value{
URL for Shiny GAM
}
\description{
Constructs a data frame with gene annotations and submits it to the Shiny GAM web-server
}
|
/man/shinyGAMAnalysis.Rd
|
no_license
|
bhvbhushan/phantasus
|
R
| false | true | 504 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shinyGAM.R
\name{shinyGAMAnalysis}
\alias{shinyGAMAnalysis}
\title{Constructs a data frame with gene annotations and submits it to the Shiny GAM web-server}
\usage{
shinyGAMAnalysis(fData, fvarLabels)
}
\arguments{
\item{fData}{list of annotation columns}
\item{fvarLabels}{vector of column names}
}
\value{
URL for Shiny GAM
}
\description{
Constructs a data frame with gene annotations and submits it to the Shiny GAM web-server
}
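% Hypothetical sketch only: the structure of fData below is invented for
% illustration and may not match what the function expects, and the call
% requires network access to the Shiny GAM server, so it is not run.
\examples{
\dontrun{
url <- shinyGAMAnalysis(fData = list(c("TP53", "EGFR"), c(1.8, -2.3)),
                        fvarLabels = c("Gene symbol", "log2FC"))
browseURL(url)
}
}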
|
source('Globals.R')
# Function to check MC question
chkQuestion <- function(answer, correct, index) {
message <- if(answer == correct) 'Correct' else 'Onjuist'
return(message)}
# Function to check open question
chkQuestionOpen <- function(answer, correct) {
message <- if(grepl(correct, answer, perl=TRUE)) 'Correct' else if(answer != '') 'Onjuist' else 'Leeg'
return(message)}
# Function to calculate the score; requires a global vector 'tscore' holding
# the maximum score per question to be set up beforehand (updated via <<-)
scrQuestion <- function(result, i) {
vscore <- (result == 'Onjuist') * -1
tscore[i] <<- tscore[i] + vscore
return(as.numeric(max(0,tscore[i])))}
|
/Quizii_V7/Functions.R
|
no_license
|
witusj/Quizii
|
R
| false | false | 640 |
r
|
source('Globals.R')
# Function to check MC question
chkQuestion <- function(answer, correct, index) {
message <- if(answer == correct) 'Correct' else 'Onjuist'
return(message)}
# Function to check open question
chkQuestionOpen <- function(answer, correct) {
message <- if(grepl(correct, answer, perl=TRUE)) 'Correct' else if(answer != '') 'Onjuist' else 'Leeg'
return(message)}
# Function to calculate the score; requires a global vector 'tscore' holding
# the maximum score per question to be set up beforehand (updated via <<-)
scrQuestion <- function(result, i) {
vscore <- (result == 'Onjuist') * -1
tscore[i] <<- tscore[i] + vscore
return(as.numeric(max(0,tscore[i])))}
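# Illustrative sketch only (hypothetical quiz data; not required by the app).
# scrQuestion() assumes a global vector 'tscore' of maximum scores per question.
# Guarded so it never runs when the app sources this file.
if (FALSE) {
  tscore <- c(2, 2, 3)
  chkQuestion(answer = "B", correct = "A", index = 1)   # "Onjuist"
  chkQuestionOpen(answer = "42", correct = "^42$")      # "Correct"
  scrQuestion(result = "Onjuist", i = 1)                # tscore[1] drops to 1; returns 1
}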
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multivariance-functions.R
\name{sums.of.products}
\alias{sums.of.products}
\title{This is the function GC which is required for the computation of the finite sample variance for m and total multivariance}
\usage{
sums.of.products(a, b, c, type = "multi")
}
\description{
This is the function GC which is required for the computation of the finite sample variance for m and total multivariance
}
\keyword{internal}
|
/fuzzedpackages/multivariance/man/sums.of.products.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false | true | 504 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multivariance-functions.R
\name{sums.of.products}
\alias{sums.of.products}
\title{This is the function GC which is required for the computation of the finite sample variance for m and total multivariance}
\usage{
sums.of.products(a, b, c, type = "multi")
}
\description{
This is the function GC which is required for the computation of the finite sample variance for m and total multivariance
}
\keyword{internal}
|
#4 Controlling line pattern and width
VADeaths
male.deaths = VADeaths[,1]
female.deaths = VADeaths[,2]
x.points = seq(from = 52,to = 72, by = 5)
plot(x.points, male.deaths, type = "l", xlab = "age", ylab = "deaths", lty = 1)
lines(x.points, female.deaths, type = "l", lty = 2)
legend("topleft", legend=c("male","female"), title = "Rural", lty = 1:2)
plot(x.points, male.deaths, type = "l", xlab = "age", ylab = "deaths", lty = 1, lwd = 1)
lines(x.points, female.deaths, type = "l", lty = 2, lwd = 2)
legend("topleft", legend=c("male","female"), title = "Rural", lty = 1:2, lwd = 1:2)
|
/section2/src/5460OS_02_code04_pattern_draft1.R
|
no_license
|
rsanchezs/RGraphic
|
R
| false | false | 586 |
r
|
#4 Controlling line pattern and width
VADeaths
male.deaths = VADeaths[,1]
female.deaths = VADeaths[,2]
x.points = seq(from = 52,to = 72, by = 5)
plot(x.points, male.deaths, type = "l", xlab = "age", ylab = "deaths", lty = 1)
lines(x.points, female.deaths, type = "l", lty = 2)
legend("topleft", legend=c("male","female"), title = "Rural", lty = 1:2)
plot(x.points, male.deaths, type = "l", xlab = "age", ylab = "deaths", lty = 1, lwd = 1)
lines(x.points, female.deaths, type = "l", lty = 2, lwd = 2)
legend("topleft", legend=c("male","female"), title = "Rural", lty = 1:2, lwd = 1:2)
|
setwd("C:\\test\\sam")
large_table <- read.csv("TCGA_genes_FPKM-UQ_1.csv", header = TRUE)
GEO_6000 <- names(read.csv("C:\\test\\FinalData_GSM_gene_index_result.csv", header = TRUE))
GEO_6000_genes <- GEO_6000[2:length(GEO_6000)-2]
p <- GEO_6000_genes[-1]
TCGA_whole <- names(large_table)
TCGA_whole_gene <- TCGA_whole[-1]
q <- TCGA_whole_gene[1:(length(TCGA_whole_gene)-3)]
inter_6000 <- intersect(p,q)
names(inter_6000) <- "intersect"
write.csv(inter_6000, "inter_GEO_TCGA_geneset.csv",row.names = FALSE)
|
/User/kyulhee/Code/04.inteR_GEO(6000)_TCGA.R
|
no_license
|
Chan-Hee/BioDataLab
|
R
| false | false | 521 |
r
|
setwd("C:\\test\\sam")
large_table <- read.csv("TCGA_genes_FPKM-UQ_1.csv", header = TRUE)
GEO_6000 <- names(read.csv("C:\\test\\FinalData_GSM_gene_index_result.csv", header = TRUE))
GEO_6000_genes <- GEO_6000[2:length(GEO_6000)-2]
p <- GEO_6000_genes[-1]
TCGA_whole <- names(large_table)
TCGA_whole_gene <- TCGA_whole[-1]
q <- TCGA_whole_gene[1:(length(TCGA_whole_gene)-3)]
inter_6000 <- intersect(p,q)
names(inter_6000) <- "intersect"
write.csv(inter_6000, "inter_GEO_TCGA_geneset.csv",row.names = FALSE)
|
context("calc_cofactor for MTA method works correctly")
test_that("calc_cofactor", {
calculated_cofactor <- calc_cofactor(
matrix(c(2, sqrt(3), 1, sqrt(3), 2, sqrt(3), 1, sqrt(3), 2) / 2, nr = 3))
correct_cofactor <-
matrix(c(1, -sqrt(3), 1, -sqrt(3), 3, -sqrt(3), 1, -sqrt(3), 1) / 4, nr = 3)
expect_equal(round(calculated_cofactor, 5), correct_cofactor, tol = 1e-5)
})
|
/tests/testthat/test-calc_cofactor.R
|
no_license
|
okayaa/MTSYS
|
R
| false | false | 406 |
r
|
context("calc_cofactor for MTA method works correctly")
test_that("calc_cofactor", {
calculated_cofactor <- calc_cofactor(
matrix(c(2, sqrt(3), 1, sqrt(3), 2, sqrt(3), 1, sqrt(3), 2) / 2, nr = 3))
correct_cofactor <-
matrix(c(1, -sqrt(3), 1, -sqrt(3), 3, -sqrt(3), 1, -sqrt(3), 1) / 4, nr = 3)
expect_equal(round(calculated_cofactor, 5), correct_cofactor, tol = 1e-5)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aiplatform_functions.R
\name{projects.locations.specialistPools.delete}
\alias{projects.locations.specialistPools.delete}
\title{Deletes a SpecialistPool as well as all Specialists in the pool.}
\usage{
projects.locations.specialistPools.delete(name, force = NULL)
}
\arguments{
\item{name}{Required}
\item{force}{If set to true, any specialist managers in this SpecialistPool will also be deleted}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://cloud.google.com/vertex-ai/}{Google Documentation}
}
|
/googleaiplatformv1.auto/man/projects.locations.specialistPools.delete.Rd
|
no_license
|
justinjm/autoGoogleAPI
|
R
| false | true | 978 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aiplatform_functions.R
\name{projects.locations.specialistPools.delete}
\alias{projects.locations.specialistPools.delete}
\title{Deletes a SpecialistPool as well as all Specialists in the pool.}
\usage{
projects.locations.specialistPools.delete(name, force = NULL)
}
\arguments{
\item{name}{Required}
\item{force}{If set to true, any specialist managers in this SpecialistPool will also be deleted}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://cloud.google.com/vertex-ai/}{Google Documentation}
}
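% Hypothetical usage sketch (the resource name below is invented, not from the
% generated documentation). Deleting a pool is destructive and needs prior
% authentication, so the example is not run.
\examples{
\dontrun{
library(googleAuthR)
options(googleAuthR.scopes.selected = "https://www.googleapis.com/auth/cloud-platform")
gar_auth()
projects.locations.specialistPools.delete(
  name = "projects/my-project/locations/us-central1/specialistPools/1234567890",
  force = TRUE)
}
}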
|
tar_test("tar_target_raw() works", {
tar_option_set(envir = new.env(parent = baseenv()))
x <- tar_target_raw("x", expression(get_data()))
expect_silent(target_validate(x))
expect_equal(target_get_name(x), "x")
expect_equal(x$command$string, "expression(get_data())")
})
tar_test("tar_target_raw() gets priorities", {
x <- tar_target_raw("x", quote(get_data()), priority = 0.5)
expect_equal(x$settings$priority, 0.5)
})
tar_test("tar_target_raw() defines pattens correctly", {
x <- tar_target_raw("x", expression(1), pattern = expression(map(y)))
expect_silent(target_validate(x))
expect_equal(x$settings$growth, "map")
expect_equal(x$settings$dimensions, "y")
})
tar_test("tar_target_raw() receives options", {
on.exit(tar_option_set())
tar_option_set(format = "file")
x <- tar_target_raw("x", "y")
expect_equal(x$settings$format, "file")
})
|
/tests/testthat/test-tar_target_raw.R
|
permissive
|
Robinlovelace/targets
|
R
| false | false | 875 |
r
|
tar_test("tar_target_raw() works", {
tar_option_set(envir = new.env(parent = baseenv()))
x <- tar_target_raw("x", expression(get_data()))
expect_silent(target_validate(x))
expect_equal(target_get_name(x), "x")
expect_equal(x$command$string, "expression(get_data())")
})
tar_test("tar_target_raw() gets priorities", {
x <- tar_target_raw("x", quote(get_data()), priority = 0.5)
expect_equal(x$settings$priority, 0.5)
})
tar_test("tar_target_raw() defines pattens correctly", {
x <- tar_target_raw("x", expression(1), pattern = expression(map(y)))
expect_silent(target_validate(x))
expect_equal(x$settings$growth, "map")
expect_equal(x$settings$dimensions, "y")
})
tar_test("tar_target_raw() receives options", {
on.exit(tar_option_set())
tar_option_set(format = "file")
x <- tar_target_raw("x", "y")
expect_equal(x$settings$format, "file")
})
|
####################################################
### FUNCTIONS FOR FUNCTIONAL DEMOGRAPHIC MODELS ###
####################################################
fdm <- function(data, series=names(data$rate)[1], order=6, ages=data$age, max.age=100,
method=c("classical","M","rapca"), lambda=3, mean=TRUE, level=FALSE, transform=TRUE,...)
{
series <- tolower(series)
method <- match.arg(method)
data <- extract.ages(data,ages,combine.upper=FALSE)
if(max.age < max(ages))
data <- extract.ages(data,min(ages):max.age,combine.upper=TRUE)
ages <- data$age
if(data$type=="mortality")
yname <- "Mortality rate"
else if(data$type=="fertility")
yname <- "Fertility rate"
else if(data$type=="migration")
yname <- "Net migration"
else
stop("Not sure what to do with this data type")
if(transform)
{
mx <- BoxCox(get.series(data$rate,series),data$lambda)
mx[mx < -1e9] <- NA
}
else
mx <- get.series(data$rate,series)
data.fts <- fts(ages,mx,start=data$year[1],xname="Age",yname=yname)
fit <- ftsm(data.fts, order=order, method=method, mean=mean, level=level, lambda=lambda, ...)
# Adjust signs of output so that basis functions are primarily positive. (Easier to interpret.)
nx <- length(data$age)
for(i in 1+(1:order))
{
if(sum(fit$basis[,i] > 0) < nx/2)
{
fit$basis[,i] <- -fit$basis[,i]
fit$coeff[,i] <- -fit$coeff[,i]
}
}
if(is.element("obs.var",names(data)))
ov <- data$obs.var[[match(series,tolower(names(data$rate)))]]
else
ov <- mx*0
output <- list(label=data$label,age=fit$y$x,year=data$year,mx=mx,
pop=get.series(data$pop,series),
fitted=fit$fitted,
residuals=fit$residuals,
basis=fit$basis,coeff=fit$coeff,
mean.se=fit$mean.se,varprop=fit$varprop, weights=fit$wt,
obs.var=ov,
v=fit$v,type=data$type,
y=data.fts,
basis2 = fit$basis2,
coeff2 = fit$coeff2,
lambda = data$lambda,
call=match.call())
names(output)[4] <- series
class(output)=c("fdm","ftsm")
return(output)
}
print.fdm <- function(x,...)
{
cat("Functional demographic model\n")
cat(paste("\nCall:",deparse(x$call,200),"\n"))
cat(paste("\nRegion:"),x$label)
cat(paste("\nData type:"),x$type)
cat(paste("\nYears in fit:",min(x$year),"-",max(x$year)))
cat(paste("\nAges in fit:",min(x$age),"-",max(x$age),"\n"))
ord <- ncol(x$basis)-1
cat(paste("\nOrder:",ord))
if(ord>1)
{
cat("\nPercentage variation due to basis functions: ")
cat(paste(formatC(x$varprop*100,1,format="f"),"%",sep=""))
}
cat("\n")
}
summary.fdm <- function(object,...)
{
print(object)
junk <- fdmMISE(object[[4]],object$fitted$y,age=object$age,years=object$year)
junk1 <- cbind(junk$ME,junk$MSE,junk$MPE,junk$MAPE)
rownames(junk1) <- object$age
colnames(junk1) <- c("ME","MSE","MPE","MAPE")
junk2 <- cbind(junk$MIE,junk$MISE,junk$MIPE,junk$MIAPE)
rownames(junk2) = object$year
colnames(junk2) = c("IE","ISE","IPE","IAPE")
cat(paste("\nAverages across ages:\n"))
print(round(colMeans(junk1),5))
cat(paste("\nAverages across years:\n"))
print(round(colMeans(junk2),5))
cat("\n")
}
summary.fdmpr <- function(object, ...)
{
cat("*** PRODUCT MODEL ***\n")
summary(object$product)
cat("\n*** RATIO MODELS ***\n")
for(i in 1:length(object$ratio))
{
cat(paste("\n",toupper(names(object$ratio))[i],"\n",sep=""))
summary(object$ratio[[i]])
}
}
residuals.fdm <- function(object,...)
{
return(structure(list(x=object$year,y=object$age,z=t(object$residuals$y)),class="fmres"))
}
forecast.fdm <- function(object,h=50,level=80, jumpchoice=c("fit","actual"),
method="arima",warnings=FALSE,...)
{
jumpchoice <- match.arg(jumpchoice)
if(sum(object$weights < 0.1)/length(object$weights) > 0.2) # Probably exponential weights for fitting. Can be ignored for forecasting
object$weights[object$weights > 0] <- 1
oldwarn <- options()$warn
olderror <- options()$show.error.messages
options(show.error.messages=warnings,warn=ifelse(warnings,0,-1))
fcast <- forecast.ftsm(object,h=h,level=level,jumpchoice=jumpchoice,method=method,...)
options(show.error.messages=olderror,warn=oldwarn)
# Compute observational variance
# Update to deal with general transformations
# fred <- InvBoxCox(object[[4]],object$lambda)
# if(object$type != "migration")
# fred <- pmax(fred,0.000000001)
# if(object$type == "mortality") # Use binomial distribution
# s <- sqrt((1-fred)*fred^(2*object$lambda-1)/pmax(object$pop,1))
# else if(object$type=="fertility") # Use Poisson distribution
# s <- sqrt((fred/1000)^(2*object$lambda-1)/pmax(object$pop,1))
# else
# {
s <- sqrt(abs(object$obs.var))
# }
# browser()
ysd <- s*NA
for(i in 1:ncol(ysd))
ysd[,i] <- spline(object$age, s[,i], n=nrow(fcast$var$total))$y
ysd <- rowMeans(ysd)
# browser()
# Add observational variance to total variance
fcast$var$observ <- ysd^2
fcast$var$total <- sweep(fcast$var$total,1,fcast$var$observ,"+")
# Correct prediction intervals and reverse transform
fse <- qnorm(.5 + fcast$coeff[[1]]$level/200) * sqrt(fcast$var$total)
fcast$upper$y <- InvBoxCox(fcast$mean$y + fse,object$lambda)
fcast$lower$y <- InvBoxCox(fcast$mean$y - fse,object$lambda)
fcast$mean$y <- InvBoxCox(fcast$mean$y,object$lambda)
if(object$type != "migration")
{
fcast$mean$y <- pmax(fcast$mean$y,0.000000001)
fcast$lower$y <- pmax(fcast$lower$y,0.000000001)
fcast$lower$y[is.na(fcast$lower$y)] <- 0
fcast$upper$y <- pmax(fcast$upper$y,0.000000001)
}
# if(object$type != "migration")
# {
# fcast$mean$y[is.na(fcast$mean$y)] <- 0
# fcast$lower$y[is.na(fcast$upper$y)] <- 0
# fcast$upper$y[is.na(fcast$lower$y)] <- 0
# }
output <- list(
label=object$label,
age=object$age,
year=max(object$year)+(1:h),
rate=list(forecast=fcast$mean$y,
lower=fcast$lower$y,
upper=fcast$upper$y),
error=fcast$error,
fitted=fcast$fitted,
coeff=fcast$coeff,
coeff.error=fcast$coeff.error,
var=fcast$var,
model=fcast$model,
type=object$type,
lambda=object$lambda)
names(output$rate)[1] = names(object)[4]
output$call <- match.call()
return(structure(output,class=c("fmforecast","demogdata")))
}
print.fmforecast <- function(x,...)
{
cat(paste("Forecasts for",x$label))
cat(paste("\nData type:"),x$type,"\n\n")
cat(paste(" Call:"),deparse(x$call))
cat("\n Based on model:",deparse(x$model$call))
if(is.element("order",names(x$model)))
cat(paste("\n Model order:",x$model$order))
if(is.element("adjust",names(x$model)))
cat(paste("\n Adjustment method:",x$model$adjust))
if(is.element("jumpchoice",names(x$model)))
cat(paste("\n Jump-off method:",x$model$jumpchoice))
cat(paste("\n\n Years:",min(x$year),"-",max(x$year)))
cat(paste("\n Ages: ",min(x$age),"-",max(x$age),"\n"))
}
plot.fmforecast <- function(x,plot.type=c("function","component","variance"),vcol=1:4,mean.lab="Mean",
xlab2="Year",h=1,...)
{
plot.type=match.arg(plot.type)
if(plot.type=="function")
plot.demogdata(x,...)
else if(plot.type=="variance")
{
ylim = range(x$var$model[,h],x$var$mean,x$var$error,x$var$observ,na.rm=TRUE)
plot(x$age,x$var$model[,h],type="l",xlab="Age",ylab="Variance",col=vcol[1],ylim=ylim,...)
abline(0,0,lty=2)
lines(x$age,x$var$mean,col=vcol[2])
lines(x$age,x$var$error,col=vcol[3])
lines(x$age,x$var$observ,col=vcol[4])
}
else
{
if(is.element("ax",names(x$model))) #output from lca()
{
x$model$basis <- cbind(x$model$ax,x$model$bx)
x$modelcoeff <- cbind(rep(1,length(x$model$kt)),x$model$kt)
x$coeff <- list(NULL,x$kt.f)
colnames(x$model$basis) <- c("mean","bx")
if(x$model$adjust != "none")
xlab <- "kt (adjusted)"
else
xlab <- "kt"
plot.ftsf(x, "component", xlab2=xlab2, ylab2=xlab, mean.lab="ax", ...)
}
else
plot.ftsf(x, "component", xlab2=xlab2, mean.lab=mean.lab, ...)
}
}
models <- function(object, ...)
UseMethod("models")
models.fmforecast <- function(object, select=0, ...)
{
if(!select[1])
select <- 1:(length(object$coeff)-1)
for(i in select)
{
cat("\n-- Coefficient",i,"--\n")
print(object$coeff[[i+1]]$model$model)
}
}
models.fmforecast2 <- function(object, ...)
{
cat("\n************* PRODUCT MODEL *************\n")
models(object$product,...)
cat("\n\n\n************* RATIO MODELS *************")
for(i in 1:length(object$ratio))
{
cat("\n\n\n***********", toupper(names(object$ratio)[i]),"***********\n\n")
models(object$ratio[[i]],...)
}
}
### fdmMISE
## Inputs:
## actual = observed data
## estimate = fitted or forecast data
## These must be matrices of the same order
## Assumed that each column is one year
fdmMISE <- function(actual,estimate,age=NULL,years=NULL,neval=1000)
{
p <- nrow(actual)
n <- ncol(actual)
if(is.null(age))
age <- 0:(p-1)
if(is.null(years))
years <- 1:n
if(p != nrow(estimate) | n != ncol(estimate) | n != length(years))
stop("Dimensions of inputs don't match")
p <- length(age)
actual = actual[1:p,]
estimate = estimate[1:p,]
out <- ftsa:::MISE(fts(age,actual,start=years[1],frequency=1),fts(age,estimate,start=years[1],frequency=1),neval=neval)
out$age <- age
out$years <- years
return(out)
}
compare.demogdata <- function(data, forecast, series=names(forecast$rate)[1],
ages = data$age, max.age=min(max(data$age),max(forecast$age)), years=data$year,
interpolate=FALSE)
{
years <- years[sort(match(forecast$year,years))]
ages <- ages[ages <= max.age]
if(length(years)==0)
stop("No common years between data and forecasts")
subdata <- extract.ages(extract.years(data,years=years),ages)
forecast <- extract.ages(extract.years(forecast,years=years),ages)
ages <- subdata$age
mx <- get.series(subdata$rate,series)
fmx <- get.series(forecast$rate,series)
n <- nrow(mx)
log.mx <- BoxCox(mx,data$lambda)
if (interpolate)
{
if (sum(abs(mx) < 1e-09) > 0)
warning("Replacing zero values with estimates")
for (i in 1:n)
log.mx[i, ] <- BoxCox(fill.zero(mx[i, ]),data$lambda)
}
junka <- fdmMISE(log.mx,BoxCox(fmx,forecast$lambda),ages,years)
junkb <- fdmMISE(mx,fmx,ages,years)
junk1 <- cbind(junka$ME,junka$MAE,junka$MSE,junkb$MPE,junkb$MAPE)
rownames(junk1) <- ages
colnames(junk1) <- c("ME","MAE","MSE","MPE","MAPE")
junk2 <- cbind(junka$MIE,junka$MIAE,junka$MISE,junkb$MIPE,junkb$MIAPE)
rownames(junk2) = years
colnames(junk2) = c("IE","IAE","ISE","IPE","IAPE")
fred <- list(label=data$label,age=ages,year=years,error=junkb$error,terror=junka$error,
mean.error=junk1,int.error=ts(junk2,start=years[1],frequency=1))
names(fred)[4] <- paste(series,"error")
names(fred)[5] <- paste(series,"transformed error")
# Add life expectancies
if(data$type=="mortality")
{
actual.le <- life.expectancy(subdata,series=series)
forecast.le <- life.expectancy(forecast,series=series)
fred$life.expectancy <- cbind(actual.le,forecast.le,forecast.le-actual.le)
dimnames(fred$life.expectancy)[[2]] <- c("Actual","Forecast","Error")
}
return(structure(fred,class="errorfdm"))
}
fitted.fdm <- function(object,...)
{
object$fitted
}
print.errorfdm <- function(x,...)
{
cat(paste("Demographic comparison for",x$label,"\n"))
cat(paste(" Years: ",min(x$year),"-",max(x$year),"\n"))
cat(paste(" Ages: ",min(x$age),"-",max(x$age),"\n"))
fred1 <- apply(x$mean.error,2,mean)
fred2 <- apply(x$int.error,2,mean)
cat("\nTransformed data: Errors averaged across time and ages\n")
print(fred1[1:3])
cat("\nTransformed data: Errors averaged across time and integrated across ages\n")
print(fred2[1:3])
cat("\nRaw data: Percentage errors averaged across time and ages\n")
print(fred1[4:5])
cat("\nRaw data: Percentage errors averaged across time and integrated across ages\n")
print(fred2[4:5])
if(is.element("life.expectancy",names(x)))
{
cat("\nLife expectancy\n")
x$life.expectancy <- rbind(x$life.expectancy,colMeans(x$life.expectancy))
dimnames(x$life.expectancy)[[1]] <- c(paste(x$year),"Mean")
print(x$life.expectancy)
}
}
plot.errorfdm <- function(x,transform=TRUE,...)
{
i <- ifelse(transform,5,4)
plot(fts(x=x$age,y=x[[i]],start=x$year[1],frequency=1,xname="Age",yname=names(x)[i]),...)
}
isfe <- function(...) UseMethod("isfe")
isfe.demogdata <- function(data,series=names(data$rate)[1],max.order=N-3,N=10,h=5:10,
ages=data$age, max.age=100,
method=c("classical","M","rapca"), fmethod=c("arima", "ar", "arfima", "ets","ets.na","struct","rwdrift","rw"),
lambda=3, ...)
{
series <- tolower(series)
method <- match.arg(method)
fmethod <- match.arg(fmethod)
data <- extract.ages(data,ages,combine.upper=FALSE)
if(max.age < max(ages))
data <- extract.ages(data,min(ages):max.age,combine.upper=TRUE)
ages <- data$age
mx <- BoxCox(get.series(data$rate,series),data$lambda)
mx[mx < -1e9] <- NA
data.fts <- fts(ages,mx,start=data$year[1],xname="Age",yname="")
return(isfe(data.fts,max.order=max.order,N=N,h=h,method=method,fmethod=fmethod,lambda=lambda,...))
}
summary.fmforecast <- function(object,...)
{
print(object)
cat("\nERROR MEASURES BASED ON MORTALITY RATES\n")
printout(fdmMISE(object$model$y$y,exp(object$fitted$y),age=object$age,years=object$model$year))
cat("\nERROR MEASURES BASED ON LOG MORTALITY RATES\n")
printout(fdmMISE(log(object$model$y$y),object$fitted$y,age=object$age,years=object$model$year))
}
summary.fmforecast2 <- function(object,...)
{
if(is.element("product",names(object))) # Assume coherent model
{
summary(object$product)
for(i in 1:length(object$ratio))
summary(object$ratio[[i]])
}
else
{
for(i in 1:length(object))
summary(object[[i]])
}
}
|
/R/fdm.R
|
no_license
|
Vara15/demography
|
R
| false | false | 14,557 |
r
|
####################################################
### FUNCTIONS FOR FUNCTIONAL DEMOGRAPHIC MODELS ###
####################################################
fdm <- function(data, series=names(data$rate)[1], order=6, ages=data$age, max.age=100,
method=c("classical","M","rapca"), lambda=3, mean=TRUE, level=FALSE, transform=TRUE,...)
{
series <- tolower(series)
method <- match.arg(method)
data <- extract.ages(data,ages,combine.upper=FALSE)
if(max.age < max(ages))
data <- extract.ages(data,min(ages):max.age,combine.upper=TRUE)
ages <- data$age
if(data$type=="mortality")
yname <- "Mortality rate"
else if(data$type=="fertility")
yname <- "Fertility rate"
else if(data$type=="migration")
yname <- "Net migration"
else
stop("Not sure what to do with this data type")
if(transform)
{
mx <- BoxCox(get.series(data$rate,series),data$lambda)
mx[mx < -1e9] <- NA
}
else
mx <- get.series(data$rate,series)
data.fts <- fts(ages,mx,start=data$year[1],xname="Age",yname=yname)
fit <- ftsm(data.fts, order=order, method=method, mean=mean, level=level, lambda=lambda, ...)
# Adjust signs of output so that basis functions are primarily positive. (Easier to interpret.)
nx <- length(data$age)
for(i in 1+(1:order))
{
if(sum(fit$basis[,i] > 0) < nx/2)
{
fit$basis[,i] <- -fit$basis[,i]
fit$coeff[,i] <- -fit$coeff[,i]
}
}
if(is.element("obs.var",names(data)))
ov <- data$obs.var[[match(series,tolower(names(data$rate)))]]
else
ov <- mx*0
output <- list(label=data$label,age=fit$y$x,year=data$year,mx=mx,
pop=get.series(data$pop,series),
fitted=fit$fitted,
residuals=fit$residuals,
basis=fit$basis,coeff=fit$coeff,
mean.se=fit$mean.se,varprop=fit$varprop, weights=fit$wt,
obs.var=ov,
v=fit$v,type=data$type,
y=data.fts,
basis2 = fit$basis2,
coeff2 = fit$coeff2,
lambda = data$lambda,
call=match.call())
names(output)[4] <- series
class(output)=c("fdm","ftsm")
return(output)
}
print.fdm <- function(x,...)
{
cat("Functional demographic model\n")
cat(paste("\nCall:",deparse(x$call,200),"\n"))
cat(paste("\nRegion:"),x$label)
cat(paste("\nData type:"),x$type)
cat(paste("\nYears in fit:",min(x$year),"-",max(x$year)))
cat(paste("\nAges in fit:",min(x$age),"-",max(x$age),"\n"))
ord <- ncol(x$basis)-1
cat(paste("\nOrder:",ord))
if(ord>1)
{
cat("\nPercentage variation due to basis functions: ")
cat(paste(formatC(x$varprop*100,1,format="f"),"%",sep=""))
}
cat("\n")
}
summary.fdm <- function(object,...)
{
print(object)
junk <- fdmMISE(object[[4]],object$fitted$y,age=object$age,years=object$year)
junk1 <- cbind(junk$ME,junk$MSE,junk$MPE,junk$MAPE)
rownames(junk1) <- object$age
colnames(junk1) <- c("ME","MSE","MPE","MAPE")
junk2 <- cbind(junk$MIE,junk$MISE,junk$MIPE,junk$MIAPE)
rownames(junk2) = object$year
colnames(junk2) = c("IE","ISE","IPE","IAPE")
cat(paste("\nAverages across ages:\n"))
print(round(colMeans(junk1),5))
cat(paste("\nAverages across years:\n"))
print(round(colMeans(junk2),5))
cat("\n")
}
summary.fdmpr <- function(object, ...)
{
cat("*** PRODUCT MODEL ***\n")
summary(object$product)
cat("\n*** RATIO MODELS ***\n")
for(i in 1:length(object$ratio))
{
cat(paste("\n",toupper(names(object$ratio))[i],"\n",sep=""))
summary(object$ratio[[i]])
}
}
residuals.fdm <- function(object,...)
{
return(structure(list(x=object$year,y=object$age,z=t(object$residuals$y)),class="fmres"))
}
forecast.fdm <- function(object,h=50,level=80, jumpchoice=c("fit","actual"),
method="arima",warnings=FALSE,...)
{
jumpchoice <- match.arg(jumpchoice)
if(sum(object$weights < 0.1)/length(object$weights) > 0.2) # Probably exponential weights for fitting. Can be ignored for forecasting
object$weights[object$weights > 0] <- 1
oldwarn <- options()$warn
olderror <- options()$show.error.messages
options(show.error.messages=warnings,warn=ifelse(warnings,0,-1))
fcast <- forecast.ftsm(object,h=h,level=level,jumpchoice=jumpchoice,method=method,...)
options(show.error.messages=olderror,warn=oldwarn)
# Compute observational variance
# Update to deal with general transformations
# fred <- InvBoxCox(object[[4]],object$lambda)
# if(object$type != "migration")
# fred <- pmax(fred,0.000000001)
# if(object$type == "mortality") # Use binomial distribution
# s <- sqrt((1-fred)*fred^(2*object$lambda-1)/pmax(object$pop,1))
# else if(object$type=="fertility") # Use Poisson distribution
# s <- sqrt((fred/1000)^(2*object$lambda-1)/pmax(object$pop,1))
# else
# {
s <- sqrt(abs(object$obs.var))
# }
# browser()
ysd <- s*NA
for(i in 1:ncol(ysd))
ysd[,i] <- spline(object$age, s[,i], n=nrow(fcast$var$total))$y
ysd <- rowMeans(ysd)
# browser()
# Add observational variance to total variance
fcast$var$observ <- ysd^2
fcast$var$total <- sweep(fcast$var$total,1,fcast$var$observ,"+")
# Correct prediction intervals and reverse transform
fse <- qnorm(.5 + fcast$coeff[[1]]$level/200) * sqrt(fcast$var$total)
fcast$upper$y <- InvBoxCox(fcast$mean$y + fse,object$lambda)
fcast$lower$y <- InvBoxCox(fcast$mean$y - fse,object$lambda)
fcast$mean$y <- InvBoxCox(fcast$mean$y,object$lambda)
if(object$type != "migration")
{
fcast$mean$y <- pmax(fcast$mean$y,0.000000001)
fcast$lower$y <- pmax(fcast$lower$y,0.000000001)
fcast$lower$y[is.na(fcast$lower$y)] <- 0
fcast$upper$y <- pmax(fcast$upper$y,0.000000001)
}
# if(object$type != "migration")
# {
# fcast$mean$y[is.na(fcast$mean$y)] <- 0
# fcast$lower$y[is.na(fcast$upper$y)] <- 0
# fcast$upper$y[is.na(fcast$lower$y)] <- 0
# }
output <- list(
label=object$label,
age=object$age,
year=max(object$year)+(1:h),
rate=list(forecast=fcast$mean$y,
lower=fcast$lower$y,
upper=fcast$upper$y),
error=fcast$error,
fitted=fcast$fitted,
coeff=fcast$coeff,
coeff.error=fcast$coeff.error,
var=fcast$var,
model=fcast$model,
type=object$type,
lambda=object$lambda)
names(output$rate)[1] = names(object)[4]
output$call <- match.call()
return(structure(output,class=c("fmforecast","demogdata")))
}
print.fmforecast <- function(x,...)
{
cat(paste("Forecasts for",x$label))
cat(paste("\nData type:"),x$type,"\n\n")
cat(paste(" Call:"),deparse(x$call))
cat("\n Based on model:",deparse(x$model$call))
if(is.element("order",names(x$model)))
cat(paste("\n Model order:",x$model$order))
if(is.element("adjust",names(x$model)))
cat(paste("\n Adjustment method:",x$model$adjust))
if(is.element("jumpchoice",names(x$model)))
cat(paste("\n Jump-off method:",x$model$jumpchoice))
cat(paste("\n\n Years:",min(x$year),"-",max(x$year)))
cat(paste("\n Ages: ",min(x$age),"-",max(x$age),"\n"))
}
plot.fmforecast <- function(x,plot.type=c("function","component","variance"),vcol=1:4,mean.lab="Mean",
xlab2="Year",h=1,...)
{
plot.type=match.arg(plot.type)
if(plot.type=="function")
plot.demogdata(x,...)
else if(plot.type=="variance")
{
ylim = range(x$var$model[,h],x$var$mean,x$var$error,x$var$observ,na.rm=TRUE)
plot(x$age,x$var$model[,h],type="l",xlab="Age",ylab="Variance",col=vcol[1],ylim=ylim,...)
abline(0,0,lty=2)
lines(x$age,x$var$mean,col=vcol[2])
lines(x$age,x$var$error,col=vcol[3])
lines(x$age,x$var$observ,col=vcol[4])
}
else
{
if(is.element("ax",names(x$model))) #output from lca()
{
x$model$basis <- cbind(x$model$ax,x$model$bx)
x$modelcoeff <- cbind(rep(1,length(x$model$kt)),x$model$kt)
x$coeff <- list(NULL,x$kt.f)
colnames(x$model$basis) <- c("mean","bx")
if(x$model$adjust != "none")
xlab <- "kt (adjusted)"
else
xlab <- "kt"
plot.ftsf(x, "component", xlab2=xlab2, ylab2=xlab, mean.lab="ax", ...)
}
else
plot.ftsf(x, "component", xlab2=xlab2, mean.lab=mean.lab, ...)
}
}
models <- function(object, ...)
UseMethod("models")
models.fmforecast <- function(object, select=0, ...)
{
if(!select[1])
select <- 1:(length(object$coeff)-1)
for(i in select)
{
cat("\n-- Coefficient",i,"--\n")
print(object$coeff[[i+1]]$model$model)
}
}
models.fmforecast2 <- function(object, ...)
{
cat("\n************* PRODUCT MODEL *************\n")
models(object$product,...)
cat("\n\n\n************* RATIO MODELS *************")
for(i in 1:length(object$ratio))
{
cat("\n\n\n***********", toupper(names(object$ratio)[i]),"***********\n\n")
models(object$ratio[[i]],...)
}
}
### fdmMISE
## Inputs:
## actual = observed data
## estimate = fitted or forecast data
## These must be matrices of the same order
## Assumed that each column is one year
fdmMISE <- function(actual,estimate,age=NULL,years=NULL,neval=1000)
{
p <- nrow(actual)
n <- ncol(actual)
if(is.null(age))
age <- 0:(p-1)
if(is.null(years))
years <- 1:n
if(p != nrow(estimate) | n != ncol(estimate) | n != length(years))
stop("Dimensions of inputs don't match")
p <- length(age)
actual = actual[1:p,]
estimate = estimate[1:p,]
out <- ftsa:::MISE(fts(age,actual,start=years[1],frequency=1),fts(age,estimate,start=years[1],frequency=1),neval=neval)
out$age <- age
out$years <- years
return(out)
}
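# Illustrative sketch only (synthetic numbers, guarded so it is not executed):
# fdmMISE() expects two age-by-year matrices of the same dimension and reports
# error measures by age (ME, MSE, MAPE, ...) and integrated across ages by
# year (IE, ISE, IAPE, ...). It relies on ftsa:::MISE(), so the ftsa package
# must be available.
if (FALSE) {
  act <- matrix(log(seq(0.001, 0.02, length.out = 5 * 10)), nrow = 5)  # 5 ages x 10 years
  est <- act + rnorm(length(act), sd = 0.05)                           # noisy "fit"
  err <- fdmMISE(act, est, age = c(0, 20, 40, 60, 80), years = 2001:2010)
  round(colMeans(cbind(err$ME, err$MSE, err$MAPE)), 4)
}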
compare.demogdata <- function(data, forecast, series=names(forecast$rate)[1],
ages = data$age, max.age=min(max(data$age),max(forecast$age)), years=data$year,
interpolate=FALSE)
{
years <- years[sort(match(forecast$year,years))]
ages <- ages[ages <= max.age]
if(length(years)==0)
stop("No common years between data and forecasts")
subdata <- extract.ages(extract.years(data,years=years),ages)
forecast <- extract.ages(extract.years(forecast,years=years),ages)
ages <- subdata$age
mx <- get.series(subdata$rate,series)
fmx <- get.series(forecast$rate,series)
n <- nrow(mx)
log.mx <- BoxCox(mx,data$lambda)
if (interpolate)
{
if (sum(abs(mx) < 1e-09) > 0)
warning("Replacing zero values with estimates")
for (i in 1:n)
log.mx[i, ] <- BoxCox(fill.zero(mx[i, ]),data$lambda)
}
junka <- fdmMISE(log.mx,BoxCox(fmx,forecast$lambda),ages,years)
junkb <- fdmMISE(mx,fmx,ages,years)
junk1 <- cbind(junka$ME,junka$MAE,junka$MSE,junkb$MPE,junkb$MAPE)
rownames(junk1) <- ages
colnames(junk1) <- c("ME","MAE","MSE","MPE","MAPE")
junk2 <- cbind(junka$MIE,junka$MIAE,junka$MISE,junkb$MIPE,junkb$MIAPE)
rownames(junk2) = years
colnames(junk2) = c("IE","IAE","ISE","IPE","IAPE")
fred <- list(label=data$label,age=ages,year=years,error=junkb$error,terror=junka$error,
mean.error=junk1,int.error=ts(junk2,start=years[1],frequency=1))
names(fred)[4] <- paste(series,"error")
names(fred)[5] <- paste(series,"transformed error")
# Add life expectancies
if(data$type=="mortality")
{
actual.le <- life.expectancy(subdata,series=series)
forecast.le <- life.expectancy(forecast,series=series)
fred$life.expectancy <- cbind(actual.le,forecast.le,forecast.le-actual.le)
dimnames(fred$life.expectancy)[[2]] <- c("Actual","Forecast","Error")
}
return(structure(fred,class="errorfdm"))
}
fitted.fdm <- function(object,...)
{
object$fitted
}
print.errorfdm <- function(x,...)
{
cat(paste("Demographic comparison for",x$label,"\n"))
cat(paste(" Years: ",min(x$year),"-",max(x$year),"\n"))
cat(paste(" Ages: ",min(x$age),"-",max(x$age),"\n"))
fred1 <- apply(x$mean.error,2,mean)
fred2 <- apply(x$int.error,2,mean)
cat("\nTransformed data: Errors averaged across time and ages\n")
print(fred1[1:3])
cat("\nTransformed data: Errors averaged across time and integrated across ages\n")
print(fred2[1:3])
cat("\nRaw data: Percentage errors averaged across time and ages\n")
print(fred1[4:5])
cat("\nRaw data: Percentage errors averaged across time and integrated across ages\n")
print(fred2[4:5])
if(is.element("life.expectancy",names(x)))
{
cat("\nLife expectancy\n")
x$life.expectancy <- rbind(x$life.expectancy,colMeans(x$life.expectancy))
dimnames(x$life.expectancy)[[1]] <- c(paste(x$year),"Mean")
print(x$life.expectancy)
}
}
plot.errorfdm <- function(x,transform=TRUE,...)
{
i <- ifelse(transform,5,4)
plot(fts(x=x$age,y=x[[i]],start=x$year[1],frequency=1,xname="Age",yname=names(x)[i]),...)
}
isfe <- function(...) UseMethod("isfe")
isfe.demogdata <- function(data,series=names(data$rate)[1],max.order=N-3,N=10,h=5:10,
ages=data$age, max.age=100,
method=c("classical","M","rapca"), fmethod=c("arima", "ar", "arfima", "ets","ets.na","struct","rwdrift","rw"),
lambda=3, ...)
{
series <- tolower(series)
method <- match.arg(method)
fmethod <- match.arg(fmethod)
data <- extract.ages(data,ages,combine.upper=FALSE)
if(max.age < max(ages))
data <- extract.ages(data,min(ages):max.age,combine.upper=TRUE)
ages <- data$age
mx <- BoxCox(get.series(data$rate,series),data$lambda)
mx[mx < -1e9] <- NA
data.fts <- fts(ages,mx,start=data$year[1],xname="Age",yname="")
return(isfe(data.fts,max.order=max.order,N=N,h=h,method=method,fmethod=fmethod,lambda=lambda,...))
}
summary.fmforecast <- function(object,...)
{
print(object)
cat("\nERROR MEASURES BASED ON MORTALITY RATES\n")
printout(fdmMISE(object$model$y$y,exp(object$fitted$y),age=object$age,years=object$model$year))
cat("\nERROR MEASURES BASED ON LOG MORTALITY RATES\n")
printout(fdmMISE(log(object$model$y$y),object$fitted$y,age=object$age,years=object$model$year))
}
summary.fmforecast2 <- function(object,...)
{
if(is.element("product",names(object))) # Assume coherent model
{
summary(object$product)
for(i in 1:length(object$ratio))
summary(object$ratio[[i]])
}
else
{
for(i in 1:length(object))
summary(object[[i]])
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Phyloinfer.R
\name{smcp_sampling}
\alias{smcp_sampling}
\title{SMC' sampler--inference from local genealogies}
\usage{
smcp_sampling(data, nsamp, nburnin, grid, alpha = 0.001, beta = 0.001,
stepsz = 0.1, Nleap = 15, rand_leap = TRUE, scaling = 10,
tol = 1e-05)
}
\arguments{
\item{data}{a list containing sufficient statistics}
\item{nsamp}{integer specifying number of MCMC samples}
\item{nburnin}{integer specifying the number of burnin samples}
\item{grid}{a vector with the grid points}
\item{alpha}{hyperparameter of precision of BM prior}
\item{beta}{hyperparameter of precision of BM prior}
\item{stepsz}{numeric tuning parameter for Split Hamiltonian Monte Carlo}
\item{Nleap}{integer tuning parameter for Split Hamiltonian Monte Carlo}
\item{rand_leap}{tuning parameter for Split Hamiltonian Monte Carlo}
\item{scaling}{numeric re-scaling parameter}
\item{tol}{tolerance to detect difference}
}
\value{
A matrix of sim rows. Entry \eqn{x_{i,j}} has the \eqn{(n-j+1)}-th coalescent time of the \eqn{i}-th tree.
}
\description{
SMC' sampler--inference from local genealogies
}
|
/man/smcp_sampling.Rd
|
no_license
|
JuliaPalacios/phylodyn
|
R
| false | true | 1,156 |
rd
|
pollutantmean <- function(directory, pollutant, id = 1:332){
  # Read the monitor files for the requested ids and stack them row-wise.
  # File names are the monitor ids zero-padded to three digits (001.csv, ..., 332.csv).
  my_monitor_data <- data.frame()
  for (f in id){
    data.list <- read.csv(paste(directory, "\\", sprintf("%03d", as.integer(f)), ".csv", sep = ""))
    my_monitor_data <- rbind(my_monitor_data, data.list)
  }
  # Mean of the requested pollutant, ignoring missing values
  if(pollutant == 'sulfate'){
    r <- mean(my_monitor_data$sulfate[!is.na(my_monitor_data$sulfate)])
  }
  if(pollutant == 'nitrate'){
    r <- mean(my_monitor_data$nitrate[!is.na(my_monitor_data$nitrate)])
  }
  r
}
complete <- function(directory, id = 1:332){
  # For each monitor id, count the rows where both sulfate and nitrate are observed.
  resdata <- data.frame("id" = numeric(0), "nobs" = numeric(0))
  for (f in id){
    my_monitor_data <- read.csv(paste(directory, "\\", sprintf("%03d", as.integer(f)), ".csv", sep = ""))
    nobs <- sum(!is.na(my_monitor_data$sulfate) & !is.na(my_monitor_data$nitrate))
    resdata <- rbind(resdata, data.frame("id" = f, "nobs" = nobs))
  }
  resdata
}
corr <- function(directory, threshold = 0){
  # For every monitor file in `directory`, compute the correlation between sulfate and
  # nitrate on the completely observed rows, but only when the number of complete rows
  # reaches `threshold`. Returns a numeric vector of correlations (possibly empty).
  temp <- list.files(directory, pattern = "*.csv")
  myfiles <- lapply(paste(directory, "\\", temp, sep = ""), read.csv)
  r <- vector()
  for (file in myfiles) {
    ok <- !is.na(file$sulfate) & !is.na(file$nitrate)
    nobs <- sum(ok)
    if (nobs >= threshold && nobs > 0) {
      cc <- cor(file$sulfate[ok], file$nitrate[ok])
      if (!is.na(cc)) {
        r <- c(r, cc)
      }
    }
  }
  r
}
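## Example usage (commented out). The "specdata" directory name is an assumption: any
## folder of monitor files named 001.csv ... 332.csv with sulfate/nitrate columns works.
# pollutantmean("specdata", "sulfate", 1:10)
# complete("specdata", c(2, 4, 8, 10, 12))
# cr <- corr("specdata", threshold = 150)
# summary(cr)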
|
/02-02/airquality.R
|
no_license
|
JoaquinIbar/datasciencecoursera
|
R
| false | false | 2,517 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/base.R
\name{SARS}
\alias{SARS}
\title{SARS Object}
\usage{
SARS(states, actions, rewards, states_next, ids = NA)
}
\arguments{
\item{states}{a numeric matrix for states, each row for each time step.}
\item{actions}{a numeric matrix for actions.}
\item{rewards}{a numeric column vector for rewards.}
\item{states_next}{a numeric matrix for next states.}
\item{ids}{a numeric column vector for ids.}
}
\value{
a SARS object (\code{class = "SARS"})
}
\description{
The function \code{SARS()} creates a SARS object for discrete-time Markov Decision
Process (MDP) data.
}
\details{
SARS stands for \eqn{S} (state), \eqn{A} (action), \eqn{R} (reward), and
\eqn{S'} (next state), the basic unit of an MDP.
SARS objects are designed to store more than one unit. A typical use case is
an MDP trajectory of the form
\deqn{S_1, A_1, R_1, S_2, A_2, R_2, \ldots, S_n, A_n, R_n, S_{n+1}}
which can be rearranged into units \eqn{(S_1, A_1, R_1, S'_1=S_2)}, \eqn{(S_2, A_2, R_2, S'_2=S_3)},
and so on. Elements across all units are then stacked together into matrices of
\code{states}, \code{actions}, \code{rewards}, and \code{states_next}. For example, if each \eqn{S}
is a \eqn{p}-vector, then \code{states} is an \eqn{n}-by-\eqn{p} matrix.
This structure is not a compact representation for the trajectory use case, because
\code{states_next} duplicates \code{states} lagged by one time step. However,
it accommodates more than one trajectory: simply stack the matrices
from different trajectories together. This single-matrix representation provides
some computational advantages.
}
\note{
For 1D arguments (e.g. reward as a real number), a column vector (\eqn{n}-by-\eqn{1} matrix)
is expected.
}
\examples{
states <- matrix(c(1, 2, 3, 4), 2, 2)
actions <- matrix(c(1, 0), 2, 1)
rewards <- matrix(c(1, 2), 2, 1)
states_next <- matrix(c(2, 3, 4, 5), 2, 2)
ss <- SARS(states, actions, rewards, states_next)
ss
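# As described in Details, units from a second trajectory can be appended by
# row-binding the matrices before calling SARS() (illustration only, not run):
# SARS(rbind(states, states), rbind(actions, actions),
#      rbind(rewards, rewards), rbind(states_next, states_next))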
}
|
/man/sars.Rd
|
permissive
|
XiaoqiLu/PhD-Thesis
|
R
| false | true | 1,994 |
rd
|
context("Order component labels")
test_that("ordered_thetas", {
expect_true(isOrdered(MultiBatchModelExample))
theta(MultiBatchModelExample)[1, 3] <- -5
expect_false(isOrdered(MultiBatchModelExample))
bmodel <- sortComponentLabels(MultiBatchModelExample)
expect_true(isOrdered(bmodel))
})
|
/tests/testthat/test_sort_component_labels.R
|
no_license
|
aditharun/CNPBayes
|
R
| false | false | 300 |
r
|
context("Order component labels")
test_that("ordered_thetas", {
expect_true(isOrdered(MultiBatchModelExample))
theta(MultiBatchModelExample)[1, 3] <- -5
expect_false(isOrdered(MultiBatchModelExample))
bmodel <- sortComponentLabels(MultiBatchModelExample)
expect_true(isOrdered(bmodel))
})
|
library(igraph)
library(multiplex)
library(data.table)
set.seed(1234)
samplingrate = 0.8
numnodes = 1000
edgedensity = seq(.4,1,.1)
confidence = .95
epsilon = 2.0
#g <- erdos.renyi.game(1000, 1/1000)
########using file input graph##########
#args <- commandArgs(trailingOnly = TRUE)
#dat=read.csv(file("/Users/vmangipudi/vagrant-xdata/sample.txt"),header = FALSE,sep=" ")
#el=as.matrix(dat) # coerces the data into a two-column matrix format that igraph likes
#el[,1]=as.character(el[,1])
#el[,2]=as.character(el[,2])
#g=graph.edgelist(el,directed=FALSE) # turns the edgelist into a 'graph object'
############################################################
##########using random generated graph###############
compute_density = function(g){
gS <- g
bestaverage <- 0.0
bestiteration <- 0
iteration <- 0
bestgraph <- NULL
while(length(V(gS)) > 0){
grphavg = 2.0*gsize(gS)/length(V(gS))
#print(grphavg)
if(bestaverage <= grphavg){
bestaverage = grphavg
bestiteration = iteration
bestgraph <- gS
}
degS <- degree(gS, v=V(gS), mode='total')
dS <- min(degS)
indexdS <-which.min(degS)
#print(indexdS)
minS <- V(gS)[indexdS]
gS <- delete.vertices(gS,minS)
iteration = iteration + 1
}
#print(bestaverage)
#print(bestiteration)
return (bestaverage)
}
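## compute_density() above is the standard greedy peeling heuristic for the densest
## subgraph: repeatedly delete a minimum-degree vertex and keep the best average degree
## 2|E|/|V| seen along the way. A small illustration (commented out; `g_demo` is only
## an example object):
# g_demo <- erdos.renyi.game(100, 0.1)
# compute_density(g_demo)   # average degree of the densest subgraph found by peeling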
compute_delta <- function(edges,confidence) {
value <- (-1.0) * log(1-confidence) / log(edges)
return(value)
}
compute_C <- function(nodes, edges, delta,epsilon){
value <- (12.0 * nodes* (4.0 + delta) * log(edges)) / (epsilon * epsilon)
return (value)
}
compute_fraction <- function(nodes,edges,confidence,epsilon){
delta <- compute_delta(edges,confidence)
return (compute_C( nodes,edges, delta,epsilon)/edges)
}
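## Worked example (commented out) of the sampling fraction used in run_graph() below:
## delta = -log(1 - confidence)/log(m), C = 12*n*(4 + delta)*log(m)/epsilon^2, and the
## fraction of edges to keep is C/m. For n = 1000, m = 200000, confidence = 0.95 and
## epsilon = 2, delta is roughly 0.245.
# compute_delta(200000, 0.95)
# compute_fraction(1000, 200000, 0.95, 2)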
run_graph = function(iters ){
index <-0
dt <- data.frame(Iteration="",numNodes="",numEdges="",Samplingrate="",Actualdensity="",Sampleddensity="",ActualCommunitysize="",SampledCommunitysize="",Compare="",stringsAsFactors = F)
repeat {
dn <- sample(edgedensity,1)
g <- erdos.renyi.game(numnodes, dn,directed = F)
numedges <- gsize(g)
samplingrate = compute_fraction(numnodes,numedges,confidence,epsilon)
#print(paste0("Sampling rate:",samplingrate ))
real_density <- compute_density(g)
real_comm <- cluster_walktrap(g)
real_comm_size <- sizes(real_comm)
elist <- as_edgelist(g)
size = nrow(elist)
samplededges <- elist[sample(nrow(elist),size=samplingrate*size,replace=FALSE),]
g1=graph.edgelist(samplededges,directed=FALSE)
sampled_density <- compute_density(g1)
sampled_comm <- cluster_walktrap(g1)
sampled_comm_size <- sizes(sampled_comm)
nodesg <- V(g)
nodesg1 <- V(g1)
intersect <- nodesg %in% nodesg1
error_factor <- abs(sampled_density-real_density)/real_density
#print(error_factor)
index <- index+1
report_Str <- paste0("Iteration:", index, " Sampling rate:",samplingrate, " Nodes:",numnodes," Edges:",numedges," Actual density:",real_density," Sampled density:", sampled_density, " Real Community size:", dim(real_comm_size), " Sampled Community size:", dim(sampled_comm_size))
#print(paste0("Compare: ",compare(real_comm$membership[intersect],sampled_comm$membership,"rand")))
dt <- rbind(dt,c(iters,numnodes,numedges,samplingrate, real_density,sampled_density,dim(real_comm_size),dim(sampled_comm_size),compare(real_comm$membership[intersect],sampled_comm$membership,"rand")))
#print(report_Str)
iters<- iters-1
if(iters <0){
print(as.data.table(dt))
break;
}
}
}
|
/SyntheticGraph.R
|
no_license
|
alejandroniculescu/DenseSubgraphSampling
|
R
| false | false | 3,412 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PlotLengthFreq.R
\name{PlotLengthFreq}
\alias{PlotLengthFreq}
\title{Plot the annual length frequency distributions.}
\usage{
PlotLengthFreq(lfdata, SelBank, stat = "lf", bw = 5)
}
\arguments{
\item{lfdata}{is the length frequency data from the database. Columns should include AREA (3 for Grand Bank and 4 for Banquereau)
YEAR, SHELL_LEN (length in mm) and NUM (frequency of this measurement in a given year).}
\item{SelBank}{a numeric delimiter of the assessment region (1 = Banquereau and 2 = Grand Bank).}
\item{stat}{variable which defines which plot to return. "cdf" returns the cumulative length frequency distribution
and "lf" returns the length frequency distribution.}
}
\description{
Plot the annual length frequency distributions.
}
\author{
Ryan Stanley
}
|
/man/PlotLengthFreq.Rd
|
no_license
|
SurfClam/bio.surfclam
|
R
| false | true | 843 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jsonobj.R
\name{json_map}
\alias{json_map}
\title{json_map}
\usage{
json_map(...)
}
\arguments{
\item{...}{optional contents of the named list (as key-value pairs)}
}
\value{
a named list
}
\description{
Convenience function for making a (possibly empty) named list, which converts to a JSON object.
}
\examples{
json_map()
json_map(one = 1, two = TRUE, three = "THREE")
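# values may themselves be json_map results, which gives nested named lists
# (and hence nested JSON objects):
json_map(outer = json_map(inner = 1))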
}
|
/aurelius/man/json_map.Rd
|
permissive
|
mafpimentel/hadrian
|
R
| false | true | 451 |
rd
|
function(input, output,session) {
output$Year <- renderUI({
list(
selectInput("Year",
label=h5("Year"),
choices = unique(df$Year),
selected = NULL, multiple = FALSE)
)
})
  output$leafmap <- renderLeaflet({
    leaflet() %>%
      addProviderTiles("Esri.WorldTerrain") %>%
      # setView() takes (lng, lat, zoom); check that the coordinate order here is intended
      setView(48.409703, 14.776540, zoom = 2)
  })
observeEvent(input$goButton, {
p<-leafletProxy("leafmap", data = df) %>%
addLabelOnlyMarkers(lng = ~Longitude, lat = ~Latitude, label = ~htmlEscape(Country),
labelOptions = labelOptions(noHide = TRUE, textOnly = TRUE,
direction = "bottom", offset = c(0,-10))) %>%
addMinicharts(df$Longitude, df$Latitude, type = "bar",
chartdata = df[,c("Environmental.Wellbeing", "Economic.Wellbeing","Human.Wellbeing")],
width = 20, height =15)
return(p)
})
}
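# Objects used above are assumed to be defined elsewhere in the app (e.g. global.R/ui.R):
# a data frame `df` with Year, Country, Longitude, Latitude and the three wellbeing
# columns, plus library(shiny), library(leaflet), library(leaflet.minicharts) and
# library(htmltools) for htmlEscape(). A hypothetical sketch of such a global.R:
# library(shiny); library(leaflet); library(leaflet.minicharts); library(htmltools)
# df <- read.csv("sustainability_index.csv")   # file name is an assumption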
|
/server.R
|
no_license
|
lotwij/Sustainability_Index_Mapped
|
R
| false | false | 1,002 |
r
|
context("grid search")
# ------------------------------------------------------------------------------
source(test_path("../helper-objects.R"))
# ------------------------------------------------------------------------------
rec_tune_1 <-
recipe(mpg ~ ., data = mtcars) %>%
step_normalize(all_predictors()) %>%
step_pca(all_predictors(), num_comp = tune())
rec_no_tune_1 <-
recipe(mpg ~ ., data = mtcars) %>%
step_normalize(all_predictors())
lm_mod <- linear_reg() %>% set_engine("lm")
svm_mod <- svm_rbf(mode = "regression", cost = tune()) %>% set_engine("kernlab")
# ------------------------------------------------------------------------------
test_that('tune recipe only', {
set.seed(4400)
wflow <- workflow() %>% add_recipe(rec_tune_1) %>% add_model(lm_mod)
pset <- dials::parameters(wflow) %>% update(num_comp = num_comp(c(1, 3)))
grid <- grid_regular(pset, levels = 3)
folds <- vfold_cv(mtcars)
res <- tune_grid(wflow, resamples = folds, grid = grid)
expect_equal(res$id, folds$id)
res_est <- collect_metrics(res)
expect_equal(nrow(res_est), nrow(grid) * 2)
expect_equal(sum(res_est$.metric == "rmse"), nrow(grid))
expect_equal(sum(res_est$.metric == "rsq"), nrow(grid))
expect_equal(res_est$n, rep(10, nrow(grid) * 2))
})
# ------------------------------------------------------------------------------
test_that('tune model only (with recipe)', {
set.seed(4400)
wflow <- workflow() %>% add_recipe(rec_no_tune_1) %>% add_model(svm_mod)
pset <- dials::parameters(wflow)
grid <- grid_regular(pset, levels = 3)
folds <- vfold_cv(mtcars)
res <- tune_grid(wflow, resamples = folds, grid = grid)
expect_equal(res$id, folds$id)
res_est <- collect_metrics(res)
expect_equal(nrow(res_est), nrow(grid) * 2)
expect_equal(sum(res_est$.metric == "rmse"), nrow(grid))
expect_equal(sum(res_est$.metric == "rsq"), nrow(grid))
expect_equal(res_est$n, rep(10, nrow(grid) * 2))
})
# ------------------------------------------------------------------------------
test_that('tune model only (with recipe, multi-predict)', {
set.seed(4400)
wflow <- workflow() %>% add_recipe(rec_no_tune_1) %>% add_model(svm_mod)
pset <- dials::parameters(wflow)
grid <- grid_regular(pset, levels = 3)
folds <- vfold_cv(mtcars)
res <- tune_grid(wflow, resamples = folds, grid = grid)
expect_equal(res$id, folds$id)
expect_equal(
colnames(res$.metrics[[1]]),
c("cost", ".metric", ".estimator", ".estimate")
)
res_est <- collect_metrics(res)
expect_equal(nrow(res_est), nrow(grid) * 2)
expect_equal(sum(res_est$.metric == "rmse"), nrow(grid))
expect_equal(sum(res_est$.metric == "rsq"), nrow(grid))
expect_equal(res_est$n, rep(10, nrow(grid) * 2))
})
# ------------------------------------------------------------------------------
test_that('tune model and recipe', {
set.seed(4400)
wflow <- workflow() %>% add_recipe(rec_tune_1) %>% add_model(svm_mod)
pset <- dials::parameters(wflow) %>% update(num_comp = num_comp(c(1, 3)))
grid <- grid_regular(pset, levels = 3)
folds <- vfold_cv(mtcars)
res <- tune_grid(wflow, resamples = folds, grid = grid)
expect_equal(res$id, folds$id)
expect_equal(
colnames(res$.metrics[[1]]),
c("cost", "num_comp", ".metric", ".estimator", ".estimate")
)
res_est <- collect_metrics(res)
expect_equal(nrow(res_est), nrow(grid) * 2)
expect_equal(sum(res_est$.metric == "rmse"), nrow(grid))
expect_equal(sum(res_est$.metric == "rsq"), nrow(grid))
expect_equal(res_est$n, rep(10, nrow(grid) * 2))
})
# ------------------------------------------------------------------------------
test_that('tune model and recipe (multi-predict)', {
set.seed(4400)
wflow <- workflow() %>% add_recipe(rec_tune_1) %>% add_model(svm_mod)
pset <- dials::parameters(wflow) %>% update(num_comp = num_comp(c(2, 3)))
grid <- grid_regular(pset, levels = c(3, 2))
folds <- vfold_cv(mtcars)
res <- tune_grid(wflow, resamples = folds, grid = grid)
expect_equal(res$id, folds$id)
res_est <- collect_metrics(res)
expect_equal(nrow(res_est), nrow(grid) * 2)
expect_equal(sum(res_est$.metric == "rmse"), nrow(grid))
expect_equal(sum(res_est$.metric == "rsq"), nrow(grid))
expect_equal(res_est$n, rep(10, nrow(grid) * 2))
})
# ------------------------------------------------------------------------------
test_that("tune recipe only - failure in recipe is caught elegantly", {
set.seed(7898)
data_folds <- vfold_cv(mtcars, v = 2)
rec <- recipe(mpg ~ ., data = mtcars) %>%
step_bs(disp, deg_free = tune())
model <- linear_reg(mode = "regression") %>%
set_engine("lm")
# NA values not allowed in recipe
cars_grid <- tibble(deg_free = c(3, NA_real_, 4))
# ask for predictions and extractions
control <- control_grid(
save_pred = TRUE,
extract = function(x) 1L
)
cars_res <- tune_grid(
model,
preprocessor = rec,
resamples = data_folds,
grid = cars_grid,
control = control
)
notes <- cars_res$.notes
note <- notes[[1]]$.notes
extract <- cars_res$.extracts[[1]]
predictions <- cars_res$.predictions[[1]]
used_deg_free <- sort(unique(predictions$deg_free))
expect_length(notes, 2L)
# failing rows are not in the output
expect_equal(nrow(extract), 2L)
expect_equal(extract$deg_free, c(3, 4))
expect_equal(used_deg_free, c(3, 4))
})
test_that("tune model only - failure in recipe is caught elegantly", {
set.seed(7898)
data_folds <- vfold_cv(mtcars, v = 2)
# NA values not allowed in recipe
rec <- recipe(mpg ~ ., data = mtcars) %>%
step_bs(disp, deg_free = NA_real_)
cars_grid <- tibble(cost = c(0.01, 0.02))
expect_warning(
cars_res <- tune_grid(
svm_mod,
preprocessor = rec,
resamples = data_folds,
grid = cars_grid,
control = control_grid(extract = function(x) {1}, save_pred = TRUE)
),
"All models failed"
)
notes <- cars_res$.notes
note <- notes[[1]]$.notes
extracts <- cars_res$.extracts
predictions <- cars_res$.predictions
expect_length(notes, 2L)
# recipe failed - no models run
expect_equivalent(extracts, list(NULL, NULL))
expect_equivalent(predictions, list(NULL, NULL))
})
test_that("tune model only - failure in formula is caught elegantly", {
set.seed(7898)
data_folds <- vfold_cv(mtcars, v = 2)
cars_grid <- tibble(cost = 0.01)
# these terms don't exist!
expect_warning(
cars_res <- tune_grid(
svm_mod,
y ~ z,
resamples = data_folds,
grid = cars_grid,
control = control_grid(extract = function(x) {1}, save_pred = TRUE)
),
"All models failed"
)
notes <- cars_res$.notes
note <- notes[[1]]$.notes
extracts <- cars_res$.extracts
predictions <- cars_res$.predictions
expect_length(notes, 2L)
# formula failed - no models run
expect_equivalent(extracts, list(NULL, NULL))
expect_equivalent(predictions, list(NULL, NULL))
})
test_that("tune model and recipe - failure in recipe is caught elegantly", {
set.seed(7898)
data_folds <- vfold_cv(mtcars, v = 2)
rec <- recipe(mpg ~ ., data = mtcars) %>%
step_bs(disp, deg_free = tune())
# NA values not allowed in recipe
cars_grid <- tibble(deg_free = c(NA_real_, 10L), cost = 0.01)
cars_res <- tune_grid(
svm_mod,
preprocessor = rec,
resamples = data_folds,
grid = cars_grid,
control = control_grid(extract = function(x) {1}, save_pred = TRUE)
)
notes <- cars_res$.notes
note <- notes[[1]]$.notes
extract <- cars_res$.extracts[[1]]
prediction <- cars_res$.predictions[[1]]
expect_length(notes, 2L)
# recipe failed half of the time, only 1 model passed
expect_equal(nrow(extract), 1L)
expect_equal(extract$deg_free, 10L)
expect_equal(extract$cost, 0.01)
expect_equal(
unique(prediction[, c("deg_free", "cost")]),
tibble(deg_free = 10, cost = 0.01)
)
})
test_that("argument order gives warning for recipes", {
expect_warning(
tune_grid(rec_tune_1, lm_mod, vfold_cv(mtcars, v = 2)),
"is deprecated as of lifecycle"
)
})
test_that("argument order gives warning for formula", {
expect_warning(
tune_grid(mpg ~ ., lm_mod, vfold_cv(mtcars, v = 2)),
"is deprecated as of lifecycle"
)
})
test_that("ellipses with tune_grid", {
wflow <- workflow() %>% add_recipe(rec_tune_1) %>% add_model(lm_mod)
folds <- vfold_cv(mtcars)
expect_warning(
tune_grid(wflow, resamples = folds, grid = 3, something = "wrong"),
"The `...` are not used in this function but one or more objects"
)
})
test_that("determining the grid type", {
grid_1 <- expand.grid(a = 1:100, b = letters[1:2])
expect_true(tune:::is_regular_grid(grid_1))
expect_true(tune:::is_regular_grid(grid_1[-(1:10),]))
expect_false(tune:::is_regular_grid(grid_1[-(1:100),]))
set.seed(1932)
grid_2 <- data.frame(a = runif(length(letters)), b = letters)
expect_false(tune:::is_regular_grid(grid_2))
})
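# These tests can be run on their own with, e.g., testthat::test_file("test-grid.R")
# or devtools::test(filter = "grid") from the package root (illustration only).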
|
/tests/testthat/test-grid.R
|
permissive
|
dcossyleon/tune
|
R
| false | false | 8,923 |
r
|
context("grid search")
# ------------------------------------------------------------------------------
source(test_path("../helper-objects.R"))
# ------------------------------------------------------------------------------
rec_tune_1 <-
recipe(mpg ~ ., data = mtcars) %>%
step_normalize(all_predictors()) %>%
step_pca(all_predictors(), num_comp = tune())
rec_no_tune_1 <-
recipe(mpg ~ ., data = mtcars) %>%
step_normalize(all_predictors())
lm_mod <- linear_reg() %>% set_engine("lm")
svm_mod <- svm_rbf(mode = "regression", cost = tune()) %>% set_engine("kernlab")
# ------------------------------------------------------------------------------
test_that('tune recipe only', {
set.seed(4400)
wflow <- workflow() %>% add_recipe(rec_tune_1) %>% add_model(lm_mod)
pset <- dials::parameters(wflow) %>% update(num_comp = num_comp(c(1, 3)))
grid <- grid_regular(pset, levels = 3)
folds <- vfold_cv(mtcars)
res <- tune_grid(wflow, resamples = folds, grid = grid)
expect_equal(res$id, folds$id)
res_est <- collect_metrics(res)
expect_equal(nrow(res_est), nrow(grid) * 2)
expect_equal(sum(res_est$.metric == "rmse"), nrow(grid))
expect_equal(sum(res_est$.metric == "rsq"), nrow(grid))
expect_equal(res_est$n, rep(10, nrow(grid) * 2))
})
# ------------------------------------------------------------------------------
test_that('tune model only (with recipe)', {
set.seed(4400)
wflow <- workflow() %>% add_recipe(rec_no_tune_1) %>% add_model(svm_mod)
pset <- dials::parameters(wflow)
grid <- grid_regular(pset, levels = 3)
folds <- vfold_cv(mtcars)
res <- tune_grid(wflow, resamples = folds, grid = grid)
expect_equal(res$id, folds$id)
res_est <- collect_metrics(res)
expect_equal(nrow(res_est), nrow(grid) * 2)
expect_equal(sum(res_est$.metric == "rmse"), nrow(grid))
expect_equal(sum(res_est$.metric == "rsq"), nrow(grid))
expect_equal(res_est$n, rep(10, nrow(grid) * 2))
})
# ------------------------------------------------------------------------------
test_that('tune model only (with recipe, multi-predict)', {
set.seed(4400)
wflow <- workflow() %>% add_recipe(rec_no_tune_1) %>% add_model(svm_mod)
pset <- dials::parameters(wflow)
grid <- grid_regular(pset, levels = 3)
folds <- vfold_cv(mtcars)
res <- tune_grid(wflow, resamples = folds, grid = grid)
expect_equal(res$id, folds$id)
expect_equal(
colnames(res$.metrics[[1]]),
c("cost", ".metric", ".estimator", ".estimate")
)
res_est <- collect_metrics(res)
expect_equal(nrow(res_est), nrow(grid) * 2)
expect_equal(sum(res_est$.metric == "rmse"), nrow(grid))
expect_equal(sum(res_est$.metric == "rsq"), nrow(grid))
expect_equal(res_est$n, rep(10, nrow(grid) * 2))
})
# ------------------------------------------------------------------------------
test_that('tune model and recipe', {
set.seed(4400)
wflow <- workflow() %>% add_recipe(rec_tune_1) %>% add_model(svm_mod)
pset <- dials::parameters(wflow) %>% update(num_comp = num_comp(c(1, 3)))
grid <- grid_regular(pset, levels = 3)
folds <- vfold_cv(mtcars)
res <- tune_grid(wflow, resamples = folds, grid = grid)
expect_equal(res$id, folds$id)
expect_equal(
colnames(res$.metrics[[1]]),
c("cost", "num_comp", ".metric", ".estimator", ".estimate")
)
res_est <- collect_metrics(res)
expect_equal(nrow(res_est), nrow(grid) * 2)
expect_equal(sum(res_est$.metric == "rmse"), nrow(grid))
expect_equal(sum(res_est$.metric == "rsq"), nrow(grid))
expect_equal(res_est$n, rep(10, nrow(grid) * 2))
})
# ------------------------------------------------------------------------------
test_that('tune model and recipe (multi-predict)', {
set.seed(4400)
wflow <- workflow() %>% add_recipe(rec_tune_1) %>% add_model(svm_mod)
pset <- dials::parameters(wflow) %>% update(num_comp = num_comp(c(2, 3)))
grid <- grid_regular(pset, levels = c(3, 2))
folds <- vfold_cv(mtcars)
res <- tune_grid(wflow, resamples = folds, grid = grid)
expect_equal(res$id, folds$id)
res_est <- collect_metrics(res)
expect_equal(nrow(res_est), nrow(grid) * 2)
expect_equal(sum(res_est$.metric == "rmse"), nrow(grid))
expect_equal(sum(res_est$.metric == "rsq"), nrow(grid))
expect_equal(res_est$n, rep(10, nrow(grid) * 2))
})
# ------------------------------------------------------------------------------
test_that("tune recipe only - failure in recipe is caught elegantly", {
set.seed(7898)
data_folds <- vfold_cv(mtcars, v = 2)
rec <- recipe(mpg ~ ., data = mtcars) %>%
step_bs(disp, deg_free = tune())
model <- linear_reg(mode = "regression") %>%
set_engine("lm")
# NA values not allowed in recipe
cars_grid <- tibble(deg_free = c(3, NA_real_, 4))
# ask for predictions and extractions
control <- control_grid(
save_pred = TRUE,
extract = function(x) 1L
)
cars_res <- tune_grid(
model,
preprocessor = rec,
resamples = data_folds,
grid = cars_grid,
control = control
)
notes <- cars_res$.notes
note <- notes[[1]]$.notes
extract <- cars_res$.extracts[[1]]
predictions <- cars_res$.predictions[[1]]
used_deg_free <- sort(unique(predictions$deg_free))
expect_length(notes, 2L)
# failing rows are not in the output
expect_equal(nrow(extract), 2L)
expect_equal(extract$deg_free, c(3, 4))
expect_equal(used_deg_free, c(3, 4))
})
test_that("tune model only - failure in recipe is caught elegantly", {
set.seed(7898)
data_folds <- vfold_cv(mtcars, v = 2)
# NA values not allowed in recipe
rec <- recipe(mpg ~ ., data = mtcars) %>%
step_bs(disp, deg_free = NA_real_)
cars_grid <- tibble(cost = c(0.01, 0.02))
expect_warning(
cars_res <- tune_grid(
svm_mod,
preprocessor = rec,
resamples = data_folds,
grid = cars_grid,
control = control_grid(extract = function(x) {1}, save_pred = TRUE)
),
"All models failed"
)
notes <- cars_res$.notes
note <- notes[[1]]$.notes
extracts <- cars_res$.extracts
predictions <- cars_res$.predictions
expect_length(notes, 2L)
# recipe failed - no models run
expect_equivalent(extracts, list(NULL, NULL))
expect_equivalent(predictions, list(NULL, NULL))
})
test_that("tune model only - failure in formula is caught elegantly", {
set.seed(7898)
data_folds <- vfold_cv(mtcars, v = 2)
cars_grid <- tibble(cost = 0.01)
# these terms don't exist!
expect_warning(
cars_res <- tune_grid(
svm_mod,
y ~ z,
resamples = data_folds,
grid = cars_grid,
control = control_grid(extract = function(x) {1}, save_pred = TRUE)
),
"All models failed"
)
notes <- cars_res$.notes
note <- notes[[1]]$.notes
extracts <- cars_res$.extracts
predictions <- cars_res$.predictions
expect_length(notes, 2L)
# formula failed - no models run
expect_equivalent(extracts, list(NULL, NULL))
expect_equivalent(predictions, list(NULL, NULL))
})
test_that("tune model and recipe - failure in recipe is caught elegantly", {
set.seed(7898)
data_folds <- vfold_cv(mtcars, v = 2)
rec <- recipe(mpg ~ ., data = mtcars) %>%
step_bs(disp, deg_free = tune())
# NA values not allowed in recipe
cars_grid <- tibble(deg_free = c(NA_real_, 10L), cost = 0.01)
cars_res <- tune_grid(
svm_mod,
preprocessor = rec,
resamples = data_folds,
grid = cars_grid,
control = control_grid(extract = function(x) {1}, save_pred = TRUE)
)
notes <- cars_res$.notes
note <- notes[[1]]$.notes
extract <- cars_res$.extracts[[1]]
prediction <- cars_res$.predictions[[1]]
expect_length(notes, 2L)
# recipe failed half of the time, only 1 model passed
expect_equal(nrow(extract), 1L)
expect_equal(extract$deg_free, 10L)
expect_equal(extract$cost, 0.01)
expect_equal(
unique(prediction[, c("deg_free", "cost")]),
tibble(deg_free = 10, cost = 0.01)
)
})
test_that("argument order gives warning for recipes", {
expect_warning(
tune_grid(rec_tune_1, lm_mod, vfold_cv(mtcars, v = 2)),
"is deprecated as of lifecycle"
)
})
test_that("argument order gives warning for formula", {
expect_warning(
tune_grid(mpg ~ ., lm_mod, vfold_cv(mtcars, v = 2)),
"is deprecated as of lifecycle"
)
})
test_that("ellipses with tune_grid", {
wflow <- workflow() %>% add_recipe(rec_tune_1) %>% add_model(lm_mod)
folds <- vfold_cv(mtcars)
expect_warning(
tune_grid(wflow, resamples = folds, grid = 3, something = "wrong"),
"The `...` are not used in this function but one or more objects"
)
})
test_that("determining the grid type", {
grid_1 <- expand.grid(a = 1:100, b = letters[1:2])
expect_true(tune:::is_regular_grid(grid_1))
expect_true(tune:::is_regular_grid(grid_1[-(1:10),]))
expect_false(tune:::is_regular_grid(grid_1[-(1:100),]))
set.seed(1932)
grid_2 <- data.frame(a = runif(length(letters)), b = letters)
expect_false(tune:::is_regular_grid(grid_2))
})
|
library(tidyverse)
load("rda/murders.rda")
murders %>%
  mutate(abb = reorder(abb, rate)) %>%
  ggplot(aes(abb, rate)) +
  geom_bar(stat = "identity", width = 0.5, color = "blue") +
  coord_flip()
x <- 5   # unused
ggsave("figs/barplot.png")   # save the bar plot
|
/analysis.R
|
no_license
|
skoi123/murders
|
R
| false | false | 207 |
r
|
#############################################
#### Winter Wheat in October_lag_no2005 ####
#############################################
'
######################
## File Description ##
The purpose of this script is to estimate the impact of weather fluctuations in the month mentioned above on yearly crop yield.
This is done by following these steps:
- Create a data frame with winter wheat as dependent variable and the variables of the month above as independent variables
- Create a stepwise function of the SMI based on the drought categories of the German drought monitor
- Remove comIds with fewer than 7 observations to avoid leverage issues
- Remove the log trend of the dependent variable
- Delete outliers which appear to be measurement errors
- Use the BIC to choose the degrees of the polynomials and to compare various model configurations
- Loop through the polynomial configurations of each model; the highest possible polynomial is of degree 3
- Compare models graphically
- Explore models
- Model with lowest BIC in general: Tavg, Prec, SMI
- Model with lowest BIC of the standard configuration: Tavg, Prec, SMI
- Model with lowest BIC with SMI: Tavg, Prec, SMI
- Correct the standard errors with either the Driscoll-Kraay or the Cameron et al./Thompson estimator
The vcovHC function estimates three heteroskedasticity-consistent covariance estimators:
• "white1" - for general heteroskedasticity but no serial correlation. Recommended for random effects.
• "white2" - is "white1" restricted to a common variance within groups. Recommended for random effects.
• "arellano" - both heteroskedasticity and serial correlation. Recommended for fixed effects.
The following options apply:
• HC0 - heteroskedasticity consistent. The default.
• HC1, HC2, HC3 - recommended for small samples. HC3 gives less weight to influential observations.
• HC4 - small samples with influential observations
• HAC - heteroskedasticity and autocorrelation consistent (type ?vcovHAC for more details)
Solution for serial correlation: cluster by groups.
Solution for cross-sectional correlation: cluster by time.
For now I work with Driscoll-Kraay and a weighting of 1 (maxlag = 0). The results should be solid, since the Cameron/Thompson approach is similar.
## Input ##
- from 4km_tmax: Yield_SMI_Prec_Tavg_Pet_Dem_Por_Tmin_Tmax_nodemean_nozscore_ww.csv (complete data frame)
## Output ##
- Yield_Covariates_WW_Oct_lag_no2005.csv (data frame reduced to October_lag_no2005)
- Export data frame for use in BIC_Graphic: file="./data/data_raw/BIC/BIC_WW_Oct_lag_no2005.csv"
- Export data frame of fixed effects to be used in the script FixedEffects_Graphic:
"./figures/figures_exploratory/FixedEffects/Winterwheat/..."
'
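## A minimal sketch (commented out; an assumption, not code from this script) of the
## Driscoll-Kraay correction described above, using plm::vcovSCC on a hypothetical
## fixed-effects fit named plm.fit of the preferred model:
# plm.fit <- plm(Winterwheat_logtrend ~ poly(Prec, 3, raw = TRUE) + poly(Tavg, 3, raw = TRUE) + SMI_GDM,
#                data = Yield_Covariates_WW_Oct_lag_no2005, index = c("comId", "year"), model = "within")
# coeftest(plm.fit, vcov. = vcovSCC(plm.fit, type = "HC0", maxlag = 0))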
###################
## Load Packages ##
library(plm)
library(boot)
library(gtools)
library(lme4)
library(lmtest)
library(car)
library(sp)
library(rgdal)
library(raster)
library(rasterVis)
library(maptools)
library(reshape)
library(stringr)
library(classInt)
library(RColorBrewer)
library(stargazer)
library(ggplot2)
####################################################################################################################################################################
#################################################################################################################
#### Create data frame with Winterwheat as dependent and variables of the month above as independent variables ####
#################################################################################################################
## Read in the large data frame for winter wheat ##
Yield_Covariates <- read.csv("~/Documents/projects/correlation/data/data_processed/Yield_SMI_Prec_Tavg_Pet_Dem_Por_Tmin_Tmax_nodemean_nozscore_ww.csv")
Yield_Covariates$X <- NULL
## For publication-worthy regression output the variable names need to be changed ##
'Drop the variables that are not needed: the other months and other unused variables'
names(Yield_Covariates)
names <- names(Yield_Covariates)
names_Oct_lag <- grep(c("*_Oct_lag"), names)
names_Oct_lag
Yield_Covariates_Oct_lag <- Yield_Covariates[,names_Oct_lag]
names(Yield_Covariates_Oct_lag)
dim(Yield_Covariates_Oct_lag)
## Delete all but SMI, Prec, Tavg and Pet
names(Yield_Covariates_Oct_lag)
Yield_Covariates_Oct_lag <- Yield_Covariates_Oct_lag[,c(1:4)]
## Establish first part of data frame_ time and spatial reference plus Winterwheat ##
names(Yield_Covariates[,c(2,1,3:5,7)])
Yield_Covariates_SM <- Yield_Covariates[,c(2,1,3:5,7)] # Note: make sure that comId and year are in the correct order.
names(Yield_Covariates_SM)
head(Yield_Covariates_SM)
Yield_Covariates_WW_Oct_lag <- cbind(Yield_Covariates_SM, Yield_Covariates_Oct_lag)
names(Yield_Covariates_WW_Oct_lag)
names(Yield_Covariates_WW_Oct_lag) <- c( "comId" , "year","com","stateId","state","Winterwheat","SMI", "Prec","Tavg", "Pet")
names(Yield_Covariates_WW_Oct_lag)
###########################
##### Delete year 2005 ####
###########################
table(Yield_Covariates_WW_Oct_lag$year)
dim(Yield_Covariates_WW_Oct_lag[Yield_Covariates_WW_Oct_lag$year==2005,])
dim(Yield_Covariates_WW_Oct_lag[Yield_Covariates_WW_Oct_lag$year!=2005,])
dim(Yield_Covariates_WW_Oct_lag)
Yield_Covariates_WW_Oct_lag_no2005 <- Yield_Covariates_WW_Oct_lag[Yield_Covariates_WW_Oct_lag$year!=2005,]
dim(Yield_Covariates_WW_Oct_lag_no2005)
head(Yield_Covariates_WW_Oct_lag_no2005)
#########################################
#### Create stepwise function of SMI ####
#########################################
' Drought Monitor specification '
Yield_Covariates_WW_Oct_lag_no2005$SMI_GDM <- cut(Yield_Covariates_WW_Oct_lag_no2005$SMI, breaks = c(0, 0.1, 0.2, 0.3, 0.7, 0.8, 0.9, 1),
                                                  labels = c("severe drought","moderate drought","abnormal dry", "normal","abnormal wet" ,"abundant wet", "severe wet"))
#############
## Na-omit ##
sum(is.na(Yield_Covariates_WW_Oct_lag_no2005) )
dim(Yield_Covariates_WW_Oct_lag_no2005)
Yield_Covariates_WW_Oct_lag_no2005_nna <- na.omit(Yield_Covariates_WW_Oct_lag_no2005)
dim(Yield_Covariates_WW_Oct_lag_no2005_nna)
## Check for NAs
any(is.na(Yield_Covariates_WW_Oct_lag_no2005_nna))
## Reset Rownames
rownames(Yield_Covariates_WW_Oct_lag_no2005_nna) <- NULL
## Further work with DataFrame without Yield_Covariates_WW_Oct_lag_no2005 index ##
Yield_Covariates_WW_Oct_lag_no2005 <- Yield_Covariates_WW_Oct_lag_no2005_nna
#########################################################################
## Remove comIds with fewer than 7 observations to avoid leverage issues ##
#########################################################################
#####################################################
## Delete all comIds with less than 7 observations ##
sum(table(Yield_Covariates_WW_Oct_lag_no2005$comId) < 7 )
table(Yield_Covariates_WW_Oct_lag_no2005$comId) < 7
## comIds with fewer than 7 observations: ##
list <- c(3402, 5117, 5124, 5314, 5334, 5916, 8421, 9762, 12052, 12053, 15001, 15002,
15082, 15083, 15084, 15085, 15086, 15087, 15088, 15089, 15091)
length(list)
list[[1]]
temp <- Yield_Covariates_WW_Oct_lag_no2005
for (i in 1:length(list))
{
print(Yield_Covariates_WW_Oct_lag_no2005[Yield_Covariates_WW_Oct_lag_no2005$comId==list[i],])
temp <- (temp[!temp$comId==list[i],])
}
## Number of deleted rows
dim(temp)-dim(Yield_Covariates_WW_Oct_lag_no2005)
## Further use old name for data.frame
Yield_Covariates_WW_Oct_lag_no2005 <- temp
################################
## Commands to rerun after each deletion ##
Yield_Covariates_WW_Oct_lag_no2005 <- na.omit(Yield_Covariates_WW_Oct_lag_no2005)
rownames(Yield_Covariates_WW_Oct_lag_no2005) <- NULL
Yield_Covariates_WW_Oct_lag_no2005 <- plm.data(Yield_Covariates_WW_Oct_lag_no2005, index=c("comId", "year"))
Yield_Covariates_WW_Oct_lag_no2005[,c("comId","stateId")] <- lapply(Yield_Covariates_WW_Oct_lag_no2005[,c("comId","stateId")], factor )
#################################################
#### Remove log trend of the dependent variable ####
#################################################
'Fit log of yield on log of time and use the residuals of that for yields'
logtrend <- lm(log(Winterwheat) ~ log(as.integer(year)), data= Yield_Covariates_WW_Oct_lag_no2005)
##########################
## Issue with Outliers ###
##########################
par(mfrow = c(2,2))
plot(logtrend)
## Look at the outlier values ##
Yield_Covariates_WW_Oct_lag_no2005[c(3382, 3442, 3454, 2574,3451,3511),]
## Look at the other observations of the outlier comIds ##
Yield_Covariates_WW_Oct_lag_no2005[Yield_Covariates_WW_Oct_lag_no2005$comId == "12060",] #2004
Yield_Covariates_WW_Oct_lag_no2005[Yield_Covariates_WW_Oct_lag_no2005$comId == "12065",] #2004
Yield_Covariates_WW_Oct_lag_no2005[Yield_Covariates_WW_Oct_lag_no2005$comId == "12066",] #2004
Yield_Covariates_WW_Oct_lag_no2005[Yield_Covariates_WW_Oct_lag_no2005$comId == "9276",] # 1999: here I see no reason why these data should be deleted
Yield_Covariates_WW_Oct_lag_no2005[Yield_Covariates_WW_Oct_lag_no2005$comId == "12060",] # 2004
Yield_Covariates_WW_Oct_lag_no2005[Yield_Covariates_WW_Oct_lag_no2005$comId == "12071",] # 2000
## Interpretation ##
' In contrast to SoliMoais, I do not remove any observations here because of outliers or leverage, since they are probably not measurement errors.'
Yield_Covariates_WW_Oct_lag_no2005 <- na.omit(Yield_Covariates_WW_Oct_lag_no2005)
rownames(Yield_Covariates_WW_Oct_lag_no2005) <- NULL
#################################################
#### Remove log trend of the dependent variable ####
logtrend <- lm(log(Winterwheat) ~ log(as.integer(year)), data= Yield_Covariates_WW_Oct_lag_no2005)
summary(logtrend)
Yield_Covariates_WW_Oct_lag_no2005$Winterwheat_logtrend <- resid(logtrend)
#######################################
## Prepare dataframe for plm package ##
'Change Indexing so that it can be used in plm package'
Yield_Covariates_WW_Oct_lag_no2005 <- plm.data(Yield_Covariates_WW_Oct_lag_no2005, index=c("comId", "year"))
str(Yield_Covariates_WW_Oct_lag_no2005)
## Transform comId and stateId to factor ##
Yield_Covariates_WW_Oct_lag_no2005[,c("comId","stateId")] <- lapply(Yield_Covariates_WW_Oct_lag_no2005[,c("comId","stateId")], factor )
lapply(Yield_Covariates_WW_Oct_lag_no2005, class)
###############################################
##### Save Yield_Covariates_WW_Oct_lag_no2005 externally ####
write.csv(Yield_Covariates_WW_Oct_lag_no2005, file="./data/data_raw/Yield_Covariates_WW_Oct_lag_no2005.csv")
#######################################################
#### BIC to choose the degrees of the polynomials ####
#######################################################
## create a matrix which contains all possible degree combinations, here for three variables ##
degree <- permutations(n=3,r=2,v=c(1:3),repeats.allowed=T)
degree
################################################
## Formulas for Model Variations to be tested ##
## with SMI
formula_Oct_lag_no2005_WW_detrendlog_SMIPrecTavg <- Winterwheat_logtrend ~ poly(Prec, degree[r, 1], raw = T) + poly(Tavg, degree[r, 2], raw = T) +
dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId)
formula_Oct_lag_no2005_WW_detrendlog_SMIPrecPet <- Winterwheat_logtrend ~ poly(Prec, degree[r, 1], raw = T) + poly(Pet, degree[r, 2], raw = T) +
dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId)
formula_Oct_lag_no2005_WW_detrendlog_SMIPrec <- Winterwheat_logtrend ~ poly(Prec, degree[r, 1], raw = T) +
dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId)
formula_Oct_lag_no2005_WW_detrendlog_SMIPet <- Winterwheat_logtrend ~ poly(Pet, degree[r, 2], raw = T) +
dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId)
formula_Oct_lag_no2005_WW_detrendlog_SMITavg <- Winterwheat_logtrend ~ poly(Tavg, degree[r, 2], raw = T) +
dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId)
formula_Oct_lag_no2005_WW_detrendlog_SMI <- Winterwheat_logtrend ~
dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId)
## no SMI
formula_Oct_lag_no2005_WW_detrendlog_PrecTavg <- Winterwheat_logtrend ~ poly(Prec, degree[r, 1], raw = T) + poly(Tavg, degree[r, 2], raw = T) + dummy(comId)
formula_Oct_lag_no2005_WW_detrendlog_PrecPet <- Winterwheat_logtrend ~ poly(Prec, degree[r, 1], raw = T) + poly(Pet, degree[r, 2], raw = T) + dummy(comId)
formula_Oct_lag_no2005_WW_detrendlog_Prec <- Winterwheat_logtrend ~ poly(Prec, degree[r, 1], raw = T) + dummy(comId)
formula_Oct_lag_no2005_WW_detrendlog_Pet <- Winterwheat_logtrend ~ poly(Pet, degree[r, 2], raw = T) + dummy(comId)
formula_Oct_lag_no2005_WW_detrendlog_Tavg <- Winterwheat_logtrend ~ poly(Tavg, degree[r, 2], raw = T) + dummy(comId)
#################################################################################################
# Loop through the container list to cover all permutations of the possible degrees ##
# of freedom of the polynomials of the variables                                    ##
#################################################################################################
##################################################
## Loop through various variable configurations ##
BIC_SMIPrecTavg <- rep(0,9)
for(r in 1:9){
glm.fit_SMIPrecTavg <- glm(formula = formula_Oct_lag_no2005_WW_detrendlog_SMIPrecTavg, data = Yield_Covariates_WW_Oct_lag_no2005)
BIC_SMIPrecTavg[r] <- BIC(glm.fit_SMIPrecTavg)
}
BIC_SMIPrecPet <- rep(0,9)
for(r in 1:9){
glm.fit_SMIPrecPet <- glm(formula = formula_Oct_lag_no2005_WW_detrendlog_SMIPrecPet, data = Yield_Covariates_WW_Oct_lag_no2005)
BIC_SMIPrecPet[r] <- BIC(glm.fit_SMIPrecPet)
}
BIC_SMIPrec <- rep(0,9)
for(r in 1:9){
glm.fit_SMIPrec <- glm(formula = formula_Oct_lag_no2005_WW_detrendlog_SMIPrec, data = Yield_Covariates_WW_Oct_lag_no2005)
BIC_SMIPrec[r] <- BIC(glm.fit_SMIPrec)
}
BIC_SMIPet <- rep(0,9)
for(r in 1:9){
glm.fit_SMIPet <- glm(formula = formula_Oct_lag_no2005_WW_detrendlog_SMIPet, data = Yield_Covariates_WW_Oct_lag_no2005)
BIC_SMIPet[r] <- BIC(glm.fit_SMIPet)
}
BIC_SMITavg <- rep(0,9)
for(r in 1:9){
glm.fit_SMITavg <- glm(formula = formula_Oct_lag_no2005_WW_detrendlog_SMITavg, data = Yield_Covariates_WW_Oct_lag_no2005)
BIC_SMITavg[r] <- BIC(glm.fit_SMITavg)
}
BIC_SMI <- rep(0,9)
for(r in 1:9){
glm.fit_SMI <- glm(formula = formula_Oct_lag_no2005_WW_detrendlog_SMI, data = Yield_Covariates_WW_Oct_lag_no2005)
BIC_SMI[r] <- BIC(glm.fit_SMI)
}
BIC_PrecTavg <- rep(0,9)
for(r in 1:9){
glm.fit_PrecTavg <- glm(formula = formula_Oct_lag_no2005_WW_detrendlog_PrecTavg, data = Yield_Covariates_WW_Oct_lag_no2005)
BIC_PrecTavg[r] <- BIC(glm.fit_PrecTavg)
}
BIC_PrecPet <- rep(0,9)
for(r in 1:9){
glm.fit_PrecPet <- glm(formula = formula_Oct_lag_no2005_WW_detrendlog_PrecPet, data = Yield_Covariates_WW_Oct_lag_no2005)
BIC_PrecPet[r] <- BIC(glm.fit_PrecPet)
}
BIC_Prec <- rep(0,9)
for(r in 1:9){
glm.fit_Prec <- glm(formula = formula_Oct_lag_no2005_WW_detrendlog_Prec, data = Yield_Covariates_WW_Oct_lag_no2005)
BIC_Prec[r] <- BIC(glm.fit_Prec)
}
BIC_Pet <- rep(0,9)
for(r in 1:9){
glm.fit_Pet <- glm(formula = formula_Oct_lag_no2005_WW_detrendlog_Pet , data = Yield_Covariates_WW_Oct_lag_no2005)
BIC_Pet [r] <- BIC(glm.fit_Pet )
}
BIC_Tavg <- rep(0,9)
for(r in 1:9){
glm.fit_Tavg <- glm(formula = formula_Oct_lag_no2005_WW_detrendlog_Tavg , data = Yield_Covariates_WW_Oct_lag_no2005)
BIC_Tavg [r] <- BIC(glm.fit_Tavg )
}
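## --------------------------------------------------------------------------------------
## Optional refactor (hypothetical sketch, not part of the original analysis): the ten
## near-identical loops above can be collapsed into one helper that maps over the rows of
## `degree`. It assumes the formula objects and Yield_Covariates_WW_Oct_lag_no2005 defined
## above; because the formulas look up `r` in the global environment, the helper assigns
## it there before each fit, exactly as the explicit for-loops do.
bic_for_formula <- function(f, data, degree_rows = 1:9) {
  vapply(degree_rows, function(i) {
    assign("r", i, envir = globalenv())   # the poly() terms read degree[r, ] from globalenv
    BIC(glm(formula = f, data = data))    # same gaussian GLM as in the loops above
  }, numeric(1))
}
## e.g. (commented out; equivalent to the BIC_SMIPrecTavg loop above):
# BIC_SMIPrecTavg <- bic_for_formula(formula_Oct_lag_no2005_WW_detrendlog_SMIPrecTavg,
#                                    Yield_Covariates_WW_Oct_lag_no2005)
## --------------------------------------------------------------------------------------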
## Compare BIC values ##
BIC <- c(BIC_SMIPrecTavg, BIC_SMIPrecPet, BIC_SMIPrec, BIC_SMIPet, BIC_SMITavg, BIC_SMI, BIC_Prec, BIC_Tavg, BIC_Pet, BIC_PrecTavg, BIC_PrecPet)
BIC
par(mfrow=c(1,1))
plot(BIC)
###########################
## Plot BIC with ggplot2 ##
###########################
##############################################
## Create Dataframe for plotting in ggplot2 ##
## repeat name of model configuration ##
list <-c("01_SMIPrecTavg", "02_SMIPrecPet", "03_SMIPrec", "04_SMIPet",
"05_SMITavg", "06_SMI", "07_Prec", "08_Tavg", "09_Pet", "10_PrecTavg", "11_PrecPet")
list2 <- 1:11
model <- NULL
model_index <- NULL
for (i in 1:11)
{
x <- rep(list[i],9)
y <- rep(list2[i],9)
model <- append(model, x)
model_index <- as.numeric(append(model_index, y))
}
###################################
## Combine data in one data.frame ##
BIC <- as.data.frame(BIC)
model <- as.data.frame(model)
model_index <- as.data.frame(model_index)
index <- 1:99
month <-rep("October_lag_no2005",99)
BIC_Oct_lag_no2005 <- cbind(BIC, model ,model_index, index, month)
#######################
## Delete Duplicates ##
which(duplicated(BIC_Oct_lag_no2005$BIC))
list3 <- c(20,21,23,24,26,27,31,32,33,34,35,36,40,41,42,43,44,45,47,48,49,50,51,52,53,54,56,57,59,60,62,63,67,68,69,70,71,72,76,77,78,79,80,81)
length(list3)
temp <- BIC_Oct_lag_no2005
for (i in 1:44)
{
print(BIC_Oct_lag_no2005[BIC_Oct_lag_no2005$index ==list3[i],])
temp <- (temp[!temp$index==list3[i],])
}
dim(BIC_Oct_lag_no2005)
dim(temp)
################################
## Correct created data.frame ##
rownames(temp) <- NULL
BIC_Oct_lag_no2005 <- temp
lapply(BIC_Oct_lag_no2005, class)
############################
## Plot data with ggplot2 ##
g <- ggplot(BIC_Oct_lag_no2005,aes(y=BIC, x=index))
g + geom_point(aes(color=model)) + labs(title="BIC of various model configurations", x="") + theme(plot.title=element_text(size=15, face="bold")) + theme_dark()
g + geom_point(aes(color=model)) + labs(title="BIC of various model configurations", x="") + theme(plot.title=element_text(size=15, face="bold")) + theme_dark() +
facet_wrap( ~ month)
BIC_Oct_lag_no2005
BIC_Oct_lag_no2005[which.min(BIC_Oct_lag_no2005$BIC),]
## Export Data frame for use in BIC_Grafic
BIC_WW_Oct_lag_no2005 <- BIC_Oct_lag_no2005
class(BIC_WW_Oct_lag_no2005)
write.csv(BIC_WW_Oct_lag_no2005, file="./data/data_raw/BIC/BIC_WW_Oct_lag_no2005.csv")
################################################################
################################### Explore Models #############
################################################################
###################
## Load Data Set ##
# Yield_Covariates_WW_Oct_lag_no2005 <- read.csv( file="./data/data_raw/Yield_Covariates_WW_Oct_lag_no2005.csv")
# names(Yield_Covariates_WW_Oct_lag_no2005)
# Yield_Covariates_WW_Oct_lag_no2005$X <- NULL
#######################################
## Prepare dataframe for plm package ##
'Change Indexing so that it can be used in plm package'
Yield_Covariates_WW_Oct_lag_no2005 <- plm.data(Yield_Covariates_WW_Oct_lag_no2005, index=c("comId", "year"))
## Transform comId and stateId to factor ##
Yield_Covariates_WW_Oct_lag_no2005[,c("comId","stateId")] <- lapply(Yield_Covariates_WW_Oct_lag_no2005[,c("comId","stateId")], factor )
str(Yield_Covariates_WW_Oct_lag_no2005)
#################################
###############################
## Results with smallest BIC ##
###############################
plot(BIC_SMIPrecTavg)
which.min(BIC_SMIPrecTavg)
r = 9
best_formula <- formula_Oct_lag_no2005_WW_detrendlog_SMIPrecTavg
###################
## GLM results ##
glm.fit_WW_BEST_Oct_lag_no2005 <- glm(formula = best_formula, data = Yield_Covariates_WW_Oct_lag_no2005)
summary(glm.fit_WW_BEST_Oct_lag_no2005)
'AIC:-6885.8'
####################
## PLM results ##
plm.fit_WW_BEST_Oct_lag_no2005 <- plm(formula = update(best_formula, .~. - dummy(comId)), data = Yield_Covariates_WW_Oct_lag_no2005, effect="individual", model=("within"), index = c("comId","year"))
summary(plm.fit_WW_BEST_Oct_lag_no2005)
'Adj. R-Squared: 0.18194'
fixef <- fixef(plm.fit_WW_BEST_Oct_lag_no2005)
fixef <- as.data.frame(as.matrix(fixef))
head(fixef)
fixef <- cbind(rownames(fixef), fixef)
rownames(fixef) <- NULL
names(fixef) <- c("comId", "FE")
fixef
write.csv(fixef, "./figures/figures_exploratory/FixedEffects/Winterwheat/plm.fit_WW_BEST_Oct_lag_no2005_FE.csv")
##################
## LM results ##
lm.fit_WW_BEST_Oct_lag_no2005 <-lm(formula = best_formula, data = Yield_Covariates_WW_Oct_lag_no2005)
summary(lm.fit_WW_BEST_Oct_lag_no2005)
'Adjusted R-squared:0.6987'
########################
## Heteroskedasdicity ##
bptest(glm.fit_WW_BEST_Oct_lag_no2005) # Breusch-Pagan test for heteroskedasticity in the error terms; null hypothesis: homoskedasticity.
bptest(plm.fit_WW_BEST_Oct_lag_no2005)
'In both cases the null is rejected, so there is heteroskedasticity.'
## Koenker's version of the BP test: robust modification for non-normally distributed errors.
bptest(plm.fit_WW_BEST_Oct_lag_no2005, studentize = TRUE)
'Here, too, the null is rejected. Need to use a robust covariance matrix to correct the standard errors.'
######################################
## Tests for serial autocorrelation ##
pwartest(plm.fit_WW_BEST_Oct_lag_no2005)
pbgtest(plm.fit_WW_BEST_Oct_lag_no2005)
'
Both tests reject the null of no serial correlation, i.e. there is serial autocorrelation.
'
#################################
## Correct the Standard Errors ##
#################################
## Correct Standard Errors used in table ##
coeftest(plm.fit_WW_BEST_Oct_lag_no2005)
## Robust covariance matrix estimators a la White ##
# coeftest(plm.fit_WW_BEST_Oct_lag_no2005,vcov=vcovHC(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano", type = "HC0"))
cov0_WW_BEST_Oct_lag_no2005 <- vcovHC(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano", type = "HC0", cluster="group")
Wh.se_serial_WW_BEST_Oct_lag_no2005 <- sqrt(diag(cov0_WW_BEST_Oct_lag_no2005))
cov0.1_WW_BEST_Oct_lag_no2005 <- vcovHC(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano", type = "HC0", cluster="time")
Wh.se_cross_WW_BEST_Oct_lag_no2005 <- sqrt(diag(cov0.1_WW_BEST_Oct_lag_no2005))
#
# ## Beck Katz:
# # coeftest(plm.fit_WW_BEST_Oct_lag_no2005, vcov = function(x) vcovBK(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano", type = "HC0"))
# cov1 <- vcovBK(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano", type = "HC0", cluster="time")
# BK.se <- sqrt(diag(cov1))
# ## Driscoll Kraay ##
# summary(plm.fit_WW_BEST_Oct_lag_no2005)
coeftest(plm.fit_WW_BEST_Oct_lag_no2005, vcov=function(x) vcovSCC(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano",type = "HC0"))
cov2_WW_BEST_Oct_lag_no2005 <- vcovSCC(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano",type = "HC0")
DK.se_WW_BEST_Oct_lag_no2005 <- sqrt(diag(cov2_WW_BEST_Oct_lag_no2005))
#
# cov2.1_WW_BEST_Oct_lag_no2005 <- vcovSCC(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano",type = "HC0", maxlag=1)
# DK2.1.se_WW_BEST_Oct_lag_no2005 <- sqrt(diag(cov2.1_WW_BEST_Oct_lag_no2005))
# cov2.2_WW_BEST_Oct_lag_no2005 <- vcovSCC(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano",type = "HC0", maxlag=2)
# DK2.2.se_WW_BEST_Oct_lag_no2005 <- sqrt(diag(cov2.2_WW_BEST_Oct_lag_no2005))
#
# cov2.3_WW_BEST_Oct_lag_no2005 <- vcovSCC(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano",type = "HC0", maxlag=3)
# DK2.3.se_WW_BEST_Oct_lag_no2005 <- sqrt(diag(cov2.3_WW_BEST_Oct_lag_no2005))
#
# cov2.4_WW_BEST_Oct_lag_no2005 <- vcovSCC(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano",type = "HC0", maxlag=4)
# DK2.4.se_WW_BEST_Oct_lag_no2005 <- sqrt(diag(cov2.4_WW_BEST_Oct_lag_no2005))
#
cov2.5_WW_BEST_Oct_lag_no2005 <- vcovSCC(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano",type = "HC0", maxlag=5)
DK2.5.se_WW_BEST_Oct_lag_no2005 <- sqrt(diag(cov2.5_WW_BEST_Oct_lag_no2005))
## Cameron et al. / Thompson: double-clustering estimator ##
# coeftest(plm.fit_WW_BEST_Oct_lag_no2005, vcovDC(plm.fit_WW_BEST_Oct_lag_no2005, method = "arellano", type = "HC0"))
cov3_WW_BEST_Oct_lag_no2005 <- vcovDC(plm.fit_WW_BEST_Oct_lag_no2005, method = "arellano", type = "HC0")
CT.se_WW_BEST_Oct_lag_no2005 <- sqrt(diag(cov3_WW_BEST_Oct_lag_no2005))
'Our estimator is qualitatively similar to the ones presented in White and Domowitz (1984), for
time series data, and Conley (1999), for spatial data. '
## Generate Table with Output ##
se <- list(NULL, Wh.se_cross_WW_BEST_Oct_lag_no2005, Wh.se_serial_WW_BEST_Oct_lag_no2005, DK.se_WW_BEST_Oct_lag_no2005, DK2.5.se_WW_BEST_Oct_lag_no2005, CT.se_WW_BEST_Oct_lag_no2005)
labels1 <- c("NULL", "WhiteCross", "WhiteSerial", "DriscollKraay", "DriscollKraay2.5", "CameronThompson")
stargazer(plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005,plm.fit_WW_BEST_Oct_lag_no2005,
se = se,
dep.var.caption = "Model with smallest BIC - October_lag_no2005",
dep.var.labels = "log(Winterwheat)",
style="default",
model.numbers = FALSE,
column.labels = labels1,
type="text", out="./figures/figures_exploratory/BIC/Winterwheat/WW_Oct_lag_no2005_best.txt"
)
#########################################################
## Results with smallest BIC of Standard Configuration ##
#########################################################
se <- list(NULL, Wh.se_cross_WW_BEST_Oct_lag_no2005, Wh.se_serial_WW_BEST_Oct_lag_no2005, DK.se_WW_BEST_Oct_lag_no2005, DK2.5.se_WW_BEST_Oct_lag_no2005, CT.se_WW_BEST_Oct_lag_no2005)
labels1 <- c("NULL", "WhiteCross", "WhiteSerial", "DriscollKraay", "DriscollKraay2.5", "CameronThompson")
stargazer(plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005,plm.fit_WW_BEST_Oct_lag_no2005,
se = se,
dep.var.caption = "Model with smallest BIC - October_lag_no2005",
dep.var.labels = "log(Winterwheat)",
style="default",
model.numbers = FALSE,
column.labels = labels1,
type="text", out="./figures/figures_exploratory/BIC/Winterwheat/WW_Oct_lag_no2005_bestStandard.txt"
)
########################################
## Results with smallest BIC with SMI ##
########################################
'Best general model includes SMI'
## Generate Table with Output ##
se <- list(NULL, Wh.se_cross_WW_BEST_Oct_lag_no2005, Wh.se_serial_WW_BEST_Oct_lag_no2005, DK.se_WW_BEST_Oct_lag_no2005, DK2.5.se_WW_BEST_Oct_lag_no2005, CT.se_WW_BEST_Oct_lag_no2005)
labels1 <- c("NULL", "WhiteCross", "WhiteSerial", "DriscollKraay", "DriscollKraay2.5", "CameronThompson")
stargazer(plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005,plm.fit_WW_BEST_Oct_lag_no2005,
se = se,
dep.var.caption = "Model with smallest BIC - October_lag_no2005",
dep.var.labels = "log(Winterwheat)",
style="default",
model.numbers = FALSE,
column.labels = labels1,
type="text", out="./figures/figures_exploratory/BIC/Winterwheat/WW_Oct_lag_no2005_bestSM.txt"
)
|
/script/script_raw/winterWheat/BIC/10lag_WW_BIC_no2005.R
|
no_license
|
MikyPiky/Project1
|
R
| false | false | 27,196 |
r
|
#############################################
#### Winter Wheat in October_lag_no2005 ####
#############################################
'
######################
## File Description ##
The purpose of this script is to estimate the impact of weather fluctuations in the month mentioned above on yearly crop yield.
This is done in the following steps:
- Create data frame with Winterwheat as dependent and variables of the month above as independent variables
- Create stepwise function which is based on drought categories of german drought monitor
- Remove comIds with fewer than 7 observations to avoid leverage issues
- Remove log trend of the dependent variable (yield)
- Delete outliers which appear to be measurement errors
- Use BIC to choose the degrees of the polynomial and to compare various model configurations
- Loop through the polynomial configurations of each model; highest possible polynomial is of degree 3
- Compare models graphically
- Explore Models
- Model with lowest BIC in general: Tavg, Prec, SMI
- Model with lowest BIC of standard configuration: Tavg, Prec, SMI
- Model with lowest BIC with SMI: Tavg, Prec, SMI
- Correct standard errors with either the Driscoll-Kraay or the Cameron et al./Thompson estimator
The vcovHC function estimates three heteroskedasticity-consistent covariance estimators:
• "white1" - for general heteroskedasticity but no serial correlation. Recommended for random effects.
• "white2" - is "white1" restricted to a common variance within groups. Recommended for random effects.
• "arellano" - both heteroskedasticity and serial correlation. Recommended for fixed effects.
The following options apply*:
• HC0 - heteroskedasticity consistent. The default.
• HC1,HC2, HC3 – Recommended for small samples. HC3 gives less weight to influential
observations.
• HC4 - small samples with influential observations
• HAC - heteroskedasticity and autocorrelation consistent (type ?vcovHAC for more
details)
Solution for serial correlation: Cluster by groups.
Solution for cross sectional correlation: Cluster by time
For now I work with Driscoll-Kraay and a weighting of 1 (maxlag = 0). The results should be solid, since the Cameron/Thompson estimator gives similar results.
## Input ##
- from 4km_tmax: Yield_SMI_Prec_Tavg_Pet_Dem_Por_Tmin_Tmax_nodemean_nozscore_ww.csv (complete data.frame)
## Output ##
- Yield_Covariates_WW_Oct_lag_no2005.csv (data.frame reduced to October_lag_no2005)
- Export Data frame for use in BIC_Graphic: file="./data/data_raw/BIC/BIC_WW_Oct_lag_no2005.csv")
- Export Data Frame of Fixed Effects to be used in Script FixedEffects_Graphic:
"./figures/figures_exploratory/FixedEffects/Winterwheat/..."
'
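## For reference, the covariance-estimator call pattern described above (hypothetical
## illustration only; `plm_fit` is a placeholder for a model fitted with plm(), and the
## actual calls on the fitted model appear further below in this script):
# vcovHC(plm_fit, method = "arellano", type = "HC0", cluster = "group")  # serial correlation
# vcovHC(plm_fit, method = "arellano", type = "HC0", cluster = "time")   # cross-sectional correlation
# vcovSCC(plm_fit, method = "arellano", type = "HC0")                    # Driscoll-Kraay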
###################
## Load Packages ##
library(plm)
library(boot)
library(gtools)
library(lme4)
library(lmtest)
library(car)
library(sp)
library(rgdal)
library(raster)
library(rasterVis)
library(maptools)
library(reshape)
library(stringr)
library(classInt)
library(RColorBrewer)
library(stargazer)
library(ggplot2)
####################################################################################################################################################################
#################################################################################################################
#### Create data frame with Winterwheat as dependent and variables of the month above as independent variables ####
#################################################################################################################
## Read in large Dataframe for Maize ##
Yield_Covariates <- read.csv("~/Documents/projects/correlation/data/data_processed/Yield_SMI_Prec_Tavg_Pet_Dem_Por_Tmin_Tmax_nodemean_nozscore_ww.csv")
Yield_Covariates$X <- NULL
## For publication worth regression output need to change data names ##
'Get rid of variables which are not necessary: other months and other not needed variables'
names(Yield_Covariates)
names <- names(Yield_Covariates)
names_Oct_lag <- grep(c("*_Oct_lag"), names)
names_Oct_lag
Yield_Covariates_Oct_lag <- Yield_Covariates[,names_Oct_lag]
names(Yield_Covariates_Oct_lag)
dim(Yield_Covariates_Oct_lag)
## Delete all but SMI, Prec, Tavg and Pet
names(Yield_Covariates_Oct_lag)
Yield_Covariates_Oct_lag <- Yield_Covariates_Oct_lag[,c(1:4)]
## Establish first part of data frame_ time and spatial reference plus Winterwheat ##
names(Yield_Covariates[,c(2,1,3:5,7)])
Yield_Covariates_SM <- Yield_Covariates[,c(2,1,3:5,7)] # Note: make sure that comId and year are in the correct order.
names(Yield_Covariates_SM)
head(Yield_Covariates_SM)
Yield_Covariates_WW_Oct_lag <- cbind(Yield_Covariates_SM, Yield_Covariates_Oct_lag)
names(Yield_Covariates_WW_Oct_lag)
names(Yield_Covariates_WW_Oct_lag) <- c( "comId" , "year","com","stateId","state","Winterwheat","SMI", "Prec","Tavg", "Pet")
names(Yield_Covariates_WW_Oct_lag)
###########################
##### Delete year 2005 ####
###########################
table(Yield_Covariates_WW_Oct_lag$year)
dim(Yield_Covariates_WW_Oct_lag[Yield_Covariates_WW_Oct_lag$year==2005,])
dim(Yield_Covariates_WW_Oct_lag[Yield_Covariates_WW_Oct_lag$year!=2005,])
dim(Yield_Covariates_WW_Oct_lag)
Yield_Covariates_WW_Oct_lag_no2005 <- Yield_Covariates_WW_Oct_lag[Yield_Covariates_WW_Oct_lag$year!=2005,]
dim(Yield_Covariates_WW_Oct_lag_no2005)
head(Yield_Covariates_WW_Oct_lag_no2005)
#########################################
#### Create stepwise function of SMI ####
#########################################
' Drought Monitor specification '
Yield_Covariates_WW_Oct_lag_no2005$SMI_GDM <- cut(Yield_Covariates_WW_Oct_lag_no2005$SMI, breaks = c(0, 0.1, 0.2, 0.3, 0.7, 0.8, 0.9, 1),
labels = c("severe drought","moderate drought","abnormal dry", "normal","abnormal wet" ,"abundant wet", "severe wet"))
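## For reference, the cut() above maps SMI into seven classes: (0,0.1] severe drought,
## (0.1,0.2] moderate drought, (0.2,0.3] abnormal dry, (0.3,0.7] normal,
## (0.7,0.8] abnormal wet, (0.8,0.9] abundant wet, (0.9,1] severe wet.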
#############
## Na-omit ##
sum(is.na(Yield_Covariates_WW_Oct_lag_no2005) )
dim(Yield_Covariates_WW_Oct_lag_no2005)
Yield_Covariates_WW_Oct_lag_no2005_nna <- na.omit(Yield_Covariates_WW_Oct_lag_no2005)
dim(Yield_Covariates_WW_Oct_lag_no2005_nna)
## Check for NAs
any(is.na(Yield_Covariates_WW_Oct_lag_no2005_nna))
## Reset Rownames
rownames(Yield_Covariates_WW_Oct_lag_no2005_nna) <- NULL
## Further work with DataFrame without Yield_Covariates_WW_Oct_lag_no2005 index ##
Yield_Covariates_WW_Oct_lag_no2005 <- Yield_Covariates_WW_Oct_lag_no2005_nna
#########################################################################
## Remove comIds with less than 7 observations to avoid leveage issues ##
#########################################################################
#####################################################
## Delete all comIds with less than 7 observations ##
sum(table(Yield_Covariates_WW_Oct_lag_no2005$comId) < 7 )
table(Yield_Covariates_WW_Oct_lag_no2005$comId) < 7
## comIds with fewer than 7 observations: ##
list <- c(3402, 5117, 5124, 5314, 5334, 5916, 8421, 9762, 12052, 12053, 15001, 15002,
15082, 15083, 15084, 15085, 15086, 15087, 15088, 15089, 15091)
length(list)
list[[1]]
temp <- Yield_Covariates_WW_Oct_lag_no2005
for (i in 1:length(list))
{
print(Yield_Covariates_WW_Oct_lag_no2005[Yield_Covariates_WW_Oct_lag_no2005$comId==list[i],])
temp <- (temp[!temp$comId==list[i],])
}
## Number of deleted rows
dim(temp)-dim(Yield_Covariates_WW_Oct_lag_no2005)
## Further use old name for data.frame
Yield_Covariates_WW_Oct_lag_no2005 <- temp
################################
## Commands to run after each deletion ##
Yield_Covariates_WW_Oct_lag_no2005 <- na.omit(Yield_Covariates_WW_Oct_lag_no2005)
rownames(Yield_Covariates_WW_Oct_lag_no2005) <- NULL
Yield_Covariates_WW_Oct_lag_no2005 <- plm.data(Yield_Covariates_WW_Oct_lag_no2005, index=c("comId", "year"))
Yield_Covariates_WW_Oct_lag_no2005[,c("comId","stateId")] <- lapply(Yield_Covariates_WW_Oct_lag_no2005[,c("comId","stateId")], factor )
#################################################
#### Remove log trend of the dependent variable ####
#################################################
'Fit log of yield on log of time and use the residuals of that for yields'
logtrend <- lm(log(Winterwheat) ~ log(as.integer(year)), data= Yield_Covariates_WW_Oct_lag_no2005)
##########################
## Issue with Outliers ###
##########################
par(mfrow = c(2,2))
plot(logtrend)
## Look Outliers Values ##
Yield_Covariates_WW_Oct_lag_no2005[c(3382, 3442, 3454, 2574,3451,3511),]
## Look at other values of outliers com #
Yield_Covariates_WW_Oct_lag_no2005[Yield_Covariates_WW_Oct_lag_no2005$comId == "12060",] #2004
Yield_Covariates_WW_Oct_lag_no2005[Yield_Covariates_WW_Oct_lag_no2005$comId == "12065",] #2004
Yield_Covariates_WW_Oct_lag_no2005[Yield_Covariates_WW_Oct_lag_no2005$comId == "12066",] #2004
Yield_Covariates_WW_Oct_lag_no2005[Yield_Covariates_WW_Oct_lag_no2005$comId == "9276",] # 1999: here I see no reason why these data should be deleted
Yield_Covariates_WW_Oct_lag_no2005[Yield_Covariates_WW_Oct_lag_no2005$comId == "12060",] # 2004
Yield_Covariates_WW_Oct_lag_no2005[Yield_Covariates_WW_Oct_lag_no2005$comId == "12071",] # 2000
## Interpretation ##
'In contrast to SoliMoais, I do not remove any observations here due to outliers and leverage, since these are probably not measurement errors.'
Yield_Covariates_WW_Oct_lag_no2005 <- na.omit(Yield_Covariates_WW_Oct_lag_no2005)
rownames(Yield_Covariates_WW_Oct_lag_no2005) <- NULL
#################################################
#### Remove log trend of the dependent variable ####
logtrend <- lm(log(Winterwheat) ~ log(as.integer(year)), data= Yield_Covariates_WW_Oct_lag_no2005)
summary(logtrend)
Yield_Covariates_WW_Oct_lag_no2005$Winterwheat_logtrend <- resid(logtrend)
#######################################
## Prepare dataframe for plm package ##
'Change Indexing so that it can be used in plm package'
Yield_Covariates_WW_Oct_lag_no2005 <- plm.data(Yield_Covariates_WW_Oct_lag_no2005, index=c("comId", "year"))
str(Yield_Covariates_WW_Oct_lag_no2005)
## Transform comId and stateId to factor ##
Yield_Covariates_WW_Oct_lag_no2005[,c("comId","stateId")] <- lapply(Yield_Covariates_WW_Oct_lag_no2005[,c("comId","stateId")], factor )
lapply(Yield_Covariates_WW_Oct_lag_no2005, class)
###############################################
##### Save Yield_Covariates_WW_Oct_lag_no2005 externally ####
write.csv(Yield_Covariates_WW_Oct_lag_no2005, file="./data/data_raw/Yield_Covariates_WW_Oct_lag_no2005.csv")
#######################################################
#### BIC to choose the degrees of the polynomials ####
#######################################################
## create a matrix which contains all possible degree combinations, here for three variables ##
degree <- permutations(n=3,r=2,v=c(1:3),repeats.allowed=T)
degree
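## For reference, `degree` should be a 9 x 2 matrix of all pairs (1,1), (1,2), (1,3),
## (2,1), ..., (3,3); column 1 sets the polynomial degree for Prec and column 2 the
## degree for Tavg or Pet in the formulas below.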
################################################
## Formulas for Model Variations to be tested ##
## with SMI
formula_Oct_lag_no2005_WW_detrendlog_SMIPrecTavg <- Winterwheat_logtrend ~ poly(Prec, degree[r, 1], raw = T) + poly(Tavg, degree[r, 2], raw = T) +
dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId)
formula_Oct_lag_no2005_WW_detrendlog_SMIPrecPet <- Winterwheat_logtrend ~ poly(Prec, degree[r, 1], raw = T) + poly(Pet, degree[r, 2], raw = T) +
dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId)
formula_Oct_lag_no2005_WW_detrendlog_SMIPrec <- Winterwheat_logtrend ~ poly(Prec, degree[r, 1], raw = T) +
dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId)
formula_Oct_lag_no2005_WW_detrendlog_SMIPet <- Winterwheat_logtrend ~ poly(Pet, degree[r, 2], raw = T) +
dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId)
formula_Oct_lag_no2005_WW_detrendlog_SMITavg <- Winterwheat_logtrend ~ poly(Tavg, degree[r, 2], raw = T) +
dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId)
formula_Oct_lag_no2005_WW_detrendlog_SMI <- Winterwheat_logtrend ~
dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId)
## no SMI
formula_Oct_lag_no2005_WW_detrendlog_PrecTavg <- Winterwheat_logtrend ~ poly(Prec, degree[r, 1], raw = T) + poly(Tavg, degree[r, 2], raw = T) + dummy(comId)
formula_Oct_lag_no2005_WW_detrendlog_PrecPet <- Winterwheat_logtrend ~ poly(Prec, degree[r, 1], raw = T) + poly(Pet, degree[r, 2], raw = T) + dummy(comId)
formula_Oct_lag_no2005_WW_detrendlog_Prec <- Winterwheat_logtrend ~ poly(Prec, degree[r, 1], raw = T) + dummy(comId)
formula_Oct_lag_no2005_WW_detrendlog_Pet <- Winterwheat_logtrend ~ poly(Pet, degree[r, 2], raw = T) + dummy(comId)
formula_Oct_lag_no2005_WW_detrendlog_Tavg <- Winterwheat_logtrend ~ poly(Tavg, degree[r, 2], raw = T) + dummy(comId)
#################################################################################################
# Loop through the container list to cover all permutations of possible degrees of freedom ##
# of the polynomials of the variables ##
#################################################################################################
##################################################
## Loop through various variable configurations ##
BIC_SMIPrecTavg <- rep(0,9)
for(r in 1:9){
glm.fit_SMIPrecTavg <- glm(formula = formula_Oct_lag_no2005_WW_detrendlog_SMIPrecTavg, data = Yield_Covariates_WW_Oct_lag_no2005)
BIC_SMIPrecTavg[r] <- BIC(glm.fit_SMIPrecTavg)
}
BIC_SMIPrecPet <- rep(0,9)
for(r in 1:9){
glm.fit_SMIPrecPet <- glm(formula = formula_Oct_lag_no2005_WW_detrendlog_SMIPrecPet, data = Yield_Covariates_WW_Oct_lag_no2005)
BIC_SMIPrecPet[r] <- BIC(glm.fit_SMIPrecPet)
}
BIC_SMIPrec <- rep(0,9)
for(r in 1:9){
glm.fit_SMIPrec <- glm(formula = formula_Oct_lag_no2005_WW_detrendlog_SMIPrec, data = Yield_Covariates_WW_Oct_lag_no2005)
BIC_SMIPrec[r] <- BIC(glm.fit_SMIPrec)
}
BIC_SMIPet <- rep(0,9)
for(r in 1:9){
glm.fit_SMIPet <- glm(formula = formula_Oct_lag_no2005_WW_detrendlog_SMIPet, data = Yield_Covariates_WW_Oct_lag_no2005)
BIC_SMIPet[r] <- BIC(glm.fit_SMIPet)
}
BIC_SMITavg <- rep(0,9)
for(r in 1:9){
glm.fit_SMITavg <- glm(formula = formula_Oct_lag_no2005_WW_detrendlog_SMITavg, data = Yield_Covariates_WW_Oct_lag_no2005)
BIC_SMITavg[r] <- BIC(glm.fit_SMITavg)
}
BIC_SMI <- rep(0,9)
for(r in 1:9){
glm.fit_SMI <- glm(formula = formula_Oct_lag_no2005_WW_detrendlog_SMI, data = Yield_Covariates_WW_Oct_lag_no2005)
BIC_SMI[r] <- BIC(glm.fit_SMI)
}
BIC_PrecTavg <- rep(0,9)
for(r in 1:9){
glm.fit_PrecTavg <- glm(formula = formula_Oct_lag_no2005_WW_detrendlog_PrecTavg, data = Yield_Covariates_WW_Oct_lag_no2005)
BIC_PrecTavg[r] <- BIC(glm.fit_PrecTavg)
}
BIC_PrecPet <- rep(0,9)
for(r in 1:9){
glm.fit_PrecPet <- glm(formula = formula_Oct_lag_no2005_WW_detrendlog_PrecPet, data = Yield_Covariates_WW_Oct_lag_no2005)
BIC_PrecPet[r] <- BIC(glm.fit_PrecPet)
}
BIC_Prec <- rep(0,9)
for(r in 1:9){
glm.fit_Prec <- glm(formula = formula_Oct_lag_no2005_WW_detrendlog_Prec, data = Yield_Covariates_WW_Oct_lag_no2005)
BIC_Prec[r] <- BIC(glm.fit_Prec)
}
BIC_Pet <- rep(0,9)
for(r in 1:9){
glm.fit_Pet <- glm(formula = formula_Oct_lag_no2005_WW_detrendlog_Pet , data = Yield_Covariates_WW_Oct_lag_no2005)
BIC_Pet [r] <- BIC(glm.fit_Pet )
}
BIC_Tavg <- rep(0,9)
for(r in 1:9){
glm.fit_Tavg <- glm(formula = formula_Oct_lag_no2005_WW_detrendlog_Tavg , data = Yield_Covariates_WW_Oct_lag_no2005)
BIC_Tavg [r] <- BIC(glm.fit_Tavg )
}
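## --------------------------------------------------------------------------------------
## Optional refactor (hypothetical sketch, not part of the original analysis): the ten
## near-identical loops above can be collapsed into one helper that maps over the rows of
## `degree`. It assumes the formula objects and Yield_Covariates_WW_Oct_lag_no2005 defined
## above; because the formulas look up `r` in the global environment, the helper assigns
## it there before each fit, exactly as the explicit for-loops do.
bic_for_formula <- function(f, data, degree_rows = 1:9) {
  vapply(degree_rows, function(i) {
    assign("r", i, envir = globalenv())   # the poly() terms read degree[r, ] from globalenv
    BIC(glm(formula = f, data = data))    # same gaussian GLM as in the loops above
  }, numeric(1))
}
## e.g. (commented out; equivalent to the BIC_SMIPrecTavg loop above):
# BIC_SMIPrecTavg <- bic_for_formula(formula_Oct_lag_no2005_WW_detrendlog_SMIPrecTavg,
#                                    Yield_Covariates_WW_Oct_lag_no2005)
## --------------------------------------------------------------------------------------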
## Compare BIC values ##
BIC <- c(BIC_SMIPrecTavg, BIC_SMIPrecPet, BIC_SMIPrec, BIC_SMIPet, BIC_SMITavg, BIC_SMI, BIC_Prec, BIC_Tavg, BIC_Pet, BIC_PrecTavg, BIC_PrecPet)
BIC
par(mfrow=c(1,1))
plot(BIC)
###########################
## Plot BIC with ggplot2 ##
###########################
##############################################
## Create Dataframe for plotting in ggplot2 ##
## repeat name of model configuration ##
list <-c("01_SMIPrecTavg", "02_SMIPrecPet", "03_SMIPrec", "04_SMIPet",
"05_SMITavg", "06_SMI", "07_Prec", "08_Tavg", "09_Pet", "10_PrecTavg", "11_PrecPet")
list2 <- 1:11
model <- NULL
model_index <- NULL
for (i in 1:11)
{
x <- rep(list[i],9)
y <- rep(list2[i],9)
model <- append(model, x)
model_index <- as.numeric(append(model_index, y))
}
###################################
## Combine data in one data.frame ##
BIC <- as.data.frame(BIC)
model <- as.data.frame(model)
model_index <- as.data.frame(model_index)
index <- 1:99
month <-rep("October_lag_no2005",99)
BIC_Oct_lag_no2005 <- cbind(BIC, model ,model_index, index, month)
#######################
## Delete Duplicates ##
which(duplicated(BIC_Oct_lag_no2005$BIC))
list3 <- c(20,21,23,24,26,27,31,32,33,34,35,36,40,41,42,43,44,45,47,48,49,50,51,52,53,54,56,57,59,60,62,63,67,68,69,70,71,72,76,77,78,79,80,81)
length(list3)
temp <- BIC_Oct_lag_no2005
for (i in 1:44)
{
print(BIC_Oct_lag_no2005[BIC_Oct_lag_no2005$index ==list3[i],])
temp <- (temp[!temp$index==list3[i],])
}
dim(BIC_Oct_lag_no2005)
dim(temp)
################################
## Correct created data.frame ##
rownames(temp) <- NULL
BIC_Oct_lag_no2005 <- temp
lapply(BIC_Oct_lag_no2005, class)
############################
## Plot data with ggplot2 ##
g <- ggplot(BIC_Oct_lag_no2005,aes(y=BIC, x=index))
g + geom_point(aes(color=model)) + labs(title="BIC of various model configurations", x="") + theme(plot.title=element_text(size=15, face="bold")) + theme_dark()
g + geom_point(aes(color=model)) + labs(title="BIC of various model configurations", x="") + theme(plot.title=element_text(size=15, face="bold")) + theme_dark() +
facet_wrap( ~ month)
BIC_Oct_lag_no2005
BIC_Oct_lag_no2005[which.min(BIC_Oct_lag_no2005$BIC),]
## Export Data frame for use in BIC_Grafic
BIC_WW_Oct_lag_no2005 <- BIC_Oct_lag_no2005
class(BIC_WW_Oct_lag_no2005)
write.csv(BIC_WW_Oct_lag_no2005, file="./data/data_raw/BIC/BIC_WW_Oct_lag_no2005.csv")
################################################################
################################### Explore Models #############
################################################################
###################
## Load Data Set ##
# Yield_Covariates_WW_Oct_lag_no2005 <- read.csv( file="./data/data_raw/Yield_Covariates_WW_Oct_lag_no2005.csv")
# names(Yield_Covariates_WW_Oct_lag_no2005)
# Yield_Covariates_WW_Oct_lag_no2005$X <- NULL
#######################################
## Prepare dataframe for plm package ##
'Change Indexing so that it can be used in plm package'
Yield_Covariates_WW_Oct_lag_no2005 <- plm.data(Yield_Covariates_WW_Oct_lag_no2005, index=c("comId", "year"))
## Transform comId and stateId to factor ##
Yield_Covariates_WW_Oct_lag_no2005[,c("comId","stateId")] <- lapply(Yield_Covariates_WW_Oct_lag_no2005[,c("comId","stateId")], factor )
str(Yield_Covariates_WW_Oct_lag_no2005)
#################################
###############################
## Results with smallest BIC ##
###############################
plot(BIC_SMIPrecTavg)
which.min(BIC_SMIPrecTavg)
r = 9
best_formula <- formula_Oct_lag_no2005_WW_detrendlog_SMIPrecTavg
###################
## GLM results ##
glm.fit_WW_BEST_Oct_lag_no2005 <- glm(formula = best_formula, data = Yield_Covariates_WW_Oct_lag_no2005)
summary(glm.fit_WW_BEST_Oct_lag_no2005)
'AIC:-6885.8'
####################
## PLM results ##
plm.fit_WW_BEST_Oct_lag_no2005 <- plm(formula = update(best_formula, .~. - dummy(comId)), data = Yield_Covariates_WW_Oct_lag_no2005, effect="individual", model=("within"), index = c("comId","year"))
summary(plm.fit_WW_BEST_Oct_lag_no2005)
'Adj. R-Squared: 0.18194'
fixef <- fixef(plm.fit_WW_BEST_Oct_lag_no2005)
fixef <- as.data.frame(as.matrix(fixef))
head(fixef)
fixef <- cbind(rownames(fixef), fixef)
rownames(fixef) <- NULL
names(fixef) <- c("comId", "FE")
fixef
write.csv(fixef, "./figures/figures_exploratory/FixedEffects/Winterwheat/plm.fit_WW_BEST_Oct_lag_no2005_FE.csv")
##################
## LM results ##
lm.fit_WW_BEST_Oct_lag_no2005 <-lm(formula = best_formula, data = Yield_Covariates_WW_Oct_lag_no2005)
summary(lm.fit_WW_BEST_Oct_lag_no2005)
'Adjusted R-squared:0.6987'
########################
## Heteroskedasdicity ##
bptest(glm.fit_WW_BEST_Oct_lag_no2005) # Breusch-Pagan test for heteroskedasticity in the error terms; null hypothesis: homoskedasticity.
bptest(plm.fit_WW_BEST_Oct_lag_no2005)
'In both cases the null is rejected, so there is heteroskedasticity.'
## Koenker's version of the BP test: robust modification for non-normally distributed errors.
bptest(plm.fit_WW_BEST_Oct_lag_no2005, studentize = TRUE)
'Here, too, the null is rejected. Need to use a robust covariance matrix to correct the standard errors.'
######################################
## Tests for serial autocorrelation ##
pwartest(plm.fit_WW_BEST_Oct_lag_no2005)
pbgtest(plm.fit_WW_BEST_Oct_lag_no2005)
'
Both tests reject the null of no serial correlation, i.e. there is serial autocorrelation.
'
#################################
## Correct the Standard Errors ##
#################################
## Correct Standard Errors used in table ##
coeftest(plm.fit_WW_BEST_Oct_lag_no2005)
## Robust covariance matrix estimators a la White ##
# coeftest(plm.fit_WW_BEST_Oct_lag_no2005,vcov=vcovHC(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano", type = "HC0"))
cov0_WW_BEST_Oct_lag_no2005 <- vcovHC(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano", type = "HC0", cluster="group")
Wh.se_serial_WW_BEST_Oct_lag_no2005 <- sqrt(diag(cov0_WW_BEST_Oct_lag_no2005))
cov0.1_WW_BEST_Oct_lag_no2005 <- vcovHC(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano", type = "HC0", cluster="time")
Wh.se_cross_WW_BEST_Oct_lag_no2005 <- sqrt(diag(cov0.1_WW_BEST_Oct_lag_no2005))
#
# ## Beck Katz:
# # coeftest(plm.fit_WW_BEST_Oct_lag_no2005, vcov = function(x) vcovBK(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano", type = "HC0"))
# cov1 <- vcovBK(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano", type = "HC0", cluster="time")
# BK.se <- sqrt(diag(cov1))
# ## Driscoll Kraay ##
# summary(plm.fit_WW_BEST_Oct_lag_no2005)
coeftest(plm.fit_WW_BEST_Oct_lag_no2005, vcov=function(x) vcovSCC(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano",type = "HC0"))
cov2_WW_BEST_Oct_lag_no2005 <- vcovSCC(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano",type = "HC0")
DK.se_WW_BEST_Oct_lag_no2005 <- sqrt(diag(cov2_WW_BEST_Oct_lag_no2005))
#
# cov2.1_WW_BEST_Oct_lag_no2005 <- vcovSCC(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano",type = "HC0", maxlag=1)
# DK2.1.se_WW_BEST_Oct_lag_no2005 <- sqrt(diag(cov2.1_WW_BEST_Oct_lag_no2005))
# cov2.2_WW_BEST_Oct_lag_no2005 <- vcovSCC(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano",type = "HC0", maxlag=2)
# DK2.2.se_WW_BEST_Oct_lag_no2005 <- sqrt(diag(cov2.2_WW_BEST_Oct_lag_no2005))
#
# cov2.3_WW_BEST_Oct_lag_no2005 <- vcovSCC(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano",type = "HC0", maxlag=3)
# DK2.3.se_WW_BEST_Oct_lag_no2005 <- sqrt(diag(cov2.3_WW_BEST_Oct_lag_no2005))
#
# cov2.4_WW_BEST_Oct_lag_no2005 <- vcovSCC(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano",type = "HC0", maxlag=4)
# DK2.4.se_WW_BEST_Oct_lag_no2005 <- sqrt(diag(cov2.4_WW_BEST_Oct_lag_no2005))
#
cov2.5_WW_BEST_Oct_lag_no2005 <- vcovSCC(plm.fit_WW_BEST_Oct_lag_no2005,method = "arellano",type = "HC0", maxlag=5)
DK2.5.se_WW_BEST_Oct_lag_no2005 <- sqrt(diag(cov2.5_WW_BEST_Oct_lag_no2005))
## Cameron et al. / Thompson: double-clustering estimator ##
# coeftest(plm.fit_WW_BEST_Oct_lag_no2005, vcovDC(plm.fit_WW_BEST_Oct_lag_no2005, method = "arellano", type = "HC0"))
cov3_WW_BEST_Oct_lag_no2005 <- vcovDC(plm.fit_WW_BEST_Oct_lag_no2005, method = "arellano", type = "HC0")
CT.se_WW_BEST_Oct_lag_no2005 <- sqrt(diag(cov3_WW_BEST_Oct_lag_no2005))
'Our estimator is qualitatively similar to the ones presented in White and Domowitz (1984), for
time series data, and Conley (1999), for spatial data. '
## Generate Table with Output ##
se <- list(NULL, Wh.se_cross_WW_BEST_Oct_lag_no2005, Wh.se_serial_WW_BEST_Oct_lag_no2005, DK.se_WW_BEST_Oct_lag_no2005, DK2.5.se_WW_BEST_Oct_lag_no2005, CT.se_WW_BEST_Oct_lag_no2005)
labels1 <- c("NULL", "WhiteCross", "WhiteSerial", "DriscollKraay", "DriscollKraay2.5", "CameronThompson")
stargazer(plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005,plm.fit_WW_BEST_Oct_lag_no2005,
se = se,
dep.var.caption = "Model with smallest BIC - October_lag_no2005",
dep.var.labels = "log(Winterwheat)",
style="default",
model.numbers = FALSE,
column.labels = labels1,
type="text", out="./figures/figures_exploratory/BIC/Winterwheat/WW_Oct_lag_no2005_best.txt"
)
#########################################################
## Results with smallest BIC of Standard Configuration ##
#########################################################
se <- list(NULL, Wh.se_cross_WW_BEST_Oct_lag_no2005, Wh.se_serial_WW_BEST_Oct_lag_no2005, DK.se_WW_BEST_Oct_lag_no2005, DK2.5.se_WW_BEST_Oct_lag_no2005, CT.se_WW_BEST_Oct_lag_no2005)
labels1 <- c("NULL", "WhiteCross", "WhiteSerial", "DriscollKraay", "DriscollKraay2.5", "CameronThompson")
stargazer(plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005,plm.fit_WW_BEST_Oct_lag_no2005,
se = se,
dep.var.caption = "Model with smallest BIC - October_lag_no2005",
dep.var.labels = "log(Winterwheat)",
style="default",
model.numbers = FALSE,
column.labels = labels1,
type="text", out="./figures/figures_exploratory/BIC/Winterwheat/WW_Oct_lag_no2005_bestStandard.txt"
)
########################################
## Results with smallest BIC with SMI ##
########################################
'Best general model includes SMI'
## Generate Table with Output ##
se <- list(NULL, Wh.se_cross_WW_BEST_Oct_lag_no2005, Wh.se_serial_WW_BEST_Oct_lag_no2005, DK.se_WW_BEST_Oct_lag_no2005, DK2.5.se_WW_BEST_Oct_lag_no2005, CT.se_WW_BEST_Oct_lag_no2005)
labels1 <- c("NULL", "WhiteCross", "WhiteSerial", "DriscollKraay", "DriscollKraay2.5", "CameronThompson")
stargazer(plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005, plm.fit_WW_BEST_Oct_lag_no2005,plm.fit_WW_BEST_Oct_lag_no2005,
se = se,
dep.var.caption = "Model with smallest BIC - October_lag_no2005",
dep.var.labels = "log(Winterwheat)",
style="default",
model.numbers = FALSE,
column.labels = labels1,
type="text", out="./figures/figures_exploratory/BIC/Winterwheat/WW_Oct_lag_no2005_bestSM.txt"
)
|
library(RxnSim)
### Name: ms.compute
### Title: Computes Similarity of Molecules
### Aliases: ms.compute ms.compute.sim.matrix
### ** Examples
ms.compute('N', '[H]N([H])[H]', standardize = FALSE)
|
/data/genthat_extracted_code/RxnSim/examples/ms.compute.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 203 |
r
|
library(RxnSim)
### Name: ms.compute
### Title: Computes Similarity of Molecules
### Aliases: ms.compute ms.compute.sim.matrix
### ** Examples
ms.compute('N', '[H]N([H])[H]', standardize = FALSE)
|
# mplot3.roc
# ::rtemis::
# 2017 Efstathios D. Gennatas egenn.lambdamd.org
#' \code{mplot3} ROC curves
#'
#' Plot ROC curve for a binary classifier
#'
#' @inheritParams mplot3.x
#' @param prob Vector, Float [0, 1]: Predicted probabilities (i.e. c(.1, .8, .2, .9))
#' @param labels Vector, Integer {0, 1}: True labels (i.e. c(0, 1, 0, 1))
#' @param method Character: "rt" or "pROC" will use \link{rtROC} and \code{pROC::roc} respectively
#' to get points of the ROC. Default = "rt"
#' @param type Character: "TPR.FPR" or "Sens.Spec". Only changes the x and y labels. True positive rate vs.
#' False positive rate and Sensitivity vs. Specificity. Default = "TPR.FPR"
#' @param balanced.accuracy Logical: If TRUE, annotate the point of maximal Balanced Accuracy. Default = FALSE
#' @param main Character: Plot title. Default = ""
#' @param col Color, vector: Colors to use for ROC curve(s)
#' @param cex Float: Character expansion factor. Default = 1.2
#' @param lwd Float: Line width. Default = 2.5
#' @param diagonal Logical: If TRUE, draw diagonal. Default = TRUE
#' @param diagonal.lwd Float: Line width for diagonal. Default = 2.5
#' @param diagonal.lty Integer: Line type for diagonal. Default = 3
#' @param group.legend Logical
#' @param ... Additional parameters to pass to \link{mplot3.xy}
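#' @examples
#' # Illustrative call (hypothetical toy vectors, mirroring the parameter docs above):
#' \dontrun{
#' mplot3.roc(prob = c(.1, .8, .2, .9), labels = c(0, 1, 0, 1))
#' }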
#' @author Efstathios D. Gennatas
#' @export
mplot3.roc <- function(prob, labels,
method = c("rt", "pROC"),
type = "TPR.FPR",
balanced.accuracy = FALSE,
main = "",
col = NULL,
cex = 1.2,
lwd = 2.5,
diagonal = TRUE,
diagonal.lwd = 2.5,
diagonal.lty = 3,
group.legend = FALSE,
annotation = TRUE,
annotation.col = col,
annot.line = NULL,
annot.adj = 1,
annot.font = 1,
pty = "s",
mar = c(2.5, 3, 2, 1),
theme = getOption("rt.theme", "whitegrid"),
palette = getOption("rt.palette", "rtCol1"),
verbose = TRUE,
par.reset = TRUE,
filename = NULL,
pdf.width = 5,
pdf.height = 5, ...) {
# [ ARGUMENTS ] ====
# Output directory
if (!is.null(filename))
if (!dir.exists(dirname(filename)))
dir.create(dirname(filename), recursive = TRUE)
method <- match.arg(method)
# Compatibility with rtlayout()
if (exists("rtpar")) par.reset <- FALSE
# [ THEME ] ====
extraargs <- list(...)
if (is.character(theme)) {
theme <- do.call(paste0("theme_", theme), extraargs)
} else {
for (i in seq(extraargs)) {
theme[[names(extraargs)[i]]] <- extraargs[[i]]
}
}
theme$zerolines <- FALSE
# [ ROC ] ====
probl <- if (!is.list(prob)) list(prob) else prob
labelsl <- if (!is.list(labels)) list(labels) else labels
# if (length(probl) != length(labels)) stop("Input prob and labels do not contain same number of sets")
if (length(labelsl) < length(probl)) {
if (verbose) msg("Assuming same labels for each set of probabilities")
labelsl <- rep(labelsl, length(probl) / length(labelsl))
}
if (method == "rt") {
# '- method rt ====
.roc <- lapply(seq(probl), function(l) rtROC(labelsl[[l]], probl[[l]], verbose = FALSE))
TPR <- Sensitivity <- lapply(seq(probl), function(l) .roc[[l]]$Sensitivity)
Specificity <- lapply(seq(probl), function(l) .roc[[l]]$Specificity)
FPR <- lapply(seq(probl), function(l) 1 - Specificity[[l]])
AUC <- lapply(seq(probl), function(l) .roc[[l]]$AUC)
names(Sensitivity) <- names(Specificity) <- names(TPR) <- names(FPR) <- names(AUC) <- names(probl)
} else if (method == "pROC") {
# '- method pROC ====
for (i in seq(labelsl)) {
levels(labelsl[[i]]) <- c(1, 0)
}
if (!depCheck("pROC", verbose = FALSE)) {
cat("\n"); stop("Please install dependencies and try again")
}
.roc <- lapply(seq(probl), function(l) pROC::roc(labelsl[[l]], probl[[l]],
levels = c(0, 1), direction = "<"))
TPR <- Sensitivity <- lapply(seq(probl), function(l) rev(.roc[[l]]$sensitivities))
Specificity <- lapply(seq(probl), function(l) rev(.roc[[l]]$specificities))
FPR <- lapply(seq(probl), function(l) 1 - Specificity[[l]])
AUC <- lapply(seq(probl), function(l) .roc[[l]]$auc)
names(Sensitivity) <- names(Specificity) <- names(TPR) <- names(FPR) <- names(AUC) <- names(probl)
}
if (balanced.accuracy) {
BA <- lapply(seq(probl), function(l) (Sensitivity[[l]] + Specificity[[l]]) / 2)
BA.max.index <- lapply(seq(probl), function(l) which.max(BA[[l]]))
}
# Colors ====
if (is.null(col)) col <- rtPalette(palette)
# [ PLOT ] ====
if (exists("rtpar", envir = rtenv)) par.reset <- FALSE
par.orig <- par(no.readonly = TRUE)
if (par.reset) on.exit(suppressWarnings(par(par.orig)))
if (!is.null(filename)) pdf(filename, width = pdf.width, height = pdf.height, title = "rtemis Graphics")
if (type == "Sens.Spec") {
mplot3.xy(Specificity, Sensitivity,
main = main,
xlab = "Specificity", ylab = "Sensitivity",
line.alpha = 1, line.col = col, group.legend = group.legend,
diagonal.inv = diagonal, diagonal.lty = diagonal.lty, diagonal.lwd = diagonal.lwd,
pty = pty,
xlim = c(1, 0), xaxs = "i", yaxs = "i", cex = cex,
type = "l",
order.on.x = FALSE,
lwd = lwd, theme = theme,
zerolines = FALSE,
mar = mar,
xpd = TRUE, par.reset = FALSE)
if (balanced.accuracy) {
for (i in seq(probl)) {
points(x = Specificity[[i]][BA.max.index[[i]]],
y = Sensitivity[[i]][BA.max.index[[i]]],
col = col[[i]])
text(
x = Specificity[[i]][BA.max.index[[i]]] - .05,
y = Sensitivity[[i]][BA.max.index[[i]]] - .05,
labels = paste0("max BA = ", ddSci(max(BA[[i]])), "\n(p = ",
ddSci(.roc[[i]]$thresholds[BA.max.index[[i]]]), ")"),
col = col[[i]],
pos = 4,
family = theme$font.family)
}
}
} else {
mplot3.xy(FPR, TPR,
main = main,
xlab = "False Positive Rate", ylab = "True Positive Rate",
line.alpha = 1, line.col = col, group.legend = group.legend,
diagonal = diagonal, diagonal.lty = diagonal.lty, diagonal.lwd = diagonal.lwd,
xlim = c(0, 1), xaxs = "i", yaxs = "i", cex = cex,
type = "l",
pty = pty,
order.on.x = FALSE,
lwd = lwd,
theme = theme,
zerolines = FALSE,
mar = mar,
xpd = TRUE, par.reset = FALSE)
if (balanced.accuracy) {
for (i in seq(probl)) {
points(x = 1 - Specificity[[i]][BA.max.index[[i]]],
y = Sensitivity[[i]][BA.max.index[[i]]],
col = col[[i]])
text(x = 1 - Specificity[[i]][BA.max.index[[i]]] + .05,
y = Sensitivity[[i]][BA.max.index[[i]]],
labels = paste0("max BA = ", ddSci(max(BA[[i]])), "\n(thresh = ",
ddSci(.roc[[i]]$Thresholds[BA.max.index[[i]]]), ")"),
col = col[[i]],
pos = 4,
family = theme$font.family)
}
}
}
# [ AUC ANNOTATION ] ====
if (annotation) {
auc <- paste(names(probl), ddSci(unlist(AUC)), " ")
if (is.null(annot.line)) annot.line <- seq(-length(probl), 0) - 1.7
mtext(c("AUC ", auc),
font = annot.font,
side = 1,
line = annot.line,
adj = annot.adj,
cex = cex,
col = c("gray50", unlist(col)[seq_along(probl)]),
family = theme$font.family)
}
# [ OUTRO ] ====
if (!is.null(filename)) dev.off()
if (type == "Sens.Spec") {
invisible(list(Sensitivity = Sensitivity, Specificity = Specificity))
} else {
invisible(list(FPR = FPR, TPR = TPR))
}
} # rtemis::mplot3.roc
|
/R/mplot3.roc.R
|
no_license
|
zeta1999/rtemis
|
R
| false | false | 8,346 |
r
|
# mplot3.roc
# ::rtemis::
# 2017 Efstathios D. Gennatas egenn.lambdamd.org
#' \code{mplot3} ROC curves
#'
#' Plot ROC curve for a binary classifier
#'
#' @inheritParams mplot3.x
#' @param prob Vector, Float [0, 1]: Predicted probabilities (i.e. c(.1, .8, .2, .9))
#' @param labels Vector, Integer {0, 1}: True labels (i.e. c(0, 1, 0, 1))
#' @param method Character: "rt" or "pROC" will use \link{rtROC} and \code{pROC::roc} respectively
#' to get points of the ROC. Default = "rt"
#' @param type Character: "TPR.FPR" or "Sens.Spec". Only changes the x and y labels. True positive rate vs.
#' False positive rate and Sensitivity vs. Specificity. Default = "TPR.FPR"
#' @param balanced.accuracy Logical: If TRUE, annotate the point of maximal Balanced Accuracy. Default = FALSE
#' @param main Character: Plot title. Default = ""
#' @param col Color, vector: Colors to use for ROC curve(s)
#' @param cex Float: Character expansion factor. Default = 1.2
#' @param lwd Float: Line width. Default = 2.5
#' @param diagonal Logical: If TRUE, draw diagonal. Default = TRUE
#' @param diagonal.lwd Float: Line width for diagonal. Default = 2.5
#' @param diagonal.lty Integer: Line type for diagonal. Default = 3
#' @param group.legend Logical
#' @param ... Additional parameters to pass to \link{mplot3.xy}
#' @author Efstathios D. Gennatas
#' @export
mplot3.roc <- function(prob, labels,
method = c("rt", "pROC"),
type = "TPR.FPR",
balanced.accuracy = FALSE,
main = "",
col = NULL,
cex = 1.2,
lwd = 2.5,
diagonal = TRUE,
diagonal.lwd = 2.5,
diagonal.lty = 3,
group.legend = FALSE,
annotation = TRUE,
annotation.col = col,
annot.line = NULL,
annot.adj = 1,
annot.font = 1,
pty = "s",
mar = c(2.5, 3, 2, 1),
theme = getOption("rt.theme", "whitegrid"),
palette = getOption("rt.palette", "rtCol1"),
verbose = TRUE,
par.reset = TRUE,
filename = NULL,
pdf.width = 5,
pdf.height = 5, ...) {
# [ ARGUMENTS ] ====
# Output directory
if (!is.null(filename))
if (!dir.exists(dirname(filename)))
dir.create(dirname(filename), recursive = TRUE)
method <- match.arg(method)
# Compatibility with rtlayout()
if (exists("rtpar")) par.reset <- FALSE
# [ THEME ] ====
extraargs <- list(...)
if (is.character(theme)) {
theme <- do.call(paste0("theme_", theme), extraargs)
} else {
for (i in seq(extraargs)) {
theme[[names(extraargs)[i]]] <- extraargs[[i]]
}
}
theme$zerolines <- FALSE
# [ ROC ] ====
probl <- if (!is.list(prob)) list(prob) else prob
labelsl <- if (!is.list(labels)) list(labels) else labels
# if (length(probl) != length(labels)) stop("Input prob and labels do not contain same number of sets")
if (length(labelsl) < length(probl)) {
if (verbose) msg("Assuming same labels for each set of probabilities")
labelsl <- rep(labelsl, length(probl) / length(labelsl))
}
if (method == "rt") {
# '- method rt ====
.roc <- lapply(seq(probl), function(l) rtROC(labelsl[[l]], probl[[l]], verbose = FALSE))
TPR <- Sensitivity <- lapply(seq(probl), function(l) .roc[[l]]$Sensitivity)
Specificity <- lapply(seq(probl), function(l) .roc[[l]]$Specificity)
FPR <- lapply(seq(probl), function(l) 1 - Specificity[[l]])
AUC <- lapply(seq(probl), function(l) .roc[[l]]$AUC)
names(Sensitivity) <- names(Specificity) <- names(TPR) <- names(FPR) <- names(AUC) <- names(probl)
} else if (method == "pROC") {
# '- method pROC ====
for (i in seq(labelsl)) {
levels(labelsl[[i]]) <- c(1, 0)
}
if (!depCheck("pROC", verbose = FALSE)) {
cat("\n"); stop("Please install dependencies and try again")
}
.roc <- lapply(seq(probl), function(l) pROC::roc(labelsl[[l]], probl[[l]],
levels = c(0, 1), direction = "<"))
TPR <- Sensitivity <- lapply(seq(probl), function(l) rev(.roc[[l]]$sensitivities))
Specificity <- lapply(seq(probl), function(l) rev(.roc[[l]]$specificities))
FPR <- lapply(seq(probl), function(l) 1 - Specificity[[l]])
AUC <- lapply(seq(probl), function(l) .roc[[l]]$auc)
names(Sensitivity) <- names(Specificity) <- names(TPR) <- names(FPR) <- names(AUC) <- names(probl)
}
if (balanced.accuracy) {
BA <- lapply(seq(probl), function(l) (Sensitivity[[l]] + Specificity[[l]]) / 2)
BA.max.index <- lapply(seq(probl), function(l) which.max(BA[[l]]))
}
# Colors ====
if (is.null(col)) col <- rtPalette(palette)
# [ PLOT ] ====
if (exists("rtpar", envir = rtenv)) par.reset <- FALSE
par.orig <- par(no.readonly = TRUE)
if (par.reset) on.exit(suppressWarnings(par(par.orig)))
if (!is.null(filename)) pdf(filename, width = pdf.width, height = pdf.height, title = "rtemis Graphics")
if (type == "Sens.Spec") {
mplot3.xy(Specificity, Sensitivity,
main = main,
xlab = "Specificity", ylab = "Sensitivity",
line.alpha = 1, line.col = col, group.legend = group.legend,
diagonal.inv = diagonal, diagonal.lty = diagonal.lty, diagonal.lwd = diagonal.lwd,
pty = pty,
xlim = c(1, 0), xaxs = "i", yaxs = "i", cex = cex,
type = "l",
order.on.x = FALSE,
lwd = lwd, theme = theme,
zerolines = FALSE,
mar = mar,
xpd = TRUE, par.reset = FALSE)
if (balanced.accuracy) {
for (i in seq(probl)) {
points(x = Specificity[[i]][BA.max.index[[i]]],
y = Sensitivity[[i]][BA.max.index[[i]]],
col = col[[i]])
text(
x = Specificity[[i]][BA.max.index[[i]]] - .05,
y = Sensitivity[[i]][BA.max.index[[i]]] - .05,
labels = paste0("max BA = ", ddSci(max(BA[[i]])), "\n(p = ",
ddSci(.roc[[i]]$thresholds[BA.max.index[[i]]]), ")"),
col = col[[i]],
pos = 4,
family = theme$font.family)
}
}
} else {
mplot3.xy(FPR, TPR,
main = main,
xlab = "False Positive Rate", ylab = "True Positive Rate",
line.alpha = 1, line.col = col, group.legend = group.legend,
diagonal = diagonal, diagonal.lty = diagonal.lty, diagonal.lwd = diagonal.lwd,
xlim = c(0, 1), xaxs = "i", yaxs = "i", cex = cex,
type = "l",
pty = pty,
order.on.x = FALSE,
lwd = lwd,
theme = theme,
zerolines = FALSE,
mar = mar,
xpd = TRUE, par.reset = FALSE)
if (balanced.accuracy) {
for (i in seq(probl)) {
points(x = 1 - Specificity[[i]][BA.max.index[[i]]],
y = Sensitivity[[i]][BA.max.index[[i]]],
col = col[[i]])
text(x = 1 - Specificity[[i]][BA.max.index[[i]]] + .05,
y = Sensitivity[[i]][BA.max.index[[i]]],
labels = paste0("max BA = ", ddSci(max(BA[[i]])), "\n(thresh = ",
ddSci(.roc[[i]]$Thresholds[BA.max.index[[i]]]), ")"),
col = col[[i]],
pos = 4,
family = theme$font.family)
}
}
}
# [ AUC ANNOTATION ] ====
if (annotation) {
auc <- paste(names(probl), ddSci(unlist(AUC)), " ")
if (is.null(annot.line)) annot.line <- seq(-length(probl), 0) - 1.7
mtext(c("AUC ", auc),
font = annot.font,
side = 1,
line = annot.line,
adj = annot.adj,
cex = cex,
col = c("gray50", unlist(col)[seq_along(probl)]),
family = theme$font.family)
}
# [ OUTRO ] ====
if (!is.null(filename)) dev.off()
if (type == "Sens.Spec") {
invisible(list(Sensitivity = Sensitivity, Specificity = Specificity))
} else {
invisible(list(FPR = FPR, TPR = TPR))
}
} # rtemis::mplot3.roc
|
library(shiny)
pkgs <- c("Hmisc")
pkgs <- pkgs[!(pkgs %in% installed.packages()[,"Package"])]
if(length(pkgs)) install.packages(pkgs,repos="http://cran.cs.wwu.edu/")
library(Hmisc)
load("external/wind_ice.RData",envir=.GlobalEnv)
cuts <- rev(unique(w.beaufort.GFDL$Cut))
varlevels <- unique(w.beaufort.GFDL$Var)
years <- unique(w.beaufort.GFDL$Year)
decades <- years[years%%10==0]
dec.lab <- paste0(decades,"s")
seas <- capitalize(unique(sapply(strsplit(ls(pattern="^w.*.c$",envir=.GlobalEnv),"\\."),"[[",2)))
models <- unique(sapply(strsplit(ls(pattern="^w.*.c$",envir=.GlobalEnv),"\\."),"[[",3))
dpm <- c(31,28,31,30,31,30,31,31,30,31,30,31)
|
/Demo/Shiny-12-D-SNAP-seawinds/external/serverHead.R
|
no_license
|
DragonflyStats/Shiny-Tutorial-Notes
|
R
| false | false | 644 |
r
|
library(shiny)
pkgs <- c("Hmisc")
pkgs <- pkgs[!(pkgs %in% installed.packages()[,"Package"])]
if(length(pkgs)) install.packages(pkgs,repos="http://cran.cs.wwu.edu/")
library(Hmisc)
load("external/wind_ice.RData",envir=.GlobalEnv)
cuts <- rev(unique(w.beaufort.GFDL$Cut))
varlevels <- unique(w.beaufort.GFDL$Var)
years <- unique(w.beaufort.GFDL$Year)
decades <- years[years%%10==0]
dec.lab <- paste0(decades,"s")
seas <- capitalize(unique(sapply(strsplit(ls(pattern="^w.*.c$",envir=.GlobalEnv),"\\."),"[[",2)))
models <- unique(sapply(strsplit(ls(pattern="^w.*.c$",envir=.GlobalEnv),"\\."),"[[",3))
dpm <- c(31,28,31,30,31,30,31,31,30,31,30,31)
|
# Set options here
options(golem.app.prod = FALSE) # TRUE = production mode, FALSE = development mode
# Detach all loaded packages and clean your environment
golem::detach_all_attached()
# rm(list=ls(all.names = TRUE))
# Document and reload your package
golem::document_and_reload()
# Run the application
iapdashboard::run_app()
|
/dev/run_dev.R
|
permissive
|
lee269/iapdashboard
|
R
| false | false | 334 |
r
|
# Set options here
options(golem.app.prod = FALSE) # TRUE = production mode, FALSE = development mode
# Detach all loaded packages and clean your environment
golem::detach_all_attached()
# rm(list=ls(all.names = TRUE))
# Document and reload your package
golem::document_and_reload()
# Run the application
iapdashboard::run_app()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load_data.R
\name{read_individual_week}
\alias{read_individual_week}
\title{read_individual_week loads a single week of PBP data}
\usage{
read_individual_week(week)
}
\arguments{
\item{week}{the week to read}
}
\value{
a data.frame with the PBP data for the specified week
}
\description{
read_individual_week loads a single week of PBP data
}
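\examples{
% Hypothetical illustration (assumes week-1 play-by-play data is available to the package).
\dontrun{
pbp_week1 <- read_individual_week(1)
}
}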
|
/man/read_individual_week.Rd
|
permissive
|
hjmbigdatabowl/bdb2021
|
R
| false | true | 420 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load_data.R
\name{read_individual_week}
\alias{read_individual_week}
\title{read_individual_week loads a single week of PBP data}
\usage{
read_individual_week(week)
}
\arguments{
\item{week}{the week to read}
}
\value{
a data.frame with the PBP data for the specified week
}
\description{
read_individual_week loads a single week of PBP data
}
|
`concordance.index` <-
function(x, surv.time, surv.event, cl, weights, strat, alpha=0.05, outx=TRUE, method=c("conservative", "noether", "nam"), na.rm=FALSE) {
method <- match.arg(method)
if(!missing(weights)) {
if(length(weights) != length(x)) { stop("bad length for parameter weights!") }
} else { weights <- rep(1, length(x)) }
if(!missing(strat)) {
if(length(strat) != length(x)) { stop("bad length for parameter strat!") }
} else { strat <- rep(1, length(x)) }
if(missing(cl) && (missing(surv.time) || missing(surv.event))) { stop("binary classes and survival data are missing!") }
if(!missing(cl) && (!missing(surv.time) || !missing(surv.event))) { stop("choose binary classes or survival data but not both!") }
msurv <- FALSE
if(missing(cl)) { #survival data
msurv <- TRUE
cl <- rep(0, length(x))
} else { surv.time <- surv.event <- rep(0, length(x)) } #binary classes
cc.ix <- complete.cases(x, surv.time, surv.event, cl, weights, strat)
if(all(!cc.ix)) {
if(msurv) { data <- list("x"=x, "surv.time"=surv.time, "surv.event"=surv.event) } else { data <- list("x"=x, "cl"=cl) }
return(list("c.index"=NA, "se"=NA, "lower"=NA, "upper"=NA, "p.value"=NA, "n"=0, "data"=data))
}
if(any(!cc.ix) & !na.rm) { stop("NA values are present!") }
#remove samples whose weight is equal to 0
cc.ix <- cc.ix & weights != 0
x2 <- x[cc.ix]
cl2 <- cl[cc.ix]
st <- surv.time[cc.ix]
se <- surv.event[cc.ix]
weights <- weights[cc.ix]
strat <- strat[cc.ix]
ustrat <- sort(unique(strat))
N <- length(x2)
ch <- dh <- uh <- rph <- NULL
for(s in 1:length(ustrat)) {
ixs <- strat == ustrat[s]
Ns <- sum(ixs)
xs <- x2[ixs]
cls <- cl2[ixs]
sts <- st[ixs]
ses <- se[ixs]
weightss <- weights[ixs]
chs <- dhs <- uhs <- rphs <- NULL
for(h in 1:Ns) {
chsj <- dhsj <- uhsj <- rphsj <- 0
for(j in 1:Ns) {
whj <- weightss[h] * weightss[j]
#comparison: h has an event before j or h has a higher class than j
if((msurv && (sts[h] < sts[j] && ses[h] == 1)) || (!msurv && cls[h] > cls[j])) {
rphsj <- rphsj + whj #whj=1 in non-weighted version
if(xs[h] > xs[j]) {
chsj <- chsj + whj #whj=1 in non-weighted version
} else if(xs[h] < xs[j]) {
dhsj <- dhsj + whj #whj=1 in non-weighted version
} else {
if(outx) {
uhsj <- uhsj + whj #whj=1 in non-weighted version
} else { #ties in x
dhsj <- dhsj + whj #whj=1 in non-weighted version
#dhsj <- dhsj
}
}
}
        #comparison: j has an event before h or j has a higher class than h
if((msurv && (sts[h] > sts[j] && ses[j] == 1)) || (!msurv && cls[h] < cls[j])) {
rphsj <- rphsj + whj #whj=1 in non-weighted version
if(xs[h] < xs[j]) {
chsj <- chsj + whj #whj=1 in non-weighted version
} else if(xs[h] > xs[j]) {
dhsj <- dhsj + whj #whj=1 in non-weighted version
} else {
if(outx) {
uhsj <- uhsj + whj #whj=1 in non-weighted version
} else { #ties in x
dhsj <- dhsj + whj #whj=1 in non-weighted version
#in tsuruta2006polychotomization, the authors added 1/2 (* weightss) instead of 1 (* weightss)
}
}
}
#else { no comparable pairs }
}
chs <- c(chs, chsj)
dhs <- c(dhs, dhsj)
uhs <- c(uhs, uhsj)
rphs <- c(rphs, rphsj)
}
ch <- c(ch, chs)
dh <- c(dh, dhs)
uh <- c(uh, uhs)
rph <- c(rph, rphs)
}
pc <- (1 / (N * (N - 1))) * sum(ch)
pd <- (1 / (N * (N - 1))) * sum(dh)
cindex <- pc / (pc + pd)
switch(method,
"noether"={
pcc <- (1 / (N * (N - 1) * (N - 2))) * sum(ch * (ch - 1))
pdd <- (1 / (N * (N - 1) * (N - 2))) * sum(dh * (dh - 1))
pcd <- (1 / (N * (N - 1) * (N - 2))) * sum(ch * dh)
varp <- (4 / (pc + pd)^4) * (pd^2 * pcc - 2 * pc * pd * pcd + pc^2 * pdd)
ci <- qnorm(p=alpha / 2, lower.tail=FALSE) * sqrt(varp / N)
lower <- cindex - ci
upper <- cindex + ci
p <- pnorm((cindex - 0.5) / sqrt(varp / N), lower.tail=cindex < 0.5)
},
"conservative"={
C <- cindex
sum.ch <- sum(ch)
sum.dh <- sum(dh)
pc <- (1 / (N * (N - 1))) * sum.ch
pd <- (1 / (N * (N - 1))) * sum.dh
w <- (2 * qnorm(p=alpha / 2, lower.tail=FALSE)^2) / (N * (pc + pd))
ci <- sqrt(w^2 + 4 * w * C * (1 - C)) / (2 * (1 + w))
point <- (w + 2 * C) / (2 * (1 + w))
lower <- point - ci
upper <- point + ci
cindex <- C
p <- NA
varp <- NA
},
"name"={
stop("method not implemented!")
})
#bound the confidence interval
lower <- ifelse(lower < 0, 0, lower)
lower <- ifelse(lower > 1, 1, lower)
upper <- ifelse(upper < 0, 0, upper)
upper <- ifelse(upper > 1, 1, upper)
if(msurv) { data <- list("x"=x, "surv.time"=surv.time, "surv.event"=surv.event) } else { data <- list("x"=x, "cl"=cl) }
return(list("c.index"=cindex, "se"=sqrt(varp / N), "lower"=lower, "upper"=upper, "p.value"=p, "n"=N, "data"=data))
}
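# A minimal usage sketch (toy data, not taken from the survcomp package documentation):
# the risk score x is simulated so that larger values go with earlier failures,
# hence the concordance index should come out above 0.5.
set.seed(42)
n <- 60
x <- rnorm(n)                              # hypothetical risk scores
stime <- rexp(n, rate = exp(x))            # survival times, shorter for larger x
sevent <- rbinom(n, size = 1, prob = 0.7)  # event indicator (1 = event, 0 = censored)
ci <- concordance.index(x = x, surv.time = stime, surv.event = sevent,
                        method = "noether", na.rm = TRUE)
c(ci$c.index, ci$lower, ci$upper)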
|
/R/concordance.index.R
|
no_license
|
cran/survcomp
|
R
| false | false | 4,793 |
r
|
`concordance.index` <-
function(x, surv.time, surv.event, cl, weights, strat, alpha=0.05, outx=TRUE, method=c("conservative", "noether", "nam"), na.rm=FALSE) {
method <- match.arg(method)
if(!missing(weights)) {
if(length(weights) != length(x)) { stop("bad length for parameter weights!") }
} else { weights <- rep(1, length(x)) }
if(!missing(strat)) {
if(length(strat) != length(x)) { stop("bad length for parameter strat!") }
} else { strat <- rep(1, length(x)) }
if(missing(cl) && (missing(surv.time) || missing(surv.event))) { stop("binary classes and survival data are missing!") }
if(!missing(cl) && (!missing(surv.time) || !missing(surv.event))) { stop("choose binary classes or survival data but not both!") }
msurv <- FALSE
if(missing(cl)) { #survival data
msurv <- TRUE
cl <- rep(0, length(x))
} else { surv.time <- surv.event <- rep(0, length(x)) } #binary classes
cc.ix <- complete.cases(x, surv.time, surv.event, cl, weights, strat)
if(all(!cc.ix)) {
if(msurv) { data <- list("x"=x, "surv.time"=surv.time, "surv.event"=surv.event) } else { data <- list("x"=x, "cl"=cl) }
return(list("c.index"=NA, "se"=NA, "lower"=NA, "upper"=NA, "p.value"=NA, "n"=0, "data"=data))
}
if(any(!cc.ix) & !na.rm) { stop("NA values are present!") }
#remove samples whose weight is equal to 0
cc.ix <- cc.ix & weights != 0
x2 <- x[cc.ix]
cl2 <- cl[cc.ix]
st <- surv.time[cc.ix]
se <- surv.event[cc.ix]
weights <- weights[cc.ix]
strat <- strat[cc.ix]
ustrat <- sort(unique(strat))
N <- length(x2)
ch <- dh <- uh <- rph <- NULL
for(s in 1:length(ustrat)) {
ixs <- strat == ustrat[s]
Ns <- sum(ixs)
xs <- x2[ixs]
cls <- cl2[ixs]
sts <- st[ixs]
ses <- se[ixs]
weightss <- weights[ixs]
chs <- dhs <- uhs <- rphs <- NULL
for(h in 1:Ns) {
chsj <- dhsj <- uhsj <- rphsj <- 0
for(j in 1:Ns) {
whj <- weightss[h] * weightss[j]
#comparison: h has an event before j or h has a higher class than j
if((msurv && (sts[h] < sts[j] && ses[h] == 1)) || (!msurv && cls[h] > cls[j])) {
rphsj <- rphsj + whj #whj=1 in non-weighted version
if(xs[h] > xs[j]) {
chsj <- chsj + whj #whj=1 in non-weighted version
} else if(xs[h] < xs[j]) {
dhsj <- dhsj + whj #whj=1 in non-weighted version
} else {
if(outx) {
uhsj <- uhsj + whj #whj=1 in non-weighted version
} else { #ties in x
dhsj <- dhsj + whj #whj=1 in non-weighted version
#dhsj <- dhsj
}
}
}
        #comparison: j has an event before h or j has a higher class than h
if((msurv && (sts[h] > sts[j] && ses[j] == 1)) || (!msurv && cls[h] < cls[j])) {
rphsj <- rphsj + whj #whj=1 in non-weighted version
if(xs[h] < xs[j]) {
chsj <- chsj + whj #whj=1 in non-weighted version
} else if(xs[h] > xs[j]) {
dhsj <- dhsj + whj #whj=1 in non-weighted version
} else {
if(outx) {
uhsj <- uhsj + whj #whj=1 in non-weighted version
} else { #ties in x
dhsj <- dhsj + whj #whj=1 in non-weighted version
#in tsuruta2006polychotomization, the authors added 1/2 (* weightss) instead of 1 (* weightss)
}
}
}
#else { no comparable pairs }
}
chs <- c(chs, chsj)
dhs <- c(dhs, dhsj)
uhs <- c(uhs, uhsj)
rphs <- c(rphs, rphsj)
}
ch <- c(ch, chs)
dh <- c(dh, dhs)
uh <- c(uh, uhs)
rph <- c(rph, rphs)
}
pc <- (1 / (N * (N - 1))) * sum(ch)
pd <- (1 / (N * (N - 1))) * sum(dh)
cindex <- pc / (pc + pd)
switch(method,
"noether"={
pcc <- (1 / (N * (N - 1) * (N - 2))) * sum(ch * (ch - 1))
pdd <- (1 / (N * (N - 1) * (N - 2))) * sum(dh * (dh - 1))
pcd <- (1 / (N * (N - 1) * (N - 2))) * sum(ch * dh)
varp <- (4 / (pc + pd)^4) * (pd^2 * pcc - 2 * pc * pd * pcd + pc^2 * pdd)
ci <- qnorm(p=alpha / 2, lower.tail=FALSE) * sqrt(varp / N)
lower <- cindex - ci
upper <- cindex + ci
p <- pnorm((cindex - 0.5) / sqrt(varp / N), lower.tail=cindex < 0.5)
},
"conservative"={
C <- cindex
sum.ch <- sum(ch)
sum.dh <- sum(dh)
pc <- (1 / (N * (N - 1))) * sum.ch
pd <- (1 / (N * (N - 1))) * sum.dh
w <- (2 * qnorm(p=alpha / 2, lower.tail=FALSE)^2) / (N * (pc + pd))
ci <- sqrt(w^2 + 4 * w * C * (1 - C)) / (2 * (1 + w))
point <- (w + 2 * C) / (2 * (1 + w))
lower <- point - ci
upper <- point + ci
cindex <- C
p <- NA
varp <- NA
},
"name"={
stop("method not implemented!")
})
#bound the confidence interval
lower <- ifelse(lower < 0, 0, lower)
lower <- ifelse(lower > 1, 1, lower)
upper <- ifelse(upper < 0, 0, upper)
upper <- ifelse(upper > 1, 1, upper)
if(msurv) { data <- list("x"=x, "surv.time"=surv.time, "surv.event"=surv.event) } else { data <- list("x"=x, "cl"=cl) }
return(list("c.index"=cindex, "se"=sqrt(varp / N), "lower"=lower, "upper"=upper, "p.value"=p, "n"=N, "data"=data))
}
|
packages <- c('shiny',
'shinydashboard',
'tidyverse',
'sf',
'RColorBrewer',
'viridis',
'GADMTools',
'tmap',
'leaflet',
'here',
'rnaturalearthdata',
'lubridate',
'plotly',
'htmltools',
'raster',
'maptools',
'rgdal',
'spatstat',
'sp')
for (p in packages){
if (!require(p,character.only=T)){
install.packages(p)
}
library(p, character.only=T)
}
# Prepare SA_df
ACLED_SA <- read_csv("Data/2016-01-01-2019-12-31-Southern_Asia.csv")
SA_df <- ACLED_SA %>%
mutate(event_date=parse_date(event_date, "%d %B %Y"))%>%
mutate(month=month(event_date)) %>%
mutate(monyear = as.Date(paste0(year,"-",month, "-01"),"%Y-%m-%d"))
if (!file.exists(paste0(here::here(),"/Data/prepared_files/SA_df.csv"))){
saveRDS(SA_df, paste0(here::here(),"/Data/prepared_files/SA_df.rds"))
}
# Prepare SA_sf
SA_sf <- st_as_sf(SA_df,
coords = c("longitude", "latitude"),
crs=4326)
SA_sf <- st_transform(SA_sf, 24313)
# convert units from m to km
SA_sf <- st_transform(SA_sf, "+proj=utm +zone=43 +a=6377301.243 +b=6356100.230165384 +towgs84=283,682,231,0,0,0,0 +units=km +no_defs")
if (!file.exists(paste0(here::here(),"/Data/prepared_files/SA_sf.rds"))){
saveRDS(SA_sf, paste0(here::here(),"/Data/prepared_files/SA_sf.rds"))
}
# Prepare SA_sp
xy <- SA_df[,c("longitude","latitude")]
SA_sp <- SpatialPointsDataFrame(coords = xy, data=SA_df, proj4string =CRS("+init=epsg:4326"))
SA_sp <- spTransform(SA_sp, CRS=CRS("+init=epsg:24313"))
SA_sp <- spTransform(SA_sp, CRS=CRS("+init=epsg:24313 +proj=utm +zone=43 +a=6377301.243 +b=6356100.230165384 +towgs84=283,682,231,0,0,0,0 +units=km +no_defs"))
if (!file.exists(paste0(here::here(),"/Data/prepared_files/SA_sp.rds"))){
saveRDS(SA_sp, paste0(here::here(),"/Data/prepared_files/SA_sp.rds"))
}
# Prepare SA ppp object
lon <- SA_sp@coords[,1]
lat <- SA_sp@coords[,2]
xrange <- range(lon, na.rm=T)
yrange <- range(lat, na.rm=T)
SA_ppp <- ppp(lon, lat, xrange, yrange, data=SA_sp, marks=as.factor(SA_sp$event_type))
if (!file.exists(paste0(here::here(),"/Data/prepared_files/SA_ppp.rds"))){
saveRDS(SA_ppp, paste0(here::here(),"/Data/prepared_files/SA_ppp.rds"))
}
# Prepare SA_sh
# obtain shapefiles from rnaturalearthdata
# convert the geospatial data to sf object
SA_sh <- st_as_sf(rnaturalearthdata::countries50)%>%
filter(adm0_a3 %in% c("IND","BGD","LKA","NPL","PAK"))
SA_sh <- st_transform(SA_sh, 24313)
#convert units from m to km
SA_sh <- st_transform(SA_sh, "+proj=utm +zone=43 +a=6377301.243 +b=6356100.230165384 +towgs84=283,682,231,0,0,0,0 +units=km +no_defs")
if (!file.exists(paste0(here::here(),"/Data/prepared_files/SA_sh.rds"))){
saveRDS(SA_sh, paste0(here::here(),"/Data/prepared_files/SA_sh.rds"))
}
# PREPARE PPP FOR EACH COUNTRY
PAK_sh <- readOGR(dsn = paste0(here::here(), "/Data/geopackage/gadm36_PAK.gpkg"), layer="gadm36_PAK_1")
BGD_sh <- readOGR(dsn = paste0(here::here(), "/Data/geopackage/gadm36_BGD.gpkg"), layer="gadm36_BGD_1")
LKA_sh <- readOGR(dsn = paste0(here::here(), "/Data/geopackage/gadm36_LKA.gpkg"), layer="gadm36_LKA_1")
NPL_sh <- readOGR(dsn = paste0(here::here(), "/Data/geopackage/gadm36_NPL.gpkg"), layer="gadm36_NPL_1")
IND_sh <- readOGR(dsn = paste0(here::here(), "/Data/geopackage/gadm36_IND.gpkg"), layer="gadm36_IND_1")
# convert to spatial polygons object
PAK_poly <- as(PAK_sh, "SpatialPolygons")
BGD_poly <- as(BGD_sh, "SpatialPolygons")
LKA_poly <- as(LKA_sh, "SpatialPolygons")
NPL_poly <- as(NPL_sh, "SpatialPolygons")
IND_poly <- as(IND_sh, "SpatialPolygons")
# Transform the CRS (source is using WGS84 datum), and convert units from m to km
PAK_poly <- spTransform(PAK_poly, CRS=CRS("+init=epsg:24313 +proj=utm +zone=43 +a=6377301.243 +b=6356100.230165384 +towgs84=283,682,231,0,0,0,0 +units=km +no_defs"))
BGD_poly <- spTransform(BGD_poly, CRS=CRS("+init=epsg:24313 +proj=utm +zone=43 +a=6377301.243 +b=6356100.230165384 +towgs84=283,682,231,0,0,0,0 +units=km +no_defs"))
LKA_poly <- spTransform(LKA_poly, CRS=CRS("+init=epsg:24313 +proj=utm +zone=43 +a=6377301.243 +b=6356100.230165384 +towgs84=283,682,231,0,0,0,0 +units=km +no_defs"))
NPL_poly <- spTransform(NPL_poly, CRS=CRS("+init=epsg:24313 +proj=utm +zone=43 +a=6377301.243 +b=6356100.230165384 +towgs84=283,682,231,0,0,0,0 +units=km +no_defs"))
IND_poly <- spTransform(IND_poly, CRS=CRS("+init=epsg:24313 +proj=utm +zone=43 +a=6377301.243 +b=6356100.230165384 +towgs84=283,682,231,0,0,0,0 +units=km +no_defs"))
# convert to owin
PAK_owin <- maptools::as.owin.SpatialPolygons(PAK_poly)
BGD_owin <- maptools::as.owin.SpatialPolygons(BGD_poly)
LKA_owin <- maptools::as.owin.SpatialPolygons(LKA_poly)
NPL_owin <- maptools::as.owin.SpatialPolygons(NPL_poly)
IND_owin <- maptools::as.owin.SpatialPolygons(IND_poly)
#combining point patterns and the study area (countries)
PAK_ppp <- SA_ppp[PAK_owin]
BGD_ppp <- SA_ppp[BGD_owin]
LKA_ppp <- SA_ppp[LKA_owin]
NPL_ppp <- SA_ppp[NPL_owin]
IND_ppp <- SA_ppp[IND_owin]
# save the individual country ppp objects so they are available for later reading
if (!file.exists(paste0(here::here(),"/Data/prepared_files/PAK_ppp.rds"))){
saveRDS(PAK_ppp, paste0(here::here(),"/Data/prepared_files/PAK_ppp.rds"))
}
if (!file.exists(paste0(here::here(),"/Data/prepared_files/BGD_ppp.rds"))){
saveRDS(BGD_ppp, paste0(here::here(),"/Data/prepared_files/BGD_ppp.rds"))
}
if (!file.exists(paste0(here::here(),"/Data/prepared_files/LKA_ppp.rds"))){
saveRDS(LKA_ppp, paste0(here::here(),"/Data/prepared_files/LKA_ppp.rds"))
}
if (!file.exists(paste0(here::here(),"/Data/prepared_files/NPL_ppp.rds"))){
saveRDS(NPL_ppp, paste0(here::here(),"/Data/prepared_files/NPL_ppp.rds"))
}
if (!file.exists(paste0(here::here(),"/Data/prepared_files/IND_ppp.rds"))){
saveRDS(IND_ppp, paste0(here::here(),"/Data/prepared_files/IND_ppp.rds"))
}
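# Hedged follow-up sketch (assumes the objects created above and the attached spatstat package;
# this is not part of the original preparation script): a quick kernel density surface from one
# of the prepared ppp objects, using an arbitrarily chosen 50 km bandwidth.
PAK_kde <- density(PAK_ppp, sigma = 50)
plot(PAK_kde, main = "Kernel density of ACLED events in Pakistan")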
|
/PrepareDataFiles.R
|
no_license
|
oishee2202/Geo-spatial-Visual-Analytics-Tool
|
R
| false | false | 6,188 |
r
|
packages <- c('shiny',
'shinydashboard',
'tidyverse',
'sf',
'RColorBrewer',
'viridis',
'GADMTools',
'tmap',
'leaflet',
'here',
'rnaturalearthdata',
'lubridate',
'plotly',
'htmltools',
'raster',
'maptools',
'rgdal',
'spatstat',
'sp')
for (p in packages){
if (!require(p,character.only=T)){
install.packages(p)
}
library(p, character.only=T)
}
# Prepare SA_df
ACLED_SA <- read_csv("Data/2016-01-01-2019-12-31-Southern_Asia.csv")
SA_df <- ACLED_SA %>%
mutate(event_date=parse_date(event_date, "%d %B %Y"))%>%
mutate(month=month(event_date)) %>%
mutate(monyear = as.Date(paste0(year,"-",month, "-01"),"%Y-%m-%d"))
if (!file.exists(paste0(here::here(),"/Data/prepared_files/SA_df.csv"))){
saveRDS(SA_df, paste0(here::here(),"/Data/prepared_files/SA_df.rds"))
}
# Prepare SA_sf
SA_sf <- st_as_sf(SA_df,
coords = c("longitude", "latitude"),
crs=4326)
SA_sf <- st_transform(SA_sf, 24313)
# convert units from m to km
SA_sf <- st_transform(SA_sf, "+proj=utm +zone=43 +a=6377301.243 +b=6356100.230165384 +towgs84=283,682,231,0,0,0,0 +units=km +no_defs")
if (!file.exists(paste0(here::here(),"/Data/prepared_files/SA_sf.rds"))){
saveRDS(SA_sf, paste0(here::here(),"/Data/prepared_files/SA_sf.rds"))
}
# Prepare SA_sp
xy <- SA_df[,c("longitude","latitude")]
SA_sp <- SpatialPointsDataFrame(coords = xy, data=SA_df, proj4string =CRS("+init=epsg:4326"))
SA_sp <- spTransform(SA_sp, CRS=CRS("+init=epsg:24313"))
SA_sp <- spTransform(SA_sp, CRS=CRS("+init=epsg:24313 +proj=utm +zone=43 +a=6377301.243 +b=6356100.230165384 +towgs84=283,682,231,0,0,0,0 +units=km +no_defs"))
if (!file.exists(paste0(here::here(),"/Data/prepared_files/SA_sp.rds"))){
saveRDS(SA_sp, paste0(here::here(),"/Data/prepared_files/SA_sp.rds"))
}
# Prepare SA ppp object
lon <- SA_sp@coords[,1]
lat <- SA_sp@coords[,2]
xrange <- range(lon, na.rm=T)
yrange <- range(lat, na.rm=T)
SA_ppp <- ppp(lon, lat, xrange, yrange, data=SA_sp, marks=as.factor(SA_sp$event_type))
if (!file.exists(paste0(here::here(),"/Data/prepared_files/SA_ppp.rds"))){
saveRDS(SA_ppp, paste0(here::here(),"/Data/prepared_files/SA_ppp.rds"))
}
# Prepare SA_sh
# obtain shapefiles from rnaturalearthdata
# convert the geospatial data to sf object
SA_sh <- st_as_sf(rnaturalearthdata::countries50)%>%
filter(adm0_a3 %in% c("IND","BGD","LKA","NPL","PAK"))
SA_sh <- st_transform(SA_sh, 24313)
#convert units from m to km
SA_sh <- st_transform(SA_sh, "+proj=utm +zone=43 +a=6377301.243 +b=6356100.230165384 +towgs84=283,682,231,0,0,0,0 +units=km +no_defs")
if (!file.exists(paste0(here::here(),"/Data/prepared_files/SA_sh.rds"))){
saveRDS(SA_sh, paste0(here::here(),"/Data/prepared_files/SA_sh.rds"))
}
# PREPARE PPP FOR EACH COUNTRY
PAK_sh <- readOGR(dsn = paste0(here::here(), "/Data/geopackage/gadm36_PAK.gpkg"), layer="gadm36_PAK_1")
BGD_sh <- readOGR(dsn = paste0(here::here(), "/Data/geopackage/gadm36_BGD.gpkg"), layer="gadm36_BGD_1")
LKA_sh <- readOGR(dsn = paste0(here::here(), "/Data/geopackage/gadm36_LKA.gpkg"), layer="gadm36_LKA_1")
NPL_sh <- readOGR(dsn = paste0(here::here(), "/Data/geopackage/gadm36_NPL.gpkg"), layer="gadm36_NPL_1")
IND_sh <- readOGR(dsn = paste0(here::here(), "/Data/geopackage/gadm36_IND.gpkg"), layer="gadm36_IND_1")
# convert to spatial polygons object
PAK_poly <- as(PAK_sh, "SpatialPolygons")
BGD_poly <- as(BGD_sh, "SpatialPolygons")
LKA_poly <- as(LKA_sh, "SpatialPolygons")
NPL_poly <- as(NPL_sh, "SpatialPolygons")
IND_poly <- as(IND_sh, "SpatialPolygons")
# Transform the CRS (source is using WGS84 datum), and convert units from m to km
PAK_poly <- spTransform(PAK_poly, CRS=CRS("+init=epsg:24313 +proj=utm +zone=43 +a=6377301.243 +b=6356100.230165384 +towgs84=283,682,231,0,0,0,0 +units=km +no_defs"))
BGD_poly <- spTransform(BGD_poly, CRS=CRS("+init=epsg:24313 +proj=utm +zone=43 +a=6377301.243 +b=6356100.230165384 +towgs84=283,682,231,0,0,0,0 +units=km +no_defs"))
LKA_poly <- spTransform(LKA_poly, CRS=CRS("+init=epsg:24313 +proj=utm +zone=43 +a=6377301.243 +b=6356100.230165384 +towgs84=283,682,231,0,0,0,0 +units=km +no_defs"))
NPL_poly <- spTransform(NPL_poly, CRS=CRS("+init=epsg:24313 +proj=utm +zone=43 +a=6377301.243 +b=6356100.230165384 +towgs84=283,682,231,0,0,0,0 +units=km +no_defs"))
IND_poly <- spTransform(IND_poly, CRS=CRS("+init=epsg:24313 +proj=utm +zone=43 +a=6377301.243 +b=6356100.230165384 +towgs84=283,682,231,0,0,0,0 +units=km +no_defs"))
# convert to owin
PAK_owin <- maptools::as.owin.SpatialPolygons(PAK_poly)
BGD_owin <- maptools::as.owin.SpatialPolygons(BGD_poly)
LKA_owin <- maptools::as.owin.SpatialPolygons(LKA_poly)
NPL_owin <- maptools::as.owin.SpatialPolygons(NPL_poly)
IND_owin <- maptools::as.owin.SpatialPolygons(IND_poly)
#combining point patterns and the study area (countries)
PAK_ppp <- SA_ppp[PAK_owin]
BGD_ppp <- SA_ppp[BGD_owin]
LKA_ppp <- SA_ppp[LKA_owin]
NPL_ppp <- SA_ppp[NPL_owin]
IND_ppp <- SA_ppp[IND_owin]
# save the individual country ppp objects so they are available for later reading
if (!file.exists(paste0(here::here(),"/Data/prepared_files/PAK_ppp.rds"))){
saveRDS(PAK_ppp, paste0(here::here(),"/Data/prepared_files/PAK_ppp.rds"))
}
if (!file.exists(paste0(here::here(),"/Data/prepared_files/BGD_ppp.rds"))){
saveRDS(BGD_ppp, paste0(here::here(),"/Data/prepared_files/BGD_ppp.rds"))
}
if (!file.exists(paste0(here::here(),"/Data/prepared_files/LKA_ppp.rds"))){
saveRDS(LKA_ppp, paste0(here::here(),"/Data/prepared_files/LKA_ppp.rds"))
}
if (!file.exists(paste0(here::here(),"/Data/prepared_files/NPL_ppp.rds"))){
saveRDS(NPL_ppp, paste0(here::here(),"/Data/prepared_files/NPL_ppp.rds"))
}
if (!file.exists(paste0(here::here(),"/Data/prepared_files/IND_ppp.rds"))){
saveRDS(IND_ppp, paste0(here::here(),"/Data/prepared_files/IND_ppp.rds"))
}
|
\name{Marcus}
\alias{Marcus}
\title{
Marcus Charge Transfer Rates
}
\description{
Computes charge transfer (CT) rates using the following semi-classical Marcus expression:
\deqn{k = \frac{2\pi}{\hbar}J^2\sqrt{\frac{1}{4\pi\lambda k_{B}T}}exp \left( -\frac{ \left( \lambda + \Delta E^{0} + \Delta E_{Field} \right) ^{2}}{4\lambda k_{B}T} \right)}{%
k = 2\pi/h_bar * J^2 \sqrt(1/4\pi\lambdakbT) * exp(-(\lambda+\DeltaE0+\DeltaEField)^2/(4\lambdakbT))}
where \eqn{J}{J} and \eqn{\Delta E^0=E_{final}-E_{initial}}{\DeltaE0 = Ef - Ei} are respectively the electronic coupling and the site energy difference between the initial and final electronic states involved in the charge transfer reaction, and \eqn{\lambda}{\lambda} is the total reorganization energy. \eqn{\Delta E_{Field}}{dEField} is an additional contribution to the site energy difference due to an external electric field and \eqn{T}{T} is the temperature.
}
\usage{
Marcus(J, lambda, dE0 = 0, dEField = 0, temp = 300)
}
\arguments{
\item{J}{a scalar, a vector, a matrix or an array containing the electronic couplings (\bold{in eV}) used to calculate the CT rates.}
\item{lambda}{a scalar, a vector, a matrix or an array containing the total reorganization energies (\bold{in eV}) used to calculate the CT rates.}
\item{dE0}{a scalar, a vector, a matrix or an array containing the site energy differences (\bold{in eV}) used to calculate the CT rates. By default self-exchange reactions are considered (\code{dE0=0}).}
\item{dEField}{a scalar, a vector, a matrix or an array containing an additional contribution to the site energy differences due to an external electric field (\bold{in eV}). By default no electric field is applied (\code{dEField=0}).}
\item{temp}{a scalar giving the temperature (\bold{in Kelvin}) at which to evaluate the CT rates. By default CT rates are evaluated at room temperature (temp=300).}
}
\details{
The arguments of this function can be scalars, vectors, matrices or arrays. Mixing scalar values with vectors, matrices or arrays is allowed, but in all other cases the arguments must have the same dimensions and lengths. Using matrices or arrays is useful to compute simultaneously several charge transfer rates for different pairs of molecules, structures ...
}
\value{
Depending on the dimension of the objects passed to the function a scalar, a vector, a matrix or an array containing the Marcus CT rates (\bold{in s-1}) is returned.
}
\references{
R.A. Marcus, \var{Journal of Chemical Physics}, 24:966, \bold{1956}
}
\seealso{
\code{\link{energyConversion}}, \code{\link{dEField}}, \code{\link{MarcusLevichJortner}}, \code{\link{LandauZener}}, \code{\link{KMC}}
}
\examples{
## Produce a map of the decimal logarithm of the Marcus,
## Marcus-Levich-Jortner and Landau-Zener rate expressions for:
nuN <- 1445 # effective vibrational mode wavenumber in cm-1
lambdaI <- 0.14 # internal reorganization energy in eV
lambdaS <- 36E-3 # external reorganization energy in eV
N <- 301
J <- seq( 0 , 65,length.out=N)*1E-3 # eV
dE <- seq(-0.5,0.5,length.out=N) # eV
G <- expand.grid(J, dE)
J <- G[,1]
dE <- G[,2]
kMLJ <- MarcusLevichJortner(
J = J, lambdaI = lambdaI, lambdaS = lambdaS,
hBarW = centimeterMinusOne2electronVolt(nuN), dE0 = dE)
kMarcus <- Marcus(
J = J, lambda = lambdaI+lambdaS, dE0 = dE)
kLZ <- LandauZener(
J = J, lambda = lambdaI+lambdaS,
nuN = centimeterMinusOne2Hertz(nuN), dE0 = dE)
kMLJ <- matrix(kMLJ , nrow = N, ncol = N)
kMarcus <- matrix(kMarcus, nrow = N, ncol = N)
kLZ <- matrix(kLZ , nrow = N, ncol = N)
addAxis <- function(bottom = TRUE, left = FALSE, above = FALSE, right = FALSE){
useless <- lapply(1:4,axis, labels=FALSE)
if(bottom) axis(1, labels = TRUE)
if(left ) axis(2, labels = TRUE)
if(above ) axis(3, labels = TRUE)
if(right ) axis(4, labels = TRUE)
if(bottom) mtext(side=1,line=1.2, text=expression( abs(J)/eV), cex=par("cex"))
if(left ) mtext(side=2,line=1.2, text=expression(Delta*E/eV), cex=par("cex"))
if(right ) mtext(side=4,line=1.2, text=expression(Delta*E/eV), cex=par("cex"))
box()
}
layout(matrix(1:3, ncol=3))
par(cex=2, lwd=1.5, pty="s", mgp=c(1.1,0.1,0), tck=0.02, mar=rep(0.7,4), oma=rep(2,4))
contour(unique(J), unique(dE), log10(kMLJ ),
zlim = c(1,15), levels = -15:15, xaxt="n", yaxt="n", labcex=3)
addAxis(TRUE, TRUE, FALSE, FALSE)
title("Marcus-Levich-Jortner", line=1)
contour(unique(J), unique(dE), log10(kMarcus),
zlim = c(1,15), levels = -15:15, xaxt="n", yaxt="n", labcex=3)
addAxis(TRUE, FALSE, FALSE, FALSE)
title("Marcus", line=1)
contour(unique(J), unique(dE), log10(kLZ ),
zlim = c(1,15), levels = -15:15, xaxt="n", yaxt="n", labcex=3)
addAxis(TRUE, FALSE, FALSE, TRUE)
title("Landau-Zener", line=1)
}
\keyword{ manip }
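# Hedged numerical sketch, separate from the man page above; the J and lambda values are
# illustrative assumptions, and the ChargeTransport package must be attached for Marcus().
library(ChargeTransport)
Marcus(J = 0.010, lambda = 0.2)              # self-exchange rate (dE0 = 0) in s-1
Marcus(J = 0.010, lambda = 0.2, dE0 = -0.2)  # barrierless case: -dE0 equals lambda, maximal rate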
|
/man/Marcus.Rd
|
no_license
|
khatiaxomiya/ChargeTransport
|
R
| false | false | 4,847 |
rd
|
\name{Marcus}
\alias{Marcus}
\title{
Marcus Charge Transfer Rates
}
\description{
Computes charge transfer (CT) rates using the following semi-classical Marcus expression:
\deqn{k = \frac{2\pi}{\hbar}J^2\sqrt{\frac{1}{4\pi\lambda k_{B}T}}exp \left( -\frac{ \left( \lambda + \Delta E^{0} + \Delta E_{Field} \right) ^{2}}{4\lambda k_{B}T} \right)}{%
k = 2\pi/h_bar * J^2 \sqrt(1/4\pi\lambdakbT) * exp(-(\lambda+\DeltaE0+\DeltaEField)^2/(4\lambdakbT))}
where \eqn{J}{J} and \eqn{\Delta E^0=E_{final}-E_{initial}}{\DeltaE0 = Ef - Ei} are respectively the electronic coupling and the site energy difference between the initial and final electronic states involved in the charge transfer reaction, and \eqn{\lambda}{\lambda} is the total reorganization energy. \eqn{\Delta E_{Field}}{dEField} is an additional contribution to the site energy difference due to an external electric field and \eqn{T}{T} is the temperature.
}
\usage{
Marcus(J, lambda, dE0 = 0, dEField = 0, temp = 300)
}
\arguments{
\item{J}{a scalar, a vector, a matrix or an array containing the electronic couplings (\bold{in eV}) used to calculate the CT rates.}
\item{lambda}{a scalar, a vector, a matrix or an array containing the total reorganization energies (\bold{in eV}) used to calculate the CT rates.}
\item{dE0}{a scalar, a vector, a matrix or an array containing the site energy differences (\bold{in eV}) used to calculate the CT rates. By default self-exchange reactions are considered (\code{dE0=0}).}
\item{dEField}{a scalar, a vector, a matrix or an array containing an additional contribution to the site energy differences due to an external electric field (\bold{in eV}). By default no electric field is applied (\code{dEField=0}).}
\item{temp}{a scalar giving the temperature (\bold{in Kelvin}) at which to evaluate the CT rates. By default CT rates are evaluated at room temperature (temp=300).}
}
\details{
The arguments of this function can be scalars, vectors, matrices or arrays. Mixing scalar values with vectors, matrices or arrays is allowed, but in all other cases the arguments must have the same dimensions and lengths. Using matrices or arrays is useful to compute simultaneously several charge transfer rates for different pairs of molecules, structures ...
}
\value{
Depending on the dimension of the objects passed to the function a scalar, a vector, a matrix or an array containing the Marcus CT rates (\bold{in s-1}) is returned.
}
\references{
R.A. Marcus, \var{Journal of Chemical Physics}, 24:966, \bold{1956}
}
\seealso{
\code{\link{energyConversion}}, \code{\link{dEField}}, \code{\link{MarcusLevichJortner}}, \code{\link{LandauZener}}, \code{\link{KMC}}
}
\examples{
## Produce a map of the decimal logarithm of the Marcus,
## Marcus-Levich-Jortner and Landau-Zener rate expressions for:
nuN <- 1445 # effective vibrational mode wavenumber in cm-1
lambdaI <- 0.14 # internal reorganization energy in eV
lambdaS <- 36E-3 # external reorganization energy in eV
N <- 301
J <- seq( 0 , 65,length.out=N)*1E-3 # eV
dE <- seq(-0.5,0.5,length.out=N) # eV
G <- expand.grid(J, dE)
J <- G[,1]
dE <- G[,2]
kMLJ <- MarcusLevichJortner(
J = J, lambdaI = lambdaI, lambdaS = lambdaS,
hBarW = centimeterMinusOne2electronVolt(nuN), dE0 = dE)
kMarcus <- Marcus(
J = J, lambda = lambdaI+lambdaS, dE0 = dE)
kLZ <- LandauZener(
J = J, lambda = lambdaI+lambdaS,
nuN = centimeterMinusOne2Hertz(nuN), dE0 = dE)
kMLJ <- matrix(kMLJ , nrow = N, ncol = N)
kMarcus <- matrix(kMarcus, nrow = N, ncol = N)
kLZ <- matrix(kLZ , nrow = N, ncol = N)
addAxis <- function(bottom = TRUE, left = FALSE, above = FALSE, right = FALSE){
useless <- lapply(1:4,axis, labels=FALSE)
if(bottom) axis(1, labels = TRUE)
if(left ) axis(2, labels = TRUE)
if(above ) axis(3, labels = TRUE)
if(right ) axis(4, labels = TRUE)
if(bottom) mtext(side=1,line=1.2, text=expression( abs(J)/eV), cex=par("cex"))
if(left ) mtext(side=2,line=1.2, text=expression(Delta*E/eV), cex=par("cex"))
if(right ) mtext(side=4,line=1.2, text=expression(Delta*E/eV), cex=par("cex"))
box()
}
layout(matrix(1:3, ncol=3))
par(cex=2, lwd=1.5, pty="s", mgp=c(1.1,0.1,0), tck=0.02, mar=rep(0.7,4), oma=rep(2,4))
contour(unique(J), unique(dE), log10(kMLJ ),
zlim = c(1,15), levels = -15:15, xaxt="n", yaxt="n", labcex=3)
addAxis(TRUE, TRUE, FALSE, FALSE)
title("Marcus-Levich-Jortner", line=1)
contour(unique(J), unique(dE), log10(kMarcus),
zlim = c(1,15), levels = -15:15, xaxt="n", yaxt="n", labcex=3)
addAxis(TRUE, FALSE, FALSE, FALSE)
title("Marcus", line=1)
contour(unique(J), unique(dE), log10(kLZ ),
zlim = c(1,15), levels = -15:15, xaxt="n", yaxt="n", labcex=3)
addAxis(TRUE, FALSE, FALSE, TRUE)
title("Landau-Zener", line=1)
}
\keyword{ manip }
|
results_page <- tabPanel(
title = "Average Results",
tabPanel("Model",
sidebarLayout(
sidebarPanel(
h5("Results interpretation:"),
awesomeRadio(inputId = 'interpretation',
label = NULL,
choices = c('Causal', 'Non-Causal'),
selected = 'Causal',
inline = T),
conditionalPanel(condition = "input.interpretation == 'Causal'",
p("The IHDP[auto fill] led to an increase of 4.2 points[auto fill] for students[auto fill] in this study"),
),
conditionalPanel(condition = "input.interpretation == 'Non-Causal'",
p("Students who participated in the IHDP scored 4.2 points higher, on average, than a simmilar group of students in the study who did not participate in the program. Simmilarity is conceptualized with respect to all covirates included in the analysis."),
),
awesomeRadio(inputId = "plot_result_type",
label = "Plot:",
choices = c('Sample', 'Individual'),
selected = 'Sample',
inline = T),
awesomeRadio(inputId = "plot_result_style",
label = NULL,
                   choices = c('Histogram', 'Density'),
selected = 'Density',
inline = T),
conditionalPanel(condition = "input.plot_result_type == 'Sample'",
awesomeCheckboxGroup(inputId = 'central_tendency',
label = "Show:",
inline = T,
choices = c('Mean', 'Median'),
selected = 0),
awesomeCheckboxGroup(inputId = 'show_interval',
label = NULL,
inline = T,
choices = list('80% ci' = .8, '95% ci' = .95),
selected = 'none')),
awesomeRadio(inputId = 'show_reference',
label = 'Include reference line:',
choices = c('Yes', 'No'),
inline = T,
selected = 'No'),
conditionalPanel(condition = "input.show_reference == 'Yes'",
numericInput(inputId = "reference_bar",
label = "Reference Number",
value = 0,
                                            step = 1)),
br(),
tags$button(type = 'button',
class = 'btn btn-default help',
onclick = "openConceptsPage('Concept2')",
'What are these plots telling me?'),
br(), br(),
actionButton(inputId = "analysis_results_button_back",
label = "Diagnostics"),
br(),br(),
create_progress_bar(7/7*100)
),
mainPanel(
# br(),
# tabsetPanel(
# id = "analysis_results_tabs",
# tabPanel(
# title = "Estimated Treatment Effect",
# br(),
conditionalPanel(condition = "input.plot_result_type == 'Sample'",
plotOutput(outputId = 'analysis_results_plot_PATE',
height = 400)),
conditionalPanel(condition = "input.plot_result_type == 'Individual'",
plotOutput(outputId = 'analysis_results_plot_ITE',
height = 400)),
h4('Model results'),
htmlOutput('analysis_results_table_summary')
# )
# )
)
)
)
)
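# Hedged sketch of a matching server fragment (not part of this UI file; it assumes shiny is
# attached, as it must be for the UI above, and pate_draws is a hypothetical placeholder for
# the model's posterior draws of the sample average effect):
server <- function(input, output, session) {
  pate_draws <- rnorm(4000, mean = 4.2, sd = 1)
  output$analysis_results_plot_PATE <- renderPlot({
    if (input$plot_result_style == 'Density') {
      plot(density(pate_draws), main = 'Sample average treatment effect')
    } else {
      hist(pate_draws, breaks = 30, main = 'Sample average treatment effect')
    }
  })
}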
|
/student_work/Junhui/UI/pages/model_results_page.R
|
permissive
|
nsalani22/thinkCausal_dev
|
R
| false | false | 4,281 |
r
|
results_page <- tabPanel(
title = "Average Results",
tabPanel("Model",
sidebarLayout(
sidebarPanel(
h5("Results interpretation:"),
awesomeRadio(inputId = 'interpretation',
label = NULL,
choices = c('Causal', 'Non-Causal'),
selected = 'Causal',
inline = T),
conditionalPanel(condition = "input.interpretation == 'Causal'",
p("The IHDP[auto fill] led to an increase of 4.2 points[auto fill] for students[auto fill] in this study"),
),
conditionalPanel(condition = "input.interpretation == 'Non-Causal'",
p("Students who participated in the IHDP scored 4.2 points higher, on average, than a simmilar group of students in the study who did not participate in the program. Simmilarity is conceptualized with respect to all covirates included in the analysis."),
),
awesomeRadio(inputId = "plot_result_type",
label = "Plot:",
choices = c('Sample', 'Individual'),
selected = 'Sample',
inline = T),
awesomeRadio(inputId = "plot_result_style",
label = NULL,
                   choices = c('Histogram', 'Density'),
selected = 'Density',
inline = T),
conditionalPanel(condition = "input.plot_result_type == 'Sample'",
awesomeCheckboxGroup(inputId = 'central_tendency',
label = "Show:",
inline = T,
choices = c('Mean', 'Median'),
selected = 0),
awesomeCheckboxGroup(inputId = 'show_interval',
label = NULL,
inline = T,
choices = list('80% ci' = .8, '95% ci' = .95),
selected = 'none')),
awesomeRadio(inputId = 'show_reference',
label = 'Include reference line:',
choices = c('Yes', 'No'),
inline = T,
selected = 'No'),
conditionalPanel(condition = "input.show_reference == 'Yes'",
numericInput(inputId = "reference_bar",
label = "Reference Number",
value = 0,
                                            step = 1)),
br(),
tags$button(type = 'button',
class = 'btn btn-default help',
onclick = "openConceptsPage('Concept2')",
'What are these plots telling me?'),
br(), br(),
actionButton(inputId = "analysis_results_button_back",
label = "Diagnostics"),
br(),br(),
create_progress_bar(7/7*100)
),
mainPanel(
# br(),
# tabsetPanel(
# id = "analysis_results_tabs",
# tabPanel(
# title = "Estimated Treatment Effect",
# br(),
conditionalPanel(condition = "input.plot_result_type == 'Sample'",
plotOutput(outputId = 'analysis_results_plot_PATE',
height = 400)),
conditionalPanel(condition = "input.plot_result_type == 'Individual'",
plotOutput(outputId = 'analysis_results_plot_ITE',
height = 400)),
h4('Model results'),
htmlOutput('analysis_results_table_summary')
# )
# )
)
)
)
)
|
## makeCacheMatrix creates a special "matrix" object held in memory that can cache its inverse:
## the object stores the matrix together with its inverse so that the inverse
## does not need to be recomputed once it is known
makeCacheMatrix <- function(x = matrix()) {
inv<-NULL
set<-function(y){
x<<-y
inv<<-solve(x)
}
get<-function()x
  setinv<-function(solve) inv<<-solve
getinv<-function()inv
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## cacheSolve returns the inverse of the matrix, computing and caching it only when it is not already present in the cache
cacheSolve <- function(x, ...) {
inv<- x$getinv()
if(!is.null(inv)){
message("Getting Cached Data")
return (inv)
}
  mat<-x$get()
  inv<-solve(mat, ...)
  x$setinv(inv)
  inv
}
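## A minimal usage sketch (the 2x2 example matrix below is illustrative and not part of the
## original assignment code):
m <- matrix(c(2, 0, 0, 2), nrow = 2)
cm <- makeCacheMatrix(m)
cacheSolve(cm)   # computes, caches and returns the inverse
cacheSolve(cm)   # second call prints "Getting Cached Data" and reuses the cached inverse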
|
/cachematrix.R
|
no_license
|
kurchi1205/ProgrammingAssignment2
|
R
| false | false | 770 |
r
|
## makeCacheMatrix creates a special "matrix" object held in memory that can cache its inverse:
## the object stores the matrix together with its inverse so that the inverse
## does not need to be recomputed once it is known
makeCacheMatrix <- function(x = matrix()) {
inv<-NULL
set<-function(y){
x<<-y
inv<<-solve(x)
}
get<-function()x
  setinv<-function(solve) inv<<-solve
getinv<-function()inv
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## cacheSolve returns the inverse of the matrix, computing and caching it only when it is not already present in the cache
cacheSolve <- function(x, ...) {
inv<- x$getinv()
if(!is.null(inv)){
message("Getting Cached Data")
return (inv)
}
  mat<-x$get()
  inv<-solve(mat, ...)
  x$setinv(inv)
  inv
}
|
setwd('~/Documents/DataScience/UCLADatascience/Project 7')
# Libraries needed:
library(readxl)
library(purrr)
library(tidyverse)
library(plm)
library(imputeTS)
library(corrplot)
library(caret)
library(glmnet)
library(mlbench)
library(psych)
library(VIM)
library(foreign)
library(car)
library(prediction)
# Dependent variable: 'Poverty gap at $3.20 a day (2012 PPP) (%)'
df <- read_excel('W03b_wdi.xlsx')
head(df)
map_dbl(df, ~sum(is.na(.))/ nrow(df)) # Lots of missing values (2017 seems to have partial data)
map_dbl(df[, 1:4], ~length(unique(.))) # 1591 variables: data is not tidy
df$`Country Code` <- factor(df$`Country Code`)
# Pre-processing ----------------------------------------------------------
# Simplify dataframe
indicator_reference <- subset(df, select = c("Indicator Name", "Indicator Code")) %>% distinct()
df <- subset(df, select = -`Indicator Name`)
Country_reference <- subset(df, select = c("Country Name", "Country Code")) %>% distinct()
df <- subset(df, select = -`Country Name`)
# Remove countries without value for Indicator Code: SI.POV.LMIC.GP in 2012
CountryCodes <- filter(df[df[, 2] == 'SI.POV.LMIC.GP', 1], df[df[, 2] == 'SI.POV.LMIC.GP', 55] != 'NA')
df <- subset(df, `Country Code` %in% CountryCodes$`Country Code`)
rm(CountryCodes)
# subset out years 1960 to 1980, and 2017 (2017 has partial data)
df <- subset(df, select = -c(`1960`:`1980`, `2017`))
# Separates dataframe into training and testing sets
df_test <- subset(df, select = c(1:2, 35:38)) # Excludes 2017 as well
df <- subset(df, select = 1:34)
# gathering time columns into one year column
df <- df %>% gather(key = 'Year', value = 'value', c(3:34)) # %>% filter(value != 'NA')
df_test <- df_test %>% gather(key = 'Year', value = 'value', c(3:6))
df$Year <- factor(df$Year)
# Spread Indicator code to different columns
df <- df %>% spread(key = 'Indicator Code', value = 'value')
df_test <- df_test %>% spread(key = 'Indicator Code', value = 'value')
# Remove Indicators with over 25% missing values in training set
dep_var <- subset(df, select = 'SI.POV.LMIC.GP')
df <- subset(df, select = map_dbl(df, ~sum(is.na(.))/ nrow(df)) <= .25)
df <- cbind(df, dep_var)
df_test <- subset(df_test, select = names(df)) # Selecting the same variables as the training dataset
rm(dep_var)
# Variable Selection ------------------------------------------------------
# Imputation would be too intensive to conduct on all the data. Instead we will subset a few years' worth of the data, conduct imputation and then variable selection to figure out the best independent variables to use, and then subset the desired independent variables from the original dataset, impute them and apply the chosen model.
# Restrict dataset to 2007 and 2012
df_x <- df %>% filter(df[, 2] %in% c('2012', '2007')) # Add 2007 because some variables don't have values from 2009 - 2012. Choosing 2007 because we want a year that is similar to the test set 2013-2017 and 2008 differs considerably because of a major world financial collapse.
map_dbl(df_x, ~sum(is.na(.))/ nrow(df_x))
df_x <- kNN(df_x, variable = names(df_x[, 3:321]), k = 3, imp_var = FALSE)
map_dbl(df_x, ~sum(is.na(.))/ nrow(df_x))
# Filter methods do not address multi-collinearity problems well and wrapper methods handle large numbers of explanatory variables poorly (long run-time). As a result, I will use an embedded method for variable selection. These methods include Ridge, LASSO, and Elastic Net. Note: LASSO (and Elastic Net) is what we want here because it performs variable selection while taking multicollinearity into consideration.
# Dependent variable examination
hist(df$SI.POV.LMIC.GP) # Non-normal distribution so we cannot use a linear model; use GLM instead.
# Custom Control Parameter
custom <- trainControl(method = 'repeatedcv',
number = 10,
repeats = 5,
verboseIter = FALSE) # if verboseIter = TRUE, See the model running
# glm models
set.seed(12)
ridge <- train(SI.POV.LMIC.GP ~ .,
df_x[3:322],
method = 'glmnet',
tuneGrid = expand.grid(alpha = 0, # alpha = 0: ridge | alpha = 1: lasso | alpha >0 & <1 means elastic net
lambda = seq(0.0001, 1, length = 5)),
trControl = custom,
na.action = na.omit)
set.seed(12)
lasso <- train(SI.POV.LMIC.GP ~ .,
df_x[3:322],
method = 'glmnet',
tuneGrid = expand.grid(alpha = 1, # alpha = 0: ridge | alpha = 1: lasso | alpha >0 & <1 means elastic net
lambda = seq(0.0001, 1, length = 5)),
trControl = custom,
na.action = na.omit)
set.seed(12)
elasticnet <- train(SI.POV.LMIC.GP ~ .,
df_x[3:322],
method = 'glmnet',
tuneGrid = expand.grid(alpha = seq(0, 1, length = 10), # alpha = 0: ridge | alpha = 1: lasso | alpha >0 & <1 means elastic net
lambda = seq(0.0001, 1, length = 5)),
trControl = custom,
na.action = na.omit)
# Compare Models
model_list <- list(Ridge = ridge, Lasso = lasso, ElasticNet = elasticnet)
res <- resamples(model_list)
summary(res)
# Elastic Net is a slightly better model.
# Plot Results
elasticnet
plot(elasticnet$finalModel, xvar = 'lambda', label = T)
plot(elasticnet$finalModel, xvar = 'dev', label = T) # We see great risks of overfitting past .9
plot(varImp(elasticnet, scale = F))
# Graphs above show that there is an exponential trend in explanatory importance in the variables.
# Meaningful variable names
imp <- data.frame(varImp(elasticnet, scale = F)[1])
(vars <- rownames(imp)[order(imp$Overall, decreasing=TRUE)[1:34]])
rm(df_x, imp, lasso, ridge, custom, res, model_list)
# vars holds the 34 most important explanatory variables; look up their descriptive names below
filter(indicator_reference, `Indicator Code` %in% vars)
# Data Formatting ---------------------------------------------------------
# Reduce data sets with key variables
df <- subset(df, select = c("Country Code", "Year", "SI.POV.LMIC.GP", vars))
dependent_variable <- df_test$SI.POV.LMIC.GP
df_test <- subset(df_test, select = c("Country Code", "Year", vars))
df <- kNN(df, variable = vars, k = 5, imp_var = FALSE)
df_test <- kNN(df_test, variable = vars, k = 5, imp_var = FALSE)
df <- rename(df, 'Country' = 'Country Code')
df_test <- rename(df_test, 'Country' = 'Country Code')
# Panel Data Analysis Model -----------------------------------------------
# Our data is panel data because our time series is repeated across another variable, in this case countries. Here is a great video explaining how to handle panel data time series analysis: https://www.youtube.com/watch?v=f01WjeCdgEA
# For step-by-step help: https://www.princeton.edu/~otorres/Panel101R.pdf
scatterplot(SI.POV.LMIC.GP ~ Year|Country, boxplots=FALSE, smooth=TRUE, reg.line=FALSE, data=df)
linear_model <- lm(
as.formula(paste0("SI.POV.LMIC.GP ~ ", paste(vars, collapse = " + "))),
data = df)
# Fixed effect panel data model
fxd_effect <- plm(
as.formula(paste0("SI.POV.LMIC.GP ~ ", "Country + ", paste(vars, collapse = " + "))),
data = df,
method = "within",
index = c("Country", "Year"))
# Random effect panel model
random <- plm(
as.formula(paste0("SI.POV.LMIC.GP ~ ", paste(vars, collapse = " + "))),
data = df,
method = "within",
model = "random",
index = c("Country", "Year"))
# Time effect panel data model
time_effect <- plm(
as.formula(paste0("SI.POV.LMIC.GP ~ ", "lag(Year, 1) + ", paste(vars, collapse = " + "))),
data = df,
method = "within",
effect = "time",
index = c("Country", "Year")) # Fixed effect model
# Comparing the models ----------------------------------------------------
linear <- summary(linear_model) # linear model is usually the basis for comparison
linear$adj.r.squared
fxd <- summary(fxd_effect)
fxd$r.squared
# Comparing the models
pFtest(fxd_effect, linear_model) # P-value is small, fxd_effect is better.
rand <- summary(random)
rand$r.squared
# Choice b/w Fixed Effect and Random Effect
# Hausman Test
phtest(fxd_effect, random) # If p-value is <0.05, then the fixed effect model is better than random effect!
pFtest(fxd_effect, random) # we keep the fixed effect
time <- summary(time_effect)
time$r.squared
# Model Prediction --------------------------------------------------------
# prediction(time_effect, data = df_test, at = NULL, calculate_se = FALSE) returns the following error: Error in crossprod(beta, t(X)) : non-conformable arguments.
# Unfortunately, forecasting with panel data is complex and there is currently no good function in R that enables forecasting from unbalanced panel data models. For that reason, I will select the best model among those from which I can generate a forecast.
# Linear model forecast
LMprediction <- predict(linear_model, df_test)
Predicted <- cbind(df_test, Actual = dependent_variable, Prediction = LMprediction)
# Prediction accuracy
Predicted$Prediction <- as.numeric(Predicted$Prediction)
Predicted$Actual <- as.numeric(Predicted$Actual)
Predicted <- Predicted %>% mutate(Error = Actual - Prediction)
sqrt(sum(Predicted$Error^2, na.rm = TRUE))
sum(!is.na(Predicted$Error))
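# Hedged follow-up (assumption: root-mean-squared error is the intended accuracy summary;
# the two quantities printed above are its building blocks):
rmse <- sqrt(mean(Predicted$Error^2, na.rm = TRUE))
rmse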
|
/Project 7/Project6.R
|
no_license
|
gosselinmarcantoine/UCLA-Projects
|
R
| false | false | 9,268 |
r
|
setwd('~/Documents/DataScience/UCLADatascience/Project 7')
# Libraries needed:
library(readxl)
library(purrr)
library(tidyverse)
library(plm)
library(imputeTS)
library(corrplot)
library(caret)
library(glmnet)
library(mlbench)
library(psych)
library(VIM)
library(foreign)
library(car)
library(prediction)
# Dependent variable: 'Poverty gap at $3.20 a day (2012 PPP) (%)'
df <- read_excel('W03b_wdi.xlsx')
head(df)
map_dbl(df, ~sum(is.na(.))/ nrow(df)) # Lots of missing values (2017 seems to have partial data)
map_dbl(df[, 1:4], ~length(unique(.))) # 1591 variables: data is not tidy
df$`Country Code` <- factor(df$`Country Code`)
# Pre-processing ----------------------------------------------------------
# Simplify dataframe
indicator_reference <- subset(df, select = c("Indicator Name", "Indicator Code")) %>% distinct()
df <- subset(df, select = -`Indicator Name`)
Country_reference <- subset(df, select = c("Country Name", "Country Code")) %>% distinct()
df <- subset(df, select = -`Country Name`)
# Remove countries without value for Indicator Code: SI.POV.LMIC.GP in 2012
CountryCodes <- filter(df[df[, 2] == 'SI.POV.LMIC.GP', 1], df[df[, 2] == 'SI.POV.LMIC.GP', 55] != 'NA')
df <- subset(df, `Country Code` %in% CountryCodes$`Country Code`)
rm(CountryCodes)
# subset out years 1960 to 1980, and 2017 (2017 has partial data)
df <- subset(df, select = -c(`1960`:`1980`, `2017`))
# Separates dataframe into training and testing sets
df_test <- subset(df, select = c(1:2, 35:38)) # Excludes 2017 as well
df <- subset(df, select = 1:34)
# gathering time columns into one year column
df <- df %>% gather(key = 'Year', value = 'value', c(3:34)) # %>% filter(value != 'NA')
df_test <- df_test %>% gather(key = 'Year', value = 'value', c(3:6))
df$Year <- factor(df$Year)
# Spread Indicator code to different columns
df <- df %>% spread(key = 'Indicator Code', value = 'value')
df_test <- df_test %>% spread(key = 'Indicator Code', value = 'value')
# Remove Indicators with over 25% missing values in training set
dep_var <- subset(df, select = 'SI.POV.LMIC.GP')
df <- subset(df, select = map_dbl(df, ~sum(is.na(.))/ nrow(df)) <= .25)
df <- cbind(df, dep_var)
df_test <- subset(df_test, select = names(df)) # Selecting the same variables as the training dataset
rm(dep_var)
# Variable Selection ------------------------------------------------------
# Imputation would be too intensive to conduct on all the data. Instead we will subset a few years' worth of the data, conduct imputation and then variable selection to figure out the best independent variables to use, and then subset the desired independent variables from the original dataset, impute them and apply the chosen model.
# Restrict dataset to 2007 and 2012
df_x <- df %>% filter(df[, 2] %in% c('2012', '2007')) # Add 2007 because some variables don't have values from 2009 - 2012. Choosing 2007 because we want a year that is similar to the test set 2013-2017 and 2008 differs considerably because of a major world financial collapse.
map_dbl(df_x, ~sum(is.na(.))/ nrow(df_x))
df_x <- kNN(df_x, variable = names(df_x[, 3:321]), k = 3, imp_var = FALSE)
map_dbl(df_x, ~sum(is.na(.))/ nrow(df_x))
# Filter methods do not address multi-collinearity problems well and wrapper methods handle large numbers of explanatory variables poorly (long run-time). As a result, I will use an embedded method for variable selection. These methods include Ridge, LASSO, and Elastic Net. Note: LASSO (and Elastic Net) is what we want here because it performs variable selection while taking multicollinearity into consideration.
# Dependent variable examination
hist(df$SI.POV.LMIC.GP) # Non-normal distribution so we cannot use a linear model; use GLM instead.
# Custom Control Parameter
custom <- trainControl(method = 'repeatedcv',
number = 10,
repeats = 5,
verboseIter = FALSE) # if verboseIter = TRUE, See the model running
# glm models
set.seed(12)
ridge <- train(SI.POV.LMIC.GP ~ .,
df_x[3:322],
method = 'glmnet',
tuneGrid = expand.grid(alpha = 0, # alpha = 0: ridge | alpha = 1: lasso | alpha >0 & <1 means elastic net
lambda = seq(0.0001, 1, length = 5)),
trControl = custom,
na.action = na.omit)
set.seed(12)
lasso <- train(SI.POV.LMIC.GP ~ .,
df_x[3:322],
method = 'glmnet',
tuneGrid = expand.grid(alpha = 1, # alpha = 0: ridge | alpha = 1: lasso | alpha >0 & <1 means elastic net
lambda = seq(0.0001, 1, length = 5)),
trControl = custom,
na.action = na.omit)
set.seed(12)
elasticnet <- train(SI.POV.LMIC.GP ~ .,
df_x[3:322],
method = 'glmnet',
tuneGrid = expand.grid(alpha = seq(0, 1, length = 10), # alpha = 0: ridge | alpha = 1: lasso | alpha >0 & <1 means elastic net
lambda = seq(0.0001, 1, length = 5)),
trControl = custom,
na.action = na.omit)
# Compare Models
model_list <- list(Ridge = ridge, Lasso = lasso, ElasticNet = elasticnet)
res <- resamples(model_list)
summary(res)
# Elastic Net is a slightly better model.
# Plot Results
elasticnet
plot(elasticnet$finalModel, xvar = 'lambda', label = T)
plot(elasticnet$finalModel, xvar = 'dev', label = T) # We see great risks of overfitting past .9
plot(varImp(elasticnet, scale = F))
# Graphs above show that there is an exponential trend in explanatory importance in the variables.
# Meaningful variable names
imp <- data.frame(varImp(elasticnet, scale = F)[1])
(vars <- rownames(imp)[order(imp$Overall, decreasing=TRUE)[1:34]])
rm(df_x, imp, lasso, ridge, custom, res, model_list)
# vars holds the 34 most important explanatory variables; look up their descriptive names below
filter(indicator_reference, `Indicator Code` %in% vars)
# Data Formatting ---------------------------------------------------------
# Reduce data sets with key variables
df <- subset(df, select = c("Country Code", "Year", "SI.POV.LMIC.GP", vars))
dependent_variable <- df_test$SI.POV.LMIC.GP
df_test <- subset(df_test, select = c("Country Code", "Year", vars))
df <- kNN(df, variable = vars, k = 5, imp_var = FALSE)
df_test <- kNN(df_test, variable = vars, k = 5, imp_var = FALSE)
df <- rename(df, 'Country' = 'Country Code')
df_test <- rename(df_test, 'Country' = 'Country Code')
# Panel Data Analysis Model -----------------------------------------------
# Our data is panel data because our time series is repeated across another variable, in this case countries. Here is a great video explaining how to handle panel data time series analysis: https://www.youtube.com/watch?v=f01WjeCdgEA
# For step-by-step help: https://www.princeton.edu/~otorres/Panel101R.pdf
scatterplot(SI.POV.LMIC.GP ~ Year|Country, boxplots=FALSE, smooth=TRUE, reg.line=FALSE, data=df)
linear_model <- lm(
as.formula(paste0("SI.POV.LMIC.GP ~ ", paste(vars, collapse = " + "))),
data = df)
# Fixed effect panel data model
fxd_effect <- plm(
as.formula(paste0("SI.POV.LMIC.GP ~ ", "Country + ", paste(vars, collapse = " + "))),
data = df,
method = "within",
index = c("Country", "Year"))
# Random effect panel model
random <- plm(
as.formula(paste0("SI.POV.LMIC.GP ~ ", paste(vars, collapse = " + "))),
data = df,
method = "within",
model = "random",
index = c("Country", "Year"))
# Time effect panel data model
time_effect <- plm(
as.formula(paste0("SI.POV.LMIC.GP ~ ", "lag(Year, 1) + ", paste(vars, collapse = " + "))),
data = df,
method = "within",
effect = "time",
index = c("Country", "Year")) # Fixed effect model
# Comparing the models ----------------------------------------------------
linear <- summary(linear_model) # linear model is usually the basis for comparison
linear$adj.r.squared
fxd <- summary(fxd_effect)
fxd$r.squared
# Comparing the models
pFtest(fxd_effect, linear_model) # P-value is small, fxd_effect is better.
rand <- summary(random)
rand$r.squared
# Choice b/w Fixed Effect and Random Effect
# Hausman Test
phtest(fxd_effect, random) # If p-value is <0.05, then the fixed effect model is better than random effect!
pFtest(fxd_effect, random) # we keep the fixed effect
time <- summary(time_effect)
time$r.squared
# Model Prediction --------------------------------------------------------
# prediction(time_effect, data = df_test, at = NULL, calculate_se = FALSE) returns the following error: Error in crossprod(beta, t(X)) : non-conformable arguments.
# Unfortunately, forecasting with panel data is complex and there is currently no good function in R that enables forecasting from unbalanced panel data models. For that reason, I will select the best model among those from which I can generate a forecast.
# Linear model forecast
LMprediction <- predict(linear_model, df_test)
Predicted <- cbind(df_test, Actual = dependent_variable, Prediction = LMprediction)
# Prediction accuracy
Predicted$Prediction <- as.numeric(Predicted$Prediction)
Predicted$Actual <- as.numeric(Predicted$Actual)
Predicted <- Predicted %>% mutate(Error = Actual - Prediction)
sqrt(sum(Predicted$Error^2, na.rm = TRUE))
sum(!is.na(Predicted$Error))
|
\name{show.oas}
\alias{show.oas}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Function to display list of available orthogonal arrays
}
\description{
This function allows one to inspect the list of available orthogonal arrays,
optionally specifying selection criteria
}
\usage{
show.oas(name = "all", nruns = "all", nlevels = "all", factors = "all",
regular = "all", GRgt3 = c("all", "tot", "ind"), Rgt3 = FALSE, show = 10,
parents.only = FALSE, showGRs = FALSE, showmetrics = FALSE, digits = 3)
}
\arguments{
\item{name}{
character string or vector of character strings giving name(s) of (an) orthogonal
array(s); results in an error if name does not contain any valid name;
warns if name contains any invalid name
}
\item{nruns}{
the requested number of runs or a 2-element vector
with a minimum and maximum for the number of runs
}
\item{nlevels}{
a vector of requested numbers of levels for a set of factors in question,
must contain integers > 1 only;\cr
nlevels cannot be specified together with factors
}
\item{factors}{
a list with the two elements \code{nlevels} and \code{number}, which are both integer
vectors of equal length;\cr
\code{nlevels} contains the number of levels and \code{number} the number of factors
for the corresponding number of levels
}
\item{regular}{
either unrestricted (the default \dQuote{all}), a logical which requests
(\code{TRUE}) or rejects (\code{FALSE}) regular arrays, or the character string
\dQuote{strict} to request strictly regular arrays, for which all confounded factors
are \emph{completely} confounded with a 2-factor interaction of two other factors (the
latter are fixed level arrays or crossed arrays)
}
\item{GRgt3}{
either unrestricted (the default \dQuote{all}), or a character string which requests
\code{GR} (\code{"tot"}) or \code{GRind} (\code{"ind"}) to be larger than 3\cr
}
\item{Rgt3}{
logical requesting inclusion of standard resolution 3 arrays as listed in
\code{\link{oacat}} per default, and restricting the output to arrays of
resolution at least IV (as listed in \code{\link{oacat3}}),
if changed to \code{TRUE}\cr
}
\item{show}{
an integer number specifying how many arrays are to be listed (upper bound),
or the character string \code{"all"} for showing all arrays, no matter how many.
The default is to show 10 arrays. \code{show = 0} switches off the display of the result
and only returns a value. Since August 2018, the number refers to stronger and weaker arrays, separately.
}
\item{parents.only}{
logical specifying whether to show only parent arrays or child arrays as well;
the default is \code{FALSE} for inclusion of child arrays
}
\item{showGRs}{
logical specifying whether to show the generalized resolution quality metrics
with the resulting arrays; the default is \code{FALSE}. If set to \code{TRUE},
three metrics are displayed (see Details section).
}
\item{showmetrics}{
logical specifying whether to show all array quality metrics with the resulting
arrays; the default is \code{FALSE}. If set to \code{TRUE}, several metrics
are displayed (see Details section).
}
\item{digits}{
integer number of significant digits to show for GR and A metrics;
irrelevant, if \code{showmetrics} is \code{FALSE}
}
}
\details{
The function shows the arrays that are listed in the data frames \code{\link{oacat}}
or \code{\link{oacat3}}.
For child arrays that have to be generated with a lineage rule
(can be automatically done with function \code{\link{oa.design}}), the lineage is displayed
together with the array name. The option \code{parent.only = TRUE}
suppresses printing and output of child arrays. The structure of the lineage entry
is documented under \code{\link{oacat}}.
If display of metrics is requested with \code{showmetrics=TRUE}, the printed output shows the metrics
GR*, GRind*, regular (logical, whether regular or not), SCones* (number of squared canonical correlations that are 1),
and the numbers of words of lengths 3 to 8 (A3 to A8). \code{showGRs=TRUE}
requests the metrics marked with asterisks only (without \code{SCones} in case \code{GRgt3="ind"}). More information on
all these metrics can be found \code{\link[=generalized.word.length]{here}}
}
\value{
A data frame with the three columns \code{name}, \code{nruns} and \code{lineage},
containing the array name, the number of runs and - if applicable - the lineage for generating the array
from other arrays. The lineage entry is empty for parent arrays that are either directly available
in the package and can be accessed by giving their name (e.g. \code{L18.3.6.6.1}) or are full factorials
(e.g. \code{L28.4.1.7.1}). If further information has been requested (e.g. with \code{showmetrics=TRUE}),
the data frame contains additional columns.
If no array has been found, the returned value is \code{NULL}.
}
\author{
Ulrike Groemping
}
\note{
Thanks to Peter Theodor Wilrich for proposing such a function.
}
\references{
Kuhfeld, W. (2009). Orthogonal arrays. Website courtesy of SAS Institute
\url{https://support.sas.com/techsup/technote/ts723b.pdf} and references therein.
Mee, R. (2009). \emph{A Comprehensive Guide to Factorial Two-Level Experimentation}.
New York: Springer.
}
\examples{
## the first 10 orthogonal arrays with 24 to 28 runs
show.oas(nruns = c(24,28))
## the first 10 orthogonal arrays with 24 to 28 runs
## excluding child arrays
show.oas(nruns = c(24,28), parents.only=TRUE)
## the orthogonal arrays with 4 2-level factors, one 4-level factor and one 5-level factor
show.oas(factors = list(nlevels=c(2,4,5),number=c(4,1,1)))
## show them all with quality metrics
show.oas(factors = list(nlevels=c(2,4,5),number=c(4,1,1)), show=Inf, showmetrics=TRUE)
## pick only those with no complete confounding of any degrees of freedom
show.oas(factors = list(nlevels=c(2,4,5),number=c(4,1,1)), GRgt3="ind", showmetrics=TRUE)
## the orthogonal arrays with 4 2-level factors, one 7-level factor and one 5-level factor
show.oas(factors = list(nlevels=c(2,7,5),number=c(4,1,1)))
## the latter orthogonal arrays with the nlevels notation
## (that can also be used in a call to oa.design subsequently)
show.oas(nlevels = c(2,7,2,2,5,2))
## calling arrays by name
show.oas(name=c("L12.2.11", "L18.2.1.3.7"))
}
\seealso{
\code{\link{oa.design}} for using the arrays from \code{\link{oacat}} in design creation\cr
\code{\link{oacat}} for the data frames underlying the function\cr
}
\keyword{ array }
\keyword{ design }
|
/man/show.oas.Rd
|
no_license
|
cran/DoE.base
|
R
| false | false | 6,932 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mice.impute.2l.pan.R
\name{mice.impute.2l.pan}
\alias{mice.impute.2l.pan}
\alias{2l.pan}
\title{Imputation by a two-level normal model using \code{pan}}
\usage{
mice.impute.2l.pan(
y,
ry,
x,
type,
intercept = TRUE,
paniter = 500,
groupcenter.slope = FALSE,
...
)
}
\arguments{
\item{y}{Incomplete data vector of length \code{n}}
\item{ry}{Vector of missing data pattern (\code{FALSE}=missing,
\code{TRUE}=observed)}
\item{x}{Matrix (\code{n} x \code{p}) of complete covariates.}
\item{type}{Vector of length \code{ncol(x)} identifying random and class
variables. Random effects are identified by a '2'. The group variable (only
one is allowed) is coded as '-2'. Random effects also include the fixed
effect. If group means of a covariate X1 shall be calculated and included
as further fixed effects, choose '3'. In addition to the effects in '3',
specification '4' also includes random effects of X1.}
\item{intercept}{Logical determining whether the intercept is automatically
added.}
\item{paniter}{Number of iterations in \code{pan}. Default is 500.}
\item{groupcenter.slope}{If \code{TRUE}, in case of group means (\code{type}
is '3' or '4'), group mean centering for these predictors is conducted before
doing imputations. Default is \code{FALSE}.}
\item{...}{Other named arguments.}
}
\value{
A vector of length \code{nmis} with imputations.
}
\description{
Imputes univariate missing data using a two-level normal model with
homogeneous within group variances. Aggregated group effects (i.e. group
means) can be automatically created and included as predictors in the
two-level regression (see argument \code{type}). This function needs the
\code{pan} package.
}
\details{
Implements the Gibbs sampler for the linear two-level model with homogeneous
within group variances which is a special case of a multivariate linear mixed
effects model (Schafer & Yucel, 2002). For a two-level imputation with
heterogeneous within-group variances see \code{\link{mice.impute.2l.norm}}. %
The random intercept is automatically added in %
\code{mice.impute.2l.norm()}.
}
\note{
This function does not implement the \code{where} functionality. It
always produces \code{nmis} imputations, irrespective of the \code{where}
argument of the \code{mice} function.
}
\examples{
# simulate some data
# two-level regression model with fixed slope
# number of groups
G <- 250
# number of persons
n <- 20
# regression parameter
beta <- .3
# intraclass correlation
rho <- .30
# correlation with missing response
rho.miss <- .10
# missing proportion
missrate <- .50
y1 <- rep(rnorm(G, sd = sqrt(rho)), each = n) + rnorm(G * n, sd = sqrt(1 - rho))
x <- rnorm(G * n)
y <- y1 + beta * x
dfr0 <- dfr <- data.frame("group" = rep(1:G, each = n), "x" = x, "y" = y)
dfr[rho.miss * x + rnorm(G * n, sd = sqrt(1 - rho.miss)) < qnorm(missrate), "y"] <- NA
# empty imputation in mice
imp0 <- mice(as.matrix(dfr), maxit = 0)
predM <- imp0$predictorMatrix
impM <- imp0$method
# specify predictor matrix and method
predM1 <- predM
predM1["y", "group"] <- -2
predM1["y", "x"] <- 1 # fixed x effects imputation
impM1 <- impM
impM1["y"] <- "2l.pan"
# multilevel imputation
imp1 <- mice(as.matrix(dfr),
m = 1, predictorMatrix = predM1,
method = impM1, maxit = 1
)
# multilevel analysis
library(lme4)
mod <- lmer(y ~ (1 + x | group) + x, data = complete(imp1))
summary(mod)
# Examples of predictorMatrix specification
# random x effects
# predM1["y","x"] <- 2
# fixed x effects and group mean of x
# predM1["y","x"] <- 3
# random x effects and group mean of x
# predM1["y","x"] <- 4
}
\references{
Schafer J L, Yucel RM (2002). Computational strategies for multivariate
linear mixed-effects models with missing values. \emph{Journal of
Computational and Graphical Statistics}. \bold{11}, 437-457.
Van Buuren, S., Groothuis-Oudshoorn, K. (2011). \code{mice}: Multivariate
Imputation by Chained Equations in \code{R}. \emph{Journal of Statistical
Software}, \bold{45}(3), 1-67. \url{https://www.jstatsoft.org/v45/i03/}
}
\seealso{
Other univariate-2l:
\code{\link{mice.impute.2l.bin}()},
\code{\link{mice.impute.2l.lmer}()},
\code{\link{mice.impute.2l.norm}()}
}
\author{
Alexander Robitzsch (IPN - Leibniz Institute for Science and
Mathematics Education, Kiel, Germany), \email{robitzsch@ipn.uni-kiel.de}
}
\concept{univariate-2l}
|
/man/mice.impute.2l.pan.Rd
|
no_license
|
carpenitoThomas/mice
|
R
| false | true | 4,545 |
rd
|
# Feb 20, 2017
# clear environment
rm(list = ls())
# load library
library(e1071)
# prepare data
set.seed(1)
x <- matrix(rnorm(20*2), ncol = 2)
y <- c(rep(-1,10), rep(1,10))
x[y==1, ] <- x[y==1, ] + 1
plot(x, col=3-y)
# fit into SVM
dat <- data.frame(x=x, y=as.factor(y))
svm.fit <- svm(y~., data = dat, kernel = 'linear', cost = 10, scale = FALSE)
# the cost argument allows people to specify the cost of violation to the margin,
# when the cost is small, the margins will be wide
# to prevent overfitting, cost has to be contained within a reasonable range
# results
plot(svm.fit, dat)
summary(svm.fit)
# parameter tuning
set.seed(1)
tune.out <- tune(svm, y~., data = dat, kernel = 'linear',
ranges=list(cost=c(.001, .01, .1, 1, 5, 10, 100)))
# result
summary(tune.out)
# choose best model
bestmod <- tune.out$best.model
summary(bestmod)
# prepare testing data
xtest <- matrix(rnorm(20*2), ncol=2)
ytest <- sample(c(-1, 1), 20, rep=TRUE)
xtest[ytest == 1, ] <- xtest[ytest == 1, ] + 1
testdat <- data.frame(x=xtest, y=as.factor(ytest))
# test model on testing data
ypred <- predict(bestmod, testdat)
table(predict=ypred, truth=testdat$y)
# non-linear kernel
# 1, polynomial kernel
# 2, radial kernel
# prepare data
set.seed(1)
x <- matrix(rnorm(200*2), ncol=2)
x[1:100, ] <- x[1:100, ] + 2
x[101:150, ] <- x[101:150, ] - 2
y <- c(rep(1, 150), rep(2, 50))
dat <- data.frame(x=x, y=as.factor(y))
plot(x, col=y)
# radial kernel
train <- sample(200, 100)
svmfit <- svm(y~., data = dat[train, ], kernel='radial', gamma=1, cost=1)
plot(svmfit, dat[train, ])
summary(svmfit)
# parameter tuning
set.seed(1)
tune.out <- tune(svm, y~., data=dat[train, ], kernel='radial',
ranges = list(cost=c(.1, 1, 10, 100, 1000),
gamma=c(.5, 1, 2, 3, 4)))
# best model
bestmod <- tune.out$best.model
# performance
table(true = dat[-train, 'y'], pred=predict(bestmod, dat[-train, ]))
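# (Added sketch, not part of the original lab script) Turning the confusion table above into a
# single number; 'conf' is a name introduced here purely for illustration.
conf <- table(true = dat[-train, "y"], pred = predict(bestmod, dat[-train, ]))
1 - sum(diag(conf)) / sum(conf) # misclassification rate of the tuned radial SVM on the held-out half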
|
/C9, SVM.R
|
permissive
|
qjin2016/An-Introduction-to-Statistical-Learning
|
R
| false | false | 1,951 |
r
|
#######################################################################
# rBLAST - Interface to BLAST
# Copyright (C) 2015 Michael Hahsler and Anurag Nagar
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#' @export
blast <- function(db = NULL) {
if(is.null(db)) stop("No BLAST database specified!")
db <- file.path(normalizePath(dirname(db)), basename(db))
if(length(Sys.glob(paste(db, "*", sep="")))<1) stop("BLAST database does not exist!")
structure(list(db = db), class="BLAST")
}
#' @export
print.BLAST <- function(x, info=TRUE, ...) {
cat("BLAST Database\nLocation:", x$db, "\n")
if(info) {
out <- system(paste(.findExecutable("blastdbcmd"), "-db", x$db,
"-info"), intern=TRUE)
cat(paste(out, collapse="\n"))
cat("\n")
}
}
#' @export
blast_help <- function() {
system(paste(.findExecutable(c("blastn")),
"-help"))
}
#' Get BLAST results in a table
#'
#' Format the BLAST results as a data.frame.
#'
#' @param object BLAST object. [BLAST]
#' @param seq Biostring object. [Biostrings]
#' @return return a data.frame.
#'
#' @examples
#'
#' @export
getblast <- function(object, seq, BLAST_args="", custom_format ="",
...) {
db <- object$db
x <- seq
## get temp files and change working directory
wd <- tempdir()
dir <- getwd()
temp_file <- basename(tempfile(tmpdir = wd))
on.exit({
#cat(temp_file, "\n")
file.remove(Sys.glob(paste(temp_file, "*", sep="")))
setwd(dir)
})
setwd(wd)
infile <- paste(temp_file, ".fasta", sep="")
outfile <- paste(temp_file, "_BLAST_out.txt", sep="")
writeXStringSet(x, infile, append=FALSE, format="fasta")
system(paste(.findExecutable("blastn"), "-db", db,
"-query", infile, "-out", outfile, '-outfmt "10', custom_format,
'"', BLAST_args))
## default BLAST output column names (used when no custom format is given)
if(custom_format == "") {
c_names <- c("QueryID", "SubjectID", "Perc.Ident",
"Alignment.Length", "Mismatches", "Gap.Openings", "Q.start", "Q.end",
"S.start", "S.end", "E", "Bits" )
}else{
c_names <- unlist(strsplit(custom_format, split = " +"))
}
## read and parse BLAST output
if(is(try(cl_tab <- read.table(outfile, sep=","), silent=TRUE), "try-error")) {
warning("BLAST did not return a match!")
cl_tab <- data.frame(matrix(ncol=length(c_names), nrow=0))
}
if(ncol(cl_tab) != length(c_names)) stop("Problem with format (e.g., custom_format)!")
colnames(cl_tab) <- c_names
cl_tab
}
#' @export
.findExecutable <- function(exe, interactive=TRUE) {
path <- Sys.which(exe)
if(all(path=="")) {
if(interactive) stop("Executable for ", paste(exe, collapse=" or "), " not found! Please make sure that the software is correctly installed and, if necessary, path variables are set.", call.=FALSE)
return(character(0))
}
path[which(path!="")[1]]
}
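## (Added sketch, kept as comments because it needs a local BLAST+ installation and a formatted
## database) Hypothetical usage of the functions above; the database path, FASTA file name and
## e-value cutoff are illustrative assumptions, not part of the package.
# library(Biostrings)
# bl <- blast(db = "~/blastdb/16S_db") # wrap an existing nucleotide database
# print(bl) # show database info via blastdbcmd
# query <- readDNAStringSet("my_queries.fasta") # sequences to search with
# hits <- getblast(bl, query, BLAST_args = "-evalue 1e-10")
# head(hits) # data.frame with QueryID, SubjectID, Perc.Ident, ...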
|
/R/BLAST.R
|
no_license
|
yangjl/findpos
|
R
| false | false | 3,533 |
r
|
## cacheMatrix.R - functions to return the inverse of a given matrix, storing the result in cache for faster
## execution in a repeat situation.
## makeCacheMatrix - given a matrix argument, create a list of four functions to store and retrieve from the cache
## both the matrix and its inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- matrix()
set <- function(y) {
x <<- y
m <<- matrix()
}
get <- function() x
setinverse <- function(solve) m <<- solve
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## cacheSolve - given a matrix, return its inverse. If given the same matrix again, retrieve inverse from cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
if(!all(is.na(m))) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
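## Example usage (added as an illustrative sketch; the 2x2 matrix below is arbitrary):
## the first call to cacheSolve() computes and caches the inverse, the second call
## prints "getting cached data" and returns the cached result without recomputing.
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(cm) # computes solve() and stores the inverse in the cache
cacheSolve(cm) # retrieves the cached inverse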
|
/cachematrix.R
|
no_license
|
lmarin17/ProgrammingAssignment2
|
R
| false | false | 1,027 |
r
|
library(PBSmapping)
### Name: plotLines
### Title: Plot a PolySet as Polylines
### Aliases: plotLines
### Keywords: hplot
### ** Examples
local(envir=.PBSmapEnv,expr={
oldpar = par(no.readonly=TRUE)
#--- create a PolySet to plot
polys <- data.frame(PID=rep(1,4),POS=1:4,X=c(0,1,1,0),Y=c(0,0,1,1))
#--- plot the PolySet
plotLines(polys, xlim=c(-.5,1.5), ylim=c(-.5,1.5))
par(oldpar)
})
|
/data/genthat_extracted_code/PBSmapping/examples/plotLines.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 404 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/str-split-twice.R
\name{str_split_twice}
\alias{str_split_twice}
\title{Extract numeric values from string.}
\usage{
str_split_twice(char, min_only = TRUE)
}
\arguments{
\item{char}{Character string.}
\item{min_only}{Logical specifying if only the first numeric value (\code{TRUE}) or
all numeric values (\code{FALSE}) should be returned. Default is \code{TRUE}.}
}
\value{
numeric values inside \code{char} string.
}
\description{
The function splits any character string at each tab and space and returns
all (min_only = FALSE) or only the first (min_only = T) numeric value found in the string.
}
\examples{
str_split_twice(char = "Hello 15")
str_split_twice(char = "flag1 15 16\\t15", min_only = FALSE)
}
|
/man/str_split_twice.Rd
|
no_license
|
cran/atlantistools
|
R
| false | true | 816 |
rd
|
##### 04/06/2018: R Script to homogenize the taxonomic nomenclature (species names) across all v6 zooplankton datasets
### Aims to
# - load the classification keys (excel sheets of corrected species names) and the v6 datasets
# - use them to correct the species names in the v6 datasets
# - check resulting labels/ species names
# - fill in the potential gaps at the higher taxonomic levels (order? family ?)
# module load R/3.4.3/ # To load latest R version on kryo (shell command, run outside R)
### Latest update: 05/06/2018
library("dplyr")
library("tidyr")
library("stringr")
library("reshape2")
### ----------------------------------------------------------------------------------------------------------------------------
##### 1) v6-v5.1v3.1 ----------------------------------------------------------------------------------------------------------
### First, load the classif file containing the corrected species labels
setwd("/UP_home/fabioben/Desktop/OVERSEE/data")
names_main <- read.csv("species_v6-v5.1v3.2.csv", h = TRUE, sep = ";")
dim(names_main)
str(names_main)
head(names_main)
# Plus the v6-v5.1v3.1 species
names <- read.csv("species_v6-v5.1v3.1.csv", h = TRUE, sep = ";")
dim(names)
str(names)
head(names)
# rbind both
names <- rbind(names_main, names)
rm(names_main)
# unique(names$correct_name)
### Second, load the observation data
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v6-v5.1v3.1/")
# dir()
# Identify the group files to clean, apply correction and save in v7 directory per file
files <- dir()[c(1:3,5,7:19,21:22)]
# files
### For each file:
# - remove the obs that correspond to species names that are labelled as "to remove"
# - correct the labels that are labelled as 'to correct'
# - save in v7 dir, check results
# f <- files[3]
for(f in files) {
# Useless message
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v6-v5.1v3.1/")
message(paste(f, sep = ""))
# Load the data
data <- get(load(f))
# Add underscore to species names if you have not done so already
data$species <- str_replace_all(as.character(data$species), " ", "_")
# unique(data$species)
# class(data$species)
# Remove the species that are marked as 'remove'
toremove <- as.character(unique(names[which(names$action == "remove"),"species"]))
# class(toremove)
data2 <- data[!(data$species %in% toremove),]
# dim(data) ; dim(data2)
# And correct species labels when necessary
tocorrect <- unique(names[which(names$action == "correct"),"correct_name"])
### BEWARE: 'tocorrect' contains correct labels but only for the species to be corrected,
### the current wrong species names will be in the 'wrongnames' string
### For each label to be corrected, find the wrong labels in 'data2' and replace them
data3 <- as.matrix(data2) # needed to replace factor levels...
for(sp in tocorrect) {
# Useless message, again
message(paste(sp, sep = ""))
# Find the wrong names that correspond to 'sp', the real name
wrongnames <- names[names$correct_name == sp,"species"]
# Correct
data3[which(data3[,"species"] %in% wrongnames),"species"] <- as.character(sp)
} # eo for loop
# Check data3 if necessary
data3 <- as.data.frame(data3)
# dim(data3)
# str(data3)
# head(data3)
# unique(data3$species)
### Save in proper v7 dir
message(paste("------------------------------------------------------------------------------------------", sep = ""))
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v7-5.1v3.1/")
save(data3, file = str_replace(f, "15_05_18", "05_06_18") )
} # eo for loop
### Check v7 results
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v7-5.1v3.1/")
files <- dir()
res <- lapply(files, function(f) {
dd <- get(load(f))
return(dd)
}
) # eo lapply
table <- do.call(rbind, res)
dim(table) # 1'532'615
str(table)
length(unique(table$species)) # 1973 species
rm(res,data3,data2,data,tocorrect,toremove,wrongnames) ; gc()
### Check if they match with the correct_names from names
unique(table$species)
dplyr::setdiff(unique(table$species), names$correct_name)
table[table$species == "Caesaromysis_hispida",]
names[names$correct_name == "Caesaromysis_hispida",]
# OK
unique(table[table$class == "Scyphozoa","species"])
# OK only the 3 holoplanktonic species :)
unique(table[table$class == "Hexanauplia","species"])
# count species and order per n
data.frame(table[table$class == "Hexanauplia",] %>% count(species))
# OK good !!
rm(table)
gc()
### You can keep these v7 datasets, just copy/paste the two PANGAEA datasets
##### 2) v6-v5.1v3.2 ----------------------------------------------------------------------------------------------------------
### First, load the classif file containing the corrected species labels
setwd("/UP_home/fabioben/Desktop/OVERSEE/data")
names <- read.csv("species_v6-v5.1v3.2.csv", h = TRUE, sep = ";")
dim(names)
str(names)
head(names)
### Second, load the observation data
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v6-v5.1v3.2/")
# dir()
# Identify the group files to clean, apply correction and save in v7 directory per file
files <- dir()[c(1:3,5,7:19,21:22)]
# files
### For each file:
# - remove the obs that correspond to species names that are labelled as "to remove"
# - correct the labels that are labelled as 'to correct'
# - save in v7 dir, check results
# f <- files[2]
for(f in files) {
# Useless message
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v6-v5.1v3.2/")
message(paste(f, sep = ""))
# Load the data
data <- get(load(f))
# Add underscore to species names if you have not done so already
data$species <- str_replace_all(as.character(data$species), " ", "_")
# Remove the species that are marked as 'remove'
toremove <- as.character(unique(names[which(names$action == "remove"),"species"]))
# class(toremove)
data2 <- data[!(data$species %in% toremove),]
# dim(data) ; dim(data2)
# And correct species labels when necessary
tocorrect <- unique(names[which(names$action == "correct"),"correct_name"])
### BEWARE: 'tocorrect' contains correct labels but only for the species to be corrected,
### the current wrong species names will be in the 'wrongnames' string
### For each label to be corrected, find the wrong labels in 'data2' and replace them
data3 <- as.matrix(data2) # needed to replace factor levels...
for(sp in tocorrect) {
# Useless message, again
message(paste(sp, sep = ""))
# Find the wrong names that correspond to 'sp', the real name
wrongnames <- names[names$correct_name == sp,"species"]
# Correct
data3[which(data3[,"species"] %in% wrongnames),"species"] <- as.character(sp)
} # eo for loop
# Check data3 if necessary
data3 <- as.data.frame(data3)
# dim(data3)
# str(data3)
# head(data3)
# unique(data3$species)
### Save in proper v7 dir
message(paste("------------------------------------------------------------------------------------------", sep = ""))
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v7-5.1v3.2/")
save(data3, file = str_replace(f, "15_05_18", "05_06_18") )
} # eo for loop
### Check v7 results
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v7-5.1v3.2/")
files <- dir()
res <- lapply(files, function(f) {
dd <- get(load(f))
return(dd)
}
) # eo lapply
table <- do.call(rbind, res)
dim(table) # 2'393'495
str(table)
length(unique(table$species)) # 2211 species
# Clean
rm(res, data3, data2, data, tocorrect, toremove, wrongnames) ; gc()
### Check if they match with the correct_names from names
unique(table$species)
dplyr::setdiff(unique(table$species), names$correct_name)
table[table$species == "Caesaromysis_hispida",]
names[names$correct_name == "Caesaromysis_hispida",]
# OK
unique(table[table$class == "Scyphozoa","species"])
# OK only the 3 holoplanktonic species :)
unique(table[table$class == "Hexanauplia","species"])
# count species and order per n
data.frame(table[table$class == "Hexanauplia",] %>% count(species))
# OK good !!
rm(table)
gc()
##### 3) v6-v5.2v3.1 ----------------------------------------------------------------------------------------------------------
### First, load the classif file containing the corrected species labels
setwd("/UP_home/fabioben/Desktop/OVERSEE/data")
names_main <- read.csv("species_v6-v5.1v3.2.csv", h = TRUE, sep = ";")
dim(names_main)
str(names_main)
head(names_main)
# Plus the v6-v5.1v3.1 species
names <- read.csv("species_v6-v5.2v3.1.csv", h = TRUE, sep = ";")
dim(names)
str(names)
head(names)
# rbind both
names <- rbind(names_main, names)
rm(names_main)
### Second, load the observation data
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v6-v5.2v3.1/")
# dir()
# Identify the group files to clean, apply correction and save in v7 directory per file
files <- dir()[c(1:3,5,7:19,21:22)]
# files
### For each file:
# - remove the obs that correspond to species names that are labelled as "to remove"
# - correct the labels that are labelled as 'to correct'
# - save in v7 dir, check results
# f <- files[2]
for(f in files) {
# Load the data
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v6-v5.2v3.1/")
data <- get(load(f))
# Add underscore to species names if you have not done so already
data$species <- str_replace_all(as.character(data$species), " ", "_")
# Remove the species that are marked as 'remove'
toremove <- as.character(unique(names[which(names$action == "remove"),"species"]))
# class(toremove)
data2 <- data[!(data$species %in% toremove),]
# dim(data) ; dim(data2)
# And correct species labels when necessary
tocorrect <- unique(names[which(names$action == "correct"),"correct_name"])
### BEWARE: 'tocorrect' contains correct labels but only for the species to be corrected,
### the current wrong species names will be in the 'wrongnames' string
### For each label to be corrected, find the wrong labels in 'data2' and replace them
data3 <- as.matrix(data2) # needed to replace factor levels...
for(sp in tocorrect) {
# Useless message, again
message(paste(sp, sep = ""))
# Find the wrong names that correspond to 'sp', the real name
wrongnames <- names[names$correct_name == sp,"species"]
# Correct
data3[which(data3[,"species"] %in% wrongnames),"species"] <- as.character(sp)
} # eo for loop
# Check data3 if necessary
data3 <- as.data.frame(data3)
# dim(data3)
# str(data3)
# head(data3)
# unique(data3$species)
### Save in proper v7 dir
message(paste("------------------------------------------------------------------------------------------", sep = ""))
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v7-5.2v3.1/")
save(data3, file = str_replace(f, "15_05_18", "05_06_18") )
} # eo for loop
### Check v7 results
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v7-5.2v3.1/")
files <- dir()
res <- lapply(files, function(f) {
dd <- get(load(f))
return(dd)
}
) # eo lapply
table <- do.call(rbind, res)
dim(table) # 1'072'102
str(table)
length(unique(table$species)) # 1313 species
rm(res, data3, data2, data, tocorrect, toremove, wrongnames) ; gc()
### Check if they match with the correct_names from names
unique(table$species)
dplyr::setdiff(unique(table$species), names$correct_name)
table[table$species == "Caesaromysis_hispida",]
names[names$correct_name == "Caesaromysis_hispida",]
# OK
unique(table[table$class == "Scyphozoa","species"])
# OK only the 3 holoplanktonic species :)
unique(table[table$class == "Hexanauplia","species"])
# count species and order per n
data.frame(table[table$class == "Hexanauplia",] %>% count(species))
# OK good !!
rm(table)
gc()
##### 4) v6-v5.2v3.2 ----------------------------------------------------------------------------------------------------------
### First, load the classif file containing the corrected species labels
setwd("/UP_home/fabioben/Desktop/OVERSEE/data")
names_main <- read.csv("species_v6-v5.1v3.2.csv", h = TRUE, sep = ";")
dim(names_main)
str(names_main)
head(names_main)
# Plus the v6-v5.1v3.1 species
names <- read.csv("species_v6-v5.2v3.2.csv", h = TRUE, sep = ";")
dim(names)
str(names)
head(names)
# rbind both
names <- rbind(names_main, names)
rm(names_main)
### Second, load the observation data
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v6-v5.2v3.2/")
# dir()
# Identify the group files to clean, apply correction and save in v7 directory per file
files <- dir()[c(1:3,5,7:19,21:22)]
# files
### For each file:
# - remove the obs that correspond to species names that are labelled as "to remove"
# - correct the labels that are labelled as 'to correct'
# - save in v7 dir, check results
# f <- files[2]
for(f in files) {
# Useless message
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v6-v5.2v3.2/")
message(paste(f, sep = ""))
# Load the data
data <- get(load(f))
# Add underscore to species names if you have not done so already
data$species <- str_replace_all(as.character(data$species), " ", "_")
# Remove the species that are marked as 'remove'
toremove <- as.character(unique(names[which(names$action == "remove"),"species"]))
# class(toremove)
data2 <- data[!(data$species %in% toremove),]
# dim(data) ; dim(data2)
# And correct species labels when necessary
tocorrect <- unique(names[which(names$action == "correct"),"correct_name"])
### BEWARE: 'tocorrect' contains correct labels but only for the species to be corrected,
### the current wrong species names will be in the 'wrongnames' string
### For each label to be corrected, find the wrong labels in 'data2' and replace them
data3 <- as.matrix(data2) # needed to replace factor levels...
for(sp in tocorrect) {
# Useless message, again
message(paste(sp, sep = ""))
# Find the wrong names that correspond to 'sp', the real name
wrongnames <- names[names$correct_name == sp,"species"]
# Correct
data3[which(data3[,"species"] %in% wrongnames),"species"] <- as.character(sp)
} # eo for loop
# Check data3 if necessary
data3 <- as.data.frame(data3)
# dim(data3)
# str(data3)
# head(data3)
# unique(data3$species)
### Save in proper v7 dir
message(paste("------------------------------------------------------------------------------------------", sep = ""))
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v7-5.2v3.2/")
save(data3, file = str_replace(f, "15_05_18", "05_06_18") )
} # eo for loop
### Check v7 results
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v7-5.2v3.2/")
files <- dir()
res <- lapply(files, function(f) {
dd <- get(load(f))
return(dd)
}
) # eo lapply
table <- do.call(rbind, res)
dim(table) # 1'864'450 obs
str(table)
length(unique(table$species)) # 1663 species
rm(res, data3, data2, data, tocorrect, toremove, wrongnames) ; gc()
### Check if they match with the correct_names from names
unique(table$species)
dplyr::setdiff(unique(table$species), names$correct_name)
table[table$species == "Heteromysis_panamaensis",]
names[names$correct_name == "Heteromysis_panamaensis",]
# OK
unique(table[table$class == "Scyphozoa","species"])
# OK only the 3 holoplanktonic species :)
unique(table[table$class == "Hexanauplia","species"])
# count species and order per n
data.frame(table[table$class == "Hexanauplia",] %>% count(species))
# OK good !!
rm(table)
gc()
##### ----------------------------------------------------------------------------------------------------------
### For each v7 dataset, report n obs and n species onto the workflow excel sheet
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v7-5.2v3.2/")
files <- dir()
for(f in files) {
message(paste(f, sep = ""))
d <- get(load(f))
message(paste("n obs = ", nrow(d), sep = ""))
message(paste("n species = ", length(unique(d$species)), sep = ""))
message(paste("", sep = ""))
message(paste("", sep = ""))
rm(d)
gc()
} # eo for loop
##### ----------------------------------------------------------------------------------------------------------
### Finally, just c/p the 2 PANGAEA datasets in the v7 directories
# Load
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v6-v5.2v3.2/")
d1 <- get(load("Copepoda_PANGAEA_04_06_18.Rdata"))
d2 <- get(load("Thecosomata_MAREDAT_31_05_18.Rdata"))
# Save in the v7 dir
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v7-5.2v3.2/")
save(d1, file = "Copepoda_PANGAEA_05_06_18.Rdata")
save(d2, file = "Thecosomata_MAREDAT_05_06_18.Rdata")
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v7-5.2v3.2/")
d1 <- get(load("Copepoda_PANGAEA_05_06_18.Rdata"))
nrow(d1)
unique(d1$species)
d2 <- get(load("Thecosomata_MAREDAT_05_06_18.Rdata"))
nrow(d2)
unique(d2$species)
### And gather all datasets to report total nb of occurrences and species number
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v6-v5.2v3.2/")
dir()
res <- lapply(dir(), function(f) {
dd <- get(load(f))
return( dd[,c("x","y","species")] )
}
) # eo lapply
table <- do.call(rbind, res)
dim(table)
str(table)
head(table)
length(unique(table$species))
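##### ----------------------------------------------------------------------------------------------------------
### (Added sketch, not part of the original workflow) The four sections above repeat the same
### remove/correct logic; a small helper such as the hypothetical clean_species() below could
### replace the duplicated blocks. It only reuses the objects and column names already used
### above (species, correct_name, action) and assumes stringr is loaded.
clean_species <- function(data, names) {
	# harmonize the spelling of the species labels
	data$species <- str_replace_all(as.character(data$species), " ", "_")
	# drop the species flagged as 'remove' in the classification key
	toremove <- as.character(unique(names[which(names$action == "remove"),"species"]))
	data <- data[!(data$species %in% toremove),]
	# replace wrong labels by their correct names
	tocorrect <- unique(names[which(names$action == "correct"),"correct_name"])
	m <- as.matrix(data) # needed to replace factor levels
	for(sp in tocorrect) {
		wrongnames <- names[names$correct_name == sp,"species"]
		m[which(m[,"species"] %in% wrongnames),"species"] <- as.character(sp)
	} # eo for loop
	as.data.frame(m)
} # eo clean_species
### e.g. data3 <- clean_species(get(load(f)), names)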
|
/ScripT#7_OVERSEE_v7.R
|
permissive
|
benfabio/Benedetti-et-al.-NCOMMS-20-37764A-
|
R
| false | false | 17,585 |
r
|
##### 04/06/2018: R Script to homogenize the taxonomic nomenclature (species names) across all v6 zooplankton datasets
### Aims to
# - load the classification keys (excel sheets of corrected species names) and the v6 datasets
# - use them to correct the species names in the v6 datasets
# - check resulting labels/ species names
# - fill in the potential gaps at the higher taxonomic levels (order? family ?)
module load R/3.4.3/ # To load latest R version on kryo
### Latest update: 05/06/2018
library("dplyr")
library("tidyr")
library("stringr")
library("reshape2")
### ----------------------------------------------------------------------------------------------------------------------------
##### 1) v6-v5.1v3.1 ----------------------------------------------------------------------------------------------------------
### First, load the classif file containing the corrected species labels
setwd("/UP_home/fabioben/Desktop/OVERSEE/data")
names_main <- read.csv("species_v6-v5.1v3.2.csv", h = TRUE, sep = ";")
dim(names_main)
str(names_main)
head(names_main)
# Plus the v6-v5.1v3.1 species
names <- read.csv("species_v6-v5.1v3.1.csv", h = TRUE, sep = ";")
dim(names)
str(names)
head(names)
# rbind both
names <- rbind(names_main, names)
rm(names_main)
# unique(names$correct_name)
### Second, load the observation data
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v6-v5.1v3.1/")
# dir()
# Identify the group files to clean, apply correction and save in v7 directory per file
files <- dir()[c(1:3,5,7:19,21:22)]
# files
### For each file:
# - remove the obs that correspond to species names that are labelled as "to remove"
# - correct the labels that are labelled as 'to correct
# - save in v7 dir, check results
# f <- files[3]
for(f in files) {
# Useless message
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v6-v5.1v3.1/")
message(paste(f, sep = ""))
# Load the data
data <- get(load(f))
# Add underscore to species names if you have not done so already
data$species <- str_replace_all(as.character(data$species), " ", "_")
# unique(data$species)
# class(data$species)
# Remove the species that are marked as 'remove'
toremove <- as.character(unique(names[which(names$action == "remove"),"species"]))
# class(toremove)
data2 <- data[!(data$species %in% toremove),]
# dim(data) ; dim(data2)
# And correct species labels when necessary
tocorrect <- unique(names[which(names$action == "correct"),"correct_name"])
### BEWARE: 'tocorrect' contains correct labels but only for the species to be corrected,
### the current wrong species names will be in the 'wrongnames' string
### For each label to be corrected, find the wrong labels in 'data2' and replace them
data3 <- as.matrix(data2) # needed to replace factor levels...
for(sp in tocorrect) {
# Useless message, again
message(paste(sp, sep = ""))
# Find the wrong names that correspond to 'sp', the real name
wrongnames <- names[names$correct_name == sp,"species"]
# Correct
data3[which(data3[,"species"] %in% wrongnames),"species"] <- as.character(sp)
} # eo for loop
# Check data3 if necessary
data3 <- as.data.frame(data3)
# dim(data3)
# str(data3)
# head(data3)
# unique(data3$species)
### Save in proper v7 dir
message(paste("------------------------------------------------------------------------------------------", sep = ""))
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v7-5.1v3.1/")
save(data3, file = str_replace(f, "15_05_18", "05_06_18") )
} # eo for loop
### Check v7 results
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v7-5.1v3.1/")
files <- dir()
res <- lapply(files, function(f) {
dd <- get(load(f))
return(dd)
}
) # eo lapply
table <- do.call(rbind, res)
dim(table) # 1'532'615
str(table)
length(unique(table$species)) # 1973 species
rm(res,data3,data2,data,tocorrect,toremove,wrongnames) ; gc()
### Check if they match with the correct_names from names
unique(table$species)
dplyr::setdiff(unique(table$species), names$correct_name)
table[table$species == "Caesaromysis_hispida",]
names[names$correct_name == "Caesaromysis_hispida",]
# OK
unique(table[table$class == "Scyphozoa","species"])
# OK only the 3 holoplanktonic species :)
unique(table[table$class == "Hexanauplia","species"])
# count species and order per n
data.frame(table[table$class == "Hexanauplia",] %>% count(species))
# OK gut !!
rm(table)
gc()
### You can keep these v7 datasets, just copy/paste the two PANGAEA datasets
##### 2) v6-v5.1v3.2 ----------------------------------------------------------------------------------------------------------
### First, load the classif file containing the corrected species labels
setwd("/UP_home/fabioben/Desktop/OVERSEE/data")
names <- read.csv("species_v6-v5.1v3.2.csv", h = TRUE, sep = ";")
dim(names)
str(names)
head(names)
### Second, load the observation data
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v6-v5.1v3.2/")
# dir()
# Identify the group files to clean, apply correction and save in v7 directory per file
files <- dir()[c(1:3,5,7:19,21:22)]
# files
### For each file:
# - remove the obs that correspond to species names that are labelled as "to remove"
# - correct the labels that are labelled as 'to correct
# - save in v7 dir, check results
# f <- files[2]
for(f in files) {
# Useless message
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v6-v5.1v3.2/")
message(paste(f, sep = ""))
# Load the data
data <- get(load(f))
# Add underscore to species names if you have not done so already
data$species <- str_replace_all(as.character(data$species), " ", "_")
# Remove the species that are marked as 'remove'
toremove <- as.character(unique(names[which(names$action == "remove"),"species"]))
# class(toremove)
data2 <- data[!(data$species %in% toremove),]
# dim(data) ; dim(data2)
# And correct species labels when necessary
tocorrect <- unique(names[which(names$action == "correct"),"correct_name"])
### BEWARE: 'tocorrect' contains correct labels but only for the species to be corrected,
### the current wrong species names will be in the 'wrongnames' string
### For each label to be corrected, find the wrong labels in 'data2' and replace them
data3 <- as.matrix(data2) # needed to replace factor levels...
for(sp in tocorrect) {
# Useless message, again
message(paste(sp, sep = ""))
# Find the wrong names that correspond to 'sp', the real name
wrongnames <- names[names$correct_name == sp,"species"]
# Correct
data3[which(data3[,"species"] %in% wrongnames),"species"] <- as.character(sp)
} # eo for loop
# Check data3 if necessary
data3 <- as.data.frame(data3)
# dim(data3)
# str(data3)
# head(data3)
# unique(data3$species)
### Save in proper v7 dir
message(paste("------------------------------------------------------------------------------------------", sep = ""))
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v7-5.1v3.2/")
save(data3, file = str_replace(f, "15_05_18", "05_06_18") )
} # eo for loop
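### Aside (editor's sketch, not part of the original workflow): the per-species correction
### loop above can also be written as a single vectorised lookup. Self-contained toy example
### with made-up "_typo" labels, so it does not touch any of the pipeline objects:
toy_lookup <- c(Calanus_finmarchicus_typo = "Calanus_finmarchicus",
                Oithona_similis_typo = "Oithona_similis")
toy_obs <- c("Calanus_finmarchicus_typo", "Oithona_similis_typo", "Oncaea_media")
ifelse(toy_obs %in% names(toy_lookup), toy_lookup[toy_obs], toy_obs)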
### Check v7 results
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v7-5.1v3.2/")
files <- dir()
res <- lapply(files, function(f) {
dd <- get(load(f))
return(dd)
}
) # eo lapply
table <- do.call(rbind, res)
dim(table) # 2'393'495
str(table)
length(unique(table$species)) # 2211 species
# Clean
rm(res, data3, data2, data, tocorrect, toremove, wrongnames) ; gc()
### Check if they match with the correct_names from names
unique(table$species)
dplyr::setdiff(unique(table$species), names$correct_name)
table[table$species == "Caesaromysis_hispida",]
names[names$correct_name == "Caesaromysis_hispida",]
# OK
unique(table[table$class == "Scyphozoa","species"])
# OK only the 3 holoplanktonic species :)
unique(table[table$class == "Hexanauplia","species"])
# count species and order per n
data.frame(table[table$class == "Hexanauplia",] %>% count(species))
# OK good !!
rm(table)
gc()
##### 3) v6-v5.2v3.1 ----------------------------------------------------------------------------------------------------------
### First, load the classif file containing the corrected species labels
setwd("/UP_home/fabioben/Desktop/OVERSEE/data")
names_main <- read.csv("species_v6-v5.1v3.2.csv", h = TRUE, sep = ";")
dim(names_main)
str(names_main)
head(names_main)
# Plus the v6-v5.2v3.1 species
names <- read.csv("species_v6-v5.2v3.1.csv", h = TRUE, sep = ";")
dim(names)
str(names)
head(names)
# rbind both
names <- rbind(names_main, names)
rm(names_main)
### Second, load the observation data
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v6-v5.2v3.1/")
# dir()
# Identify the group files to clean, apply correction and save in v7 directory per file
files <- dir()[c(1:3,5,7:19,21:22)]
# files
### For each file:
# - remove the obs that correspond to species names that are labelled as "to remove"
# - correct the labels that are labelled as 'to correct'
# - save in v7 dir, check results
# f <- files[2]
for(f in files) {
# Load the data
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v6-v5.2v3.1/")
data <- get(load(f))
# Add underscore to species names if you have not done so already
data$species <- str_replace_all(as.character(data$species), " ", "_")
# Remove the species that are marked as 'remove'
toremove <- as.character(unique(names[which(names$action == "remove"),"species"]))
# class(toremove)
data2 <- data[!(data$species %in% toremove),]
# dim(data) ; dim(data2)
# And correct species labels when necessary
tocorrect <- unique(names[which(names$action == "correct"),"correct_name"])
### BEWARE: 'tocorrect' contains correct labels but only for the species to be corrected,
### the current wrong species names will be in the 'wrongnames' string
### For each label to be corrected, find the wrong labels in 'data2' and replace them
data3 <- as.matrix(data2) # needed to replace factor levels...
for(sp in tocorrect) {
# Useless message, again
message(paste(sp, sep = ""))
# Find the wrong names that correspond to 'sp', the real name
wrongnames <- names[names$correct_name == sp,"species"]
# Correct
data3[which(data3[,"species"] %in% wrongnames),"species"] <- as.character(sp)
} # eo for loop
# Check data3 if necessary
data3 <- as.data.frame(data3)
# dim(data3)
# str(data3)
# head(data3)
# unique(data3$species)
### Save in proper v7 dir
message(paste("------------------------------------------------------------------------------------------", sep = ""))
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v7-5.2v3.1/")
save(data3, file = str_replace(f, "15_05_18", "05_06_18") )
} # eo for loop
### Check v7 results
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v7-5.2v3.1/")
files <- dir()
res <- lapply(files, function(f) {
dd <- get(load(f))
return(dd)
}
) # eo lapply
table <- do.call(rbind, res)
dim(table) # 1'072'102
str(table)
length(unique(table$species)) # 1313 species
rm(res, data3, data2, data, tocorrect, toremove, wrongnames) ; gc()
### Check if they match with the correct_names from names
unique(table$species)
dplyr::setdiff(unique(table$species), names$correct_name)
table[table$species == "Caesaromysis_hispida",]
names[names$correct_name == "Caesaromysis_hispida",]
# OK
unique(table[table$class == "Scyphozoa","species"])
# OK only the 3 holoplanktonic species :)
unique(table[table$class == "Hexanauplia","species"])
# count species and order per n
data.frame(table[table$class == "Hexanauplia",] %>% count(species))
# OK good !!
rm(table)
gc()
##### 4) v6-v5.2v3.2 ----------------------------------------------------------------------------------------------------------
### First, load the classif file containing the corrected species labels
setwd("/UP_home/fabioben/Desktop/OVERSEE/data")
names_main <- read.csv("species_v6-v5.1v3.2.csv", h = TRUE, sep = ";")
dim(names_main)
str(names_main)
head(names_main)
# Plus the v6-v5.2v3.2 species
names <- read.csv("species_v6-v5.2v3.2.csv", h = TRUE, sep = ";")
dim(names)
str(names)
head(names)
# rbind both
names <- rbind(names_main, names)
rm(names_main)
### Second, load the observation data
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v6-v5.2v3.2/")
# dir()
# Identify the group files to clean, apply correction and save in v7 directory per file
files <- dir()[c(1:3,5,7:19,21:22)]
# files
### For each file:
# - remove the obs that correspond to species names that are labelled as "to remove"
# - correct the labels that are labelled as 'to correct'
# - save in v7 dir, check results
# f <- files[2]
for(f in files) {
# Useless message
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v6-v5.2v3.2/")
message(paste(f, sep = ""))
# Load the data
data <- get(load(f))
# Add underscore to species names if you have not done so already
data$species <- str_replace_all(as.character(data$species), " ", "_")
# Remove the species that are marked as 'remove'
toremove <- as.character(unique(names[which(names$action == "remove"),"species"]))
# class(toremove)
data2 <- data[!(data$species %in% toremove),]
# dim(data) ; dim(data2)
# And correct species labels when necessary
tocorrect <- unique(names[which(names$action == "correct"),"correct_name"])
### BEWARE: 'tocorrect' contains correct labels but only for the species to be corrected,
### the current wrong species names will be in the 'wrongnames' string
### For each label to be corrected, find the wrong labels in 'data2' and replace them
data3 <- as.matrix(data2) # needed to replace factor levels...
for(sp in tocorrect) {
# Useless message, again
message(paste(sp, sep = ""))
# Find the wrong names that correspond to 'sp', the real name
wrongnames <- names[names$correct_name == sp,"species"]
# Correct
data3[which(data3[,"species"] %in% wrongnames),"species"] <- as.character(sp)
} # eo for loop
# Check data3 if necessary
data3 <- as.data.frame(data3)
# dim(data3)
# str(data3)
# head(data3)
# unique(data3$species)
### Save in proper v7 dir
message(paste("------------------------------------------------------------------------------------------", sep = ""))
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v7-5.2v3.2/")
save(data3, file = str_replace(f, "15_05_18", "05_06_18") )
} # eo for loop
### Check v7 results
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v7-5.2v3.2/")
files <- dir()
res <- lapply(files, function(f) {
dd <- get(load(f))
return(dd)
}
) # eo lapply
table <- do.call(rbind, res)
dim(table) # 1'864'450 obs
str(table)
length(unique(table$species)) # 1663 species
rm(res, data3, data2, data, tocorrect, toremove, wrongnames) ; gc()
### Check if they match with the correct_names from names
unique(table$species)
dplyr::setdiff(unique(table$species), names$correct_name)
table[table$species == "Heteromysis_panamaensis",]
names[names$correct_name == "Heteromysis_panamaensis",]
# OK
unique(table[table$class == "Scyphozoa","species"])
# OK only the 3 holoplanktonic species :)
unique(table[table$class == "Hexanauplia","species"])
# count species and order per n
data.frame(table[table$class == "Hexanauplia",] %>% count(species))
# OK good !!
rm(table)
gc()
##### ----------------------------------------------------------------------------------------------------------
### For each v7 dataset, report n obs and n species onto the workflow excel sheet
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v7-5.2v3.2/")
files <- dir()
for(f in files) {
message(paste(f, sep = ""))
d <- get(load(f))
message(paste("n obs = ", nrow(d), sep = ""))
message(paste("n species = ", length(unique(d$species)), sep = ""))
message(paste("", sep = ""))
message(paste("", sep = ""))
rm(d)
gc()
} # eo for loop
##### ----------------------------------------------------------------------------------------------------------
### Finally, just c/p the 2 PANGAEA datasets in the v7 directories
# Load
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v6-v5.2v3.2/")
d1 <- get(load("Copepoda_PANGAEA_04_06_18.Rdata"))
d2 <- get(load("Thecosomata_MAREDAT_31_05_18.Rdata"))
# Save in the v7 dir
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v7-5.2v3.2/")
save(d1, file = "Copepoda_PANGAEA_05_06_18.Rdata")
save(d2, file = "Thecosomata_MAREDAT_05_06_18.Rdata")
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v7-5.2v3.2/")
d1 <- get(load("Copepoda_PANGAEA_05_06_18.Rdata"))
nrow(d1)
unique(d1$species)
d2 <- get(load("Thecosomata_MAREDAT_05_06_18.Rdata"))
nrow(d2)
unique(d2$species)
### And gather all datasets to report total nb of occurrences and species number
setwd("/UP_home/fabioben/Desktop/OVERSEE/data/biology/occurence_data_groups/v6-v5.2v3.2/")
dir()
res <- lapply(dir(), function(f) {
dd <- get(load(f))
return( dd[,c("x","y","species")] )
}
) # eo lapply
table <- do.call(rbind, res)
dim(table)
str(table)
head(table)
length(unique(table$species))
|
library(survival)
library(dplyr)
library(xtable)
options(digits = 3)
# survival times
times <- 1:4 * 180
# true survival if everyone follows the regime.
all.files <- paste0("./outfiles/sim_truth_all_follow_out_", 1:5, ".txt")
tt_all <- do.call(rbind, lapply(all.files, read.table, header = TRUE))
(tmeans_all <- colMeans(tt_all[, paste0("surv", times)]))
sprintf("%.13f", tmeans_all)
one.files <- paste0("./outfiles/sim_truth_one_follows_out_", 1:5, ".txt")
tt_one <- do.call(rbind, lapply(one.files, read.table, header = TRUE))
event.time <- tt_one$ev_pt_time
event.indicator <- tt_one$ev_ind
surv <- survfit(Surv(event.time, event.indicator) ~ 1)
(tmeans_one <- summary(surv, times)$surv)
sprintf("%.13f", tmeans_one)
outfiles <- paste0("./outfiles/sim_out_", 1:5, ".txt")
dfs <- lapply(outfiles, function(file) read.table(file, header = TRUE))
est <- do.call(rbind, dfs)
### bias --------------------------------------------------------------------
(emeans_all <- colMeans(est[, paste0("allsurv", times)]))
(bias_all <- emeans_all - tmeans_all)
emeans_one2 <- colMeans(est[, paste0("one2surv", times)])
(bias_one2 <- emeans_one2 - tmeans_one)
nc_means <- colMeans(est[, paste0("compsurv", times)])
### coverage probability ----------------------------------------------------
z <- qnorm(0.975)
tdf <- data.frame(matrix(c(tmeans_all, tmeans_one), nrow = 1))
names(tdf) <- c(paste0("ta", times), paste0("to", times))
cpdf <- data.frame(est, tdf)
cpdf <- mutate(cpdf,
cp.all.180 = allsurv180 - z * allse180 < ta180 & ta180 < allsurv180 + z * allse180,
cp.all.360 = allsurv360 - z * allse360 < ta360 & ta360 < allsurv360 + z * allse360,
cp.all.540 = allsurv540 - z * allse540 < ta540 & ta540 < allsurv540 + z * allse540,
cp.all.720 = allsurv720 - z * allse720 < ta720 & ta720 < allsurv720 + z * allse720,
xcp.all.180 = allsurv180 - z * allse180 < to180 & to180 < allsurv180 + z * allse180,
xcp.all.360 = allsurv360 - z * allse360 < to360 & to360 < allsurv360 + z * allse360,
xcp.all.540 = allsurv540 - z * allse540 < to540 & to540 < allsurv540 + z * allse540,
xcp.all.720 = allsurv720 - z * allse720 < to720 & to720 < allsurv720 + z * allse720
)
cpdf <- mutate(cpdf,
cp.one.180 = one2surv180 - z * one2se180 < to180 & to180 < one2surv180 + z * one2se180,
cp.one.360 = one2surv360 - z * one2se360 < to360 & to360 < one2surv360 + z * one2se360,
cp.one.540 = one2surv540 - z * one2se540 < to540 & to540 < one2surv540 + z * one2se540,
cp.one.720 = one2surv720 - z * one2se720 < to720 & to720 < one2surv720 + z * one2se720,
xcp.one.180 = one2surv180 - z * one2se180 < ta180 & ta180 < one2surv180 + z * one2se180,
xcp.one.360 = one2surv360 - z * one2se360 < ta360 & ta360 < one2surv360 + z * one2se360,
xcp.one.540 = one2surv540 - z * one2se540 < ta540 & ta540 < one2surv540 + z * one2se540,
xcp.one.720 = one2surv720 - z * one2se720 < ta720 & ta720 < one2surv720 + z * one2se720
)
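# Aside (editor's sketch, not part of the original analysis): the hand-written coverage
# columns above can also be computed programmatically over `times`. The object name
# cp_all_check is the editor's; it assumes the same column naming scheme (allsurv180 /
# allse180, etc.) used throughout this script.
cp_all_check <- sapply(times, function(t) {
  lo <- est[[paste0("allsurv", t)]] - z * est[[paste0("allse", t)]]
  hi <- est[[paste0("allsurv", t)]] + z * est[[paste0("allse", t)]]
  mean(lo < tmeans_all[paste0("surv", t)] & tmeans_all[paste0("surv", t)] < hi)
})
cp_all_check # should agree with the colMeans of the cp.all.* columns computed below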
(cpse_all <- colMeans(cpdf[, paste0("cp.all.", times)]))
(cpse_one <- colMeans(cpdf[, paste0("cp.one.", times)]))
xcpse_all <- colMeans(cpdf[, paste0("xcp.all.", times)])
xcpse_one <- colMeans(cpdf[, paste0("xcp.one.", times)])
# Mean SE / Monte Carlo SE
colSd <- function(x) apply(x, 2, sd)
colMeans(est[, paste0("allse", times)]) / colSd(est[, paste0("allsurv", times)])
colMeans(est[, paste0("one2se", times)]) / colSd(est[, paste0("one2surv", times)])
### Table showing Bias and coverage probabilities
(bias_cp <- cbind(rep(1:4 * 180, 2),
c(tmeans_one, tmeans_all),
c(nc_means - tmeans_one, nc_means - tmeans_all),
c(emeans_one2 - tmeans_one, emeans_one2 - tmeans_all),
c(emeans_all - tmeans_one, emeans_all - tmeans_all),
c(cpse_one, xcpse_one),
c(xcpse_all, cpse_all))
)
print(xtable(bias_cp, digits = c(0, 0, 3, 3, 3, 3, 3, 3)), include.rownames = FALSE,
include.colnames = FALSE)
|
/summary.R
|
no_license
|
jeffrey-boatman/transplant_simulation
|
R
| false | false | 3,851 |
r
|
dat <- read.csv("https://raw.githubusercontent.com/cheryltky/bikeshare_studies/main/csvdata/Q1.csv", sep=",", header = T, stringsAsFactors=F)
library(ggplot2)
library(tidyr)
dat$index <- seq_len(dim(dat)[1])
lnb <- loess(trips_bluebike ~ index, data=dat)
lnd <- loess(trips_divvy ~ index, data=dat, method="loess")
pdf("trips_bludiv.pdf")
plot(dat$index, dat$trips_bluebike, pch=20, col="blue", bty='n', xaxt='n', xlab='', ylab="Number of Trips", ylim=c(min(c(dat$trips_bluebike, dat$trips_divvy)), max(c(dat$trips_bluebike, dat$trips_divvy))))
points(dat$trips_divvy, pch=20, col='red')
axis(1, at=dat$index, labels=dat$month_year, las=2)
lines(dat$index, dat$trips_bluebike, col="blue", lwd=1)
lines(dat$index, dat$trips_divvy, col="red", lwd=1)
dev.off()
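# Aside (editor's sketch, not in the original script): lnb and lnd are fitted above but never
# drawn; if the smoothed trends are wanted on the base-graphics figure, lines such as these
# could be added just before dev.off():
# lines(dat$index, predict(lnb), col = "blue", lty = 2, lwd = 2)
# lines(dat$index, predict(lnd), col = "red", lty = 2, lwd = 2)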
datl <- gather(dat, company, number_of_trips, trips_bluebike:trips_divvy)
ggplot(datl, aes(x=index, y=number_of_trips, color=as.factor(company))) + geom_point() + scale_x_continuous(breaks=dat$index, labels=dat$month_year) + theme(axis.text.x = element_text(angle=90)) + xlab("Date") + ylab("Number of Trips") + geom_line(data = datl, aes(x=index, y=number_of_trips)) +
  geom_smooth(method="loess")
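# Aside (editor's note): gather() is superseded in current tidyr; a hypothetical equivalent
# reshape with pivot_longer() would be:
# datl <- pivot_longer(dat, trips_bluebike:trips_divvy, names_to = "company", values_to = "number_of_trips")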
|
/scripts/q1plots.R
|
no_license
|
cheryltky/bikeshare_studies
|
R
| false | false | 1,168 |
r
|
# Title: POSC-3410 Lab1 ####
# Author: George R. DeCarvalho
# Date: 13 Jan 2021
# Lesson 1 ####
# The Eternal Sunshine of A Spotless Mind
# Lesson 2 ####
# Let's Try Some Basic Arithmetic #####
# Sum of 1 and 1
1+1
# Divide 365 by 12
365/12
# Your turn, Multiply 10 by 12
10*12
# Your turn, add 28 + 38
28+38
# Order of Operations in R
1+1*(365/12)
# What is the product of 6 and 6
6*6
# Divide 9 by 3.142
9/3.142
# Learning to assign variables ####
# Run this line of code to assign your first variable.
myFirstvar<- "Hello World!"
# Run myFirstvar in the Console
myFirstvar
# Code a second variable with a number.
mySecondVar<-16
# Run mySecondVar in the console
mySecondVar
#Now let's practice assigning variables.
# Assign "Clemson" to`home`
home <- "Clemson"
# Assign "Tigers" to "h_mascot"
h_mascot <- "Tigers"
# Assign "Ohio State" to "away"
away <- "Ohio State"
# Assign "Buckeyes" to "a_mascot"
a_mascot <- "Buckeyes"
# Assign 31 to "homeScore"
homeScore <- 31
# Assign 0 to "awayScore"
awayScore <- 0
# Assign TRUE to homeWin
homeWin <- TRUE
# Assign FALSE to awayWin
awayWin <- FALSE
# Use class to identify the data type for the following variables.
class(h_mascot)
class(homeScore)
class(homeWin)
# Use str to identify the data types for the following variables
str(h_mascot)
str(homeScore)
str(homeWin)
# Can sports scores ever be represented as decimals? No. We need to convert the `homeScore` and `awayScore` variables to integers.
# Convert 'homeScore' to integer and assign to same var name.
homeScore <- as.integer(homeScore)
homeScore
# Now its your turn.
# Convert awayScore to integer and assign to same var name
awayScore <- as.integer(awayScore)
awayScore
# Make a numeric vector by using the syntax`c(#,#,#,#)`; check its structure.
vector_numeric <-c(12, 8, 16, 4, 15)
str(vector_numeric)
#Make a numeric vector,`myNumericVector`, on your own; check its structure.
myNumericVector <-c(10, 25, 98, 47)
str(myNumericVector)
# Make a logical vector; check its structure.
vector_logical <-c(TRUE, TRUE, FALSE, T, F)
str(vector_logical)
# Make a logical vector,`myLogicalVector`on your own; check its structure.
myLogicalVector <-c(TRUE, TRUE, FALSE, TRUE)
str(myLogicalVector)
# Make a character vector; check it's structure.
vector_character <-c("Montana", "Aikman", "Manning", "Favre", "Mahomes")
str(vector_character)
# Make a character vector "myCharVector" and check its structure
myCharVector <-c("Trout", "Bass", "Bream", "Catfish")
str(myCharVector)
# Make a list of the vectors that I created: drAllardList; check its structure.
drAllardList <-list(vector_numeric, vector_logical, vector_character)
str(drAllardList)
# Make a list of the vectors YOU created: myList; check its structure.
myList <-list(myNumericVector, myLogicalVector, myCharVector)
str(myList)
# Create data fame: QB_df; print in console; check structure
QB_df <-data.frame(vector_character, vector_numeric, vector_logical)
QB_df
str(QB_df)
# print the numeric column to the console; use syntax: dataframe$columnName
QB_df$vector_numeric
# print the the character column to the console; use syntax: dataframe$columnName
QB_df$vector_character
# Rename QB_df$vector_character as QB_df$qbName
names(QB_df)[names(QB_df)=="vector_character"] <- "qbName"
str(QB_df)
# What is going on here?
# Select the first row of QB_df
QB_df[1,]
# Select the first column of QB_df
QB_df[,1]
# Select the`qbName`column using ""
QB_df[,"qbName"]
# If you want to keep the complexity of the data frame, you can use the following formats.
QB_df[1]
QB_df["qbName"]
# It is important to know the difference between these approaches because we will use each for different tasks later on.
# Select the cell that is at the intersection of the 3rd row and 2nd column
QB_df[3,2]
#Now it is your turn.
# Select the first row of your dataframe
# Create my own data frame
MY_df <-data.frame(myNumericVector, myLogicalVector, myCharVector)
MY_df
# Select the first row of your data frame
MY_df[1,]
# Select the third Column of your Data Frame
MY_df[,3]
# Select the cell that is at the intersection of the 1st row and 2nd column of your data frame
MY_df[1,2]
# What type of data structure is returned by calling names(QB_df)?
names(QB_df)
## [1] "qbName" "vector_numeric" "vector_logical"
# Answer: a vector
# If we want to rename a specific element of the vector, then we need to use indexing to select the element
names(QB_df)[names(QB_df)=="vector_numeric"]
# Now we need to assign a new value to it.
names(QB_df)[names(QB_df)=="vector_numeric"] <- "jerseyNum"
# Repeat this process for the 3rd column: HoFer (Hall of Famer)
names(QB_df)[names(QB_df)=="Hall of Famer"]
|
/DAL1/decarvalho-DAL1.R
|
no_license
|
gdecarv/DECARVALHO-POSC-3410-REPO
|
R
| false | false | 4,671 |
r
|
# decimalplaces <- function(x) {
# if ((x %% 1) != 0) {
# nchar(strsplit(sub('0+$', '', as.character(x)), ".", fixed=TRUE)[[1]][[2]])
# } else {
# return(0)
# }
# }
# decimalplaces(xn)
#
# num.decimals <- function(x) {
# stopifnot(class(x)=="numeric")
# x <- sub("0+$","",x)
# x <- sub("^.+[.]","",x)
# nchar(x)
# }
#
# num.decimals(xn)
#
#
#
# x <- c("0.0000", "0", "159.283", "1.45e+10", "1.4599E+10","11.12342525256789123456" )
# x <- as.character(dataSet[,notIntCols])
# x<-as.character(blah)
# Ndec(x);min(Ndec(x));max(Ndec(x))
# num.dec <- as.integer(Ndec(x))
# Ndec(x)
# num.dec
# paste(rep(paste(vec2-vec2,2,sep = ",",collapse = ""), 1),collapse = ",")
# paste((vec2-vec2),rep(vec2, 01),sep = ",",collapse = "")
#
# rep(vec2, 1)
# rep((vec2-vec2), 1)
#
# sapply(rep(vec2, 1)*rep((vec2-vec2)+2, 1), as.numeric)
#
# vec <- Ndec(x)
# vec <- as.data.frame(cbind(dec.places=vec,tens=10))
# vec
# str(vec)
#
#
#
# vec2<- sapply(vec+1,as.numeric)
# vec2
#
# paste(rep(c(rep((vec2-vec2), 1)), times = c(vec2)),collapse = ",")
#
# x=vec2
#
# # Method 1
# rep(x,times = c(vec2))
#
# # Method 2
# matrix(x,length(x),c(vec2))
#
#
#
# xn <- 11.12342525256789123456
#
# min(match(TRUE, round(xn, 1:20) == xn))
#
# min(xn)
#
# specify_decimal <- function(x, k) format(x, nsmall=k)
# specify_decimal(.11, 10)
#
#
#
# sub(".", "", as.character(specify_decimal(1, 10)))
#
# specify_decimal(Ndec(x), decimalplaces(Ndec(x)))
roundUp <- function(x,to=10)
{
to*(x%/%to + as.logical(x%%to))
}
roundUp(0,50)
is.finite.data.frame <- function(obj){
sapply(obj,FUN = function(x) all(is.finite(x)))
}
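# Quick usage check (editor's addition): returns one logical per column.
is.finite.data.frame(data.frame(a = c(1, NA), b = c(2, 3))) # a FALSE, b TRUE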
rc <- function(x) {
if (nrow(x)>0 || ncol(x)>0) {
rows <- nrow(x)
columns <- ncol(x)
result <- paste('rows:',rows,'columns:', columns,sep = " ")
}
else {
result <- paste('rows:',NULL,'columns:', NULL,sep = ",")
}
return(result)
}
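# Quick usage check (editor's addition):
rc(data.frame(a = 1:3, b = 4:6)) # "rows: 3 columns: 2"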
homedir<-getSrcDirectory(function(x) {x})
# Setting working directory
# need to be adjusted to server conditions
if (homedir==""|is.na(homedir)){
homedir <-"C://Users//Neal//Documents//www.DAYTRADINGLOGIC.com//_neal//swing//R"
#homedir <-"C://Users//nbb//Downloads//rscripts20160529"
}
# homedir <-"~//Zlecenia//ODesk//Neal Webster//swing prediction"
setwd(homedir)
# Load data (using iris dataset from Google Drive because link @ uci.edu was not working for me today)
#iris <- read.csv(url("http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"), header = FALSE)
#iris <- read.csv(url("https://docs.google.com/spreadsheets/d/1ovz31Y6PrV5OwpqFI_wvNHlMTf9IiPfVy1c3fiQJMcg/pub?gid=811038462&single=true&output=csv"), header = FALSE)
#iris <- read.csv("vw_barFeatures_train_r_SwingLo.csv", header = TRUE)
dataSet <- read.csv("data/DataSet_Balanced_20160819.csv", header = TRUE)
dataSet <- read.csv("data/vw_barFeatures_train_r_20160819_SUBSET_BALANCED_TRAIN.csv", header = TRUE)
dataSet <- read.csv("data/vw_barFeatures_train_r_20160819_SUBSET_BALANCED.csv", header = TRUE)
dataSet <- read.csv("data/vw_barFeatures_train_r_20161123.csv", header = TRUE)
dataSet <- read.csv("data/vw_barFeatures_train_r_20161123_balanced.csv", header = TRUE)
rc(dataSet)
levels(as.factor(sapply(dataSet, class)))
table(sapply(dataSet, class))
table(dataSet[,1])
intCols <- sapply(dataSet, is.integer)
notIntCols <- !sapply(dataSet, is.integer)
notIntCols <- as.character(names(dataSet[,notIntCols]))
notIntCols <- notIntCols[which(notIntCols!="swingRecordType")]
notIntCols <- notIntCols[which(notIntCols!="numMinutesBarWasOpn")]
notIntCols;ncol(dataSet[,notIntCols]);
numCols <- sapply(dataSet, is.numeric)
notNumCols <- !sapply(dataSet, is.numeric)
factCols <- sapply(dataSet, is.factor)
notFactCols <- !sapply(dataSet, is.factor)
#min(dataSet[,notIntCols]);min(dataSet[,intCols]);
#max(dataSet[,notIntCols]);max(dataSet[,intCols]);
#tst <- -0.00143417*100000000;tst
#str(dataSet)
#linearlyRescale where [x] is some lookback period and range is "hard coded" by setting [floor] and [limit] values to literals
#(limit - floor) / (max[x] - min[x]) * (x - max[x]) + limit
#linearlyRescale <- function(x,floor,limit) (limit - floor) / ((max(x) - min(x)) * (x - max(x)) + limit)
#rescale <- function(x) (x-min(x))/(max(x) - min(x)) * 10000000000
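# Aside (editor's sketch): a working min-max rescale to an arbitrary [floor, limit] range,
# matching the formula in the comment above (the commented-out one-liner appears to have its
# parentheses misplaced). The name linearlyRescale2 is the editor's, not from the original.
linearlyRescale2 <- function(x, floor = 0, limit = 1) {
  floor + (limit - floor) * (x - min(x)) / (max(x) - min(x))
}
linearlyRescale2(c(2, 5, 11), floor = 0, limit = 100) # 0.0 33.3 100.0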
notIntCols
#install.packages("DescTools")
library(DescTools)
x <- as.character(dataSet[,notIntCols])
paste(Ndec(x),collapse = ",")
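# Aside (editor's sketch): the idea above in miniature -- count decimal places with
# DescTools::Ndec, scale by a power of ten, then store as integer (round() first guards
# against floating-point truncation). toyDec is an invented example vector.
toyDec <- c(0.00143417, 12.5)
Ndec(as.character(toyDec)) # 8 1
as.integer(round(toyDec * 1e8)) # 143417 1250000000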
#numDec <- as.data.frame(as.integer(Ndec(x)))
#df <- as.data.frame(numDec);colnames(df)[1] <- "Column_A";
#df
#B<-10**(df$Column_A)
#df<-cbind(df,B)
#df
# #create dataframe
# num_row <- nrow(df);num_col <- ncol(dataSet[,notIntCols]);
# m <- as.data.frame(matrix(df[,1], ncol = 1, nrow = num_row))
# m <- cbind(m, 10^m[,1])
# m <- rbind(m, blah)
# m <- m[2,]
# m
# ncol(m);ncol(dataSet[,notIntCols]);
# as.data.frame(dataSet[3,notIntCols])
# str(dataSet[3,notIntCols])
# str(as.data.frame(as.list(m[,2])))
# str(m[,2])
# str(df[,2])
# str(mults)
# #multiply numeric columns by multiple
# str(df[,2])
# df.aree <- as.data.frame(t(df[,2]))
# df.aree
dataSet[,notIntCols]<- mapply("*",as.data.frame(dataSet[,notIntCols]),100000000)
#dataSet[,notIntCols]<- sapply(dataSet[,notIntCols]*df[,2],as.numeric)
x2 <- as.character(dataSet[,notIntCols])
paste(Ndec(x2),collapse = ",")
#min(dataSet[,notIntCols]);min(dataSet[,intCols]);
#max(dataSet[,notIntCols]);max(dataSet[,intCols]);
#"7,7,7,7,4,5,5,5,5,5,8,11,7,8,8,2,11,11,11,2"
#"0,0,0,0,0,0,0,0,0,0,0,2 ,0,0,0,8,3 ,3 ,2 ,8"
dataSet[,notIntCols] <- sapply(dataSet[,notIntCols],as.integer)
table(sapply(dataSet[,2], class))
notIntCols <- !sapply(dataSet, is.integer)
notIntCols <- as.character(names(dataSet[,notIntCols]))
notIntCols <- notIntCols[which(notIntCols!="swingRecordType")]
dataSet[,notIntCols] <- sapply(dataSet[,notIntCols],as.integer)
table(sapply(dataSet, class))
#assign column names
#names(dataSet) <- c("SepalLength", "SepalWidth", "PetalLength", "PetalWidth", "Species")
#backup dataset after transformations are complete
dataSet.bkup <- dataSet
#features ordinality
x_col_start_pos <- 5
x_col_end_pos <- 3249
#dataSet <- dataSet[,c(1,x_col_start_pos:x_col_end_pos)]
names(dataSet)
#col names
dsColNames <- as.character(names(dataSet))
dsColNames
as.character(names(dataSet[,1:2]))
#num of columns and rows
dsColCount <- as.integer(ncol(dataSet))
dsRowCount <- as.integer(nrow(dataSet))
dsColCount
dsRowCount
#class ordinality and name
classColumn <- 2
classColumnName <- dsColNames[classColumn]
y_col_pos <- classColumn
unique(dataSet[,classColumn])
unique(dataSet[,1])
unique(dataSet[,2])
dataSet <- dataSet[,c(classColumn,x_col_start_pos:x_col_end_pos)]
nrow(dataSet)
ncol(dataSet)
classColumn <- 1
classColumnName <- dsColNames[classColumn]
y_col_pos <- classColumn
unique(dataSet[,classColumn])
unique(dataSet[,1])
unique(dataSet.bkup[,2:3])
#features ordinality
x_col_start_pos <- 2
x_col_end_pos <- 3246
y_col_pos
x_col_start_pos
x_col_end_pos
firstCol <- ifelse(x_col_start_pos < classColumn, x_col_start_pos, classColumn)
firstCol
lastCol <- ifelse(x_col_end_pos > classColumn, x_col_end_pos, classColumn)
lastCol
#distinct list and count of classes from column assumed to contain class values
dsClassValues <- as.character(unique(dataSet[,classColumn])) #levels(dataSet[,classColumn])
dsClassCount <- as.integer(length(dsClassValues)) #sqldf("select distinct(x) from df1")
dsClasses <- dataSet[,classColumn]
dsClassCount
dsClassValues
dataSet[1,1:10]
#levels(as.factor(dataSet[,123]))
#class distribution in terms of row count/freqency and percentage/proportions
dsClassFreq <- table(dsClasses)
dsClassDistribution <- round(prop.table(table(dsClasses)) * 100, digits = 2)
dsClassFreq
dsClassDistribution
# #Randomly sample a percentage of rows to balance class distribution
#mydf <- mydf[ sample( which(mydf$swingRecordType=="SwingNULL"), round(0.1235*length(which(mydf$swingRecordType=="SwingNULL")))), ]
#get all "SwingHi" rows
dataSet.SwingHi <- dataSet[ sample( which(dataSet$ClassChar=="SwingHi"), round(43.82*length(which(dataSet$ClassChar=="SwingHi"))),replace = TRUE), ]
table(dataSet.SwingHi[,classColumn])
round(prop.table(table(dataSet.SwingHi[,classColumn])) * 100, digits = 2)
#get all "SwingLo" rows
dataSet.SwingLo <- dataSet[ sample( which(dataSet$ClassChar=="SwingLo"), round(41.92*length(which(dataSet$ClassChar=="SwingLo"))),replace = TRUE), ]
table(dataSet.SwingLo[,classColumn])
round(prop.table(table(dataSet.SwingLo[,classColumn])) * 100, digits = 2)
#get all "SwingNULL" rows and append all "SwingHi" and "SwingLo" rows
dataSet2 <- rbind(dataSet[ which(dataSet$ClassChar=="SwingNULL"), ],dataSet.SwingHi,dataSet.SwingLo)
table(dataSet2[,classColumn])
round(prop.table(table(dataSet2[,classColumn])) * 100, digits = 2)
dataSet <- dataSet2
#Free RAM. Remove objects / data from workspace
rm(dataSet.SwingHi, dataSet.SwingLo, dataSet2)
gc()
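# Aside (editor's sketch): the same "oversample the rare classes with replacement" idea on a
# self-contained toy label vector -- every class is resampled up to the largest class size:
toyLab <- factor(c(rep("A", 100), rep("B", 6), rep("C", 4)))
toyIdx <- unlist(lapply(levels(toyLab), function(cl) {
  sample(which(toyLab == cl), max(table(toyLab)), replace = TRUE)
}))
table(toyLab[toyIdx]) # A, B and C all end up with 100 rows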
write.csv(dataSet, file = "data/vw_barFeatures_train_r_20161123_balanced.csv",row.names=FALSE,append = FALSE)
#num of columns and rows
dsColCount <- as.integer(ncol(dataSet))
dsRowCount <- as.integer(nrow(dataSet))
dsColCount
dsRowCount
# % of [dataset] reserved for training/test and validation
set.seed(123)
sampleAmt <- 0.5
mainSplit <- sample(2, dsRowCount, replace=TRUE, prob=c(sampleAmt, 1-sampleAmt))
#split [dataSet] into two sets
dsTrainingTest <- dataSet[mainSplit==1, ]#firstCol:lastCol]
dsValidation <- dataSet[mainSplit==2, ]#firstCol:lastCol]
nrow(dataSet);nrow(dsTrainingTest);nrow(dsValidation);
nrow(dataSet) == nrow(dsTrainingTest)+nrow(dsValidation)
print(round(prop.table(table(dataSet[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dsTrainingTest[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dsValidation[,classColumn]))* 100, digits = 1))
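# Aside (editor's note): sample(2, n, prob = c(p, 1 - p)) gives only an *approximate* p/(1-p)
# split; if an exact split were wanted, one hypothetical alternative is to draw row indices:
# idx <- sample(seq_len(dsRowCount), size = floor(0.5 * dsRowCount))
# dsTrainingTestExact <- dataSet[idx, ]; dsValidationExact <- dataSet[-idx, ]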
write.csv(dsTrainingTest, file = "data/vw_barFeatures_train_r_20161123_balanced_TrainingTest.csv",row.names=FALSE,append = FALSE)
write.csv(dsValidation, file = "data/vw_barFeatures_train_r_20161123_balanced_Validation.csv",row.names=FALSE,append = FALSE)
#SwingHi SwingLo SwingNULL
#33.3 33.4 33.3
#33.4 33.2 33.4
# % of [dsTrainingTest] reserved for training
sampleAmt <- 0.5
secondarySplit <- sample(2, nrow(dsTrainingTest), replace=TRUE, prob=c(sampleAmt, 1-sampleAmt))
#split [dsTrainingTest] into two sets
dsTraining <- dsTrainingTest[secondarySplit==1, ]#firstCol:lastCol]
dsTest <- dsTrainingTest[secondarySplit==2, ]#firstCol:lastCol]
nrow(dsTraining);nrow(dsTest);
nrow(dataSet) == nrow(dsTrainingTest)+nrow(dsValidation)
nrow(dsTrainingTest) == nrow(dsTraining)+nrow(dsTest)
# % of [dsValidation] reserved for Validation
sampleAmt <- 0.5
secondarySplit <- sample(2, nrow(dsValidation), replace=TRUE, prob=c(sampleAmt, 1-sampleAmt))
# #split [dsValidation] into two sets
# dsValp1 <- dsValidation[secondarySplit==1, firstCol:lastCol]
# dsValp2 <- dsValidation[secondarySplit==2, firstCol:lastCol]
# nrow(dsValp1);nrow(dsValp2);
# nrow(dataSet) == nrow(dsTrainingTest)+nrow(dsValp1)+nrow(dsValp2)
# nrow(dsValidation) == nrow(dsValp1)+nrow(dsValp2)
# # % of [dsValp1] reserved for Validation
# sampleAmt <- 0.5075
# secondarySplit <- sample(2, nrow(dsValp1), replace=TRUE, prob=c(sampleAmt, 1-sampleAmt))
#
# #split [dsValp1] into two sets
# dsValp1a <- dsValp1[secondarySplit==1, firstCol:lastCol]
# dsValp1b <- dsValp1[secondarySplit==2, firstCol:lastCol]
# nrow(dsValp1a);nrow(dsValp1b);
#
# nrow(dataSet) == nrow(dsTrainingTest)+nrow(dsValp1a)+nrow(dsValp1b)+nrow(dsValp2)
# nrow(dsValp1) == nrow(dsValp1a)+nrow(dsValp1b)
#
# # % of [dsValp2] reserved for Validation
# sampleAmt <- 0.5075
# secondarySplit <- sample(2, nrow(dsValp2), replace=TRUE, prob=c(sampleAmt, 1-sampleAmt))
#
# #split [dsValp2] into two sets
# dsValp2a <- dsValp2[secondarySplit==1, firstCol:lastCol]
# dsValp2b <- dsValp2[secondarySplit==2, firstCol:lastCol]
# nrow(dsValp2a);nrow(dsValp2b);
# nrow(dataSet) == nrow(dsTrainingTest)+nrow(dsValp1a)+nrow(dsValp1b)+nrow(dsValp2a)+nrow(dsValp2b)
# nrow(dsValp2) == nrow(dsValp2a)+nrow(dsValp2b)
nrow(dataSet) == nrow(dsTrainingTest)+nrow(dsValidation)
nrow(dataSet) == nrow(dsTraining)+nrow(dsTest)+nrow(dsValidation)
#Free RAM. Remove objects / data from workspace
#rm(dsTest,dsTraining,dsValidation,x)
rm(dsTrainingTest,dataSet)
gc()
# #install.packages("data.table")
# library(data.table)
#
# dsSummary <- as.data.frame(data.table(nrow(dataSet),nrow(dsTrainingTest),nrow(dsTraining),nrow(dsTest),nrow(dsValidation),nrow(dsValp1a),nrow(dsValp1b),nrow(dsValp2a),nrow(dsValp2b)))
# names(dsSummary) <- c("completeDataset","dsTrainingTest", "TrainingDataset", "TestDataset", "ValidationDataset","dsValp1a","dsValp1b","dsValp2a","dsValp2b")
# ncol(dsSummary)
# dsSummary
#0. complete dataset
nrow(dataSet.bkup);
nrow(dataSet);nrow(dsTrainingTest);nrow(dsValidation);
#1. Training dataset #2. Test dataset #4. Validation datasets
nrow(dsTraining);nrow(dsTest);nrow(dsValidation);
# nrow(dsValp1a);nrow(dsValp1b);nrow(dsValp2a);nrow(dsValp2b);
#is.finite.data.frame(dataSet)
#print(round(prop.table(table(dataSet.bkup[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dataSet[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dsTrainingTest[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dsTraining[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dsTest[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dsValidation[,classColumn]))* 100, digits = 1))
#install.packages("DMwR")
library(DMwR)
# maxTrainIteration <- 500
# trainIteration <- 0; loopNum <- 0; bestTrainNum <- NULL;
# while (trainIteration <= maxTrainIteration) {
# #for (num0 in seq(1,10,1)) {for (num1 in seq(1,10,1)){
# orig.distPercent <- as.data.frame(round(prop.table(table(dsTraining.bal[,classColumn]))* 100, digits = 1))
# orig.distRecordCount <- as.data.frame(table(dsTraining.bal[,classColumn]))
# #set random values for "perc.over" and "perc.under"
# randVar0 <- roundUp(sample(4:19,1,replace=TRUE),1)*50;randVar1 <- roundUp(sample(2:6,1,replace=TRUE),1)*50;
# randVar2 <- roundUp(sample(4:19,1,replace=TRUE),1)*50;randVar3 <- roundUp(sample(2:6,1,replace=TRUE),1)*50;
#
# if (loopNum == 0) {
# bestParams <- data.frame(cbind(recordType=1,SwingHi.p=orig.distPercent[1,2],SwingLo.p=orig.distPercent[2,2]
# ,SwingNULL.p=orig.distPercent[3,2],SwingHi.c=orig.distRecordCount[1,2]
# ,SwingLo.c=orig.distRecordCount[2,2],SwingNULL.c=orig.distRecordCount[3,2]
# ,perc.over=abs(randVar0-randVar0),perc.under=abs(randVar1-randVar1),perc.over2=abs(randVar2-randVar2),perc.under2=abs(randVar3-randVar3)
# ,ratioClassAvsBvsC=(orig.distPercent[1,2]-orig.distPercent[2,2])+orig.distPercent[3,2]))
# print(bestParams)
# }
# #randVar0 <- roundUp(num0,1)*100;randVar1 <- roundUp(num1,1)*100;
# print(paste("Begin test:",loopNum,"params --> perc.over",randVar0,"/ perc.under",randVar1,"perc.over2",randVar2,"/ perc.under2",randVar3));
#
# ## use SMOTE to balance classes
# dsTraining.bal <- SMOTE(swingRecordType ~ ., dsTraining, perc.over=randVar0,perc.under=randVar1)
# dsTraining.bal2 <- SMOTE(swingRecordType ~ ., dsTraining.bal, perc.over=randVar2,perc.under=randVar3)
#
# # Calculate error
# distPercent <- as.data.frame(round(prop.table(table(dsTraining.bal2[,classColumn]))* 100, digits = 1))
# distRecordCount <- as.data.frame(table(dsTraining.bal2[,classColumn]))
# error1 <- distPercent[1,2]; error2 <- distPercent[2,2]; error3 <- distPercent[3,2];ratioClassAvsBvsC <- (error1-error2)+error3
#
# #output ideal params
# if ( (ratioClassAvsBvsC >= 19 & ratioClassAvsBvsC <= 41) ) {
# bestTrainNum <- loopNum
# new_row <- c(0,distPercent[1,2],distPercent[2,2],distPercent[3,2],distRecordCount[1,2],distRecordCount[2,2]
# ,distRecordCount[3,2],randVar0,randVar1,randVar2,randVar3,ratioClassAvsBvsC)
# bestParams <- rbind(bestParams, new_row)
# bestParams <- bestParams[order(bestParams$recordType,bestParams$ratioClassAvsBvsC,bestParams$SwingHi.p,bestParams$SwingLo.p,bestParams$SwingNULL.p, decreasing=TRUE),]
# print(paste("--> Class Distribution",error1,error2,error3,"bestTrainNum =",bestTrainNum))
# print(bestParams)
# }
# else {
# print(paste("--> Class Distribution",error1,error2,error3
# ,"bestTrainNum =",bestTrainNum))
# }#}}
# trainIteration <- trainIteration+1;loopNum <- loopNum + 1;
# gc()
# }
#
# bestParams
# use SMOTE to balance classes
dsTraining.bal <- SMOTE(swingRecordType ~ ., dsTraining, perc.over=900,perc.under=100)#perc.over=950,perc.under=100)
dsTraining.bal2 <- SMOTE(swingRecordType ~ ., dsTraining.bal, perc.over=950,perc.under=200)#perc.over=700,perc.under=200)
round(prop.table(table(dsTraining[,classColumn]))* 100, digits = 1);round(prop.table(table(dsTraining.bal[,classColumn]))* 100, digits = 1);round(prop.table(table(dsTraining.bal2[,classColumn]))* 100, digits = 1);
table(dsTraining[,classColumn]); table(dsTraining.bal[,classColumn]);table(dsTraining.bal2[,classColumn]);
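# Aside (editor's sketch of DMwR::SMOTE semantics, on a throwaway two-class toy set; all
# object names here are invented): perc.over = 200 creates 2 synthetic cases per original
# minority case and perc.under = 150 then keeps 1.5 majority cases per synthetic case, so
# 10 minority rows become roughly 30 vs 30.
toySmote <- droplevels(iris[iris$Species != "setosa", c("Sepal.Length", "Sepal.Width", "Species")])
toySmote <- rbind(head(toySmote[toySmote$Species == "versicolor", ], 10),
                  toySmote[toySmote$Species == "virginica", ])
table(SMOTE(Species ~ ., toySmote, perc.over = 200, perc.under = 150)$Species)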
dsTest.bal <- SMOTE(swingRecordType ~ ., dsTest, perc.over=900,perc.under=100)
dsTest.bal2 <- SMOTE(swingRecordType ~ ., dsTest.bal, perc.over=950,perc.under=200)
round(prop.table(table(dsTest[,classColumn]))* 100, digits = 1);round(prop.table(table(dsTest.bal[,classColumn]))* 100, digits = 1);round(prop.table(table(dsTest.bal2[,classColumn]))* 100, digits = 1)
table(dsTest[,classColumn]);table(dsTest.bal[,classColumn]);table(dsTest.bal2[,classColumn])
dsValidation.bal <- SMOTE(swingRecordType ~ ., dsValidation, perc.over=900,perc.under=100)
dsValidation.bal2 <- SMOTE(swingRecordType ~ ., dsValidation.bal, perc.over=950,perc.under=200)
round(prop.table(table(dsValidation[,classColumn]))* 100, digits = 1);round(prop.table(table(dsValidation.bal[,classColumn]))* 100, digits = 1);round(prop.table(table(dsValidation.bal2[,classColumn]))* 100, digits = 1)
table(dsValidation[,classColumn]);table(dsValidation.bal[,classColumn]);table(dsValidation.bal2[,classColumn])
dsValp1a.bal <- SMOTE(swingRecordType ~ ., dsValp1a, perc.over=900,perc.under=100)
dsValp1a.bal2 <- SMOTE(swingRecordType ~ ., dsValp1a.bal, perc.over=950,perc.under=200)
round(prop.table(table(dsValp1a[,classColumn]))* 100, digits = 1);round(prop.table(table(dsValp1a.bal[,classColumn]))* 100, digits = 1);round(prop.table(table(dsValp1a.bal2[,classColumn]))* 100, digits = 1)
table(dsValp1a[,classColumn]);table(dsValp1a.bal[,classColumn]);table(dsValp1a.bal2[,classColumn])
dsValp1b.bal <- SMOTE(swingRecordType ~ ., dsValp1b, perc.over=900,perc.under=100)
dsValp1b.bal2 <- SMOTE(swingRecordType ~ ., dsValp1b.bal, perc.over=950,perc.under=200)
round(prop.table(table(dsValp1b[,classColumn]))* 100, digits = 1);round(prop.table(table(dsValp1b.bal[,classColumn]))* 100, digits = 1);round(prop.table(table(dsValp1b.bal2[,classColumn]))* 100, digits = 1)
table(dsValp1b[,classColumn]);table(dsValp1b.bal[,classColumn]);table(dsValp1b.bal2[,classColumn])
dsValp2a.bal <- SMOTE(swingRecordType ~ ., dsValp2a, perc.over=900,perc.under=100)
dsValp2a.bal2 <- SMOTE(swingRecordType ~ ., dsValp2a.bal, perc.over=950,perc.under=200)
round(prop.table(table(dsValp2a[,classColumn]))* 100, digits = 1);round(prop.table(table(dsValp2a.bal[,classColumn]))* 100, digits = 1);round(prop.table(table(dsValp2a.bal2[,classColumn]))* 100, digits = 1)
table(dsValp2a[,classColumn]);table(dsValp2a.bal[,classColumn]);table(dsValp2a.bal2[,classColumn])
dsValp2b.bal <- SMOTE(swingRecordType ~ ., dsValp2b, perc.over=900,perc.under=100)
dsValp2b.bal2 <- SMOTE(swingRecordType ~ ., dsValp2b.bal, perc.over=950,perc.under=200)
round(prop.table(table(dsValp2b[,classColumn]))* 100, digits = 1);round(prop.table(table(dsValp2b.bal[,classColumn]))* 100, digits = 1);round(prop.table(table(dsValp2b.bal2[,classColumn]))* 100, digits = 1)
table(dsValp2b[,classColumn]);table(dsValp2b.bal[,classColumn]);table(dsValp2b.bal2[,classColumn])
# print(round(prop.table(table(dsTraining.bal2[,classColumn]))* 100, digits = 1))
# print(round(prop.table(table(dsTest.bal2[,classColumn]))* 100, digits = 1))
# print(round(prop.table(table(dsValp1a.bal2[,classColumn]))* 100, digits = 1))
# print(round(prop.table(table(dsValp1b.bal2[,classColumn]))* 100, digits = 1))
# print(round(prop.table(table(dsValp2a.bal2[,classColumn]))* 100, digits = 1))
# print(round(prop.table(table(dsValp2b.bal2[,classColumn]))* 100, digits = 1))
# dsTrainingTest <- dsTrainingTest[,c(1,x_col_start_pos:x_col_end_pos)]
# dsTest <- dsTest[,c(1,x_col_start_pos:x_col_end_pos)]
# dsValidation <- dsValidation[,c(1,x_col_start_pos:x_col_end_pos)]
dsTraining.bal <- dsTrainingTest
dsTest.bal <- dsTest
dsValidation.bal <- dsValidation
#dsTraining.bal <- dsTraining
#dsTest.bal <- dsTest
# dsValp1a.bal <- dsValp1a#.bal2
# dsValp1b.bal <- dsValp1b#.bal2
# dsValp2a.bal <- dsValp2a#.bal2
# dsValp2b.bal <- dsValp2b#.bal2
# blah <- dsTraining.bal; meta.na <- as.data.frame(is.finite.data.frame(blah));
# ncol(blah);nrow(blah);table(sapply(blah, class));table(meta.na[,]);table(blah[,classColumn]);round(prop.table(table(blah[,classColumn])) * 100, digits = 2);
#
# blah <- dsTest.bal; meta.na <- as.data.frame(is.finite.data.frame(blah));
# ncol(blah);nrow(blah);table(sapply(blah, class));table(meta.na[,]);table(blah[,classColumn]);round(prop.table(table(blah[,classColumn])) * 100, digits = 2);
#
# blah <- dataSet.bkup; meta.na <- as.data.frame(is.finite.data.frame(blah));
# ncol(blah);nrow(blah);table(sapply(blah, class));table(meta.na[,]);table(blah[,classColumn]);round(prop.table(table(blah[,classColumn])) * 100, digits = 2);
#print(round(prop.table(table(dataSet.bal[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dsTraining.bal[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dsTest.bal[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dsValidation.bal[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dataSet.bkup[,classColumn]))* 100, digits = 1))
#print(round(prop.table(table(dsValidation.bal[,classColumn]))* 100, digits = 1))
round(prop.table(table(dsTraining.bal[,1])) * 100, digits = 2);nrow(dsTraining.bal);
round(prop.table(table(dsTest.bal[,1])) * 100, digits = 2);nrow(dsTest.bal);
round(prop.table(table(dsValp1a.bal[,1])) * 100, digits = 2);nrow(dsValp1a.bal);
round(prop.table(table(dsValp1b.bal[,1])) * 100, digits = 2);nrow(dsValp1b.bal);
round(prop.table(table(dsValp2a.bal[,1])) * 100, digits = 2);nrow(dsValp2a.bal);
round(prop.table(table(dsValp2b.bal[,1])) * 100, digits = 2);nrow(dsValp2b.bal);
table(sapply(dsTraining.bal, class))
table(sapply(dsTest.bal, class))
# dsTraining.bal[,2:822] <- sapply(dsTraining.bal[,2:822],as.integer)
# table(sapply(dsTraining.bal, class))
# dsTest.bal[,2:822] <- sapply(dsTest.bal[,2:822],as.integer)
# table(sapply(dsTest.bal, class))
#
# dsTraining.bal[,"MAMAP25"] <- sapply(dsTraining.bal[,"MAMAP25"],as.integer)
# table(sapply(dsTraining.bal, class))
# dsTest.bal[,"MAMAP25"] <- sapply(dsTest.bal[,"MAMAP25"],as.integer)
# table(sapply(dsTest.bal, class))
#
# notIntCols <- !sapply(dsTest.bal, is.integer)
# notIntCols <- as.character(names(dsTest.bal[,notIntCols]))
# notIntCols <- notIntCols[which(notIntCols!="swingRecordType")]
# notIntCols
#dsTest.bal[,notIntCols]<- mapply("*",as.data.frame(dsTest.bal[,notIntCols]),100000000)#df.aree)
# x2 <- as.character(dsTest.bal[,"MAMAP25"])
# max(paste(Ndec(x2),collapse = ","))
# dsSummary.bal <- data.table(nrow(dsTraining.bal),nrow(dsTest.bal),nrow(dsValp1a.bal),nrow(dsValp1b.bal),nrow(dsValp2a.bal),nrow(dsValp2b.bal))
# names(dsSummary.bal) <- c("TrainingDataset.bal", "TestDataset.bal", "dsValp1a.bal","dsValp1b.bal","dsValp2a.bal","dsValp2b.bal")
# ncol(dsSummary.bal)
# dsSummary.bal
# dsTraining.bal <- as.integer(dsTraining.bal[,])
# write.csv(dataSet, file = "export_completeDataset.csv",row.names=TRUE)
# write.csv(dsTraining, file = "export_TrainingDataset.csv",row.names=TRUE)
# write.csv(dsTest, file = "export_TestDataset.csv",row.names=TRUE)
# write.csv(dsValidation, file = "export_ValidationDataset.csv",row.names=TRUE)
#
# var1 <- dataSet[,"stofP1"]
# var2 <- dataSet[,"cci"]
# var3 <- dataSet[,"HMAP1"]
# var4 <- dataSet[,"bbdM2"]
# var5 <- dataSet[,"stdebP1"]
#
# #"stofP1" "cci" "HMAP1" "bbdM2" "stdebP1"
#
# #Initial Overview Of The Data Set
# #install.packages('ggvis')
# library(ggvis)
#
# #create scatter plot - is there a visible correlation between var1 and var2 or var3 and var4 for all classes?
# dataSet %>% ggvis(~var1, ~var1, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var1, ~var2, fill = ~dsClasses) %>% layer_points(opacity:=1/1.25)
# dataSet %>% ggvis(~var1, ~var3, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var1, ~var4, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var1, ~var5, fill = ~dsClasses) %>% layer_points()
#
# dataSet %>% ggvis(~var2, ~var1, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var2, ~var2, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var2, ~var3, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var2, ~var4, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var2, ~var5, fill = ~dsClasses) %>% layer_points()
#
# dataSet %>% ggvis(~var3, ~var1, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var3, ~var2, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var3, ~var3, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var3, ~var4, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var3, ~var5, fill = ~dsClasses) %>% layer_points()
#
# dataSet %>% ggvis(~var4, ~var1, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var4, ~var2, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var4, ~var3, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var4, ~var4, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var4, ~var5, fill = ~dsClasses) %>% layer_points()
#
# dataSet %>% ggvis(~var5, ~var1, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var5, ~var2, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var5, ~var3, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var5, ~var4, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var5, ~var5, fill = ~dsClasses) %>% layer_points()
#trainSetEnum <- dsTraining.bal[,firstCol:lastCol]
#trainSetEnum[,classColumn] <- as.character(trainSetEnum[,classColumn])
#trainSetEnum[,classColumn][trainSetEnum[,classColumn]=="SwingHi"] <- 2
#trainSetEnum[,classColumn][trainSetEnum[,classColumn]=="SwingLo"] <- 1
#trainSetEnum[,classColumn][trainSetEnum[,classColumn]=="SwingNULL"] <- 2
#trainSetEnum[,classColumn][trainSetEnum[,classColumn]=="NotSwingLo"] <- 2
#trainSetEnum[,classColumn][trainSetEnum[,classColumn]=="NotSwingHi"] <- 2
#trainSetEnum[,classColumn] <- as.integer(trainSetEnum[,classColumn])
#x <- as.matrix(trainSetEnum[,x_col_start_pos:x_col_end_pos])
xTrain.bal <- as.data.frame(dsTraining.bal[,x_col_start_pos:x_col_end_pos])
yTrain.bal <- as.factor(dsTraining.bal[,classColumn])
# xTrain.bal <- as.data.frame(dsTraining.bal2[,x_col_start_pos:x_col_end_pos])
# yTrain.bal <- as.factor(dsTraining.bal2[,classColumn])
#testSetEnum <- dsTest.bal[,firstCol:lastCol]
#testSetEnum[,classColumn] <- as.character(testSetEnum[,classColumn])
#testSetEnum[,classColumn][testSetEnum[,classColumn]=="SwingHi"] <- 2
#testSetEnum[,classColumn][testSetEnum[,classColumn]=="SwingLo"] <- 1
#testSetEnum[,classColumn][testSetEnum[,classColumn]=="SwingNULL"] <- 2
#testSetEnum[,classColumn][testSetEnum[,classColumn]=="NotSwingLo"] <- 2
#testSetEnum[,classColumn][testSetEnum[,classColumn]=="NotSwingHi"] <- 2
#testSetEnum[,classColumn] <- as.integer(testSetEnum[,classColumn])
#xTest <- as.matrix(testSetEnum[,x_col_start_pos:x_col_end_pos])
xTest.bal <- as.data.frame(dsTest.bal[,x_col_start_pos:x_col_end_pos])
yTest.bal <- as.factor(dsTest.bal[,classColumn])
# xTest.bal <- as.data.frame(dsTest.bal2[,x_col_start_pos:x_col_end_pos])
# yTest.bal <- as.factor(dsTest.bal2[,classColumn])
# xValp1a.bal <- as.data.frame(dsValp1a.bal[,x_col_start_pos:x_col_end_pos])
# yValp1a.bal <- as.factor(dsValp1a.bal[,classColumn])
#
# xValp1b.bal <- as.data.frame(dsValp1b.bal[,x_col_start_pos:x_col_end_pos])
# yValp1b.bal <- as.factor(dsValp1b.bal[,classColumn])
#
# xValp2a.bal <- as.data.frame(dsValp2a.bal[,x_col_start_pos:x_col_end_pos])
# yValp2a.bal <- as.factor(dsValp2a.bal[,classColumn])
#
# xValp2b.bal <- as.data.frame(dsValp2b.bal[,x_col_start_pos:x_col_end_pos])
# yValp2b.bal <- as.factor(dsValp2b.bal[,classColumn])
#nrow(dsValidation);nrow(dsValp1a);nrow(dsValp1b);nrow(dsValp2a);nrow(dsValp2b);
xVal.bal <- as.data.frame(dsValidation.bal[,x_col_start_pos:x_col_end_pos])
yVal.bal <- as.factor(dsValidation.bal[,classColumn])#dsValidation[,classColumn])
# xVal.bal <- as.data.frame(dsValidation.bal2[,x_col_start_pos:x_col_end_pos])
# yVal.bal <- as.factor(dsValidation.bal2[,classColumn])#dsValidation[,classColumn])
xBkup.bal <- as.data.frame(dataSet.bkup[,11:3870])
yBkup.bal <- as.factor(dataSet.bkup[,classColumn])
# xValp1a <- as.data.frame(dsValp1a[,x_col_start_pos:x_col_end_pos])
# yValp1a <- as.factor(dsValp1a[,classColumn])
#
# xValp1b <- as.data.frame(dsValp1b[,x_col_start_pos:x_col_end_pos])
# yValp1b <- as.factor(dsValp1b[,classColumn])
#
# xValp2a <- as.data.frame(dsValp2a[,x_col_start_pos:x_col_end_pos])
# yValp2a <- as.factor(dsValp2a[,classColumn])
#
# xValp2b <- as.data.frame(dsValp2b[,x_col_start_pos:x_col_end_pos])
# yValp2b <- as.factor(dsValp2b[,classColumn])
#
#
# blah <- dsTraining.bal; meta.na <- as.data.frame(is.finite.data.frame(blah));
# ncol(blah);nrow(blah);table(sapply(blah, class));table(meta.na[,]);table(blah[,classColumn]);round(prop.table(table(blah[,classColumn])) * 100, digits = 2);
#https://dl.dropboxusercontent.com/u/45301435/inTreesDemo.R
#install.packages("inTrees");
#install.packages("randomForest");
#install.packages("RRF");
#install.packages("gbm");
#install.packages("foreach");
#install.packages("ranger");
#library(foreach);
detach("package:ranger", unload=TRUE)
library(ranger);
library(inTrees);
library(randomForest);
library(RRF);
library(gbm);
set.seed(1)
#rm(list=ls(all=TRUE));graphics.off()
lists = list()
# measure user-defined conditions
#myRule <- "X[,3] > 5 & X[,4] > 1"
#measureRule(myRule,X,target) # without providing the outcome of the condition
#measureRule(myRule,X,target,"versicolor") # providing the outcome of the condition
maxTrainIteration <- 3
trainIteration <- 0; loopNum <- 0; bestTrainNum <- NULL; smallestError <- 100;
while (trainIteration <= maxTrainIteration) {
#for (num0 in seq(1,10,1)) {for (num1 in seq(1,10,1)){
orig.distPercent <- as.data.frame(round(prop.table(table(dsTraining.bal[,classColumn]))* 100, digits = 1))
orig.distRecordCount <- as.data.frame(table(dsTraining.bal[,classColumn]))
ratioClassAvsBvsC <- (orig.distPercent[1,2]-orig.distPercent[2,2])+orig.distPercent[3,2]
#set random values for model params
randVar0 <- roundUp(sample(10:85,1,replace=TRUE),5);#ntree
randVar1 <- roundUp(sample(1000:(nrow(xTrain.bal)*.6),1,replace=TRUE),100);#sampsize
randVar2 <- roundUp(sample(10:(nrow(xTrain.bal)*.05),1,replace=TRUE),5);#nodesize
randVar3 <- roundUp(sample((sqrt(((2*randVar1 - 16*randVar2)/randVar2)^2)):(sqrt(((2*randVar1 - 200*randVar2)/randVar2)^2)),1,replace=TRUE),5);#maxnodes
randVar4 <- roundUp(sample(10:(ncol(xTrain.bal)*.2),1,replace=TRUE),5);#mtry
rv5 <- roundUp(sample(1:100,1,replace=TRUE),5);
rv6 <- roundUp(sample(1:100,1,replace=TRUE),5);
if (loopNum == 0) {
bestParams <- data.frame(cbind(recordType=1
,rv0=0,rv1=0,rv2=0,rv3=0
,rv4=0,rv5=0,rv6=0
,type=0#rf$type
,localImportance=0#rf$localImportance
,proximity=0#rf$proximity
,mtry=0#rf$mtry
,nrnodes=0#rf$forest$nrnodes
,ntree=0L#rf$forest$ntree
,Hi_Hi=0,Hi_Lo=0,Hi_NULL=0
,Lo_Lo=0,Lo_Hi=0,Lo_NULL=0
,NULL_NULL=0,NULL_Hi=0,NULL_Lo=0
,Hi.Err.test=0.1,Lo.Err.test=0.1,NULL.Err.test=0.1
,Hi.Err.train=0.1,Lo.Err.train=0.1,NULL.Err.train=0.1
,Hi.Err.diff=0.1,Lo.Err.diff=0.1,NULL.Err.diff=0.1
,smallest.Error=0.1,TrainNum=0
))
print(bestParams)
}
loopNum <- loopNum + 1
print(paste("Begin test:",loopNum,"sampsize=",randVar1,"nodesize=",randVar2,"maxnodes=",randVar3));
# train RandomForest
rf <- ranger(data = dsTraining,dependent.variable.name = "ClassChar", num.trees=50#,mtry = 15,min.node.size = 25,num.threads = 4
,replace=TRUE,importance = "impurity",classification = TRUE,write.forest = TRUE
,verbose = TRUE,save.memory = FALSE)
classwt <- c(.95,.95,.025)
rf <- randomForest(dsTraining[,x_col_start_pos:x_col_end_pos],dsTraining[,classColumn],nodesize = 15,ntree=250#,sampsize=randVar1,,maxnodes= randVar3,mtry = randVar4
,replace=TRUE,importance = TRUE,do.trace=TRUE,classwt=classwt)#,classwt=c("SwingHi"=rv5,"SwingLo"=rv5,"SwingNULL"=rv6)
# )
#,set.seed(1)
#,samplesize = c("SwingNULL" = 100, "SwingHi" = 50, "SwingLo" = 50)
#,strata = yTrain.bal
# rf <- foreach(ntree=rep(randVar0, 4), .combine=combine, .packages='randomForest') %dopar% +
# randomForest(xTrain.bal,yTrain.bal,ntree=ntree,sampsize=randVar1#, maxnodes= randVar3
# ,nodesize = randVar2,mtry = randVar4,replace=TRUE,importance = TRUE,set.seed(1))
#rf.all <- combine(rf, rf2, rf3, rf4)
#print(rf.all)
#rf <- rf.all
# predict
#predictions <- predict(rf, xTest.bal); predictions.t <- predict(rf, xTrain.bal);
predictions <- predict(rf, dsTest[,x_col_start_pos:x_col_end_pos],type="response");
predictions.t <- predict(rf, dsTraining[,x_col_start_pos:x_col_end_pos],type="response");
predictions.v <- predict(rf, dsValidation[,x_col_start_pos:x_col_end_pos],type="response");
#create confusion matrix
actual <- as.matrix(dsTest[,classColumn]); #test
actual.t <- as.matrix(dsTraining[,classColumn]); #train
actual.v <- as.matrix(dsValidation[,classColumn]); #validate
predicted <- as.matrix(predictions);
predicted.t <- as.matrix(predictions.t);
predicted.v <- as.matrix(predictions.v);
#predicted <- as.matrix(predictions$predictions); predicted.t <- as.matrix(predictions.t$predictions);
conf_matrix <- table(actual,predicted);
conf_matrix.t <- as.data.frame(rf$confusion);
conf_matrix.t <- conf_matrix.t[,1:3]
conf_matrix.v <- table(actual.v,predicted.v);
conf_matrix.t #train
conf_matrix #test
conf_matrix.v #validate
# Leftover from an external randomForest tutorial (rf.form, cross.sell.dev and cross.sell.rf
# are not defined in this script); kept commented out so it does not overwrite the rf object
# trained above.
# rf <- randomForest(rf.form,
#                    cross.sell.dev,
#                    ntree=500,
#                    importance=T)
# plot(cross.sell.rf)
# Variable Importance Plot
varImpPlot(rf,sort = T,main="Variable Importance",n.var=50)
# Variable Importance Table
var.imp <- data.frame(importance(rf,type=2))
# make row names as columns
var.imp$Variables <- row.names(var.imp)
VariableImportanceTable <- as.data.frame(var.imp[order(var.imp$MeanDecreaseGini,decreasing = T),])
VariableImportanceTable[1:10,]
#(1=mean decrease in accuracy, 2=mean decrease in node impurity
plot(margin(rf,sort=T))
rf$forest
# Stray exploratory lines: importantVars is never defined and conf_matrix.df is only built
# further down, so these would error if run here.
# importantVars <- importantVars[order(conf_matrix.df$predicted,conf_matrix.df$actual, decreasing=FALSE),]
# conf_matrix.df <- conf_matrix.df[order(conf_matrix.df$predicted,conf_matrix.df$actual, decreasing=FALSE),]
# ?order
# Calculate error
Class1error <- sum(conf_matrix[1,2:3])/sum(conf_matrix[1,])*100;#SwingHi
Class2error <- sum(conf_matrix[2,c(1,3)])/sum(conf_matrix[2,])*100;#SwingLo
Class3error <- sum(conf_matrix[3,1:2])/sum(conf_matrix[3,])*100;#SwingNULL
Class1error.t <- sum(conf_matrix.t[1,2:3])/sum(conf_matrix.t[1,])*100;#SwingHi
Class2error.t <- sum(conf_matrix.t[2,c(1,3)])/sum(conf_matrix.t[2,])*100;#SwingLo
Class3error.t <- sum(conf_matrix.t[3,1:2])/sum(conf_matrix.t[3,])*100;#SwingNULL
ClassErrs <- c(SwingHi=Class1error,SwingLo=Class2error,SwingNULL=Class3error)
avgClassErr <- mean(exp(ClassErrs/100)/(exp(ClassErrs/100)+1))
err <- avgClassErr;
conf_matrix.df <- as.data.frame(conf_matrix)
conf_matrix.df <- conf_matrix.df[order(conf_matrix.df$predicted,conf_matrix.df$actual, decreasing=FALSE),]
conf_matrix.df <- cbind(conf_matrix.df,Err=c(Class1error,Class1error,Class1error,Class2error,Class2error,Class2error,Class3error,Class3error,Class3error))
#output ideal params
if ( (err <= smallestError+(smallestError/30)) ) {
delta <- (smallestError - err)
smallestError <- err
bestTrainNum <- loopNum
new_row <- c(0,randVar0,randVar1,randVar2,randVar3
,randVar4,rv5,rv6
,ifelse(is.null(rf$type),0,rf$type)
,ifelse(is.null(rf$localImportance),0,rf$localImportance)
,ifelse(is.null(rf$proximity),0,rf$proximity)
,ifelse(is.null(rf$mtry),0,rf$mtry)
,ifelse(is.null(rf$forest$nrnodes),0,rf$forest$nrnodes)
,ifelse(is.null(rf$forest$ntree),0,rf$forest$ntree)
,conf_matrix.df[1,3],conf_matrix.df[2,3],conf_matrix.df[3,3]
,conf_matrix.df[5,3],conf_matrix.df[4,3],conf_matrix.df[6,3]
,conf_matrix.df[9,3],conf_matrix.df[7,3],conf_matrix.df[8,3]
,sprintf("%.4f",Class1error),sprintf("%.4f",Class2error),sprintf("%.4f",Class3error)
,sprintf("%.4f",Class1error.t),sprintf("%.4f",Class2error.t),sprintf("%.4f",Class3error.t)
,sprintf("%.4f",Class1error.t-Class1error),sprintf("%.4f",Class2error.t-Class2error),sprintf("%.4f",Class3error.t-Class3error)
,sprintf("%.6f",smallestError),bestTrainNum)
bestParams <- rbind(bestParams, new_row)
bestParams <- bestParams[order(bestParams$smallest.Error,decreasing=FALSE),]
print(bestParams)
#Extract raw rules from a random forest:
treeList <- RF2List(rf) # transform rf object to an inTrees' format
exec <- extractRules(treeList,dsTrainingTest) # R-executable conditions
#Measure rules. "len" is the number of variable-value pairs in a condition, "freq" is the percentage of data satisfying a condition, "pred" is the outcome of a rule, i.e., "condition" => "pred", "err" is the error rate of a rule.
target <- yTest.bal
X <- xTest.bal
ruleMetric <- getRuleMetric(exec,X,target) # get rule metrics
ruleMetric_orig <- ruleMetric
#Prune each rule:
ruleMetric <- pruneRule(ruleMetric,X,target)
ruleMetric_pruned <- ruleMetric
#Select a compact rule set:
ruleMetric <- selectRuleRRF(ruleMetric,X,target)
#ruleMetric_compact <- ruleMetric
#Build an ordered rule list as a classifier:
#learner <- buildLearner(ruleMetric,X,target)
#learner_orig <- learner
#Make rules more readable:
#readableRules <- presentRules(learner,colnames(X))
readableRules <- presentRules(ruleMetric,colnames(X))
readableRules <- presentRules(ruleMetric_pruned,colnames(X))
readableRules.df <- as.data.frame(readableRules)
readableRules.df[,1:3] <- sapply(readableRules.df[,1:3],as.character)
readableRules.df[,1:3] <- sapply(readableRules.df[,1:3],as.numeric)
table(sapply(readableRules.df, class))
df <- readableRules.df[ order(-readableRules.df[,2], readableRules.df[,3]), ]
dfu <- unique(df[,1:5])
dfu <- cbind(dfu,bestParams[2,])
if (nrow(bestParams) <= 2) {
write.csv(df, file = "readableRules.csv",row.names=TRUE,append = FALSE)
write.csv(dfu, file = "readableRulesUnique.csv",row.names=TRUE,append = FALSE)
}
else {
write.csv(df, file = "readableRules.csv",row.names=TRUE,append = T)
write.csv(dfu, file = "readableRulesUnique.csv",row.names=TRUE,append = T)
}
library(xtable)
print(xtable(dfu), include.rownames=FALSE)
}
#else {
# print(paste("bestTrainNum: ",bestTrainNum))
#}
#}}
if (trainIteration == maxTrainIteration) {
write.csv(bestParams, file = "bestParams_RF.csv",row.names=TRUE)
}
trainIteration <- trainIteration+1;
gc()
}
X <- iris[,1:(ncol(iris)-1)]
target <- iris[,"Species"]
rf <- RRF(X,as.factor(target),ntree=100) # build an ordinary RF
treeList <- RF2List(rf)
ruleExec <- extractRules(treeList,X) # transform to R-executable rules
#The MSR and % variance explained are based on OOB (out-of-bag) estimates, a very clever device
#in random forests to get honest error estimates. mtry is the number of variables randomly chosen
#at each split. (Comment adapted from the Stanford StatLearning lab linked below, where p = 13;
#here the loop below tries every mtry value from 1 to 822, records the results, and makes a plot.)
#x_col_start_pos <- 2
#x_col_end_pos <- 823
#https://lagunita.stanford.edu/c4x/HumanitiesSciences/StatLearning/asset/ch8.html
oob.err = double(822)
test.err = double(822)
for (mtry in 1:822) {
fit = randomForest(yTrain.bal ~ ., data = xTrain.bal, mtry = mtry,
ntree = 10)
pred = predict(fit, xTest.bal)
#create confusion matrix
actual <- as.matrix(yTest.bal);
predicted <- as.matrix(pred);
conf_matrix <- table(pred,actual);
# Calculate error
Class1error <- sum(conf_matrix[1,2:3])/sum(conf_matrix[1,])*100;#SwingHi
Class2error <- sum(conf_matrix[2,c(1,3)])/sum(conf_matrix[2,])*100;#SwingLo
Class3error <- sum(conf_matrix[3,1:2])/sum(conf_matrix[3,])*100;#SwingNULL
ClassErrs <- c(SwingHi=Class1error,SwingLo=Class2error,SwingNULL=Class3error)
avgClassErr <- mean(exp(ClassErrs/100)/(exp(ClassErrs/100)+1))
oob.err[mtry] = mean(ClassErrs)
  test.err[mtry] = mean(yTest.bal != pred) * 100  # misclassification rate in %, factor classes cannot be subtracted
cat(mtry, "= {");cat(sprintf("%.2f",mean(ClassErrs)),", ");cat(sprintf("%.2f",avgClassErr),"} ");conf_matrix;
}
matplot(1:mtry, cbind(test.err, oob.err), pch = 19, col = c("red", "blue"), type = "b", ylab = "Error (%)")
legend("topright", legend = c("Test", "OOB"), pch = 19, col = c("red", "blue"))
sampsize <- 1200; nodesize <- 280; maxnodes <- 1200/280;
nrnodes <- 2 * trunc(sampsize / nodesize) + 1
maxnodes > nrnodes
maxnodes < nrnodes
maxnodes = 2 * sampsize / nodesize + 1
(2*sampsize + 3*nodesize)/nodesize
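# Sketch (helper name is ours, not from the original script): the lines above explore how
# randomForest sizes trees. The nrnodes bound computed above (2 * trunc(sampsize / nodesize) + 1)
# caps how many nodes a tree can have, so a requested maxnodes larger than that bound adds nothing.
checkMaxnodes <- function(sampsize, nodesize, maxnodes) {
  nrnodes <- 2 * trunc(sampsize / nodesize) + 1
  list(nrnodes = nrnodes, maxnodes_within_bound = maxnodes <= nrnodes)
}
# checkMaxnodes(1200, 280, 5)  # example: nrnodes = 9, so maxnodes = 5 stays within the bound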
levels(yTrain.bal)
classwt=c("SwingHi"=0,"SwingLo"=0,"SwingNULL"=0)
# random forest
rf <- randomForest(x,y,ntree=100#,maxnodes = 5
,mtry = floor(sqrt(ncol(x)))/10
,sampsize = .9*nrow(x),nodesize = floor(sqrt(nrow(x)))*1
,replace=TRUE,importance = TRUE)
lists[['rf']] <- RF2List(rf) # extract a list of trees
rf$confusion
rf$err.rate
rf.cv <- rfcv(xTest.bal, yTest.bal, cv.fold=10)
with(rf.cv, plot(n.var, error.cv))
lists[['rf']]
# regularized random forest
?RRF
rrf <- RRF(x,as.factor(y),ntree=100, flagReg = 1)
lists[['rrf']] <- RF2List(rrf)
X <- xTrain.bal; class <- yTrain.bal;
#ordinary random forest.
rf <- RRF(X,as.factor(class), flagReg = 0)
impRF <- rf$importance
impRF <- impRF[,"MeanDecreaseGini"]
rf$feaSet
#regularized random forest
rrf <- RRF(X,as.factor(class), flagReg = 1)
rrf$feaSet
#guided regularized random forest
imp <- impRF/(max(impRF))#normalize the importance score
gamma <- 0.5
coefReg <- (1-gamma)+gamma*imp #weighted average
grrf <- RRF(X,as.factor(class),coefReg=coefReg, flagReg=1)
grrf$feaSet
#guided random forest
gamma <- 1
coefReg <- (1-gamma)+gamma*imp
grf <- RRF(X,as.factor(class),coefReg=coefReg, flagReg=0)
grf$feaSet
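# Sketch (helper name is ours): gamma blends a flat penalty with the normalised importance
# scores when building coefReg above -- gamma = 0 reproduces the ordinary RRF (coefReg == 1
# for every feature), gamma = 1 penalises purely by importance (the guided RF just above).
coefRegFor <- function(imp_norm, gamma) (1 - gamma) + gamma * imp_norm
# sapply(c(0, 0.5, 1), function(g) range(coefRegFor(imp, g)))  # example sweep over gamma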
# boosted trees
?gbm
gbmFit <- gbm(y~ ., data=cbind(xTest.bal,yTest.bal), n.tree = 100,
interaction.depth = 10,distribution="multinomial")
lists[['gbm']] <- GBM2List(gbmFit,x)
dataSet <- read.csv("data/vw_barFeatures_train_r_20160819_SUBSET_BALANCED.csv", header = TRUE)
rc(dataSet)
rc(dataSet)
rc(dsTraining)
rc(dsTest)
unique(dataSet[,1])
unique(dsTraining[,1])
unique(dsTest[,1])
dsColNames <- as.character(names(dataSet))
dsColNames
dsColNames <- as.character(names(dsTraining))
dsColNames
dsColNames <- as.character(names(dsTest))
dsColNames
homedir<-getSrcDirectory(function(x) {x})
# Setting working directory
# need to be adjusted to server conditions
if (homedir==""|is.na(homedir)){
homedir <-"C://Users//Neal//Documents//www.DAYTRADINGLOGIC.com//_neal//swing//R"
#homedir <-"C://Users//nbb//Downloads//rscripts20160529"
}
# homedir <-"~//Zlecenia//ODesk//Neal Webster//swing prediction"
setwd(homedir)
#if ("package:gbm" %in% search()) { detach("package:gbm", unload=TRUE) }
#if ("gbm" %in% rownames(installed.packages())) { remove.packages("gbm") }
library(inTrees);
#install.packages("gbm")
library(gbm);
dsTrainingTest <- read.csv("data/vw_barFeatures_train_r_20161123_balanced_TrainingTest.csv", header = TRUE)
dsValidation <- read.csv("data/vw_barFeatures_train_r_20161123_balanced_Validation.csv", header = TRUE)
dsTrainingTest <- read.csv("data/vw_barFeatures_train_r_20161123_balanced_SwingHi_TrainingTest.csv", header = TRUE)
dsValidation <- read.csv("data/vw_barFeatures_train_r_20161123_balanced_SwingHi_Validation.csv.csv", header = TRUE)
dsTrainingTest <- read.csv("data/vw_barFeatures_train_r_20161123_balanced_SwingLo_TrainingTest.csv", header = TRUE)
dsValidation <- read.csv("data/vw_barFeatures_train_r_20161123_balanced_SwingLo_Validation.csv", header = TRUE)
ncol(dsTrainingTest);ncol(dsTrainingTest)==ncol(dsValidation);
nrow(dsTrainingTest);nrow(dsTrainingTest)!=nrow(dsValidation);
#ordinality of class and features
classColumn <- 1
y_col_pos <- classColumn
x_col_start_pos <- 2
x_col_end_pos <- 3246
y_col_pos; x_col_start_pos; x_col_end_pos
dsColNames <- as.character(names(dsTrainingTest))
classColumnName <- dsColNames[classColumn]
print(round(prop.table(table(dsTrainingTest[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dsValidation[,classColumn]))* 100, digits = 1))
unique(dsTrainingTest[,classColumn])
unique(dsTrainingTest[,1])
gbm1 <- gbm(dsTrainingTest[,classColumn]~ ., data=dsTrainingTest[,x_col_start_pos:x_col_end_pos],n.cores=2,verbose=TRUE
,n.trees=15, interaction.depth=10, n.minobsinnode = 2, shrinkage=0.61#,train.fraction = 0.15
,distribution="multinomial"#,train.fraction = .1
#shrinkage=0.00001,bag.fraction = 0.1
)
# n.trees=20)#, # number of trees
# shrinkage=0.25, # learning rate, 0.001 to 0.1 usually work
# interaction.depth=10, # 1: additive model, 2: two-way interactions, etc.
# bag.fraction = 0.25, # subsampling fraction, 0.5 is probably best
# train.fraction = 0.15, # fraction of data for training,first train.fraction*N used for training
# n.minobsinnode = 60, # minimum total weight needed in each node
# cv.folds=10, # do 3-fold cross-validation cross-validation returned w/ "cv.error"
# keep.data=TRUE, # keep a copy of the dataset with the object
# verbose=TRUE, # print out progress
# n.cores=4) # number of cores to use for cross-validation
warnings()
# check performance using an out-of-bag estimator
# OOB underestimates the optimal number of iterations
best.iter <- gbm.perf(gbm1,method="OOB")
print(best.iter)
# check performance using a 50% heldout test set
best.iter <- gbm.perf(gbm1,method="test")
print(best.iter)
# check performance using 5-fold cross-validation
best.iter <- gbm.perf(gbm1,method="cv")
print(best.iter)
# plot the performance # plot variable influence
summary(gbm1,n.trees=1) # based on the first tree
summary(gbm1,n.trees=best.iter) # based on the estimated best number of trees
# compactly print the first and last trees for curiosity
print(pretty.gbm.tree(gbm1,1))
print(pretty.gbm.tree(gbm1,gbm1$n.trees))
#plot best num of trees
gbm.perf(gbm1)
gbm.perf(gbm1,
plot.it = TRUE,
oobag.curve = F,
overlay = TRUE,
method="test")# method="OOB",method="test",method="cv"
gbm1
import <- as.data.frame(relative.influence(gbm1, n.trees = 10))
import
dsTraining <- dataSet
# predict
x <- dsTraining[,x_col_start_pos:x_col_end_pos]; y <- dsTraining[,classColumn];
x <- dsTest[,x_col_start_pos:x_col_end_pos]; y <- dsTest[,classColumn];
x <- dsTrainingTest[,x_col_start_pos:x_col_end_pos]; y <- dsTrainingTest[,classColumn];
nrow(dsTrainingTest)
#x <- dsValidation[,x_col_start_pos:x_col_end_pos]; y <- dsValidation[,classColumn];
#nrow(dsValidation)
#nrow(x)
#x <- xBkup.bal; y <- yBkup.bal;
pred <- as.data.frame(predict(gbm1, x, n.trees = 15,type = 'response'));
#pred <- as.data.frame(pred)
names(pred) <- c(levels(y))#"SwingHi", "SwingLo", "SwingNULL")
#pred <- apply(pred, 1,which.max(pred))
pred.test <- rep(NA,1)
for (i in 1:nrow(x)) {
pred.test[i] <- colnames(pred)[(which.max(pred[i,]))]
}
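# Vectorised alternative to the loop above (a sketch, kept commented out so behaviour is
# unchanged): base R's max.col() picks the highest-probability column per row in one call.
# pred.test <- colnames(pred)[max.col(as.matrix(pred), ties.method = "first")]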
pred <- as.factor(pred.test)
actual <- as.matrix(y);
predicted <- as.matrix(pred);
conf_matrix <- table(predicted,actual);
conf_matrix
## save this model
save(gbm1, file = "model_GBM_20161202_1.rda")
## load the model
load("model_GBM_20161202_1.rda")
## under sampling NULL class to balance class distributions appears to work best
##gbm1 <- gbm(yTrain.bal~ ., data=cbind(xTrain.bal,yTrain.bal), distribution="multinomial",
## interaction.depth=25,n.trees=50,cv.folds=5,n.cores=4,verbose=TRUE)
# actual
# predicted SwingHi SwingLo SwingNULL
# SwingHi 1053 2 77
# SwingLo 0 1033 72
# SwingNULL 25 14 908
#
# actual
# predicted SwingHi SwingLo SwingNULL
# SwingHi 528 2 40
# SwingLo 0 518 44
# SwingNULL 10 8 467
#
# actual
# predicted SwingHi SwingLo SwingNULL
# SwingHi 1023 2 92
# SwingLo 1 1075 86
# SwingNULL 18 24 885
#
# actual
# predicted SwingHi SwingLo SwingNULL
# SwingHi 2076 4 1514
# SwingLo 1 2108 1353
# SwingNULL 43 38 14301
gbmFit <- gbm1
#X <- xTrain.bal; target <- yTrain.bal;
#X <- xTest.bal; target <- yTest.bal;
#X <- xVal.bal; target <- yVal.bal;
X <- dataSet[,x_col_start_pos:x_col_end_pos]; target <- dataSet[,classColumn];
treeList <- GBM2List(gbmFit,X)
ruleExec = extractRules(treeList,X,maxdepth = 50)
ruleExec <- unique(ruleExec)
#ruleExec <- ruleExec[1:min(2000,length(ruleExec)),,drop=FALSE]
ruleMetric <- getRuleMetric(ruleExec,X,target)
ruleMetric <- pruneRule(ruleMetric,X,target, maxDecay = 0, typeDecay = 1)
ruleMetric <- unique(ruleMetric)
learner <- buildLearner(ruleMetric,X,target,minFreq = 0.0000001)
pred <- applyLearner(learner,X)
#get column names that match those in EasyLanguage
hdr <- read.csv("header.csv", sep = ",", skip = 0, header = TRUE,comment.char = "", check.names = FALSE)
hdr <- as.character(names(hdr))  # as.character() takes no check.names argument
#set column names in dataset to those from header.csv
names(X) <- hdr
# more readable format
readableLearner <- presentRules(learner,colnames(X))
#err <- 1-sum(pred==target)/length(pred);
write.csv(readableLearner, file = "readableRules_GBM_20160917_3.csv",row.names=TRUE)
readableLearner
library(RRF)
target <- yTrain.bal
X <- xTrain.bal
rf <- RRF(X,as.factor(target),ntree=100) # build an ordinary RF
treeList <- RF2List(rf)
ruleExec <- extractRules(treeList,X) # transform to R-executable rules
ruleExec <- unique(ruleExec)
ruleMetric <- getRuleMetric(ruleExec,X,target) # measure rules
write.csv(readableLearner, file = "readableRules_RF.csv",row.names=TRUE)
#Extract raw rules from a GBM:
treeList <- GBM2List(gbm1,x) # transform rf object to an inTrees' format
exec <- extractRules(treeList,X) # R-executable conditions
exec <- unique(exec)
#Measure rules. "len" is the number of variable-value pairs in a condition, "freq" is the percentage of data satisfying a condition, "pred" is the outcome of a rule, i.e., "condition" => "pred", "err" is the error rate of a rule.
ruleMetric <- getRuleMetric(exec,X,target) # get rule metrics
ruleMetric_orig <- ruleMetric
#Prune each rule:
ruleMetric <- pruneRule(ruleMetric,X,target)
ruleMetric_pruned <- ruleMetric
#Select a compact rule set:
ruleMetric <- selectRuleRRF(ruleMetric,X,target)
#ruleMetric_compact <- ruleMetric
#Build an ordered rule list as a classifier:
#learner <- buildLearner(ruleMetric,X,target)
#learner_orig <- learner
#Make rules more readable:
#readableRules <- presentRules(learner,colnames(X))
readableRules <- presentRules(ruleMetric,colnames(X))
readableRules <- presentRules(ruleMetric_pruned,colnames(X))
readableRules.df <- as.data.frame(readableRules)
readableRules.df[,1:3] <- sapply(readableRules.df[,1:3],as.character)
readableRules.df[,1:3] <- sapply(readableRules.df[,1:3],as.numeric)
table(sapply(readableRules.df, class))
df <- readableRules.df[ order(-readableRules.df[,2], readableRules.df[,3]), ]
dfu <- unique(df[,])
dfu <- cbind(dfu,bestParams[2,])
dfu
if (nrow(bestParams) <= 2) {
write.csv(df, file = "readableRules.csv",row.names=TRUE,append = F)
write.csv(dfu, file = "readableRulesUnique.csv",row.names=TRUE,append = F)
}
else {
write.csv(df, file = "readableRules.csv",row.names=TRUE,append = T)
write.csv(dfu, file = "readableRulesUnique.csv",row.names=TRUE,append = T)
}
library(xtable)
print(xtable(dfu), include.rownames=FALSE)
#http://www.numbrcrunch.com/blog/comparing-tree-based-classification-methods-using-the-kaggle-otto-competition
# Install gbm package
install.packages('gbm')
library(gbm)
# Set a unique seed number so you get the same results every time you run the model below;
# the number itself does not matter
set.seed(17)
# Begin recording the time it takes to create the model
ptm5 <- proc.time()
# Create a boosted trees model (gbm) using the target field as the response and all 93 features as inputs (.)
fit5 <- gbm(target ~ ., data=strain, distribution="multinomial", n.trees=1000,
shrinkage=0.05, interaction.depth=12, cv.folds=2)
# Finish timing the model
fit5.time <- proc.time() - ptm5
# Test the boosting model on the holdout test dataset
trees <- gbm.perf(fit5)
fit5.stest <- predict(fit5, stest, n.trees=trees, type="response")
fit5.stest <- as.data.frame(fit5.stest)
names(fit5.stest) <- c("Class_1","Class_2","Class_3","Class_4","Class_5","Class_6","Class_7","Class_8","Class_9")
fit5.stest.pred <- rep(NA,2000)
for (i in 1:nrow(stest)) {
fit5.stest.pred[i] <- colnames(fit5.stest)[(which.max(fit5.stest[i,]))]}
fit5.pred <- as.factor(fit5.stest.pred)
# Create a confusion matrix of predictions vs actuals
table(fit5.pred,stest$target)
# Determine the error rate for the model
fit5$error <- 1-(sum(fit5.pred==stest$target)/length(stest$target))
fit5$error
y <- yVal.bal
x <- colnames(x)[apply(x, 1, which.max)]  # x becomes a character vector of predicted class names
x[1:5]
# colnames(x)[apply(x, 1, which.max)]  # would error now: x is no longer a matrix
confusion <- function(a, b){
tbl <- table(a, b)
mis <- 1 - sum(diag(tbl))/sum(tbl)
list(table = tbl, misclass.prob = mis)
}
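# Tiny illustrative call on made-up vectors, just to show the return shape:
# confusion(c("a", "b", "a", "b"), c("a", "b", "b", "b"))
# $table is the cross-tabulation and $misclass.prob is 1 - 3/4 = 0.25 for this example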
# Here is the best predictor:
confusion(predict(gbm1, cbind(xVal.bal,yVal.bal),n.trees=5), yVal.bal)
tbl <- table(x,y)
str(yVal.bal)
nrow(as.list(predictions.Test.gbm))
nrow(as.list(yVal.bal))
# Example copied from an external gbm demo (fit.gbm1, test.data2 and train.data2 are not
# defined in this script); the pasted console output is commented out so the file still parses.
confusion(predict(fit.gbm1, test.data2, n.trees = 69) > 0, test.data2$y > 0)
# $table
#          b
# a       FALSE TRUE
#   FALSE  4409  694
#   TRUE    533 4364
# $misclass.prob
# [1] 0.1227
# 200 is better:
confusion(predict(fit.gbm1, test.data2, n.trees = 200) > 0, test.data2$y > 0)
# $table
#          b
# a       FALSE TRUE
#   FALSE  4635  437
#   TRUE    307 4621
# $misclass.prob
# [1] 0.0744
# Even with 400 trees, not seriously overfit
confusion(predict(fit.gbm1, test.data2) > 0, test.data2$y > 0)
# $table
#          b
# a       FALSE TRUE
#   FALSE  4680  405
#   TRUE    262 4653
# $misclass.prob
# [1] 0.0667
# Note that we have almost perfect classification on training sample
confusion(predict(fit.gbm1, train.data2) > 0, train.data2$y > 0)
# $table
#          b
# a       FALSE TRUE
#   FALSE   994    1
#   TRUE      0 1005
# $misclass.prob
# [1] 5e-04
gbm.perf(gbm1, method = "cv")
library(dplyr)
preds <- predict(gbm1,xVal.bal,n.trees=gbm1$n.trees,type='response')
density(preds) %>% plot
install.packages("caret")
library(caret)
# Here is the best predictor:
confusion(predict(gbm1, test.data2, n.trees = 69) > 0, test.data2$y > 0)$table
getModelInfo()$gbm$type
pretty.gbm.tree(gbm1, i.tree = 1)
confusionMatrix(predictions.Test.gbm,yTest.bal)
?confusionMatrix
summary(gbm1)
print(gbm1)
str(gbm1)
str(predictions.Train.gbm)
table(predicted.gbm<50,actual.gbm)
actual.gbm <- as.matrix(yTest.bal); actual.t <- as.matrix(yTrain.bal);
predicted.gbm <- as.matrix(predictions.Test.gbm); predicted.t <- as.matrix(predictions.Train.gbm);
conf_matrix.gbm <- table(predictions.Test.gbm,actual.gbm); conf_matrix.t <- table(predicted.t,actual.t);
gbm.perf(gbm1,
plot.it = TRUE,
oobag.curve = FALSE,
overlay = TRUE,
method="OOB")#method="cv")#method="test")
# Calculate error
Class1error.gbm <- sum(conf_matrix.gbm[1,2:3])/sum(conf_matrix.gbm[1,])*100;#SwingHi
Class2error.gbm <- sum(conf_matrix.gbm[2,c(1,3)])/sum(conf_matrix.gbm[2,])*100;#SwingLo
Class3error.gbm <- sum(conf_matrix.gbm[3,1:2])/sum(conf_matrix.gbm[3,])*100;#SwingNULL
Class1error.t <- sum(conf_matrix.t[1,2:3])/sum(conf_matrix.t[1,])*100;#SwingHi
Class2error.t <- sum(conf_matrix.t[2,c(1,3)])/sum(conf_matrix.t[2,])*100;#SwingLo
Class3error.t <- sum(conf_matrix.t[3,1:2])/sum(conf_matrix.t[3,])*100;#SwingNULL
ClassErrs.gbm <- c(SwingHi=Class1error.gbm,SwingLo=Class2error.gbm,SwingNULL=Class3error.gbm)
avgClassErr.gbm <- mean(exp(ClassErrs.gbm/100)/(exp(ClassErrs.gbm/100)+1))
err.gbm <- avgClassErr.gbm;
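# Sketch (helper name is ours, not from the original script): the "average class error" used
# throughout this file squashes each per-class error (0-100) through a logistic curve before
# averaging, which compresses large errors into roughly the (0.5, 0.73) range.
squashClassErrs <- function(errs_pct) {
  p <- exp(errs_pct / 100) / (exp(errs_pct / 100) + 1)  # logistic of error/100
  mean(p)
}
# squashClassErrs(c(SwingHi = 10, SwingLo = 20, SwingNULL = 60))  # illustrative call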
conf_matrix.df.gbm <- as.data.frame(conf_matrix.gbm)
conf_matrix.df.gbm
unseenXData <- xValp1a #xValp1a.bal,xValp1b.bal,xValp2a.bal,xValp2b.bal,xValp1a,xValp1b,xValp2a,xValp2b,xVal
unseenYData <- yValp1a #yValp1a.bal,yValp1b.bal,yValp2a.bal,yValp2b.bal,yValp1a,yValp1b,yValp2a,yValp2b,yVal
# predict
predictions <- predict(rf, unseenXData)
#create confusion matrix
actual <- as.matrix(unseenYData); predicted <- as.matrix(predictions); conf_matrix <- table(predicted,actual);conf_matrix;
# Calculate error
error1 <- conf_matrix[1,2]/(conf_matrix[1,1]+conf_matrix[1,2])
error2 <- conf_matrix[2,1]/(conf_matrix[2,1]+conf_matrix[2,2])
avgErr <- ((exp(error1)/(exp(error1)+1))+(exp(error2)/(exp(error2)+1)))/2;error1;error2;avgErr;
#Extract raw rules from a random forest:
treeList <- RF2List(rf) # transform rf object to an inTrees' format
exec <- extractRules(treeList,dsTrainingTest) # R-executable conditions
exec[1:2,]
#Measure rules. "len" is the number of variable-value pairs in a condition, "freq" is the percentage of data satisfying a condition, "pred" is the outcome of a rule, i.e., "condition" => "pred", "err" is the error rate of a rule.
target <- yVal#yTest.bal
X <- xVal#xTest.bal
ruleMetric <- getRuleMetric(exec,X,target) # get rule metrics
ruleMetric_orig <- ruleMetric
ruleMetric_orig[1:20,]
#Prune each rule:
ruleMetric <- pruneRule(ruleMetric,X,target)
ruleMetric_pruned <- ruleMetric
ruleMetric_pruned[1:20,]
#Select a compact rule set:
ruleMetric <- selectRuleRRF(ruleMetric,X,target)
ruleMetric_compact <- ruleMetric
ruleMetric_compact[,]
#Build an ordered rule list as a classifier:
learner <- buildLearner(ruleMetric,X,target)
learner_orig <- learner
learner_orig
#Make rules more readable:
readableRules <- presentRules(learner,colnames(X))
readableRules[,]
readableRules <- presentRules(ruleMetric,colnames(X))
readableRules[,]
readableRules <- presentRules(ruleMetric_pruned,colnames(X))
readableRules.df <- as.data.frame(readableRules)
readableRules.df[,1:3] <- sapply(readableRules.df[,1:3],as.character)
readableRules.df[,1:3] <- sapply(readableRules.df[,1:3],as.numeric)
table(sapply(readableRules.df, class))
df <- readableRules.df[ order(-readableRules.df[,2], readableRules.df[,3]), ]
dfu <- unique(df[,1:5])
write.csv(df, file = "readableRules.csv",row.names=TRUE)
write.csv(dfu, file = "readableRulesUnique.csv",row.names=TRUE)
# append = T  # stray assignment, leftover from the write.csv calls above
library(xtable)
print(xtable(dfu), include.rownames=FALSE)
#Extract frequent variable interactions (note the rules are not pruned or selected):
treeList <- RF2List(rf) # transform rf object to an inTrees' format
exec <- extractRules(treeList,X) # R-executable conditions
ruleMetricFreq <- getRuleMetric(exec,X,target) # get rule metrics
freqPattern <- getFreqPattern(ruleMetricFreq)
nrow(freqPattern)
freqRules <- freqPattern[which(as.numeric(freqPattern[,"len"])>3),][,] # keep conditions with more than three variable-value pairs
freqRules
readableRulesFreq <- presentRules(freqRules,colnames(X))
readableRulesFreq
library(xtable)
print(xtable(readableRulesFreq), include.rownames=FALSE)
#[1] "Training#: 134 ,trainIteration: 133 params[ 2 , 7 , 6 , 3427 , 30 , 30 ] smallestError == 0.502067331543828 delta == 0.0000 bestTrainNum == 134 error1 0.0006 error2 0.0159"
# actual
# predicted NotSwingLo SwingLo
# NotSwingLo 3259 2
# SwingLo 48 2966
# random forest
rf <- randomForest(x,y,ntree=100#,maxnodes = 5
,mtry = floor(sqrt(ncol(x)))/10
,sampsize = .9*nrow(x),nodesize = floor(sqrt(nrow(x)))*1
,replace=TRUE,importance = TRUE)
lists[['rf']] <- RF2List(rf) # extract a list of trees
rf$confusion
rf$err.rate
lists[['rf']]
# regularized random forest
rrf <- RRF(x,as.factor(y),ntree=100)
lists[['rrf']] <- RF2List(rrf)
# boosted trees
gbmFit <- gbm(y~ ., data=cbind(x,y), n.tree = 100,
interaction.depth = 10,distribution="multinomial")
lists[['gbm']] <- GBM2List(gbmFit,x)
v <- c("rf","rrf","gbm")
v
v <- c("rf") # only use rf
#v <- c("gbm") # only use gbm
#v <- c("rrf") # only use rf
X <- x
for (i in v) {
target <- y
#X <- x
treeList <- lists[[i]]
ruleExec0 <- extractRules(treeList,X) # transform to R-executable conditions
ruleExec <- unique(ruleExec0) # unique rules
cat( paste("There are ", length(ruleExec), " unique conditions. \n",sep="") )
# Too many conditions could make the following steps time-consuming,
# so one could randomly select a subset of the conditions
ix <- sample(1:length(ruleExec),min(2000,length(ruleExec))) #randomly select 2000 rules
ruleExec <- ruleExec[ix,,drop=FALSE]
ruleMetric <- getRuleMetric(ruleExec,X,target) # measure rules
lookup <- lookupRule(ruleMetric,c("X[,4]","X[,3]")) # look up rules including X[,4] and X[,3]
ruleMetric <- pruneRule(ruleMetric,X,target) # prune each rule
  # selecting rules by thresholds of frequency & error
ix <- which(as.numeric(ruleMetric[,"freq"])>0.001 & as.numeric(ruleMetric[,"err"])< 0.5)
ruleMetric <- ruleMetric[ix,]
ruleMetric <- selectRuleRRF(ruleMetric,X,target) # rule selection
learner <- buildLearner(ruleMetric,X,target) #build the simplified tree ensemble learner
pred <- applyLearner(learner,X) #appy learner to data
readableLearner <- presentRules(learner,colnames(X)) # present the rules with a more readable format
# print(readableLearner)
# -- frequent variable interactions or conditions in a tree ensemble
# NOTE: the calculation is based on ruleExec0 WITHOUT pruning or selection
ruleMetric <- getRuleMetric(ruleExec0,X,target)
freqPattern <- getFreqPattern(ruleMetric)
#ruleMetric <- getRuleMetric(freqPattern,X,target)
}
#readableLearner
#ruleMetric
#freqPattern
ruleMetric <- pruneRule(ruleMetric,X,target)
ruleMetric[1:2,]
readableRules <- presentRules(ruleMetric,colnames(X))
readableRules[1:2,]
#format the rule and metrics as a table in latex code
#install.packages("xtable")
library(xtable)
print(xtable(ruleMetric), include.rownames=F)
print(xtable(readableLearner), include.rownames=F)#use in sharelatex.com
# --- transform regression rules to classification rules
# make Sepal.Length as the target, other as predictors
X <- iris[,-1]; target <- iris[,"Sepal.Length"]
rf <- randomForest(X, target, ntree = 30) # random forest on the iris predictors defined above
ruleExec0 <- extractRules(RF2List(rf),X)
ruleExec <- unique(ruleExec0)
nrow(ruleExec0)
nrow(ruleExec)
target <- dicretizeVector(target) # discretize it into three levels with equal frequency
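# Roughly equivalent base-R sketch (assuming dicretizeVector() does equal-frequency binning
# into three levels, per the comment above; kept commented out):
# cut(iris[,"Sepal.Length"],
#     breaks = quantile(iris[,"Sepal.Length"], probs = seq(0, 1, length.out = 4)),
#     include.lowest = TRUE, labels = c("L1", "L2", "L3"))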
# methods for classification rules can then be used for
# the conditions extracted from the regression trees
ruleMetric <- getRuleMetric(ruleExec,X,target)
ruleMetric
# --- decision tree and logistic regression
X.double <- apply(X,2,as.numeric)
#install.packages("glmnet")
library(glmnet)
cvNet <- cv.glmnet(X.double,as.factor(target), family = "multinomial",type.measure = "class")
coef <- coef(cvNet)
#install.packages("rpart")
library(rpart)
r <- rpart(target ~. , X)
# decimalplaces <- function(x) {
# if ((x %% 1) != 0) {
# nchar(strsplit(sub('0+$', '', as.character(x)), ".", fixed=TRUE)[[1]][[2]])
# } else {
# return(0)
# }
# }
# decimalplaces(xn)
#
# num.decimals <- function(x) {
# stopifnot(class(x)=="numeric")
# x <- sub("0+$","",x)
# x <- sub("^.+[.]","",x)
# nchar(x)
# }
#
# num.decimals(xn)
#
#
#
# x <- c("0.0000", "0", "159.283", "1.45e+10", "1.4599E+10","11.12342525256789123456" )
# x <- as.character(dataSet[,notIntCols])
# x<-as.character(blah)
# Ndec(x);min(Ndec(x));max(Ndec(x))
# num.dec <- as.integer(Ndec(x))
# Ndec(x)
# num.dec
# paste(rep(paste(vec2-vec2,2,sep = ",",collapse = ""), 1),collapse = ",")
# paste((vec2-vec2),rep(vec2, 01),sep = ",",collapse = "")
#
# rep(vec2, 1)
# rep((vec2-vec2), 1)
#
# sapply(rep(vec2, 1)*rep((vec2-vec2)+2, 1), as.numeric)
#
# vec <- Ndec(x)
# vec <- as.data.frame(cbind(dec.places=vec,tens=10))
# vec
# str(vec)
#
#
#
# vec2<- sapply(vec+1,as.numeric)
# vec2
#
# paste(rep(c(rep((vec2-vec2), 1)), times = c(vec2)),collapse = ",")
#
# x=vec2
#
# # Method 1
# rep(x,times = c(vec2))
#
# # Method 2
# matrix(x,length(x),c(vec2))
#
#
#
# xn <- 11.12342525256789123456
#
# min(match(TRUE, round(xn, 1:20) == xn))
#
# min(xn)
#
# specify_decimal <- function(x, k) format(x, nsmall=k)
# specify_decimal(.11, 10)
#
#
#
# sub(".", "", as.character(specify_decimal(1, 10)))
#
# specify_decimal(Ndec(x), decimalplaces(Ndec(x)))
# Round x up to the nearest multiple of `to`, e.g. roundUp(12, 5) == 15 and roundUp(10, 5) == 10
roundUp <- function(x,to=10)
{
  to*(x%/%to + as.logical(x%%to))
}
roundUp(0,50)
is.finite.data.frame <- function(obj){
sapply(obj,FUN = function(x) all(is.finite(x)))
}
# rc(): report the row and column counts of a data frame / matrix as a single string
rc <- function(x) {
  if (nrow(x)>0 || ncol(x)>0) {
    rows <- nrow(x)
    columns <- ncol(x)
    result <- paste('rows:',rows,'columns:', columns,sep = " ")
  }
  else {
    result <- paste('rows:',NA,'columns:', NA,sep = " ")  # NA instead of NULL so the string stays readable
  }
  return(result)
}
homedir<-getSrcDirectory(function(x) {x})
# Setting working directory
# need to be adjusted to server conditions
if (homedir==""|is.na(homedir)){
homedir <-"C://Users//Neal//Documents//www.DAYTRADINGLOGIC.com//_neal//swing//R"
#homedir <-"C://Users//nbb//Downloads//rscripts20160529"
}
# homedir <-"~//Zlecenia//ODesk//Neal Webster//swing prediction"
setwd(homedir)
# Load data (using iris dataset from Google Drive because link @ uci.edu was not working for me today)
#iris <- read.csv(url("http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"), header = FALSE)
#iris <- read.csv(url("https://docs.google.com/spreadsheets/d/1ovz31Y6PrV5OwpqFI_wvNHlMTf9IiPfVy1c3fiQJMcg/pub?gid=811038462&single=true&output=csv"), header = FALSE)
#iris <- read.csv("vw_barFeatures_train_r_SwingLo.csv", header = TRUE)
dataSet <- read.csv("data/DataSet_Balanced_20160819.csv", header = TRUE)
dataSet <- read.csv("data/vw_barFeatures_train_r_20160819_SUBSET_BALANCED_TRAIN.csv", header = TRUE)
dataSet <- read.csv("data/vw_barFeatures_train_r_20160819_SUBSET_BALANCED.csv", header = TRUE)
dataSet <- read.csv("data/vw_barFeatures_train_r_20161123.csv", header = TRUE)
dataSet <- read.csv("data/vw_barFeatures_train_r_20161123_balanced.csv", header = TRUE)
rc(dataSet)
levels(as.factor(sapply(dataSet, class)))
table(sapply(dataSet, class))
table(dataSet[,1])
intCols <- sapply(dataSet, is.integer)
notIntCols <- !sapply(dataSet, is.integer)
notIntCols <- as.character(names(dataSet[,notIntCols]))
notIntCols <- notIntCols[which(notIntCols!="swingRecordType")]
notIntCols <- notIntCols[which(notIntCols!="numMinutesBarWasOpn")]
notIntCols;ncol(dataSet[,notIntCols]);
numCols <- sapply(dataSet, is.numeric)
notNumCols <- !sapply(dataSet, is.numeric)
factCols <- sapply(dataSet, is.factor)
notFactCols <- !sapply(dataSet, is.factor)
#min(dataSet[,notIntCols]);min(dataSet[,intCols]);
#max(dataSet[,notIntCols]);max(dataSet[,intCols]);
#tst <- -0.00143417*100000000;tst
#str(dataSet)
#linearlyRescale where [x] is some lookback period and range is "hard coded" by setting [floor] and [limit] values to literals
#(limit - floor) / (max[x] - min[x]) * (x - max[x]) + limit
#linearlyRescale <- function(x,floor,limit) (limit - floor) / ((max(x) - min(x)) * (x - max(x)) + limit)
#rescale <- function(x) (x-min(x))/(max(x) - min(x)) * 10000000000
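# Sketch (function name is ours): the commented linearlyRescale above drops a pair of
# parentheses and no longer matches the formula in the comment. A version that does match it
# (mapping x onto [floor, limit]) would be:
# linearlyRescale2 <- function(x, floor, limit)
#   (limit - floor) / (max(x) - min(x)) * (x - max(x)) + limit
# linearlyRescale2(c(1, 5, 10), 0, 100)  # example: 0, 44.4, 100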
notIntCols
#install.packages("DescTools")
library(DescTools)
x <- as.character(dataSet[,notIntCols])
paste(Ndec(x),collapse = ",")
#numDec <- as.data.frame(as.integer(Ndec(x)))
#df <- as.data.frame(numDec);colnames(df)[1] <- "Column_A";
#df
#B<-10**(df$Column_A)
#df<-cbind(df,B)
#df
# #create dataframe
# num_row <- nrow(df);num_col <- ncol(dataSet[,notIntCols]);
# m <- as.data.frame(matrix(df[,1], ncol = 1, nrow = num_row))
# m <- cbind(m, 10^m[,1])
# m <- rbind(m, blah)
# m <- m[2,]
# m
# ncol(m);ncol(dataSet[,notIntCols]);
# as.data.frame(dataSet[3,notIntCols])
# str(dataSet[3,notIntCols])
# str(as.data.frame(as.list(m[,2])))
# str(m[,2])
# str(df[,2])
# str(mults)
# #multiply numeric columns by multiple
# str(df[,2])
# df.aree <- as.data.frame(t(df[,2]))
# df.aree
dataSet[,notIntCols]<- mapply("*",as.data.frame(dataSet[,notIntCols]),100000000)
#dataSet[,notIntCols]<- sapply(dataSet[,notIntCols]*df[,2],as.numeric)
x2 <- as.character(dataSet[,notIntCols])
paste(Ndec(x2),collapse = ",")
#min(dataSet[,notIntCols]);min(dataSet[,intCols]);
#max(dataSet[,notIntCols]);max(dataSet[,intCols]);
#"7,7,7,7,4,5,5,5,5,5,8,11,7,8,8,2,11,11,11,2"
#"0,0,0,0,0,0,0,0,0,0,0,2 ,0,0,0,8,3 ,3 ,2 ,8"
dataSet[,notIntCols] <- sapply(dataSet[,notIntCols],as.integer)
table(sapply(dataSet[,2], class))
notIntCols <- !sapply(dataSet, is.integer)
notIntCols <- as.character(names(dataSet[,notIntCols]))
notIntCols <- notIntCols[which(notIntCols!="swingRecordType")]
dataSet[,notIntCols] <- sapply(dataSet[,notIntCols],as.integer)
table(sapply(dataSet, class))
#assign column names
#names(dataSet) <- c("SepalLength", "SepalWidth", "PetalLength", "PetalWidth", "Species")
#backup dataset after transformations are complete
dataSet.bkup <- dataSet
#features ordinality
x_col_start_pos <- 5
x_col_end_pos <- 3249
#dataSet <- dataSet[,c(1,x_col_start_pos:x_col_end_pos)]
names(dataSet)
#col names
dsColNames <- as.character(names(dataSet))
dsColNames
as.character(names(dataSet[,1:2]))
#num of columns and rows
dsColCount <- as.integer(ncol(dataSet))
dsRowCount <- as.integer(nrow(dataSet))
dsColCount
dsRowCount
#class ordinality and name
classColumn <- 2
classColumnName <- dsColNames[classColumn]
y_col_pos <- classColumn
unique(dataSet[,classColumn])
unique(dataSet[,1])
unique(dataSet[,2])
dataSet <- dataSet[,c(classColumn,x_col_start_pos:x_col_end_pos)]
nrow(dataSet)
ncol(dataSet)
classColumn <- 1
classColumnName <- dsColNames[classColumn]
y_col_pos <- classColumn
unique(dataSet[,classColumn])
unique(dataSet[,1])
unique(dataSet.bkup[,2:3])
#features ordinality
x_col_start_pos <- 2
x_col_end_pos <- 3246
y_col_pos
x_col_start_pos
x_col_end_pos
firstCol <- ifelse(x_col_start_pos < classColumn, x_col_start_pos, classColumn)
firstCol
lastCol <- ifelse(x_col_end_pos > classColumn, x_col_end_pos, classColumn)
lastCol
#distinct list and count of classes from column assumed to contain class values
dsClassValues <- as.character(unique(dataSet[,classColumn])) #levels(dataSet[,classColumn])
dsClassCount <- as.integer(length(dsClassValues)) #sqldf("select distinct(x) from df1")
dsClasses <- dataSet[,classColumn]
dsClassCount
dsClassValues
dataSet[1,1:10]
#levels(as.factor(dataSet[,123]))
#class distribution in terms of row count/frequency and percentage/proportions
dsClassFreq <- table(dsClasses)
dsClassDistribution <- round(prop.table(table(dsClasses)) * 100, digits = 2)
dsClassFreq
dsClassDistribution
# #Randomly sample a percentage of rows to balance class distribution
#mydf <- mydf[ sample( which(mydf$swingRecordType=="SwingNULL"), round(0.1235*length(which(mydf$swingRecordType=="SwingNULL")))), ]
#get all "SwingHi" rows
dataSet.SwingHi <- dataSet[ sample( which(dataSet$ClassChar=="SwingHi"), round(43.82*length(which(dataSet$ClassChar=="SwingHi"))),replace = TRUE), ]
table(dataSet.SwingHi[,classColumn])
round(prop.table(table(dataSet.SwingHi[,classColumn])) * 100, digits = 2)
#get all "SwingLo" rows
dataSet.SwingLo <- dataSet[ sample( which(dataSet$ClassChar=="SwingLo"), round(41.92*length(which(dataSet$ClassChar=="SwingLo"))),replace = TRUE), ]
table(dataSet.SwingLo[,classColumn])
round(prop.table(table(dataSet.SwingLo[,classColumn])) * 100, digits = 2)
#get all "SwingNULL" rows and append all "SwingHi" and "SwingLo" rows
dataSet2 <- rbind(dataSet[ which(dataSet$ClassChar=="SwingNULL"), ],dataSet.SwingHi,dataSet.SwingLo)
table(dataSet2[,classColumn])
round(prop.table(table(dataSet2[,classColumn])) * 100, digits = 2)
dataSet <- dataSet2
#Free RAM. Remove objects / data from workspace
rm(dataSet.SwingHi, dataSet.SwingLo, dataSet2)
gc()
write.csv(dataSet, file = "data/vw_barFeatures_train_r_20161123_balanced.csv",row.names=FALSE,append = FALSE)
#num of columns and rows
dsColCount <- as.integer(ncol(dataSet))
dsRowCount <- as.integer(nrow(dataSet))
dsColCount
dsRowCount
# % of [dataset] reserved for training/test and validation
set.seed(123)
sampleAmt <- 0.5
mainSplit <- sample(2, dsRowCount, replace=TRUE, prob=c(sampleAmt, 1-sampleAmt))
#split [dataSet] into two sets
dsTrainingTest <- dataSet[mainSplit==1, ]#firstCol:lastCol]
dsValidation <- dataSet[mainSplit==2, ]#firstCol:lastCol]
nrow(dataSet);nrow(dsTrainingTest);nrow(dsValidation);
nrow(dataSet) == nrow(dsTrainingTest)+nrow(dsValidation)
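# Note (sketch): sampling group labels with prob = c(0.5, 0.5) gives an approximate 50/50
# split. An exact split could be done by permuting row indices instead, e.g.:
# idx <- sample(dsRowCount)
# dsTrainingTest <- dataSet[idx[1:floor(dsRowCount / 2)], ]
# dsValidation   <- dataSet[idx[(floor(dsRowCount / 2) + 1):dsRowCount], ]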
print(round(prop.table(table(dataSet[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dsTrainingTest[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dsValidation[,classColumn]))* 100, digits = 1))
write.csv(dsTrainingTest, file = "data/vw_barFeatures_train_r_20161123_balanced_TrainingTest.csv",row.names=FALSE,append = FALSE)
write.csv(dsValidation, file = "data/vw_barFeatures_train_r_20161123_balanced_Validation.csv",row.names=FALSE,append = FALSE)
#SwingHi SwingLo SwingNULL
#33.3 33.4 33.3
#33.4 33.2 33.4
# % of [dsTrainingTest] reserved for training
sampleAmt <- 0.5
secondarySplit <- sample(2, nrow(dsTrainingTest), replace=TRUE, prob=c(sampleAmt, 1-sampleAmt))
#split [dsTrainingTest] into two sets
dsTraining <- dsTrainingTest[secondarySplit==1, ]#firstCol:lastCol]
dsTest <- dsTrainingTest[secondarySplit==2, ]#firstCol:lastCol]
nrow(dsTraining);nrow(dsTest);
nrow(dataSet) == nrow(dsTrainingTest)+nrow(dsValidation)
nrow(dsTrainingTest) == nrow(dsTraining)+nrow(dsTest)
# % of [dsValidation] reserved for Validation
sampleAmt <- 0.5
secondarySplit <- sample(2, nrow(dsValidation), replace=TRUE, prob=c(sampleAmt, 1-sampleAmt))
# #split [dsValidation] into two sets
# dsValp1 <- dsValidation[secondarySplit==1, firstCol:lastCol]
# dsValp2 <- dsValidation[secondarySplit==2, firstCol:lastCol]
# nrow(dsValp1);nrow(dsValp2);
# nrow(dataSet) == nrow(dsTrainingTest)+nrow(dsValp1)+nrow(dsValp2)
# nrow(dsValidation) == nrow(dsValp1)+nrow(dsValp2)
# # % of [dsValp1] reserved for Validation
# sampleAmt <- 0.5075
# secondarySplit <- sample(2, nrow(dsValp1), replace=TRUE, prob=c(sampleAmt, 1-sampleAmt))
#
# #split [dsValp1] into two sets
# dsValp1a <- dsValp1[secondarySplit==1, firstCol:lastCol]
# dsValp1b <- dsValp1[secondarySplit==2, firstCol:lastCol]
# nrow(dsValp1a);nrow(dsValp1b);
#
# nrow(dataSet) == nrow(dsTrainingTest)+nrow(dsValp1a)+nrow(dsValp1b)+nrow(dsValp2)
# nrow(dsValp1) == nrow(dsValp1a)+nrow(dsValp1b)
#
# # % of [dsValp2] reserved for Validation
# sampleAmt <- 0.5075
# secondarySplit <- sample(2, nrow(dsValp2), replace=TRUE, prob=c(sampleAmt, 1-sampleAmt))
#
# #split [dsValp2] into two sets
# dsValp2a <- dsValp2[secondarySplit==1, firstCol:lastCol]
# dsValp2b <- dsValp2[secondarySplit==2, firstCol:lastCol]
# nrow(dsValp2a);nrow(dsValp2b);
# nrow(dataSet) == nrow(dsTrainingTest)+nrow(dsValp1a)+nrow(dsValp1b)+nrow(dsValp2a)+nrow(dsValp2b)
# nrow(dsValp2) == nrow(dsValp2a)+nrow(dsValp2b)
nrow(dataSet) == nrow(dsTrainingTest)+nrow(dsValidation)
nrow(dataSet) == nrow(dsTraining)+nrow(dsTest)+nrow(dsValidation)
#Free RAM. Remove objects / data from workspace
#rm(dsTest,dsTraining,dsValidation,x)
rm(dsTrainingTest,dataSet)
gc()
# #install.packages("data.table")
# library(data.table)
#
# dsSummary <- as.data.frame(data.table(nrow(dataSet),nrow(dsTrainingTest),nrow(dsTraining),nrow(dsTest),nrow(dsValidation),nrow(dsValp1a),nrow(dsValp1b),nrow(dsValp2a),nrow(dsValp2b)))
# names(dsSummary) <- c("completeDataset","dsTrainingTest", "TrainingDataset", "TestDataset", "ValidationDataset","dsValp1a","dsValp1b","dsValp2a","dsValp2b")
# ncol(dsSummary)
# dsSummary
#0. complete dataset
nrow(dataSet.bkup);
nrow(dataSet);nrow(dsTrainingTest);nrow(dsValidation);
#1. Training dataset #2. Test dataset #4. Validation datasets
nrow(dsTraining);nrow(dsTest);nrow(dsValidation);
# nrow(dsValp1a);nrow(dsValp1b);nrow(dsValp2a);nrow(dsValp2b);
#is.finite.data.frame(dataSet)
#print(round(prop.table(table(dataSet.bkup[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dataSet[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dsTrainingTest[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dsTraining[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dsTest[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dsValidation[,classColumn]))* 100, digits = 1))
#install.packages("DMwR")
library(DMwR)
# maxTrainIteration <- 500
# trainIteration <- 0; loopNum <- 0; bestTrainNum <- NULL;
# while (trainIteration <= maxTrainIteration) {
# #for (num0 in seq(1,10,1)) {for (num1 in seq(1,10,1)){
# orig.distPercent <- as.data.frame(round(prop.table(table(dsTraining.bal[,classColumn]))* 100, digits = 1))
# orig.distRecordCount <- as.data.frame(table(dsTraining.bal[,classColumn]))
# #set random values for "perc.over" and "perc.under"
# randVar0 <- roundUp(sample(4:19,1,replace=TRUE),1)*50;randVar1 <- roundUp(sample(2:6,1,replace=TRUE),1)*50;
# randVar2 <- roundUp(sample(4:19,1,replace=TRUE),1)*50;randVar3 <- roundUp(sample(2:6,1,replace=TRUE),1)*50;
#
# if (loopNum == 0) {
# bestParams <- data.frame(cbind(recordType=1,SwingHi.p=orig.distPercent[1,2],SwingLo.p=orig.distPercent[2,2]
# ,SwingNULL.p=orig.distPercent[3,2],SwingHi.c=orig.distRecordCount[1,2]
# ,SwingLo.c=orig.distRecordCount[2,2],SwingNULL.c=orig.distRecordCount[3,2]
# ,perc.over=abs(randVar0-randVar0),perc.under=abs(randVar1-randVar1),perc.over2=abs(randVar2-randVar2),perc.under2=abs(randVar3-randVar3)
# ,ratioClassAvsBvsC=(orig.distPercent[1,2]-orig.distPercent[2,2])+orig.distPercent[3,2]))
# print(bestParams)
# }
# #randVar0 <- roundUp(num0,1)*100;randVar1 <- roundUp(num1,1)*100;
# print(paste("Begin test:",loopNum,"params --> perc.over",randVar0,"/ perc.under",randVar1,"perc.over2",randVar2,"/ perc.under2",randVar3));
#
# ## use SMOTE to balance classes
# dsTraining.bal <- SMOTE(swingRecordType ~ ., dsTraining, perc.over=randVar0,perc.under=randVar1)
# dsTraining.bal2 <- SMOTE(swingRecordType ~ ., dsTraining.bal, perc.over=randVar2,perc.under=randVar3)
#
# # Calculate error
# distPercent <- as.data.frame(round(prop.table(table(dsTraining.bal2[,classColumn]))* 100, digits = 1))
# distRecordCount <- as.data.frame(table(dsTraining.bal2[,classColumn]))
# error1 <- distPercent[1,2]; error2 <- distPercent[2,2]; error3 <- distPercent[3,2];ratioClassAvsBvsC <- (error1-error2)+error3
#
# #output ideal params
# if ( (ratioClassAvsBvsC >= 19 & ratioClassAvsBvsC <= 41) ) {
# bestTrainNum <- loopNum
# new_row <- c(0,distPercent[1,2],distPercent[2,2],distPercent[3,2],distRecordCount[1,2],distRecordCount[2,2]
# ,distRecordCount[3,2],randVar0,randVar1,randVar2,randVar3,ratioClassAvsBvsC)
# bestParams <- rbind(bestParams, new_row)
# bestParams <- bestParams[order(bestParams$recordType,bestParams$ratioClassAvsBvsC,bestParams$SwingHi.p,bestParams$SwingLo.p,bestParams$SwingNULL.p, decreasing=TRUE),]
# print(paste("--> Class Distribution",error1,error2,error3,"bestTrainNum =",bestTrainNum))
# print(bestParams)
# }
# else {
# print(paste("--> Class Distribution",error1,error2,error3
# ,"bestTrainNum =",bestTrainNum))
# }#}}
# trainIteration <- trainIteration+1;loopNum <- loopNum + 1;
# gc()
# }
#
# bestParams
# use SMOTE to balance classes
dsTraining.bal <- SMOTE(swingRecordType ~ ., dsTraining, perc.over=900,perc.under=100)#perc.over=950,perc.under=100)
dsTraining.bal2 <- SMOTE(swingRecordType ~ ., dsTraining.bal, perc.over=950,perc.under=200)#perc.over=700,perc.under=200)
round(prop.table(table(dsTraining[,classColumn]))* 100, digits = 1);round(prop.table(table(dsTraining.bal[,classColumn]))* 100, digits = 1);round(prop.table(table(dsTraining.bal2[,classColumn]))* 100, digits = 1);
table(dsTraining[,classColumn]); table(dsTraining.bal[,classColumn]);table(dsTraining.bal2[,classColumn]);
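# Rough arithmetic behind the DMwR::SMOTE parameters above (helper name is ours; a sketch of
# the documented behaviour, worth double-checking against ?SMOTE): perc.over = 900 creates 9
# synthetic cases per original minority case, and perc.under = 100 then keeps about 1 majority
# case for each synthetic minority case generated.
smoteSizes <- function(n_minority, perc.over, perc.under) {
  synthetic <- n_minority * perc.over / 100      # synthetic minority cases created
  majority_kept <- synthetic * perc.under / 100  # majority cases sampled back in
  c(synthetic_minority = synthetic, majority_kept = majority_kept)
}
# smoteSizes(1000, 900, 100)  # example: ~9000 synthetic minority rows, ~9000 majority rows kept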
dsTest.bal <- SMOTE(swingRecordType ~ ., dsTest, perc.over=900,perc.under=100)
dsTest.bal2 <- SMOTE(swingRecordType ~ ., dsTest.bal, perc.over=950,perc.under=200)
round(prop.table(table(dsTest[,classColumn]))* 100, digits = 1);round(prop.table(table(dsTest.bal[,classColumn]))* 100, digits = 1);round(prop.table(table(dsTest.bal2[,classColumn]))* 100, digits = 1)
table(dsTest[,classColumn]);table(dsTest.bal[,classColumn]);table(dsTest.bal2[,classColumn])
dsValidation.bal <- SMOTE(swingRecordType ~ ., dsValidation, perc.over=900,perc.under=100)
dsValidation.bal2 <- SMOTE(swingRecordType ~ ., dsValidation.bal, perc.over=950,perc.under=200)
round(prop.table(table(dsValidation[,classColumn]))* 100, digits = 1);round(prop.table(table(dsValidation.bal[,classColumn]))* 100, digits = 1);round(prop.table(table(dsValidation.bal2[,classColumn]))* 100, digits = 1)
table(dsValidation[,classColumn]);table(dsValidation.bal[,classColumn]);table(dsValidation.bal2[,classColumn])
dsValp1a.bal <- SMOTE(swingRecordType ~ ., dsValp1a, perc.over=900,perc.under=100)
dsValp1a.bal2 <- SMOTE(swingRecordType ~ ., dsValp1a.bal, perc.over=950,perc.under=200)
round(prop.table(table(dsValp1a[,classColumn]))* 100, digits = 1);round(prop.table(table(dsValp1a.bal[,classColumn]))* 100, digits = 1);round(prop.table(table(dsValp1a.bal2[,classColumn]))* 100, digits = 1)
table(dsValp1a[,classColumn]);table(dsValp1a.bal[,classColumn]);table(dsValp1a.bal2[,classColumn])
dsValp1b.bal <- SMOTE(swingRecordType ~ ., dsValp1b, perc.over=900,perc.under=100)
dsValp1b.bal2 <- SMOTE(swingRecordType ~ ., dsValp1b.bal, perc.over=950,perc.under=200)
round(prop.table(table(dsValp1b[,classColumn]))* 100, digits = 1);round(prop.table(table(dsValp1b.bal[,classColumn]))* 100, digits = 1);round(prop.table(table(dsValp1b.bal2[,classColumn]))* 100, digits = 1)
table(dsValp1b[,classColumn]);table(dsValp1b.bal[,classColumn]);table(dsValp1b.bal2[,classColumn])
dsValp2a.bal <- SMOTE(swingRecordType ~ ., dsValp2a, perc.over=900,perc.under=100)
dsValp2a.bal2 <- SMOTE(swingRecordType ~ ., dsValp2a.bal, perc.over=950,perc.under=200)
round(prop.table(table(dsValp2a[,classColumn]))* 100, digits = 1);round(prop.table(table(dsValp2a.bal[,classColumn]))* 100, digits = 1);round(prop.table(table(dsValp2a.bal2[,classColumn]))* 100, digits = 1)
table(dsValp2a[,classColumn]);table(dsValp2a.bal[,classColumn]);table(dsValp2a.bal2[,classColumn])
dsValp2b.bal <- SMOTE(swingRecordType ~ ., dsValp2b, perc.over=900,perc.under=100)
dsValp2b.bal2 <- SMOTE(swingRecordType ~ ., dsValp2b.bal, perc.over=950,perc.under=200)
round(prop.table(table(dsValp2b[,classColumn]))* 100, digits = 1);round(prop.table(table(dsValp2b.bal[,classColumn]))* 100, digits = 1);round(prop.table(table(dsValp2b.bal2[,classColumn]))* 100, digits = 1)
table(dsValp2b[,classColumn]);table(dsValp2b.bal[,classColumn]);table(dsValp2b.bal2[,classColumn])
# print(round(prop.table(table(dsTraining.bal2[,classColumn]))* 100, digits = 1))
# print(round(prop.table(table(dsTest.bal2[,classColumn]))* 100, digits = 1))
# print(round(prop.table(table(dsValp1a.bal2[,classColumn]))* 100, digits = 1))
# print(round(prop.table(table(dsValp1b.bal2[,classColumn]))* 100, digits = 1))
# print(round(prop.table(table(dsValp2a.bal2[,classColumn]))* 100, digits = 1))
# print(round(prop.table(table(dsValp2b.bal2[,classColumn]))* 100, digits = 1))
# dsTrainingTest <- dsTrainingTest[,c(1,x_col_start_pos:x_col_end_pos)]
# dsTest <- dsTest[,c(1,x_col_start_pos:x_col_end_pos)]
# dsValidation <- dsValidation[,c(1,x_col_start_pos:x_col_end_pos)]
dsTraining.bal <- dsTrainingTest
dsTest.bal <- dsTest
dsValidation.bal <- dsValidation
#dsTraining.bal <- dsTraining
#dsTest.bal <- dsTest
# dsValp1a.bal <- dsValp1a#.bal2
# dsValp1b.bal <- dsValp1b#.bal2
# dsValp2a.bal <- dsValp2a#.bal2
# dsValp2b.bal <- dsValp2b#.bal2
# blah <- dsTraining.bal; meta.na <- as.data.frame(is.finite.data.frame(blah));
# ncol(blah);nrow(blah);table(sapply(blah, class));table(meta.na[,]);table(blah[,classColumn]);round(prop.table(table(blah[,classColumn])) * 100, digits = 2);
#
# blah <- dsTest.bal; meta.na <- as.data.frame(is.finite.data.frame(blah));
# ncol(blah);nrow(blah);table(sapply(blah, class));table(meta.na[,]);table(blah[,classColumn]);round(prop.table(table(blah[,classColumn])) * 100, digits = 2);
#
# blah <- dataSet.bkup; meta.na <- as.data.frame(is.finite.data.frame(blah));
# ncol(blah);nrow(blah);table(sapply(blah, class));table(meta.na[,]);table(blah[,classColumn]);round(prop.table(table(blah[,classColumn])) * 100, digits = 2);
#print(round(prop.table(table(dataSet.bal[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dsTraining.bal[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dsTest.bal[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dsValidation.bal[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dataSet.bkup[,classColumn]))* 100, digits = 1))
#print(round(prop.table(table(dsValidation.bal[,classColumn]))* 100, digits = 1))
round(prop.table(table(dsTraining.bal[,1])) * 100, digits = 2);nrow(dsTraining.bal);
round(prop.table(table(dsTest.bal[,1])) * 100, digits = 2);nrow(dsTest.bal);
round(prop.table(table(dsValp1a.bal[,1])) * 100, digits = 2);nrow(dsValp1a.bal);
round(prop.table(table(dsValp1b.bal[,1])) * 100, digits = 2);nrow(dsValp1b.bal);
round(prop.table(table(dsValp2a.bal[,1])) * 100, digits = 2);nrow(dsValp2a.bal);
round(prop.table(table(dsValp2b.bal[,1])) * 100, digits = 2);nrow(dsValp2b.bal);
table(sapply(dsTraining.bal, class))
table(sapply(dsTest.bal, class))
# dsTraining.bal[,2:822] <- sapply(dsTraining.bal[,2:822],as.integer)
# table(sapply(dsTraining.bal, class))
# dsTest.bal[,2:822] <- sapply(dsTest.bal[,2:822],as.integer)
# table(sapply(dsTest.bal, class))
#
# dsTraining.bal[,"MAMAP25"] <- sapply(dsTraining.bal[,"MAMAP25"],as.integer)
# table(sapply(dsTraining.bal, class))
# dsTest.bal[,"MAMAP25"] <- sapply(dsTest.bal[,"MAMAP25"],as.integer)
# table(sapply(dsTest.bal, class))
#
# notIntCols <- !sapply(dsTest.bal, is.integer)
# notIntCols <- as.character(names(dsTest.bal[,notIntCols]))
# notIntCols <- notIntCols[which(notIntCols!="swingRecordType")]
# notIntCols
#dsTest.bal[,notIntCols]<- mapply("*",as.data.frame(dsTest.bal[,notIntCols]),100000000)#df.aree)
# x2 <- as.character(dsTest.bal[,"MAMAP25"])
# max(paste(Ndec(x2),collapse = ","))
# dsSummary.bal <- data.table(nrow(dsTraining.bal),nrow(dsTest.bal),nrow(dsValp1a.bal),nrow(dsValp1b.bal),nrow(dsValp2a.bal),nrow(dsValp2b.bal))
# names(dsSummary.bal) <- c("TrainingDataset.bal", "TestDataset.bal", "dsValp1a.bal","dsValp1b.bal","dsValp2a.bal","dsValp2b.bal")
# ncol(dsSummary.bal)
# dsSummary.bal
# dsTraining.bal <- as.integer(dsTraining.bal[,])
# write.csv(dataSet, file = "export_completeDataset.csv",row.names=TRUE)
# write.csv(dsTraining, file = "export_TrainingDataset.csv",row.names=TRUE)
# write.csv(dsTest, file = "export_TestDataset.csv",row.names=TRUE)
# write.csv(dsValidation, file = "export_ValidationDataset.csv",row.names=TRUE)
#
# var1 <- dataSet[,"stofP1"]
# var2 <- dataSet[,"cci"]
# var3 <- dataSet[,"HMAP1"]
# var4 <- dataSet[,"bbdM2"]
# var5 <- dataSet[,"stdebP1"]
#
# #"stofP1" "cci" "HMAP1" "bbdM2" "stdebP1"
#
# #Initial Overview Of The Data Set
# #install.packages('ggvis')
# library(ggvis)
#
# #create scatter plot - is there a visible correlation between var1 and var2 or var3 and var4 for all classes?
# dataSet %>% ggvis(~var1, ~var1, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var1, ~var2, fill = ~dsClasses) %>% layer_points(opacity:=1/1.25)
# dataSet %>% ggvis(~var1, ~var3, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var1, ~var4, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var1, ~var5, fill = ~dsClasses) %>% layer_points()
#
# dataSet %>% ggvis(~var2, ~var1, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var2, ~var2, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var2, ~var3, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var2, ~var4, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var2, ~var5, fill = ~dsClasses) %>% layer_points()
#
# dataSet %>% ggvis(~var3, ~var1, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var3, ~var2, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var3, ~var3, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var3, ~var4, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var3, ~var5, fill = ~dsClasses) %>% layer_points()
#
# dataSet %>% ggvis(~var4, ~var1, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var4, ~var2, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var4, ~var3, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var4, ~var4, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var4, ~var5, fill = ~dsClasses) %>% layer_points()
#
# dataSet %>% ggvis(~var5, ~var1, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var5, ~var2, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var5, ~var3, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var5, ~var4, fill = ~dsClasses) %>% layer_points()
# dataSet %>% ggvis(~var5, ~var5, fill = ~dsClasses) %>% layer_points()
#trainSetEnum <- dsTraining.bal[,firstCol:lastCol]
#trainSetEnum[,classColumn] <- as.character(trainSetEnum[,classColumn])
#trainSetEnum[,classColumn][trainSetEnum[,classColumn]=="SwingHi"] <- 2
#trainSetEnum[,classColumn][trainSetEnum[,classColumn]=="SwingLo"] <- 1
#trainSetEnum[,classColumn][trainSetEnum[,classColumn]=="SwingNULL"] <- 2
#trainSetEnum[,classColumn][trainSetEnum[,classColumn]=="NotSwingLo"] <- 2
#trainSetEnum[,classColumn][trainSetEnum[,classColumn]=="NotSwingHi"] <- 2
#trainSetEnum[,classColumn] <- as.integer(trainSetEnum[,classColumn])
#x <- as.matrix(trainSetEnum[,x_col_start_pos:x_col_end_pos])
xTrain.bal <- as.data.frame(dsTraining.bal[,x_col_start_pos:x_col_end_pos])
yTrain.bal <- as.factor(dsTraining.bal[,classColumn])
# xTrain.bal <- as.data.frame(dsTraining.bal2[,x_col_start_pos:x_col_end_pos])
# yTrain.bal <- as.factor(dsTraining.bal2[,classColumn])
#testSetEnum <- dsTest.bal[,firstCol:lastCol]
#testSetEnum[,classColumn] <- as.character(testSetEnum[,classColumn])
#testSetEnum[,classColumn][testSetEnum[,classColumn]=="SwingHi"] <- 2
#testSetEnum[,classColumn][testSetEnum[,classColumn]=="SwingLo"] <- 1
#testSetEnum[,classColumn][testSetEnum[,classColumn]=="SwingNULL"] <- 2
#testSetEnum[,classColumn][testSetEnum[,classColumn]=="NotSwingLo"] <- 2
#testSetEnum[,classColumn][testSetEnum[,classColumn]=="NotSwingHi"] <- 2
#testSetEnum[,classColumn] <- as.integer(testSetEnum[,classColumn])
#xTest <- as.matrix(testSetEnum[,x_col_start_pos:x_col_end_pos])
xTest.bal <- as.data.frame(dsTest.bal[,x_col_start_pos:x_col_end_pos])
yTest.bal <- as.factor(dsTest.bal[,classColumn])
# xTest.bal <- as.data.frame(dsTest.bal2[,x_col_start_pos:x_col_end_pos])
# yTest.bal <- as.factor(dsTest.bal2[,classColumn])
# xValp1a.bal <- as.data.frame(dsValp1a.bal[,x_col_start_pos:x_col_end_pos])
# yValp1a.bal <- as.factor(dsValp1a.bal[,classColumn])
#
# xValp1b.bal <- as.data.frame(dsValp1b.bal[,x_col_start_pos:x_col_end_pos])
# yValp1b.bal <- as.factor(dsValp1b.bal[,classColumn])
#
# xValp2a.bal <- as.data.frame(dsValp2a.bal[,x_col_start_pos:x_col_end_pos])
# yValp2a.bal <- as.factor(dsValp2a.bal[,classColumn])
#
# xValp2b.bal <- as.data.frame(dsValp2b.bal[,x_col_start_pos:x_col_end_pos])
# yValp2b.bal <- as.factor(dsValp2b.bal[,classColumn])
#nrow(dsValidation);nrow(dsValp1a);nrow(dsValp1b);nrow(dsValp2a);nrow(dsValp2b);
xVal.bal <- as.data.frame(dsValidation.bal[,x_col_start_pos:x_col_end_pos])
yVal.bal <- as.factor(dsValidation.bal[,classColumn])#dsValidation[,classColumn])
# xVal.bal <- as.data.frame(dsValidation.bal2[,x_col_start_pos:x_col_end_pos])
# yVal.bal <- as.factor(dsValidation.bal2[,classColumn])#dsValidation[,classColumn])
xBkup.bal <- as.data.frame(dataSet.bkup[,11:3870])
yBkup.bal <- as.factor(dataSet.bkup[,classColumn])
# xValp1a <- as.data.frame(dsValp1a[,x_col_start_pos:x_col_end_pos])
# yValp1a <- as.factor(dsValp1a[,classColumn])
#
# xValp1b <- as.data.frame(dsValp1b[,x_col_start_pos:x_col_end_pos])
# yValp1b <- as.factor(dsValp1b[,classColumn])
#
# xValp2a <- as.data.frame(dsValp2a[,x_col_start_pos:x_col_end_pos])
# yValp2a <- as.factor(dsValp2a[,classColumn])
#
# xValp2b <- as.data.frame(dsValp2b[,x_col_start_pos:x_col_end_pos])
# yValp2b <- as.factor(dsValp2b[,classColumn])
#
#
# blah <- dsTraining.bal; meta.na <- as.data.frame(is.finite.data.frame(blah));
# ncol(blah);nrow(blah);table(sapply(blah, class));table(meta.na[,]);table(blah[,classColumn]);round(prop.table(table(blah[,classColumn])) * 100, digits = 2);
#https://dl.dropboxusercontent.com/u/45301435/inTreesDemo.R
#install.packages("inTrees");
#install.packages("randomForest");
#install.packages("RRF");
#install.packages("gbm");
#install.packages("foreach");
#install.packages("ranger");
#library(foreach);
detach("package:ranger", unload=TRUE)
library(ranger);
library(inTrees);
library(randomForest);
library(RRF);
library(gbm);
set.seed(1)
#rm(list=ls(all=TRUE));graphics.off()
lists = list()
# measure user-defined conditions
#myRule <- "X[,3] > 5 & X[,4] > 1"
#measureRule(myRule,X,target) # without providing the outcome of the condition
#measureRule(myRule,X,target,"versicolor") # providing the outcome of the condition
maxTrainIteration <- 3
trainIteration <- 0; loopNum <- 0; bestTrainNum <- NULL; smallestError <- 100;
while (trainIteration <= maxTrainIteration) {
#for (num0 in seq(1,10,1)) {for (num1 in seq(1,10,1)){
orig.distPercent <- as.data.frame(round(prop.table(table(dsTraining.bal[,classColumn]))* 100, digits = 1))
orig.distRecordCount <- as.data.frame(table(dsTraining.bal[,classColumn]))
ratioClassAvsBvsC <- (orig.distPercent[1,2]-orig.distPercent[2,2])+orig.distPercent[3,2]
#set random values for model params
randVar0 <- roundUp(sample(10:85,1,replace=TRUE),5);#ntree
randVar1 <- roundUp(sample(1000:(nrow(xTrain.bal)*.6),1,replace=TRUE),100);#sampsize
randVar2 <- roundUp(sample(10:(nrow(xTrain.bal)*.05),1,replace=TRUE),5);#nodesize
randVar3 <- roundUp(sample((sqrt(((2*randVar1 - 16*randVar2)/randVar2)^2)):(sqrt(((2*randVar1 - 200*randVar2)/randVar2)^2)),1,replace=TRUE),5);#maxnodes
randVar4 <- roundUp(sample(10:(ncol(xTrain.bal)*.2),1,replace=TRUE),5);#mtry
rv5 <- roundUp(sample(1:100,1,replace=TRUE),5);
rv6 <- roundUp(sample(1:100,1,replace=TRUE),5);
if (loopNum == 0) {
bestParams <- data.frame(cbind(recordType=1
,rv0=0,rv1=0,rv2=0,rv3=0
,rv4=0,rv5=0,rv6=0
,type=0#rf$type
,localImportance=0#rf$localImportance
,proximity=0#rf$proximity
,mtry=0#rf$mtry
,nrnodes=0#rf$forest$nrnodes
,ntree=0L#rf$forest$ntree
,Hi_Hi=0,Hi_Lo=0,Hi_NULL=0
,Lo_Lo=0,Lo_Hi=0,Lo_NULL=0
,NULL_NULL=0,NULL_Hi=0,NULL_Lo=0
,Hi.Err.test=0.1,Lo.Err.test=0.1,NULL.Err.test=0.1
,Hi.Err.train=0.1,Lo.Err.train=0.1,NULL.Err.train=0.1
,Hi.Err.diff=0.1,Lo.Err.diff=0.1,NULL.Err.diff=0.1
,smallest.Error=0.1,TrainNum=0
))
print(bestParams)
}
loopNum <- loopNum + 1
print(paste("Begin test:",loopNum,"sampsize=",randVar1,"nodesize=",randVar2,"maxnodes=",randVar3));
# train RandomForest
rf <- ranger(data = dsTraining,dependent.variable.name = "ClassChar", num.trees=50#,mtry = 15,min.node.size = 25,num.threads = 4
,replace=TRUE,importance = "impurity",classification = TRUE,write.forest = TRUE
,verbose = TRUE,save.memory = FALSE)
classwt <- c(.95,.95,.025)
rf <- randomForest(dsTraining[,x_col_start_pos:x_col_end_pos],dsTraining[,classColumn],nodesize = 15,ntree=250#,sampsize=randVar1,,maxnodes= randVar3,mtry = randVar4
,replace=TRUE,importance = TRUE,do.trace=TRUE,classwt=classwt)#,classwt=c("SwingHi"=rv5,"SwingLo"=rv5,"SwingNULL"=rv6)
# )
#,set.seed(1)
#,samplesize = c("SwingNULL" = 100, "SwingHi" = 50, "SwingLo" = 50)
#,strata = yTrain.bal
# rf <- foreach(ntree=rep(randVar0, 4), .combine=combine, .packages='randomForest') %dopar% +
# randomForest(xTrain.bal,yTrain.bal,ntree=ntree,sampsize=randVar1#, maxnodes= randVar3
# ,nodesize = randVar2,mtry = randVar4,replace=TRUE,importance = TRUE,set.seed(1))
#rf.all <- combine(rf, rf2, rf3, rf4)
#print(rf.all)
#rf <- rf.all
# predict
#predictions <- predict(rf, xTest.bal); predictions.t <- predict(rf, xTrain.bal);
predictions <- predict(rf, dsTest[,x_col_start_pos:x_col_end_pos],type="response");
predictions.t <- predict(rf, dsTraining[,x_col_start_pos:x_col_end_pos],type="response");
predictions.v <- predict(rf, dsValidation[,x_col_start_pos:x_col_end_pos],type="response");
#create confusion matrix
actual <- as.matrix(dsTest[,classColumn]); #test
actual.t <- as.matrix(dsTraining[,classColumn]); #train
actual.v <- as.matrix(dsValidation[,classColumn]); #validate
predicted <- as.matrix(predictions);
predicted.t <- as.matrix(predictions.t);
predicted.v <- as.matrix(predictions.v);
#predicted <- as.matrix(predictions$predictions); predicted.t <- as.matrix(predictions.t$predictions);
conf_matrix <- table(actual,predicted);
conf_matrix.t <- as.data.frame(rf$confusion);
conf_matrix.t <- conf_matrix.t[,1:3]
conf_matrix.v <- table(actual.v,predicted.v);
conf_matrix.t #train
conf_matrix #test
conf_matrix.v #validate
# The next call is pasted from an external tutorial; rf.form, cross.sell.dev and
# cross.sell.rf are not defined in this script, so it is kept commented out.
# rf <- randomForest(rf.form,
#                    cross.sell.dev,
#                    ntree=500,
#                    importance=T)
# plot(cross.sell.rf)
# Variable Importance Plot
varImpPlot(rf,sort = T,main="Variable Importance",n.var=50)
# Variable Importance Table
var.imp <- data.frame(importance(rf,type=2))
# make row names as columns
var.imp$Variables <- row.names(var.imp)
VariableImportanceTable <- as.data.frame(var.imp[order(var.imp$MeanDecreaseGini,decreasing = T),])
VariableImportanceTable[1:10,]
#(1=mean decrease in accuracy, 2=mean decrease in node impurity
plot(margin(rf,sort=T))
rf$forest
importantVars <- importantVars[order(conf_matrix.df$predicted,conf_matrix.df$actual, decreasing=FALSE),]
conf_matrix.df <- conf_matrix.df[order(conf_matrix.df$predicted,conf_matrix.df$actual, decreasing=FALSE),]
?order
# Calculate error
Class1error <- sum(conf_matrix[1,2:3])/sum(conf_matrix[1,])*100;#SwingHi
Class2error <- sum(conf_matrix[2,c(1,3)])/sum(conf_matrix[2,])*100;#SwingLo
Class3error <- sum(conf_matrix[3,1:2])/sum(conf_matrix[3,])*100;#SwingNULL
Class1error.t <- sum(conf_matrix.t[1,2:3])/sum(conf_matrix.t[1,])*100;#SwingHi
Class2error.t <- sum(conf_matrix.t[2,c(1,3)])/sum(conf_matrix.t[2,])*100;#SwingLo
Class3error.t <- sum(conf_matrix.t[3,1:2])/sum(conf_matrix.t[3,])*100;#SwingNULL
ClassErrs <- c(SwingHi=Class1error,SwingLo=Class2error,SwingNULL=Class3error)
avgClassErr <- mean(exp(ClassErrs/100)/(exp(ClassErrs/100)+1))
err <- avgClassErr;
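# Worked example of the error transform above (illustrative numbers, not output
# from this run): per-class errors of 10, 20 and 30 percent map through
# exp(e/100)/(exp(e/100)+1) to roughly 0.525, 0.550 and 0.574, so avgClassErr is
# about 0.55; a perfect classifier (0,0,0) scores 0.5 and a useless one about 0.73.
mean(exp(c(10, 20, 30)/100) / (exp(c(10, 20, 30)/100) + 1))  # ~0.55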
conf_matrix.df <- as.data.frame(conf_matrix)
conf_matrix.df <- conf_matrix.df[order(conf_matrix.df$predicted,conf_matrix.df$actual, decreasing=FALSE),]
conf_matrix.df <- cbind(conf_matrix.df,Err=c(Class1error,Class1error,Class1error,Class2error,Class2error,Class2error,Class3error,Class3error,Class3error))
#output ideal params
if ( (err <= smallestError+(smallestError/30)) ) {
delta <- (smallestError - err)
smallestError <- err
bestTrainNum <- loopNum
new_row <- c(0,randVar0,randVar1,randVar2,randVar3
,randVar4,rv5,rv6
,ifelse(is.null(rf$type),0,rf$type)
,ifelse(is.null(rf$localImportance),0,rf$localImportance)
,ifelse(is.null(rf$proximity),0,rf$proximity)
,ifelse(is.null(rf$mtry),0,rf$mtry)
,ifelse(is.null(rf$forest$nrnodes),0,rf$forest$nrnodes)
,ifelse(is.null(rf$forest$ntree),0,rf$forest$ntree)
,conf_matrix.df[1,3],conf_matrix.df[2,3],conf_matrix.df[3,3]
,conf_matrix.df[5,3],conf_matrix.df[4,3],conf_matrix.df[6,3]
,conf_matrix.df[9,3],conf_matrix.df[7,3],conf_matrix.df[8,3]
,sprintf("%.4f",Class1error),sprintf("%.4f",Class2error),sprintf("%.4f",Class3error)
,sprintf("%.4f",Class1error.t),sprintf("%.4f",Class2error.t),sprintf("%.4f",Class3error.t)
,sprintf("%.4f",Class1error.t-Class1error),sprintf("%.4f",Class2error.t-Class2error),sprintf("%.4f",Class3error.t-Class3error)
,sprintf("%.6f",smallestError),bestTrainNum)
bestParams <- rbind(bestParams, new_row)
bestParams <- bestParams[order(bestParams$smallest.Error,decreasing=FALSE),]
print(bestParams)
#Extract raw rules from a random forest:
treeList <- RF2List(rf) # transform rf object to an inTrees' format
exec <- extractRules(treeList,dsTrainingTest) # R-executable conditions
#Measure rules. "len" is the number of variable-value pairs in a condition, "freq" is the percentage of data satisfying a condition, "pred" is the outcome of a rule, i.e., "condition" => "pred", "err" is the error rate of a rule.
target <- yTest.bal
X <- xTest.bal
ruleMetric <- getRuleMetric(exec,X,target) # get rule metrics
ruleMetric_orig <- ruleMetric
#Prune each rule:
ruleMetric <- pruneRule(ruleMetric,X,target)
ruleMetric_pruned <- ruleMetric
#Select a compact rule set:
ruleMetric <- selectRuleRRF(ruleMetric,X,target)
#ruleMetric_compact <- ruleMetric
#Build an ordered rule list as a classifier:
#learner <- buildLearner(ruleMetric,X,target)
#learner_orig <- learner
#Make rules more readable:
#readableRules <- presentRules(learner,colnames(X))
readableRules <- presentRules(ruleMetric,colnames(X))
readableRules <- presentRules(ruleMetric_pruned,colnames(X))
readableRules.df <- as.data.frame(readableRules)
readableRules.df[,1:3] <- sapply(readableRules.df[,1:3],as.character)
readableRules.df[,1:3] <- sapply(readableRules.df[,1:3],as.numeric)
table(sapply(readableRules.df, class))
df <- readableRules.df[ order(-readableRules.df[,2], readableRules.df[,3]), ]
dfu <- unique(df[,1:5])
dfu <- cbind(dfu,bestParams[2,])
if (nrow(bestParams) <= 2) {
write.csv(df, file = "readableRules.csv",row.names=TRUE,append = FALSE)
write.csv(dfu, file = "readableRulesUnique.csv",row.names=TRUE,append = FALSE)
}
else {
write.csv(df, file = "readableRules.csv",row.names=TRUE,append = T)
write.csv(dfu, file = "readableRulesUnique.csv",row.names=TRUE,append = T)
}
library(xtable)
print(xtable(dfu), include.rownames=FALSE)
}
#else {
# print(paste("bestTrainNum: ",bestTrainNum))
#}
#}}
if (trainIteration == maxTrainIteration) {
write.csv(bestParams, file = "bestParams_RF.csv",row.names=TRUE)
}
trainIteration <- trainIteration+1;
gc()
}
X <- iris[,1:(ncol(iris)-1)]
target <- iris[,"Species"]
rf <- RRF(X,as.factor(target),ntree=100) # build an ordinary RF
treeList <- RF2List(rf)
ruleExec <- extractRules(treeList,X) # transform to R-executable rules
#The MSR and % variance explained are based on OOB or out-of-bag estimates, a very clever device
#in random forests to get honest error estimates. The model reports that mtry=4, which is the
#number of variables randomly chosen at each split. Since \( p=13 \) here, we could try all 13
#possible values of mtry. We will do so, record the results, and make a plot.
#x_col_start_pos <- 2
#x_col_end_pos <- 823
#https://lagunita.stanford.edu/c4x/HumanitiesSciences/StatLearning/asset/ch8.html
oob.err = double(822)
test.err = double(822)
for (mtry in 1:822) {
fit = randomForest(yTrain.bal ~ ., data = xTrain.bal, mtry = mtry,
ntree = 10)
pred = predict(fit, xTest.bal)
#create confusion matrix
actual <- as.matrix(yTest.bal);
predicted <- as.matrix(pred);
conf_matrix <- table(pred,actual);
# Calculate error
Class1error <- sum(conf_matrix[1,2:3])/sum(conf_matrix[1,])*100;#SwingHi
Class2error <- sum(conf_matrix[2,c(1,3)])/sum(conf_matrix[2,])*100;#SwingLo
Class3error <- sum(conf_matrix[3,1:2])/sum(conf_matrix[3,])*100;#SwingNULL
ClassErrs <- c(SwingHi=Class1error,SwingLo=Class2error,SwingNULL=Class3error)
avgClassErr <- mean(exp(ClassErrs/100)/(exp(ClassErrs/100)+1))
oob.err[mtry] = mean(ClassErrs)
test.err[mtry] = mean(yTest.bal != pred) * 100 # misclassification % (factors cannot be subtracted, so MSE is not meaningful here)
cat(mtry, "= {");cat(sprintf("%.2f",mean(ClassErrs)),", ");cat(sprintf("%.2f",avgClassErr),"} ");conf_matrix;
}
matplot(1:mtry, cbind(test.err, oob.err), pch = 19, col = c("red", "blue"), type = "b", ylab = "Classification Error (%)")
# cbind() puts test.err first, so red = Test and blue = OOB in the legend
legend("topright", legend = c("Test", "OOB"), pch = 19, col = c("red", "blue"))
sampsize <- 1200; nodesize <- 280; maxnodes <- 1200/280;
nrnodes <- 2 * trunc(sampsize / nodesize) + 1
maxnodes > nrnodes
maxnodes < nrnodes
maxnodes = 2 * sampsize / nodesize + 1
(2*sampsize + 3*nodesize)/nodesize
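# Hedged sketch of the relationship probed above: randomForest allocates at most
# nrnodes = 2 * trunc(sampsize / nodesize) + 1 nodes per tree (the formula mirrored
# from the nrnodes line), so a requested maxnodes above that cap gets trimmed.
# The helper name nodeCap is mine, not part of the original script.
nodeCap <- function(sampsize, nodesize) 2 * trunc(sampsize / nodesize) + 1
nodeCap(1200, 280)              # 9 nodes available for the values tried above
nodeCap(1200, 280) >= 1200/280  # the requested maxnodes (~4.3) fits under the cap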
levels(yTrain.bal)
classwt=c("SwingHi"=0,"SwingLo"=0,"SwingNULL"=0)
# random forest
rf <- randomForest(x,y,ntree=100#,maxnodes = 5
,mtry = floor(sqrt(ncol(x)))/10
,sampsize = .9*nrow(x),nodesize = floor(sqrt(nrow(x)))*1
,replace=TRUE,importance = TRUE)
lists[['rf']] <- RF2List(rf) # extract a list of trees
rf$confusion
rf$err.rate
rf.cv <- rfcv(xTest.bal, yTest.bal, cv.fold=10)
with(rf.cv, plot(n.var, error.cv))
lists[['rf']]
# regularized random forest
?RRF
rrf <- RRF(x,as.factor(y),ntree=100, flagReg = 1)
lists[['rrf']] <- RF2List(rrf)
X <- xTrain.bal; class <- yTrain.bal;
#ordinary random forest.
rf <- RRF(X,as.factor(class), flagReg = 0)
impRF <- rf$importance
impRF <- impRF[,"MeanDecreaseGini"]
rf$feaSet
#regularized random forest
rrf <- RRF(X,as.factor(class), flagReg = 1)
rrf$feaSet
#guided regularized random forest
imp <- impRF/(max(impRF))#normalize the importance score
gamma <- 0.5
coefReg <- (1-gamma)+gamma*imp #weighted average
grrf <- RRF(X,as.factor(class),coefReg=coefReg, flagReg=1)
grrf$feaSet
#guided random forest
gamma <- 1
coefReg <- (1-gamma)+gamma*imp
grf <- RRF(X,as.factor(class),coefReg=coefReg, flagReg=0)
grf$feaSet
# boosted trees
?gbm
gbmFit <- gbm(y~ ., data=cbind(xTest.bal,yTest.bal), n.tree = 100,
interaction.depth = 10,distribution="multinomial")
lists[['gbm']] <- GBM2List(gbmFit,x)
dataSet <- read.csv("data/vw_barFeatures_train_r_20160819_SUBSET_BALANCED.csv", header = TRUE)
rc(dataSet)
rc(dataSet)
rc(dsTraining)
rc(dsTest)
unique(dataSet[,1])
unique(dsTraining[,1])
unique(dsTest[,1])
dsColNames <- as.character(names(dataSet))
dsColNames
dsColNames <- as.character(names(dsTraining))
dsColNames
dsColNames <- as.character(names(dsTest))
dsColNames
homedir<-getSrcDirectory(function(x) {x})
# Setting working directory
# need to be adjusted to server conditions
if (length(homedir) == 0 || homedir == "" || is.na(homedir)) {  # getSrcDirectory() returns character(0) when not sourced from a file
homedir <-"C://Users//Neal//Documents//www.DAYTRADINGLOGIC.com//_neal//swing//R"
#homedir <-"C://Users//nbb//Downloads//rscripts20160529"
}
# homedir <-"~//Zlecenia//ODesk//Neal Webster//swing prediction"
setwd(homedir)
#if ("package:gbm" %in% search()) { detach("package:gbm", unload=TRUE) }
#if ("gbm" %in% rownames(installed.packages())) { remove.packages("gbm") }
library(inTrees);
#install.packages("gbm")
library(gbm);
dsTrainingTest <- read.csv("data/vw_barFeatures_train_r_20161123_balanced_TrainingTest.csv", header = TRUE)
dsValidation <- read.csv("data/vw_barFeatures_train_r_20161123_balanced_Validation.csv", header = TRUE)
dsTrainingTest <- read.csv("data/vw_barFeatures_train_r_20161123_balanced_SwingHi_TrainingTest.csv", header = TRUE)
dsValidation <- read.csv("data/vw_barFeatures_train_r_20161123_balanced_SwingHi_Validation.csv.csv", header = TRUE)
dsTrainingTest <- read.csv("data/vw_barFeatures_train_r_20161123_balanced_SwingLo_TrainingTest.csv", header = TRUE)
dsValidation <- read.csv("data/vw_barFeatures_train_r_20161123_balanced_SwingLo_Validation.csv", header = TRUE)
ncol(dsTrainingTest);ncol(dsTrainingTest)==ncol(dsValidation);
nrow(dsTrainingTest);nrow(dsTrainingTest)!=nrow(dsValidation);
#ordinality of class and features
classColumn <- 1
y_col_pos <- classColumn
x_col_start_pos <- 2
x_col_end_pos <- 3246
y_col_pos; x_col_start_pos; x_col_end_pos
dsColNames <- as.character(names(dsTrainingTest))
classColumnName <- dsColNames[classColumn]
print(round(prop.table(table(dsTrainingTest[,classColumn]))* 100, digits = 1))
print(round(prop.table(table(dsValidation[,classColumn]))* 100, digits = 1))
unique(dsTrainingTest[,classColumn])
unique(dsTrainingTest[,1])
gbm1 <- gbm(dsTrainingTest[,classColumn]~ ., data=dsTrainingTest[,x_col_start_pos:x_col_end_pos],n.cores=2,verbose=TRUE
,n.trees=15, interaction.depth=10, n.minobsinnode = 2, shrinkage=0.61#,train.fraction = 0.15
,distribution="multinomial"#,train.fraction = .1
#shrinkage=0.00001,bag.fraction = 0.1
)
# n.trees=20)#, # number of trees
# shrinkage=0.25, # learning rate, 0.001 to 0.1 usually work
# interaction.depth=10, # 1: additive model, 2: two-way interactions, etc.
# bag.fraction = 0.25, # subsampling fraction, 0.5 is probably best
# train.fraction = 0.15, # fraction of data for training,first train.fraction*N used for training
# n.minobsinnode = 60, # minimum total weight needed in each node
# cv.folds=10, # do 3-fold cross-validation cross-validation returned w/ "cv.error"
# keep.data=TRUE, # keep a copy of the dataset with the object
# verbose=TRUE, # don't print out progress
# n.cores=4) # use only a single core (detecting #cores is error-prone, so avoided here)
warnings()
# check performance using an out-of-bag estimator
# OOB underestimates the optimal number of iterations
best.iter <- gbm.perf(gbm1,method="OOB")
print(best.iter)
# check performance using a 50% heldout test set
best.iter <- gbm.perf(gbm1,method="test")
print(best.iter)
# check performance using 5-fold cross-validation
best.iter <- gbm.perf(gbm1,method="cv")
print(best.iter)
# plot the performance # plot variable influence
summary(gbm1,n.trees=1) # based on the first tree
summary(gbm1,n.trees=best.iter) # based on the estimated best number of trees
# compactly print the first and last trees for curiosity
print(pretty.gbm.tree(gbm1,1))
print(pretty.gbm.tree(gbm1,gbm1$n.trees))
#plot best num of trees
gbm.perf(gbm1)
gbm.perf(gbm1,
plot.it = TRUE,
oobag.curve = F,
overlay = TRUE,
method="test")# method="OOB",method="test",method="cv"
gbm1
import <- as.data.frame(relative.influence(gbm1, n.trees = 10))
import
dsTraining <- dataSet
# predict
x <- dsTraining[,x_col_start_pos:x_col_end_pos]; y <- dsTraining[,classColumn];
x <- dsTest[,x_col_start_pos:x_col_end_pos]; y <- dsTest[,classColumn];
x <- dsTrainingTest[,x_col_start_pos:x_col_end_pos]; y <- dsTrainingTest[,classColumn];
nrow(dsTrainingTest)
#x <- dsValidation[,x_col_start_pos:x_col_end_pos]; y <- dsValidation[,classColumn];
#nrow(dsValidation)
#nrow(x)
#x <- xBkup.bal; y <- yBkup.bal;
pred <- as.data.frame(predict(gbm1, x, n.trees = 15,type = 'response'));
#pred <- as.data.frame(pred)
names(pred) <- c(levels(y))#"SwingHi", "SwingLo", "SwingNULL")
#pred <- apply(pred, 1,which.max(pred))
pred.test <- rep(NA,1)
for (i in 1:nrow(x)) {
pred.test[i] <- colnames(pred)[(which.max(pred[i,]))]
}
pred <- as.factor(pred.test)
actual <- as.matrix(y);
predicted <- as.matrix(pred);
conf_matrix <- table(predicted,actual);
conf_matrix
## save this model
save(gbm1, file = "model_GBM_20161202_1.rda")
## load the model
load("model_GBM_20161202_1.rda")
## under sampling NULL class to balance class distributions appears to work best
##gbm1 <- gbm(yTrain.bal~ ., data=cbind(xTrain.bal,yTrain.bal), distribution="multinomial",
## interaction.depth=25,n.trees=50,cv.folds=5,n.cores=4,verbose=TRUE)
# actual
# predicted SwingHi SwingLo SwingNULL
# SwingHi 1053 2 77
# SwingLo 0 1033 72
# SwingNULL 25 14 908
#
# actual
# predicted SwingHi SwingLo SwingNULL
# SwingHi 528 2 40
# SwingLo 0 518 44
# SwingNULL 10 8 467
#
# actual
# predicted SwingHi SwingLo SwingNULL
# SwingHi 1023 2 92
# SwingLo 1 1075 86
# SwingNULL 18 24 885
#
# actual
# predicted SwingHi SwingLo SwingNULL
# SwingHi 2076 4 1514
# SwingLo 1 2108 1353
# SwingNULL 43 38 14301
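# Hedged sketch of the under-sampling idea noted above; the exact scheme is an
# assumption of mine (the balanced files used by this script were prepared upstream).
nullIdx  <- which(dsTrainingTest[, classColumn] == "SwingNULL")
otherIdx <- which(dsTrainingTest[, classColumn] != "SwingNULL")
keepNull <- sample(nullIdx, min(length(nullIdx), sum(dsTrainingTest[, classColumn] == "SwingHi")))
dsUnder  <- dsTrainingTest[c(otherIdx, keepNull), ]
round(prop.table(table(dsUnder[, classColumn])) * 100, digits = 1)  # roughly balanced classes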
gbmFit <- gbm1
#X <- xTrain.bal; target <- yTrain.bal;
#X <- xTest.bal; target <- yTest.bal;
#X <- xVal.bal; target <- yVal.bal;
X <- dataSet[,x_col_start_pos:x_col_end_pos]; target <- dataSet[,classColumn];
treeList <- GBM2List(gbmFit,X)
ruleExec = extractRules(treeList,X,maxdepth = 50)
ruleExec <- unique(ruleExec)
#ruleExec <- ruleExec[1:min(2000,length(ruleExec)),,drop=FALSE]
ruleMetric <- getRuleMetric(ruleExec,X,target)
ruleMetric <- pruneRule(ruleMetric,X,target, maxDecay = 0, typeDecay = 1)
ruleMetric <- unique(ruleMetric)
learner <- buildLearner(ruleMetric,X,target,minFreq = 0.0000001)
pred <- applyLearner(learner,X)
#get column names that match those in EasyLanguage
hdr <- read.csv("header.csv", sep = ",", skip = 0, header = TRUE,comment.char = "", check.names = FALSE)
hdr <- as.character(names(hdr), check.names = FALSE)
#set column names in dataset to those from header.csv
names(X) <- hdr
# more readable format
readableLearner <- presentRules(learner,colnames(X))
#err <- 1-sum(pred==target)/length(pred);
write.csv(readableLearner, file = "readableRules_GBM_20160917_3.csv",row.names=TRUE)
readableLearner
library(RRF)
target <- yTrain.bal
X <- xTrain.bal
rf <- RRF(X,as.factor(target),ntree=100) # build an ordinary RF
treeList <- RF2List(rf)
ruleExec <- extractRules(treeList,X) # transform to R-executable rules
ruleExec <- unique(ruleExec)
ruleMetric <- getRuleMetric(ruleExec,X,target) # measure rules
write.csv(readableLearner, file = "readableRules_RF.csv",row.names=TRUE)
#Extract raw rules from a GBM:
treeList <- GBM2List(gbm1,x) # transform rf object to an inTrees' format
exec <- extractRules(treeList,X) # R-executable conditions
exec <- unique(exec)
#Measure rules. "len" is the number of variable-value pairs in a condition, "freq" is the percentage of data satisfying a condition, "pred" is the outcome of a rule, i.e., "condition" => "pred", "err" is the error rate of a rule.
ruleMetric <- getRuleMetric(exec,X,target) # get rule metrics
ruleMetric_orig <- ruleMetric
#Prune each rule:
ruleMetric <- pruneRule(ruleMetric,X,target)
ruleMetric_pruned <- ruleMetric
#Select a compact rule set:
ruleMetric <- selectRuleRRF(ruleMetric,X,target)
#ruleMetric_compact <- ruleMetric
#Build an ordered rule list as a classifier:
#learner <- buildLearner(ruleMetric,X,target)
#learner_orig <- learner
#Make rules more readable:
#readableRules <- presentRules(learner,colnames(X))
readableRules <- presentRules(ruleMetric,colnames(X))
readableRules <- presentRules(ruleMetric_pruned,colnames(X))
readableRules.df <- as.data.frame(readableRules)
readableRules.df[,1:3] <- sapply(readableRules.df[,1:3],as.character)
readableRules.df[,1:3] <- sapply(readableRules.df[,1:3],as.numeric)
table(sapply(readableRules.df, class))
df <- readableRules.df[ order(-readableRules.df[,2], readableRules.df[,3]), ]
dfu <- unique(df[,])
dfu <- cbind(dfu,bestParams[2,])
dfu
if (nrow(bestParams) <= 2) {
write.csv(df, file = "readableRules.csv",row.names=TRUE,append = F)
write.csv(dfu, file = "readableRulesUnique.csv",row.names=TRUE,append = F)
}
else {
write.csv(df, file = "readableRules.csv",row.names=TRUE,append = T)
write.csv(dfu, file = "readableRulesUnique.csv",row.names=TRUE,append = T)
}
library(xtable)
print(xtable(dfu), include.rownames=FALSE)
#http://www.numbrcrunch.com/blog/comparing-tree-based-classification-methods-using-the-kaggle-otto-competition
# Install gbm package
install.packages('gbm')
library(gbm)
# Set a unique seed number so you get the same results everytime you run the below model,
# the number does not matter
set.seed(17)
# Begin recording the time it takes to create the model
ptm5 <- proc.time()
# Create a random forest model using the target field as the response and all 93 features as inputs (.)
fit5 <- gbm(target ~ ., data=strain, distribution="multinomial", n.trees=1000,
shrinkage=0.05, interaction.depth=12, cv.folds=2)
# Finish timing the model
fit5.time <- proc.time() - ptm5
# Test the boosting model on the holdout test dataset
trees <- gbm.perf(fit5)
fit5.stest <- predict(fit5, stest, n.trees=trees, type="response")
fit5.stest <- as.data.frame(fit5.stest)
names(fit5.stest) <- c("Class_1","Class_2","Class_3","Class_4","Class_5","Class_6","Class_7","Class_8","Class_9")
fit5.stest.pred <- rep(NA,2000)
for (i in 1:nrow(stest)) {
fit5.stest.pred[i] <- colnames(fit5.stest)[(which.max(fit5.stest[i,]))]}
fit5.pred <- as.factor(fit5.stest.pred)
# Create a confusion matrix of predictions vs actuals
table(fit5.pred,stest$target)
# Determine the error rate for the model
fit5$error <- 1-(sum(fit5.pred==stest$target)/length(stest$target))
fit5$error
y <- yVal.bal
x <- colnames(x)[apply(x, 1 ,which.max)]
x[1:5,]
colnames(x)[apply(x, 1 ,which.max)]
confusion <- function(a, b){
tbl <- table(a, b)
mis <- 1 - sum(diag(tbl))/sum(tbl)
list(table = tbl, misclass.prob = mis)
}
# Here is the best predictor:
confusion(predict(gbm1, cbind(xVal.bal,yVal.bal),n.trees=5), yVal.bal)
tbl <- table(x,y)
str(yVal.bal)
nrow(as.list(predictions.Test.gbm))
nrow(as.list(yVal.bal))
# The calls and printed results below are pasted from an external example;
# fit.gbm1, test.data2 and train.data2 are not defined in this script, so the
# whole block is kept as comments for reference only.
# confusion(predict(fit.gbm1, test.data2, n.trees = 69) > 0, test.data2$y > 0)
# $table
# b
# a FALSE TRUE
# FALSE 4409 694
# TRUE 533 4364
# $misclass.prob
# [1] 0.1227
# 200 is better:
# confusion(predict(fit.gbm1, test.data2, n.trees = 200) > 0, test.data2$y > 0)
# $table
# b
# a FALSE TRUE
# FALSE 4635 437
# TRUE 307 4621
# $misclass.prob
# [1] 0.0744
# Even with 400 trees, not seriously overfit
# confusion(predict(fit.gbm1, test.data2) > 0, test.data2$y > 0)
# $table
# b
# a FALSE TRUE
# FALSE 4680 405
# TRUE 262 4653
# $misclass.prob
# [1] 0.0667
# Note that we have almost perfect classification on training sample
# confusion(predict(fit.gbm1, train.data2) > 0, train.data2$y > 0)
# $table
# b
# a FALSE TRUE
# FALSE 994 1
# TRUE 0 1005
# $misclass.prob
# [1] 5e-04
gbm.perf(gbm1, method = "cv")
library(dplyr)
preds <- predict(gbm1,xVal.bal,n.trees=gbm1$n.trees,type='response')
density(preds) %>% plot
install.packages("caret")
library(caret)
# Here is the best predictor:
confusion(predict(gbm1, test.data2, n.trees = 69) > 0, test.data2$y > 0)$table
getModelInfo()$gbm$type
pretty.gbm.tree(gbm1, i.tree = 1)
confusionMatrix(predictions.Test.gbm,yTest.bal)
?confusionMatrix
summary(gbm1)
print(gbm1)
str(gbm1)
str(predictions.Train.gbm)
table(predicted.gbm<50,actual.gbm)
actual.gbm <- as.matrix(yTest.bal); actual.t <- as.matrix(yTrain.bal);
predicted.gbm <- as.matrix(predictions.Test.gbm); predicted.t <- as.matrix(predictions.Train.gbm);
conf_matrix.gbm <- table(predictions.Test.gbm,actual.gbm); conf_matrix.t <- table(predicted.t,actual.t);
gbm.perf(gbm1,
plot.it = TRUE,
oobag.curve = FALSE,
overlay = TRUE,
method="OOB")#method="cv")#method="test")
# Calculate error
Class1error.gbm <- sum(conf_matrix.gbm[1,2:3])/sum(conf_matrix.gbm[1,])*100;#SwingHi
Class2error.gbm <- sum(conf_matrix.gbm[2,c(1,3)])/sum(conf_matrix.gbm[2,])*100;#SwingLo
Class3error.gbm <- sum(conf_matrix.gbm[3,1:2])/sum(conf_matrix.gbm[3,])*100;#SwingNULL
Class1error.t <- sum(conf_matrix.t[1,2:3])/sum(conf_matrix.t[1,])*100;#SwingHi
Class2error.t <- sum(conf_matrix.t[2,c(1,3)])/sum(conf_matrix.t[2,])*100;#SwingLo
Class3error.t <- sum(conf_matrix.t[3,1:2])/sum(conf_matrix.t[3,])*100;#SwingNULL
ClassErrs.gbm <- c(SwingHi=Class1error.gbm,SwingLo=Class2error.gbm,SwingNULL=Class3error.gbm)
avgClassErr.gbm <- mean(exp(ClassErrs.gbm/100)/(exp(ClassErrs.gbm/100)+1))
err.gbm <- avgClassErr.gbm;
conf_matrix.df.gbm <- as.data.frame(conf_matrix.gbm)
conf_matrix.df.gbm
unseenXData <- xValp1a #xValp1a.bal,xValp1b.bal,xValp2a.bal,xValp2b.bal,xValp1a,xValp1b,xValp2a,xValp2b,xVal
unseenYData <- yValp1a #yValp1a.bal,yValp1b.bal,yValp2a.bal,yValp2b.bal,yValp1a,yValp1b,yValp2a,yValp2b,yVal
# predict
predictions <- predict(rf, unseenXData)
#create confusion matrix
actual <- as.matrix(unseenYData); predicted <- as.matrix(predictions); conf_matrix <- table(predicted,actual);conf_matrix;
# Calculate error
error1 <- conf_matrix[1,2]/(conf_matrix[1,1]+conf_matrix[1,2])
error2 <- conf_matrix[2,1]/(conf_matrix[2,1]+conf_matrix[2,2])
avgErr <- ((exp(error1)/(exp(error1)+1))+(exp(error2)/(exp(error2)+1)))/2;error1;error2;avgErr;
#Extract raw rules from a random forest:
treeList <- RF2List(rf) # transform rf object to an inTrees' format
exec <- extractRules(treeList,dsTrainingTest) # R-executable conditions
exec[1:2,]
#Measure rules. "len" is the number of variable-value pairs in a condition, "freq" is the percentage of data satisfying a condition, "pred" is the outcome of a rule, i.e., "condition" => "pred", "err" is the error rate of a rule.
target <- yVal#yTest.bal
X <- xVal#xTest.bal
ruleMetric <- getRuleMetric(exec,X,target) # get rule metrics
ruleMetric_orig <- ruleMetric
ruleMetric_orig[1:20,]
#Prune each rule:
ruleMetric <- pruneRule(ruleMetric,X,target)
ruleMetric_pruned <- ruleMetric
ruleMetric_pruned[1:20,]
#Select a compact rule set:
ruleMetric <- selectRuleRRF(ruleMetric,X,target)
ruleMetric_compact <- ruleMetric
ruleMetric_compact[,]
#Build an ordered rule list as a classifier:
learner <- buildLearner(ruleMetric,X,target)
learner_orig <- learner
learner_orig
#Make rules more readable:
readableRules <- presentRules(learner,colnames(X))
readableRules[,]
readableRules <- presentRules(ruleMetric,colnames(X))
readableRules[,]
readableRules <- presentRules(ruleMetric_pruned,colnames(X))
readableRules.df <- as.data.frame(readableRules)
readableRules.df[,1:3] <- sapply(readableRules.df[,1:3],as.character)
readableRules.df[,1:3] <- sapply(readableRules.df[,1:3],as.numeric)
table(sapply(readableRules.df, class))
df <- readableRules.df[ order(-readableRules.df[,2], readableRules.df[,3]), ]
dfu <- unique(df[,1:5])
write.csv(df, file = "readableRules.csv",row.names=TRUE)
write.csv(dfu, file = "readableRulesUnique.csv",row.names=TRUE)
append = T
library(xtable)
print(xtable(dfu), include.rownames=FALSE)
#Extract frequent variable interactions (note the rules are not pruned or selected):
treeList <- RF2List(rf) # transform rf object to an inTrees' format
exec <- extractRules(treeList,X) # R-executable conditions
ruleMetricFreq <- getRuleMetric(exec,X,target) # get rule metrics
freqPattern <- getFreqPattern(ruleMetricFreq)
nrow(freqPattern)
freqRules <- freqPattern[which(as.numeric(freqPattern[,"len"])>3),][,] # keep only patterns with more than three variable-value pairs
freqRules
readableRulesFreq <- presentRules(freqRules,colnames(X))
readableRulesFreq
library(xtable)
print(xtable(readableRulesFreq), include.rownames=FALSE)
#[1] "Training#: 134 ,trainIteration: 133 params[ 2 , 7 , 6 , 3427 , 30 , 30 ] smallestError == 0.502067331543828 delta == 0.0000 bestTrainNum == 134 error1 0.0006 error2 0.0159"
# actual
# predicted NotSwingLo SwingLo
# NotSwingLo 3259 2
# SwingLo 48 2966
# random forest
rf <- randomForest(x,y,ntree=100#,maxnodes = 5
,mtry = floor(sqrt(ncol(x)))/10
,sampsize = .9*nrow(x),nodesize = floor(sqrt(nrow(x)))*1
,replace=TRUE,importance = TRUE)
lists[['rf']] <- RF2List(rf) # extract a list of trees
rf$confusion
rf$err.rate
lists[['rf']]
# regularized random forest
rrf <- RRF(x,as.factor(y),ntree=100)
lists[['rrf']] <- RF2List(rrf)
# boosted trees
gbmFit <- gbm(y~ ., data=cbind(x,y), n.tree = 100,
interaction.depth = 10,distribution="multinomial")
lists[['gbm']] <- GBM2List(gbmFit,x)
v <- c("rf","rrf","gbm")
v
v <- c("rf") # only use rf
#v <- c("gbm") # only use gbm
#v <- c("rrf") # only use rf
X <- x
for(i in v){X
target <- y
#X <- x
treeList <- lists[[i]]
ruleExec0 <- extractRules(treeList,X) # transform to R-executable conditions
ruleExec <- unique(ruleExec0) # unique rules
cat( paste("There are ", length(ruleExec), " unique conditions. \n",sep="") )
# Too many conditions could make the following steps time-consuming,
# so one could randomly select a subset of the conditions
ix <- sample(1:length(ruleExec),min(2000,length(ruleExec))) #randomly select 2000 rules
ruleExec <- ruleExec[ix,,drop=FALSE]
ruleMetric <- getRuleMetric(ruleExec,X,target) # measure rules
lookup <- lookupRule(ruleMetric,c("X[,4]","X[,3]")) # look up rules including X[,4] and X[,3]
ruleMetric <- pruneRule(ruleMetric,X,target) # prune each rule
# selecting rules by thresholds of frequency & error
ix <- which(as.numeric(ruleMetric[,"freq"])>0.001 & as.numeric(ruleMetric[,"err"])< 0.5)
ruleMetric <- ruleMetric[ix,]
ruleMetric <- selectRuleRRF(ruleMetric,X,target) # rule selection
learner <- buildLearner(ruleMetric,X,target) #build the simplified tree ensemble learner
pred <- applyLearner(learner,X) #appy learner to data
readableLearner <- presentRules(learner,colnames(X)) # present the rules with a more readable format
# print(readableLearner)
# -- frequent variable interactions or conditions in a tree ensemble
# NOTE: the calculation is based on ruleExec0 WITHOUT pruning or selection
ruleMetric <- getRuleMetric(ruleExec0,X,target)
freqPattern <- getFreqPattern(ruleMetric)
#ruleMetric <- getRuleMetric(freqPattern,X,target)
}
#readableLearner
#ruleMetric
#freqPattern
ruleMetric <- pruneRule(ruleMetric,X,target)
ruleMetric[1:2,]
readableRules <- presentRules(ruleMetric,colnames(X))
readableRules[1:2,]
#format the rule and metrics as a table in latex code
#install.packages("xtable")
library(xtable)
print(xtable(ruleMetric), include.rownames=F)
print(xtable(readableLearner), include.rownames=F)#use in sharelatex.com
# --- transform regression rules to classification rules
# make Sepal.Length the target and the other columns the predictors
X <- iris[,-1]; target <- iris[,"Sepal.Length"]
rf <- randomForest(X,target,ntree=30) # random forest (X = iris predictors defined above, not the earlier lower-case x)
ruleExec0 <- extractRules(RF2List(rf),X)
ruleExec <- unique(ruleExec0)
nrow(ruleExec0)
nrow(ruleExec)
target <- dicretizeVector(target) # discretize it into three levels with equal frequency
# methods for classification rules can then be used for
# the conditions extracted from the regression trees
ruleMetric <- getRuleMetric(ruleExec,X,target)
ruleMetric
# --- decision tree and logistic regression
X.double <- apply(X,2,as.numeric)
#install.packages("glmnet")
library(glmnet)
cvNet <- cv.glmnet(X.double,as.factor(target), family = "multinomial",type.measure = "class")
coef <- coef(cvNet)
#install.packages("rpart")
library(rpart)
r <- rpart(target ~. , X)
|
## Coursera Exploratory Data Analysis
## 2014-10-12
## Plot1.R
#==========================================================================================
# i. Prepare directory and load any required packages
#==========================================================================================
# check working directory
getwd()
# if wrong working directory then set directory
# insert file path between the ""
setwd("~/Desktop/Coursera/Data Science/4-Exploratory Data Analysis/4-Project_1/ExData_Plotting1")
# Clean up workspace
rm(list = ls())
# Check if "data" directory already exists.
# If not, then it creates "data" directory, where all data pertaining to this code
# are stored.
if(!file.exists("data")) {
dir.create("data")
}
# Reads the data as a data.frame, treating the Date and Time columns as "character"
# and the rest of the columns as numeric.
AllData <- read.table("./data/household_power_consumption.txt",
header = TRUE,
sep = ";",
colClasses = c(rep("character", 2), rep("numeric",7)),
na.strings = "?")
dim(AllData)
# [1] 2075259 9
# Reformat Date (dd/mm/yyyy)
AllData$Date <- as.Date(AllData$Date, format = "%d/%m/%Y")
class(AllData$Date)
# [1] "Date"
# Only analyze Feb 2007 dataset
FebData <- AllData[AllData$Date == "2007-02-01" | AllData$Date == "2007-02-02", ]
dim(FebData)
# [1] 2880 9
# Generate Graph
png(filename = "plot1.png", width = 480, height = 480)
with(FebData, hist(Global_active_power,
col = "red",
main = "Global Active Power",
xlab = "Global Active Power (kilowatts)"))
dev.off()
|
/Plot1.R
|
no_license
|
evillega/ExData_Plotting1
|
R
| false | false | 1,661 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adexchangebuyer_objects.R
\name{Budget}
\alias{Budget}
\title{Budget Object}
\usage{
Budget(accountId = NULL, billingId = NULL, budgetAmount = NULL,
currencyCode = NULL, id = NULL)
}
\arguments{
\item{accountId}{The id of the account}
\item{billingId}{The billing id to determine which adgroup to provide budget information for}
\item{budgetAmount}{The daily budget amount in unit amount of the account currency to apply for the billingId provided}
\item{currencyCode}{The currency code for the buyer}
\item{id}{The unique id that describes this item}
}
\value{
Budget object
}
\description{
Budget Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
The configuration data for Ad Exchange RTB - Budget API.
}
\seealso{
Other Budget functions: \code{\link{budget.patch}},
\code{\link{budget.update}}
}
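% Illustrative usage sketch added for clarity (not autogenerated); all argument
% values below are hypothetical placeholders.
\examples{
\dontrun{
budget <- Budget(accountId = "123456789", billingId = "987654321",
                 budgetAmount = "1000000", currencyCode = "USD")
}
}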
|
/googleadexchangebuyerv14.auto/man/Budget.Rd
|
permissive
|
Phippsy/autoGoogleAPI
|
R
| false | true | 924 |
rd
|
# the function receives a matrix as an argument
# and returns a list of 4 functions
# needed by the cacheSolve function
makeCacheMatrix <- function(x = matrix()) {
# m is the inverted matrix that will be returned
# initialized to NULL
m <- NULL
# in case the matrix passed as a parameter to makeCacheMatrix (i.e. "x")
# should be changed, x is set to the new value, in the parent environment
# so as not to return the inverse matrix computed for the previous x
# but to compute a new inverse for the new x
# and m is set to NULL again, in the environment of makeCacheMatrix, not just the one
# of the set function, since both m and x will need to be used by the
# following functions
set <- function(y){
x <<- y
m <<- NULL
}
# get stores the value of x (the matrix)
get <- function() {x}
# setinverse assigns the value of the inverse matrix to m
# in the makeCacheMatrix environment, computed in the cacheSolve function
setinverse <- function(solve)
m <<- solve
# stores the value of the inverted matrix
getinverse <- function() {m}
# returns a list of 4 functions needed by the cacheSolve function below
list(set=set, get=get,
getinverse=getinverse,
setinverse=setinverse)
}
## returns the inverse of a matrix
# if the value was already computed, it will return the value
# stored by the makeCacheMatrix function
# else it will compute it
cacheSolve <- function(x, ...) {
## m ( the value to be returned by this function) is set to the value
# stored by the makeCacheMatrix function
m <- x$getinverse()
# check to see if the value was already computed so as not to make the calculations again
# if it was, it will return a message to signal to the user that it returns the already computed inverse matrix
if(!is.null(m)) {
message("getting cached data")
return(m)
}
# if it was not already computed, data parameter is assigned the value of the matrix that needs to be inverted
# stored in the makeCacheMatrix function
data <- x$get()
# the matrix is inverted by the solve function, with the data parameter passed to it
# the result is assigned to m, that will be returned by the function
m <- solve(data, ...)
# the result is then passed to the makeCacheMatrix to be stored
x$setinverse(m)
# the inverse of the matrix , m, will be returned
m
}
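# Minimal usage sketch (not part of the original assignment file); the 2x2 matrix
# below is an arbitrary invertible example chosen for illustration.
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 4), nrow = 2))
cacheSolve(cm)   # first call computes the inverse via solve()
cacheSolve(cm)   # second call prints "getting cached data" and returns the cached inverse
cm$set(matrix(c(1, 2, 3, 4), nrow = 2))  # set() replaces the matrix and clears the cache
cacheSolve(cm)   # recomputed for the new matrix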
|
/cachematrix.R
|
no_license
|
AdrianBadica/ProgrammingAssignment2
|
R
| false | false | 2,475 |
r
|